From edd163687ea59f01d6b43c9e1fdaa0126fa30191 Mon Sep 17 00:00:00 2001 From: "Stephen M. Cameron" Date: Tue, 8 Dec 2009 14:09:11 -0800 Subject: [SCSI] hpsa: add driver for HP Smart Array controllers. This driver supports a subset of HP Smart Array Controllers. It is a SCSI alternative to the cciss driver. [akpm@linux-foundation.org: avoid helpful cleanup patches] [achiang@hp.com: make device attrs static] [akpm@linux-foundation.org: msleep() does set_current_state() itself] Signed-off-by: Stephen M. Cameron Signed-off-by: Mike Miller Signed-off-by: Alex Chiang Signed-off-by: Andrew Morton Signed-off-by: James Bottomley --- drivers/scsi/Kconfig | 10 + drivers/scsi/Makefile | 1 + drivers/scsi/hpsa.c | 3531 +++++++++++++++++++++++++++++++++++++++++++++++ drivers/scsi/hpsa.h | 273 ++++ drivers/scsi/hpsa_cmd.h | 326 +++++ 5 files changed, 4141 insertions(+) create mode 100644 drivers/scsi/hpsa.c create mode 100644 drivers/scsi/hpsa.h create mode 100644 drivers/scsi/hpsa_cmd.h (limited to 'drivers/scsi') diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig index 36900c71a59..9191d1ea645 100644 --- a/drivers/scsi/Kconfig +++ b/drivers/scsi/Kconfig @@ -388,6 +388,16 @@ config BLK_DEV_3W_XXXX_RAID Please read the comments at the top of . +config SCSI_HPSA + tristate "HP Smart Array SCSI driver" + depends on PCI && SCSI + help + This driver supports HP Smart Array Controllers (circa 2009). + It is a SCSI alternative to the cciss driver, which is a block + driver. Anyone wishing to use HP Smart Array controllers who + would prefer the devices be presented to linux as SCSI devices, + rather than as generic block devices should say Y here. + config SCSI_3W_9XXX tristate "3ware 9xxx SATA-RAID support" depends on PCI && SCSI diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile index 280d3c657d6..92a8c500b23 100644 --- a/drivers/scsi/Makefile +++ b/drivers/scsi/Makefile @@ -91,6 +91,7 @@ obj-$(CONFIG_SCSI_BFA_FC) += bfa/ obj-$(CONFIG_SCSI_PAS16) += pas16.o obj-$(CONFIG_SCSI_T128) += t128.o obj-$(CONFIG_SCSI_DMX3191D) += dmx3191d.o +obj-$(CONFIG_SCSI_HPSA) += hpsa.o obj-$(CONFIG_SCSI_DTC3280) += dtc.o obj-$(CONFIG_SCSI_SYM53C8XX_2) += sym53c8xx_2/ obj-$(CONFIG_SCSI_ZALON) += zalon7xx.o diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c new file mode 100644 index 00000000000..bb96fdd58e2 --- /dev/null +++ b/drivers/scsi/hpsa.c @@ -0,0 +1,3531 @@ +/* + * Disk Array driver for HP Smart Array SAS controllers + * Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; version 2 of the License. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or + * NON INFRINGEMENT. See the GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ * + * Questions/Comments/Bugfixes to iss_storagedev@hp.com + * + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "hpsa_cmd.h" +#include "hpsa.h" + +/* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ +#define HPSA_DRIVER_VERSION "1.0.0" +#define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")" + +/* How long to wait (in milliseconds) for board to go into simple mode */ +#define MAX_CONFIG_WAIT 30000 +#define MAX_IOCTL_CONFIG_WAIT 1000 + +/*define how many times we will try a command because of bus resets */ +#define MAX_CMD_RETRIES 3 + +/* Embedded module documentation macros - see modules.h */ +MODULE_AUTHOR("Hewlett-Packard Company"); +MODULE_DESCRIPTION("Driver for HP Smart Array Controller version " \ + HPSA_DRIVER_VERSION); +MODULE_SUPPORTED_DEVICE("HP Smart Array Controllers"); +MODULE_VERSION(HPSA_DRIVER_VERSION); +MODULE_LICENSE("GPL"); + +static int hpsa_allow_any; +module_param(hpsa_allow_any, int, S_IRUGO|S_IWUSR); +MODULE_PARM_DESC(hpsa_allow_any, + "Allow hpsa driver to access unknown HP Smart Array hardware"); + +/* define the PCI info for the cards we can control */ +static const struct pci_device_id hpsa_pci_device_id[] = { + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3223}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x3234}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSC, 0x103C, 0x323D}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3241}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3243}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324a}, + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324b}, + {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0}, + {0,} +}; + +MODULE_DEVICE_TABLE(pci, hpsa_pci_device_id); + +/* board_id = Subsystem Device ID & Vendor ID + * product = Marketing Name for the board + * access = Address of the struct of function pointers + */ +static struct board_type products[] = { + {0x3223103C, "Smart Array P800", &SA5_access}, + {0x3234103C, "Smart Array P400", &SA5_access}, + {0x323d103c, "Smart Array P700M", &SA5_access}, + {0x3241103C, "Smart Array P212", &SA5_access}, + {0x3243103C, "Smart Array P410", &SA5_access}, + {0x3245103C, "Smart Array P410i", &SA5_access}, + {0x3247103C, "Smart Array P411", &SA5_access}, + {0x3249103C, "Smart Array P812", &SA5_access}, + {0x324a103C, "Smart Array P712m", &SA5_access}, + {0x324b103C, "Smart Array P711m", &SA5_access}, + {0xFFFF103C, "Unknown Smart Array", &SA5_access}, +}; + +static int number_of_controllers; + +static irqreturn_t do_hpsa_intr(int irq, void *dev_id); +static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); +static void start_io(struct ctlr_info *h); + +#ifdef CONFIG_COMPAT +static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); +#endif + +static void cmd_free(struct ctlr_info *h, struct CommandList *c); +static void cmd_special_free(struct ctlr_info *h, struct CommandList *c); +static struct CommandList *cmd_alloc(struct ctlr_info *h); +static struct CommandList *cmd_special_alloc(struct 
ctlr_info *h); +static void fill_cmd(struct CommandList *c, __u8 cmd, struct ctlr_info *h, + void *buff, size_t size, __u8 page_code, unsigned char *scsi3addr, + int cmd_type); + +static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, + void (*done)(struct scsi_cmnd *)); + +static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd); +static int hpsa_slave_alloc(struct scsi_device *sdev); +static void hpsa_slave_destroy(struct scsi_device *sdev); + +static ssize_t raid_level_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t lunid_show(struct device *dev, + struct device_attribute *attr, char *buf); +static ssize_t unique_id_show(struct device *dev, + struct device_attribute *attr, char *buf); +static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno); +static ssize_t host_store_rescan(struct device *dev, + struct device_attribute *attr, const char *buf, size_t count); +static int check_for_unit_attention(struct ctlr_info *h, + struct CommandList *c); +static void check_ioctl_unit_attention(struct ctlr_info *h, + struct CommandList *c); + +static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL); +static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL); +static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL); +static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); + +static struct device_attribute *hpsa_sdev_attrs[] = { + &dev_attr_raid_level, + &dev_attr_lunid, + &dev_attr_unique_id, + NULL, +}; + +static struct device_attribute *hpsa_shost_attrs[] = { + &dev_attr_rescan, + NULL, +}; + +static struct scsi_host_template hpsa_driver_template = { + .module = THIS_MODULE, + .name = "hpsa", + .proc_name = "hpsa", + .queuecommand = hpsa_scsi_queue_command, + .can_queue = 512, + .this_id = -1, + .sg_tablesize = MAXSGENTRIES, + .cmd_per_lun = 512, + .use_clustering = ENABLE_CLUSTERING, + .eh_device_reset_handler = hpsa_eh_device_reset_handler, + .ioctl = hpsa_ioctl, + .slave_alloc = hpsa_slave_alloc, + .slave_destroy = hpsa_slave_destroy, +#ifdef CONFIG_COMPAT + .compat_ioctl = hpsa_compat_ioctl, +#endif + .sdev_attrs = hpsa_sdev_attrs, + .shost_attrs = hpsa_shost_attrs, +}; + +static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev) +{ + unsigned long *priv = shost_priv(sdev->host); + return (struct ctlr_info *) *priv; +} + +static struct task_struct *hpsa_scan_thread; +static DEFINE_MUTEX(hpsa_scan_mutex); +static LIST_HEAD(hpsa_scan_q); +static int hpsa_scan_func(void *data); + +/** + * add_to_scan_list() - add controller to rescan queue + * @h: Pointer to the controller. + * + * Adds the controller to the rescan queue if not already on the queue. + * + * returns 1 if added to the queue, 0 if skipped (could be on the + * queue already, or the controller could be initializing or shutting + * down). + **/ +static int add_to_scan_list(struct ctlr_info *h) +{ + struct ctlr_info *test_h; + int found = 0; + int ret = 0; + + if (h->busy_initializing) + return 0; + + /* + * If we don't get the lock, it means the driver is unloading + * and there's no point in scheduling a new scan. 
+ */ + if (!mutex_trylock(&h->busy_shutting_down)) + return 0; + + mutex_lock(&hpsa_scan_mutex); + list_for_each_entry(test_h, &hpsa_scan_q, scan_list) { + if (test_h == h) { + found = 1; + break; + } + } + if (!found && !h->busy_scanning) { + INIT_COMPLETION(h->scan_wait); + list_add_tail(&h->scan_list, &hpsa_scan_q); + ret = 1; + } + mutex_unlock(&hpsa_scan_mutex); + mutex_unlock(&h->busy_shutting_down); + + return ret; +} + +/** + * remove_from_scan_list() - remove controller from rescan queue + * @h: Pointer to the controller. + * + * Removes the controller from the rescan queue if present. Blocks if + * the controller is currently conducting a rescan. The controller + * can be in one of three states: + * 1. Doesn't need a scan + * 2. On the scan list, but not scanning yet (we remove it) + * 3. Busy scanning (and not on the list). In this case we want to wait for + * the scan to complete to make sure the scanning thread for this + * controller is completely idle. + **/ +static void remove_from_scan_list(struct ctlr_info *h) +{ + struct ctlr_info *test_h, *tmp_h; + + mutex_lock(&hpsa_scan_mutex); + list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) { + if (test_h == h) { /* state 2. */ + list_del(&h->scan_list); + complete_all(&h->scan_wait); + mutex_unlock(&hpsa_scan_mutex); + return; + } + } + if (h->busy_scanning) { /* state 3. */ + mutex_unlock(&hpsa_scan_mutex); + wait_for_completion(&h->scan_wait); + } else { /* state 1, nothing to do. */ + mutex_unlock(&hpsa_scan_mutex); + } +} + +/* hpsa_scan_func() - kernel thread used to rescan controllers + * @data: Ignored. + * + * A kernel thread used scan for drive topology changes on + * controllers. The thread processes only one controller at a time + * using a queue. Controllers are added to the queue using + * add_to_scan_list() and removed from the queue either after done + * processing or using remove_from_scan_list(). + * + * returns 0. + **/ +static int hpsa_scan_func(__attribute__((unused)) void *data) +{ + struct ctlr_info *h; + int host_no; + + while (1) { + set_current_state(TASK_INTERRUPTIBLE); + schedule(); + if (kthread_should_stop()) + break; + + while (1) { + mutex_lock(&hpsa_scan_mutex); + if (list_empty(&hpsa_scan_q)) { + mutex_unlock(&hpsa_scan_mutex); + break; + } + h = list_entry(hpsa_scan_q.next, struct ctlr_info, + scan_list); + list_del(&h->scan_list); + h->busy_scanning = 1; + mutex_unlock(&hpsa_scan_mutex); + host_no = h->scsi_host ? h->scsi_host->host_no : -1; + hpsa_update_scsi_devices(h, host_no); + complete_all(&h->scan_wait); + mutex_lock(&hpsa_scan_mutex); + h->busy_scanning = 0; + mutex_unlock(&hpsa_scan_mutex); + } + } + return 0; +} + +static int check_for_unit_attention(struct ctlr_info *h, + struct CommandList *c) +{ + if (c->err_info->SenseInfo[2] != UNIT_ATTENTION) + return 0; + + switch (c->err_info->SenseInfo[12]) { + case STATE_CHANGED: + dev_warn(&h->pdev->dev, "hpsa%d: a state change " + "detected, command retried\n", h->ctlr); + break; + case LUN_FAILED: + dev_warn(&h->pdev->dev, "hpsa%d: LUN failure " + "detected, action required\n", h->ctlr); + break; + case REPORT_LUNS_CHANGED: + dev_warn(&h->pdev->dev, "hpsa%d: report LUN data " + "changed\n", h->ctlr); + /* + * Here, we could call add_to_scan_list and wake up the scan thread, + * except that it's quite likely that we will get more than one + * REPORT_LUNS_CHANGED condition in quick succession, which means + * that those which occur after the first one will likely happen + * *during* the hpsa_scan_thread's rescan. 
And the rescan code is not + * robust enough to restart in the middle, undoing what it has already + * done, and it's not clear that it's even possible to do this, since + * part of what it does is notify the SCSI mid layer, which starts + * doing it's own i/o to read partition tables and so on, and the + * driver doesn't have visibility to know what might need undoing. + * In any event, if possible, it is horribly complicated to get right + * so we just don't do it for now. + * + * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012. + */ + break; + case POWER_OR_RESET: + dev_warn(&h->pdev->dev, "hpsa%d: a power on " + "or device reset detected\n", h->ctlr); + break; + case UNIT_ATTENTION_CLEARED: + dev_warn(&h->pdev->dev, "hpsa%d: unit attention " + "cleared by another initiator\n", h->ctlr); + break; + default: + dev_warn(&h->pdev->dev, "hpsa%d: unknown " + "unit attention detected\n", h->ctlr); + break; + } + return 1; +} + +static ssize_t host_store_rescan(struct device *dev, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ctlr_info *h; + struct Scsi_Host *shost = class_to_shost(dev); + unsigned long *priv = shost_priv(shost); + h = (struct ctlr_info *) *priv; + if (add_to_scan_list(h)) { + wake_up_process(hpsa_scan_thread); + wait_for_completion_interruptible(&h->scan_wait); + } + return count; +} + +/* Enqueuing and dequeuing functions for cmdlists. */ +static inline void addQ(struct hlist_head *list, struct CommandList *c) +{ + hlist_add_head(&c->list, list); +} + +static void enqueue_cmd_and_start_io(struct ctlr_info *h, + struct CommandList *c) +{ + unsigned long flags; + spin_lock_irqsave(&h->lock, flags); + addQ(&h->reqQ, c); + h->Qdepth++; + start_io(h); + spin_unlock_irqrestore(&h->lock, flags); +} + +static inline void removeQ(struct CommandList *c) +{ + if (WARN_ON(hlist_unhashed(&c->list))) + return; + hlist_del_init(&c->list); +} + +static inline int is_hba_lunid(unsigned char scsi3addr[]) +{ + return memcmp(scsi3addr, RAID_CTLR_LUNID, 8) == 0; +} + +static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[]) +{ + return (scsi3addr[3] & 0xC0) == 0x40; +} + +static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG", + "UNKNOWN" +}; +#define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1) + +static ssize_t raid_level_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + ssize_t l = 0; + int rlevel; + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + + /* Is this even a logical drive? 
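+ * (is_logical_dev_addr_mode(): byte 3 of the 8-byte address carries the + * logical-volume addressing mode bits, (scsi3addr[3] & 0xC0) == 0x40; + * anything else gets "N/A" below.)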
*/ + if (!is_logical_dev_addr_mode(hdev->scsi3addr)) { + spin_unlock_irqrestore(&h->lock, flags); + l = snprintf(buf, PAGE_SIZE, "N/A\n"); + return l; + } + + rlevel = hdev->raid_level; + spin_unlock_irqrestore(&h->lock, flags); + if (rlevel < 0 || rlevel > RAID_UNKNOWN) + rlevel = RAID_UNKNOWN; + l = snprintf(buf, PAGE_SIZE, "RAID %s\n", raid_label[rlevel]); + return l; +} + +static ssize_t lunid_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + unsigned char lunid[8]; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + memcpy(lunid, hdev->scsi3addr, sizeof(lunid)); + spin_unlock_irqrestore(&h->lock, flags); + return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n", + lunid[0], lunid[1], lunid[2], lunid[3], + lunid[4], lunid[5], lunid[6], lunid[7]); +} + +static ssize_t unique_id_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct ctlr_info *h; + struct scsi_device *sdev; + struct hpsa_scsi_dev_t *hdev; + unsigned long flags; + unsigned char sn[16]; + + sdev = to_scsi_device(dev); + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->lock, flags); + hdev = sdev->hostdata; + if (!hdev) { + spin_unlock_irqrestore(&h->lock, flags); + return -ENODEV; + } + memcpy(sn, hdev->device_id, sizeof(sn)); + spin_unlock_irqrestore(&h->lock, flags); + return snprintf(buf, 16 * 2 + 2, + "%02X%02X%02X%02X%02X%02X%02X%02X" + "%02X%02X%02X%02X%02X%02X%02X%02X\n", + sn[0], sn[1], sn[2], sn[3], + sn[4], sn[5], sn[6], sn[7], + sn[8], sn[9], sn[10], sn[11], + sn[12], sn[13], sn[14], sn[15]); +} + +static int hpsa_find_target_lun(struct ctlr_info *h, + unsigned char scsi3addr[], int bus, int *target, int *lun) +{ + /* finds an unused bus, target, lun for a new physical device + * assumes h->devlock is held + */ + int i, found = 0; + DECLARE_BITMAP(lun_taken, HPSA_MAX_SCSI_DEVS_PER_HBA); + + memset(&lun_taken[0], 0, HPSA_MAX_SCSI_DEVS_PER_HBA >> 3); + + for (i = 0; i < h->ndevices; i++) { + if (h->dev[i]->bus == bus && h->dev[i]->target != -1) + set_bit(h->dev[i]->target, lun_taken); + } + + for (i = 0; i < HPSA_MAX_SCSI_DEVS_PER_HBA; i++) { + if (!test_bit(i, lun_taken)) { + /* *bus = 1; */ + *target = i; + *lun = 0; + found = 1; + break; + } + } + return !found; +} + +/* Add an entry into h->dev[] array. */ +static int hpsa_scsi_add_entry(struct ctlr_info *h, int hostno, + struct hpsa_scsi_dev_t *device, + struct hpsa_scsi_dev_t *added[], int *nadded) +{ + /* assumes h->devlock is held */ + int n = h->ndevices; + int i; + unsigned char addr1[8], addr2[8]; + struct hpsa_scsi_dev_t *sd; + + if (n >= HPSA_MAX_SCSI_DEVS_PER_HBA) { + dev_err(&h->pdev->dev, "too many devices, some will be " + "inaccessible.\n"); + return -1; + } + + /* physical devices do not have lun or target assigned until now. */ + if (device->lun != -1) + /* Logical device, lun is already assigned. */ + goto lun_assigned; + + /* If this device a non-zero lun of a multi-lun device + * byte 4 of the 8-byte LUN addr will contain the logical + * unit no, zero otherise. 
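+ * For example, the second LUN of a multi-lun device differs from its + * LUN 0 only in byte 4; the loop below compares addresses with byte 4 + * masked to zero, reuses the matching device's bus and target, and + * takes the lun straight from byte 4.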
+ */ + if (device->scsi3addr[4] == 0) { + /* This is not a non-zero lun of a multi-lun device */ + if (hpsa_find_target_lun(h, device->scsi3addr, + device->bus, &device->target, &device->lun) != 0) + return -1; + goto lun_assigned; + } + + /* This is a non-zero lun of a multi-lun device. + * Search through our list and find the device which + * has the same 8 byte LUN address, excepting byte 4. + * Assign the same bus and target for this new LUN. + * Use the logical unit number from the firmware. + */ + memcpy(addr1, device->scsi3addr, 8); + addr1[4] = 0; + for (i = 0; i < n; i++) { + sd = h->dev[i]; + memcpy(addr2, sd->scsi3addr, 8); + addr2[4] = 0; + /* differ only in byte 4? */ + if (memcmp(addr1, addr2, 8) == 0) { + device->bus = sd->bus; + device->target = sd->target; + device->lun = device->scsi3addr[4]; + break; + } + } + if (device->lun == -1) { + dev_warn(&h->pdev->dev, "physical device with no LUN=0," + " suspect firmware bug or unsupported hardware " + "configuration.\n"); + return -1; + } + +lun_assigned: + + h->dev[n] = device; + h->ndevices++; + added[*nadded] = device; + (*nadded)++; + + /* initially, (before registering with scsi layer) we don't + * know our hostno and we don't want to print anything first + * time anyway (the scsi layer's inquiries will show that info) + */ + /* if (hostno != -1) */ + dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d added.\n", + scsi_device_type(device->devtype), hostno, + device->bus, device->target, device->lun); + return 0; +} + +/* Remove an entry from h->dev[] array. */ +static void hpsa_scsi_remove_entry(struct ctlr_info *h, int hostno, int entry, + struct hpsa_scsi_dev_t *removed[], int *nremoved) +{ + /* assumes h->devlock is held */ + int i; + struct hpsa_scsi_dev_t *sd; + + if (entry < 0 || entry >= HPSA_MAX_SCSI_DEVS_PER_HBA) + BUG(); + + sd = h->dev[entry]; + removed[*nremoved] = h->dev[entry]; + (*nremoved)++; + + for (i = entry; i < h->ndevices-1; i++) + h->dev[i] = h->dev[i+1]; + h->ndevices--; + dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d removed.\n", + scsi_device_type(sd->devtype), hostno, sd->bus, sd->target, + sd->lun); +} + +#define SCSI3ADDR_EQ(a, b) ( \ + (a)[7] == (b)[7] && \ + (a)[6] == (b)[6] && \ + (a)[5] == (b)[5] && \ + (a)[4] == (b)[4] && \ + (a)[3] == (b)[3] && \ + (a)[2] == (b)[2] && \ + (a)[1] == (b)[1] && \ + (a)[0] == (b)[0]) + +static void fixup_botched_add(struct ctlr_info *h, + struct hpsa_scsi_dev_t *added) +{ + /* called when scsi_add_device fails in order to re-adjust + * h->dev[] to match the mid layer's view. + */ + unsigned long flags; + int i, j; + + spin_lock_irqsave(&h->lock, flags); + for (i = 0; i < h->ndevices; i++) { + if (h->dev[i] == added) { + for (j = i; j < h->ndevices-1; j++) + h->dev[j] = h->dev[j+1]; + h->ndevices--; + break; + } + } + spin_unlock_irqrestore(&h->lock, flags); + kfree(added); +} + +static inline int device_is_the_same(struct hpsa_scsi_dev_t *dev1, + struct hpsa_scsi_dev_t *dev2) +{ + if ((is_logical_dev_addr_mode(dev1->scsi3addr) || + (dev1->lun != -1 && dev2->lun != -1)) && + dev1->devtype != 0x0C) + return (memcmp(dev1, dev2, sizeof(*dev1)) == 0); + + /* we compare everything except lun and target as these + * are not yet assigned. 
Compare parts likely + * to differ first + */ + if (memcmp(dev1->scsi3addr, dev2->scsi3addr, + sizeof(dev1->scsi3addr)) != 0) + return 0; + if (memcmp(dev1->device_id, dev2->device_id, + sizeof(dev1->device_id)) != 0) + return 0; + if (memcmp(dev1->model, dev2->model, sizeof(dev1->model)) != 0) + return 0; + if (memcmp(dev1->vendor, dev2->vendor, sizeof(dev1->vendor)) != 0) + return 0; + if (memcmp(dev1->revision, dev2->revision, sizeof(dev1->revision)) != 0) + return 0; + if (dev1->devtype != dev2->devtype) + return 0; + if (dev1->raid_level != dev2->raid_level) + return 0; + if (dev1->bus != dev2->bus) + return 0; + return 1; +} + +/* Find needle in haystack. If exact match found, return DEVICE_SAME, + * and return needle location in *index. If scsi3addr matches, but not + * vendor, model, serial num, etc. return DEVICE_CHANGED, and return needle + * location in *index. If needle not found, return DEVICE_NOT_FOUND. + */ +static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle, + struct hpsa_scsi_dev_t *haystack[], int haystack_size, + int *index) +{ + int i; +#define DEVICE_NOT_FOUND 0 +#define DEVICE_CHANGED 1 +#define DEVICE_SAME 2 + for (i = 0; i < haystack_size; i++) { + if (SCSI3ADDR_EQ(needle->scsi3addr, haystack[i]->scsi3addr)) { + *index = i; + if (device_is_the_same(needle, haystack[i])) + return DEVICE_SAME; + else + return DEVICE_CHANGED; + } + } + *index = -1; + return DEVICE_NOT_FOUND; +} + +static int adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno, + struct hpsa_scsi_dev_t *sd[], int nsds) +{ + /* sd contains scsi3 addresses and devtypes, and inquiry + * data. This function takes what's in sd to be the current + * reality and updates h->dev[] to reflect that reality. + */ + int i, entry, device_change, changes = 0; + struct hpsa_scsi_dev_t *csd; + unsigned long flags; + struct hpsa_scsi_dev_t **added, **removed; + int nadded, nremoved; + struct Scsi_Host *sh = NULL; + + added = kzalloc(sizeof(*added) * HPSA_MAX_SCSI_DEVS_PER_HBA, + GFP_KERNEL); + removed = kzalloc(sizeof(*removed) * HPSA_MAX_SCSI_DEVS_PER_HBA, + GFP_KERNEL); + + if (!added || !removed) { + dev_warn(&h->pdev->dev, "out of memory in " + "adjust_hpsa_scsi_table\n"); + goto free_and_out; + } + + spin_lock_irqsave(&h->devlock, flags); + + /* find any devices in h->dev[] that are not in + * sd[] and remove them from h->dev[], and for any + * devices which have changed, remove the old device + * info and add the new device info. + */ + i = 0; + nremoved = 0; + nadded = 0; + while (i < h->ndevices) { + csd = h->dev[i]; + device_change = hpsa_scsi_find_entry(csd, sd, nsds, &entry); + if (device_change == DEVICE_NOT_FOUND) { + changes++; + hpsa_scsi_remove_entry(h, hostno, i, + removed, &nremoved); + continue; /* remove ^^^, hence i not incremented */ + } else if (device_change == DEVICE_CHANGED) { + changes++; + hpsa_scsi_remove_entry(h, hostno, i, + removed, &nremoved); + (void) hpsa_scsi_add_entry(h, hostno, sd[entry], + added, &nadded); + /* add can't fail, we just removed one. */ + sd[entry] = NULL; /* prevent it from being freed */ + } + i++; + } + + /* Now, make sure every device listed in sd[] is also + * listed in h->dev[], adding them if they aren't found + */ + + for (i = 0; i < nsds; i++) { + if (!sd[i]) /* if already added above. 
*/ + continue; + device_change = hpsa_scsi_find_entry(sd[i], h->dev, + h->ndevices, &entry); + if (device_change == DEVICE_NOT_FOUND) { + changes++; + if (hpsa_scsi_add_entry(h, hostno, sd[i], + added, &nadded) != 0) + break; + sd[i] = NULL; /* prevent from being freed later. */ + } else if (device_change == DEVICE_CHANGED) { + /* should never happen... */ + changes++; + dev_warn(&h->pdev->dev, + "device unexpectedly changed.\n"); + /* but if it does happen, we just ignore that device */ + } + } + spin_unlock_irqrestore(&h->devlock, flags); + + /* Don't notify scsi mid layer of any changes the first time through + * (or if there are no changes) scsi_scan_host will do it later the + * first time through. + */ + if (hostno == -1 || !changes) + goto free_and_out; + + sh = h->scsi_host; + /* Notify scsi mid layer of any removed devices */ + for (i = 0; i < nremoved; i++) { + struct scsi_device *sdev = + scsi_device_lookup(sh, removed[i]->bus, + removed[i]->target, removed[i]->lun); + if (sdev != NULL) { + scsi_remove_device(sdev); + scsi_device_put(sdev); + } else { + /* We don't expect to get here. + * future cmds to this device will get selection + * timeout as if the device was gone. + */ + dev_warn(&h->pdev->dev, "didn't find c%db%dt%dl%d " + " for removal.", hostno, removed[i]->bus, + removed[i]->target, removed[i]->lun); + } + kfree(removed[i]); + removed[i] = NULL; + } + + /* Notify scsi mid layer of any added devices */ + for (i = 0; i < nadded; i++) { + if (scsi_add_device(sh, added[i]->bus, + added[i]->target, added[i]->lun) == 0) + continue; + dev_warn(&h->pdev->dev, "scsi_add_device c%db%dt%dl%d failed, " + "device not added.\n", hostno, added[i]->bus, + added[i]->target, added[i]->lun); + /* now we have to remove it from h->dev, + * since it didn't get added to scsi mid layer + */ + fixup_botched_add(h, added[i]); + } + +free_and_out: + kfree(added); + kfree(removed); + return 0; +} + +/* + * Lookup bus/target/lun and retrun corresponding struct hpsa_scsi_dev_t * + * Assume's h->devlock is held. + */ +static struct hpsa_scsi_dev_t *lookup_hpsa_scsi_dev(struct ctlr_info *h, + int bus, int target, int lun) +{ + int i; + struct hpsa_scsi_dev_t *sd; + + for (i = 0; i < h->ndevices; i++) { + sd = h->dev[i]; + if (sd->bus == bus && sd->target == target && sd->lun == lun) + return sd; + } + return NULL; +} + +/* link sdev->hostdata to our per-device structure. */ +static int hpsa_slave_alloc(struct scsi_device *sdev) +{ + struct hpsa_scsi_dev_t *sd; + unsigned long flags; + struct ctlr_info *h; + + h = sdev_to_hba(sdev); + spin_lock_irqsave(&h->devlock, flags); + sd = lookup_hpsa_scsi_dev(h, sdev_channel(sdev), + sdev_id(sdev), sdev->lun); + if (sd != NULL) + sdev->hostdata = sd; + spin_unlock_irqrestore(&h->devlock, flags); + return 0; +} + +static void hpsa_slave_destroy(struct scsi_device *sdev) +{ + return; /* nothing to do. 
*/ +} + +static void hpsa_scsi_setup(struct ctlr_info *h) +{ + h->ndevices = 0; + h->scsi_host = NULL; + spin_lock_init(&h->devlock); + return; +} + +static void complete_scsi_command(struct CommandList *cp, + int timeout, __u32 tag) +{ + struct scsi_cmnd *cmd; + struct ctlr_info *h; + struct ErrorInfo *ei; + + unsigned char sense_key; + unsigned char asc; /* additional sense code */ + unsigned char ascq; /* additional sense code qualifier */ + + ei = cp->err_info; + cmd = (struct scsi_cmnd *) cp->scsi_cmd; + h = cp->h; + + scsi_dma_unmap(cmd); /* undo the DMA mappings */ + + cmd->result = (DID_OK << 16); /* host byte */ + cmd->result |= (COMMAND_COMPLETE << 8); /* msg byte */ + cmd->result |= (ei->ScsiStatus << 1); + + /* copy the sense data whether we need to or not. */ + memcpy(cmd->sense_buffer, ei->SenseInfo, + ei->SenseLen > SCSI_SENSE_BUFFERSIZE ? + SCSI_SENSE_BUFFERSIZE : + ei->SenseLen); + scsi_set_resid(cmd, ei->ResidualCnt); + + if (ei->CommandStatus == 0) { + cmd->scsi_done(cmd); + cmd_free(h, cp); + return; + } + + /* an error has occurred */ + switch (ei->CommandStatus) { + + case CMD_TARGET_STATUS: + if (ei->ScsiStatus) { + /* Get sense key */ + sense_key = 0xf & ei->SenseInfo[2]; + /* Get additional sense code */ + asc = ei->SenseInfo[12]; + /* Get addition sense code qualifier */ + ascq = ei->SenseInfo[13]; + } + + if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { + if (check_for_unit_attention(h, cp)) { + cmd->result = DID_SOFT_ERROR << 16; + break; + } + if (sense_key == ILLEGAL_REQUEST) { + /* + * SCSI REPORT_LUNS is commonly unsupported on + * Smart Array. Suppress noisy complaint. + */ + if (cp->Request.CDB[0] == REPORT_LUNS) + break; + + /* If ASC/ASCQ indicate Logical Unit + * Not Supported condition, + */ + if ((asc == 0x25) && (ascq == 0x0)) { + dev_warn(&h->pdev->dev, "cp %p " + "has check condition\n", cp); + break; + } + } + + if (sense_key == NOT_READY) { + /* If Sense is Not Ready, Logical Unit + * Not ready, Manual Intervention + * required + */ + if ((asc == 0x04) && (ascq == 0x03)) { + cmd->result = DID_NO_CONNECT << 16; + dev_warn(&h->pdev->dev, "cp %p " + "has check condition: unit " + "not ready, manual " + "intervention required\n", cp); + break; + } + } + + + /* Must be some other type of check condition */ + dev_warn(&h->pdev->dev, "cp %p has check condition: " + "unknown type: " + "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " + "Returning result: 0x%x, " + "cmd=[%02x %02x %02x %02x %02x " + "%02x %02x %02x %02x %02x]\n", + cp, sense_key, asc, ascq, + cmd->result, + cmd->cmnd[0], cmd->cmnd[1], + cmd->cmnd[2], cmd->cmnd[3], + cmd->cmnd[4], cmd->cmnd[5], + cmd->cmnd[6], cmd->cmnd[7], + cmd->cmnd[8], cmd->cmnd[9]); + break; + } + + + /* Problem was not a check condition + * Pass it up to the upper layers... + */ + if (ei->ScsiStatus) { + dev_warn(&h->pdev->dev, "cp %p has status 0x%x " + "Sense: 0x%x, ASC: 0x%x, ASCQ: 0x%x, " + "Returning result: 0x%x\n", + cp, ei->ScsiStatus, + sense_key, asc, ascq, + cmd->result); + } else { /* scsi status is zero??? How??? */ + dev_warn(&h->pdev->dev, "cp %p SCSI status was 0. " + "Returning no connection.\n", cp), + + /* Ordinarily, this case should never happen, + * but there is a bug in some released firmware + * revisions that allows it to happen if, for + * example, a 4100 backplane loses power and + * the tape drive is in it. We assume that + * it's a fatal error of some kind because we + * can't show that it wasn't. 
We will make it + * look like selection timeout since that is + * the most common reason for this to occur, + * and it's severe enough. + */ + + cmd->result = DID_NO_CONNECT << 16; + } + break; + + case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ + break; + case CMD_DATA_OVERRUN: + dev_warn(&h->pdev->dev, "cp %p has" + " completed with data overrun " + "reported\n", cp); + break; + case CMD_INVALID: { + /* print_bytes(cp, sizeof(*cp), 1, 0); + print_cmd(cp); */ + /* We get CMD_INVALID if you address a non-existent device + * instead of a selection timeout (no response). You will + * see this if you yank out a drive, then try to access it. + * This is kind of a shame because it means that any other + * CMD_INVALID (e.g. driver bug) will get interpreted as a + * missing target. */ + cmd->result = DID_NO_CONNECT << 16; + } + break; + case CMD_PROTOCOL_ERR: + dev_warn(&h->pdev->dev, "cp %p has " + "protocol error \n", cp); + break; + case CMD_HARDWARE_ERR: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "cp %p had hardware error\n", cp); + break; + case CMD_CONNECTION_LOST: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "cp %p had connection lost\n", cp); + break; + case CMD_ABORTED: + cmd->result = DID_ABORT << 16; + dev_warn(&h->pdev->dev, "cp %p was aborted with status 0x%x\n", + cp, ei->ScsiStatus); + break; + case CMD_ABORT_FAILED: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "cp %p reports abort failed\n", cp); + break; + case CMD_UNSOLICITED_ABORT: + cmd->result = DID_ABORT << 16; + dev_warn(&h->pdev->dev, "cp %p aborted do to an unsolicited " + "abort\n", cp); + break; + case CMD_TIMEOUT: + cmd->result = DID_TIME_OUT << 16; + dev_warn(&h->pdev->dev, "cp %p timedout\n", cp); + break; + default: + cmd->result = DID_ERROR << 16; + dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", + cp, ei->CommandStatus); + } + cmd->scsi_done(cmd); + cmd_free(h, cp); +} + +static int hpsa_scsi_detect(struct ctlr_info *h) +{ + struct Scsi_Host *sh; + int error; + + sh = scsi_host_alloc(&hpsa_driver_template, sizeof(h)); + if (sh == NULL) + goto fail; + + sh->io_port = 0; + sh->n_io_port = 0; + sh->this_id = -1; + sh->max_channel = 3; + sh->max_cmd_len = MAX_COMMAND_SIZE; + sh->max_lun = HPSA_MAX_LUN; + sh->max_id = HPSA_MAX_LUN; + h->scsi_host = sh; + sh->hostdata[0] = (unsigned long) h; + sh->irq = h->intr[SIMPLE_MODE_INT]; + sh->unique_id = sh->irq; + error = scsi_add_host(sh, &h->pdev->dev); + if (error) + goto fail_host_put; + scsi_scan_host(sh); + return 0; + + fail_host_put: + dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_add_host" + " failed for controller %d\n", h->ctlr); + scsi_host_put(sh); + return -1; + fail: + dev_err(&h->pdev->dev, "hpsa_scsi_detect: scsi_host_alloc" + " failed for controller %d\n", h->ctlr); + return -1; +} + +static void hpsa_pci_unmap(struct pci_dev *pdev, + struct CommandList *c, int sg_used, int data_direction) +{ + int i; + union u64bit addr64; + + for (i = 0; i < sg_used; i++) { + addr64.val32.lower = c->SG[i].Addr.lower; + addr64.val32.upper = c->SG[i].Addr.upper; + pci_unmap_single(pdev, (dma_addr_t) addr64.val, c->SG[i].Len, + data_direction); + } +} + +static void hpsa_map_one(struct pci_dev *pdev, + struct CommandList *cp, + unsigned char *buf, + size_t buflen, + int data_direction) +{ + __u64 addr64; + + if (buflen == 0 || data_direction == PCI_DMA_NONE) { + cp->Header.SGList = 0; + cp->Header.SGTotal = 0; + return; + } + + addr64 = (__u64) pci_map_single(pdev, buf, buflen, data_direction); + 
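/* split the 64-bit DMA address into the SG descriptor's 32-bit halves */ +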
cp->SG[0].Addr.lower = + (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); + cp->SG[0].Addr.upper = + (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); + cp->SG[0].Len = buflen; + cp->Header.SGList = (__u8) 1; /* no. SGs contig in this cmd */ + cp->Header.SGTotal = (__u16) 1; /* total sgs in this cmd list */ +} + +static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h, + struct CommandList *c) +{ + DECLARE_COMPLETION_ONSTACK(wait); + + c->waiting = &wait; + enqueue_cmd_and_start_io(h, c); + wait_for_completion(&wait); +} + +static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h, + struct CommandList *c, int data_direction) +{ + int retry_count = 0; + + do { + memset(c->err_info, 0, sizeof(c->err_info)); + hpsa_scsi_do_simple_cmd_core(h, c); + retry_count++; + } while (check_for_unit_attention(h, c) && retry_count <= 3); + hpsa_pci_unmap(h->pdev, c, 1, data_direction); +} + +static void hpsa_scsi_interpret_error(struct CommandList *cp) +{ + struct ErrorInfo *ei; + struct device *d = &cp->h->pdev->dev; + + ei = cp->err_info; + switch (ei->CommandStatus) { + case CMD_TARGET_STATUS: + dev_warn(d, "cmd %p has completed with errors\n", cp); + dev_warn(d, "cmd %p has SCSI Status = %x\n", cp, + ei->ScsiStatus); + if (ei->ScsiStatus == 0) + dev_warn(d, "SCSI status is abnormally zero. " + "(probably indicates selection timeout " + "reported incorrectly due to a known " + "firmware bug, circa July, 2001.)\n"); + break; + case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ + dev_info(d, "UNDERRUN\n"); + break; + case CMD_DATA_OVERRUN: + dev_warn(d, "cp %p has completed with data overrun\n", cp); + break; + case CMD_INVALID: { + /* controller unfortunately reports SCSI passthru's + * to non-existent targets as invalid commands. + */ + dev_warn(d, "cp %p is reported invalid (probably means " + "target device no longer present)\n", cp); + /* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); + print_cmd(cp); */ + } + break; + case CMD_PROTOCOL_ERR: + dev_warn(d, "cp %p has protocol error \n", cp); + break; + case CMD_HARDWARE_ERR: + /* cmd->result = DID_ERROR << 16; */ + dev_warn(d, "cp %p had hardware error\n", cp); + break; + case CMD_CONNECTION_LOST: + dev_warn(d, "cp %p had connection lost\n", cp); + break; + case CMD_ABORTED: + dev_warn(d, "cp %p was aborted\n", cp); + break; + case CMD_ABORT_FAILED: + dev_warn(d, "cp %p reports abort failed\n", cp); + break; + case CMD_UNSOLICITED_ABORT: + dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); + break; + case CMD_TIMEOUT: + dev_warn(d, "cp %p timed out\n", cp); + break; + default: + dev_warn(d, "cp %p returned unknown status %x\n", cp, + ei->CommandStatus); + } +} + +static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, + unsigned char page, unsigned char *buf, + unsigned char bufsize) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_special_alloc(h); + + if (c == NULL) { /* trouble... 
*/ + dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); + return -1; + } + + fill_cmd(c, HPSA_INQUIRY, h, buf, bufsize, page, scsi3addr, TYPE_CMD); + hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + ei = c->err_info; + if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(c); + rc = -1; + } + cmd_special_free(h, c); + return rc; +} + +static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) +{ + int rc = IO_OK; + struct CommandList *c; + struct ErrorInfo *ei; + + c = cmd_special_alloc(h); + + if (c == NULL) { /* trouble... */ + dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); + return -1; + } + + fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG); + hpsa_scsi_do_simple_cmd_core(h, c); + /* no unmap needed here because no data xfer. */ + + ei = c->err_info; + if (ei->CommandStatus != 0) { + hpsa_scsi_interpret_error(c); + rc = -1; + } + cmd_special_free(h, c); + return rc; +} + +static void hpsa_get_raid_level(struct ctlr_info *h, + unsigned char *scsi3addr, unsigned char *raid_level) +{ + int rc; + unsigned char *buf; + + *raid_level = RAID_UNKNOWN; + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return; + rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64); + if (rc == 0) + *raid_level = buf[8]; + if (*raid_level > RAID_UNKNOWN) + *raid_level = RAID_UNKNOWN; + kfree(buf); + return; +} + +/* Get the device id from inquiry page 0x83 */ +static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr, + unsigned char *device_id, int buflen) +{ + int rc; + unsigned char *buf; + + if (buflen > 16) + buflen = 16; + buf = kzalloc(64, GFP_KERNEL); + if (!buf) + return -1; + rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); + if (rc == 0) + memcpy(device_id, &buf[8], buflen); + kfree(buf); + return rc != 0; +} + +static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical, + struct ReportLUNdata *buf, int bufsize, + int extended_response) +{ + int rc = IO_OK; + struct CommandList *c; + unsigned char scsi3addr[8]; + struct ErrorInfo *ei; + + c = cmd_special_alloc(h); + if (c == NULL) { /* trouble... */ + dev_err(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); + return -1; + } + + memset(&scsi3addr[0], 0, 8); /* address the controller */ + + fill_cmd(c, logical ? 
HPSA_REPORT_LOG : HPSA_REPORT_PHYS, h, + buf, bufsize, 0, scsi3addr, TYPE_CMD); + if (extended_response) + c->Request.CDB[1] = extended_response; + hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); + ei = c->err_info; + if (ei->CommandStatus != 0 && + ei->CommandStatus != CMD_DATA_UNDERRUN) { + hpsa_scsi_interpret_error(c); + rc = -1; + } + cmd_special_free(h, c); + return rc; +} + +static inline int hpsa_scsi_do_report_phys_luns(struct ctlr_info *h, + struct ReportLUNdata *buf, + int bufsize, int extended_response) +{ + return hpsa_scsi_do_report_luns(h, 0, buf, bufsize, extended_response); +} + +static inline int hpsa_scsi_do_report_log_luns(struct ctlr_info *h, + struct ReportLUNdata *buf, int bufsize) +{ + return hpsa_scsi_do_report_luns(h, 1, buf, bufsize, 0); +} + +static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device, + int bus, int target, int lun) +{ + device->bus = bus; + device->target = target; + device->lun = lun; +} + +static int hpsa_update_device_info(struct ctlr_info *h, + unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device) +{ +#define OBDR_TAPE_INQ_SIZE 49 + unsigned char *inq_buff = NULL; + + inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); + if (!inq_buff) + goto bail_out; + + memset(inq_buff, 0, OBDR_TAPE_INQ_SIZE); + /* Do an inquiry to the device to see what it is. */ + if (hpsa_scsi_do_inquiry(h, scsi3addr, 0, inq_buff, + (unsigned char) OBDR_TAPE_INQ_SIZE) != 0) { + /* Inquiry failed (msg printed already) */ + dev_err(&h->pdev->dev, + "hpsa_update_device_info: inquiry failed\n"); + goto bail_out; + } + + /* As a side effect, record the firmware version number + * if we happen to be talking to the RAID controller. + */ + if (is_hba_lunid(scsi3addr)) + memcpy(h->firm_ver, &inq_buff[32], 4); + + this_device->devtype = (inq_buff[0] & 0x1f); + memcpy(this_device->scsi3addr, scsi3addr, 8); + memcpy(this_device->vendor, &inq_buff[8], + sizeof(this_device->vendor)); + memcpy(this_device->model, &inq_buff[16], + sizeof(this_device->model)); + memcpy(this_device->revision, &inq_buff[32], + sizeof(this_device->revision)); + memset(this_device->device_id, 0, + sizeof(this_device->device_id)); + hpsa_get_device_id(h, scsi3addr, this_device->device_id, + sizeof(this_device->device_id)); + + if (this_device->devtype == TYPE_DISK && + is_logical_dev_addr_mode(scsi3addr)) + hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); + else + this_device->raid_level = RAID_UNKNOWN; + + kfree(inq_buff); + return 0; + +bail_out: + kfree(inq_buff); + return 1; +} + +static unsigned char *msa2xxx_model[] = { + "MSA2012", + "MSA2024", + "MSA2312", + "MSA2324", + NULL, +}; + +static int is_msa2xxx(struct ctlr_info *h, struct hpsa_scsi_dev_t *device) +{ + int i; + + for (i = 0; msa2xxx_model[i]; i++) + if (strncmp(device->model, msa2xxx_model[i], + strlen(msa2xxx_model[i])) == 0) + return 1; + return 0; +} + +/* Helper function to assign bus, target, lun mapping of devices. + * Puts non-msa2xxx logical volumes on bus 0, msa2xxx logical + * volumes on bus 1, physical devices on bus 2. and the hba on bus 3. + * Logical drive target and lun are assigned at this time, but + * physical device lun and target assignment are deferred (assigned + * in hpsa_find_target_lun, called by hpsa_scsi_add_entry.) 
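+ * For logical volumes the 32-bit lunid (first four bytes of the address, + * little endian) supplies the mapping: MSA2xxx volumes take the target + * from bits 16-29 and the lun from the low byte, while other logical + * volumes take the target from the low 14 bits and always get lun 0.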
+ */ +static void figure_bus_target_lun(struct ctlr_info *h, + __u8 *lunaddrbytes, int *bus, int *target, int *lun, + struct hpsa_scsi_dev_t *device) +{ + + __u32 lunid; + + if (is_logical_dev_addr_mode(lunaddrbytes)) { + /* logical device */ + memcpy(&lunid, lunaddrbytes, sizeof(lunid)); + lunid = le32_to_cpu(lunid); + + if (is_msa2xxx(h, device)) { + *bus = 1; + *target = (lunid >> 16) & 0x3fff; + *lun = lunid & 0x00ff; + } else { + *bus = 0; + *lun = 0; + *target = lunid & 0x3fff; + } + } else { + /* physical device */ + if (is_hba_lunid(lunaddrbytes)) + *bus = 3; + else + *bus = 2; + *target = -1; + *lun = -1; /* we will fill these in later. */ + } +} + +/* + * If there is no lun 0 on a target, linux won't find any devices. + * For the MSA2xxx boxes, we have to manually detect the enclosure + * which is at lun zero, as CCISS_REPORT_PHYSICAL_LUNS doesn't report + * it for some reason. *tmpdevice is the target we're adding, + * this_device is a pointer into the current element of currentsd[] + * that we're building up in update_scsi_devices(), below. + * lunzerobits is a bitmap that tracks which targets already have a + * lun 0 assigned. + * Returns 1 if an enclosure was added, 0 if not. + */ +static int add_msa2xxx_enclosure_device(struct ctlr_info *h, + struct hpsa_scsi_dev_t *tmpdevice, + struct hpsa_scsi_dev_t *this_device, __u8 *lunaddrbytes, + int bus, int target, int lun, unsigned long lunzerobits[], + int *nmsa2xxx_enclosures) +{ + unsigned char scsi3addr[8]; + + if (test_bit(target, lunzerobits)) + return 0; /* There is already a lun 0 on this target. */ + + if (!is_logical_dev_addr_mode(lunaddrbytes)) + return 0; /* It's the logical targets that may lack lun 0. */ + + if (!is_msa2xxx(h, tmpdevice)) + return 0; /* It's only the MSA2xxx that have this problem. */ + + if (lun == 0) /* if lun is 0, then obviously we have a lun 0. */ + return 0; + + if (is_hba_lunid(scsi3addr)) + return 0; /* Don't add the RAID controller here. */ + +#define MAX_MSA2XXX_ENCLOSURES 32 + if (*nmsa2xxx_enclosures >= MAX_MSA2XXX_ENCLOSURES) { + dev_warn(&h->pdev->dev, "Maximum number of MSA2XXX " + "enclosures exceeded. Check your hardware " + "configuration."); + return 0; + } + + memset(scsi3addr, 0, 8); + scsi3addr[3] = target; + if (hpsa_update_device_info(h, scsi3addr, this_device)) + return 0; + (*nmsa2xxx_enclosures)++; + hpsa_set_bus_target_lun(this_device, bus, target, 0); + set_bit(target, lunzerobits); + return 1; +} + +/* + * Do CISS_REPORT_PHYS and CISS_REPORT_LOG. Data is returned in physdev, + * logdev. The number of luns in physdev and logdev are returned in + * *nphysicals and *nlogicals, respectively. + * Returns 0 on success, -1 otherwise. + */ +static int hpsa_gather_lun_info(struct ctlr_info *h, + int reportlunsize, + struct ReportLUNdata *physdev, __u32 *nphysicals, + struct ReportLUNdata *logdev, __u32 *nlogicals) +{ + if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { + dev_err(&h->pdev->dev, "report physical LUNs failed.\n"); + return -1; + } + memcpy(nphysicals, &physdev->LUNListLength[0], sizeof(*nphysicals)); + *nphysicals = be32_to_cpu(*nphysicals) / 8; +#ifdef DEBUG + dev_info(&h->pdev->dev, "number of physical luns is %d\n", *nphysicals); +#endif + if (*nphysicals > HPSA_MAX_PHYS_LUN) { + dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded." 
+ " %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, + *nphysicals - HPSA_MAX_PHYS_LUN); + *nphysicals = HPSA_MAX_PHYS_LUN; + } + if (hpsa_scsi_do_report_log_luns(h, logdev, reportlunsize)) { + dev_err(&h->pdev->dev, "report logical LUNs failed.\n"); + return -1; + } + memcpy(nlogicals, &logdev->LUNListLength[0], sizeof(*nlogicals)); + *nlogicals = be32_to_cpu(*nlogicals) / 8; +#ifdef DEBUG + dev_info(&h->pdev->dev, "number of logical luns is %d\n", *nlogicals); +#endif + /* Reject Logicals in excess of our max capability. */ + if (*nlogicals > HPSA_MAX_LUN) { + dev_warn(&h->pdev->dev, + "maximum logical LUNs (%d) exceeded. " + "%d LUNs ignored.\n", HPSA_MAX_LUN, + *nlogicals - HPSA_MAX_LUN); + *nlogicals = HPSA_MAX_LUN; + } + if (*nlogicals + *nphysicals > HPSA_MAX_PHYS_LUN) { + dev_warn(&h->pdev->dev, + "maximum logical + physical LUNs (%d) exceeded. " + "%d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, + *nphysicals + *nlogicals - HPSA_MAX_PHYS_LUN); + *nlogicals = HPSA_MAX_PHYS_LUN - *nphysicals; + } + return 0; +} + +static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno) +{ + /* the idea here is we could get notified + * that some devices have changed, so we do a report + * physical luns and report logical luns cmd, and adjust + * our list of devices accordingly. + * + * The scsi3addr's of devices won't change so long as the + * adapter is not reset. That means we can rescan and + * tell which devices we already know about, vs. new + * devices, vs. disappearing devices. + */ + struct ReportLUNdata *physdev_list = NULL; + struct ReportLUNdata *logdev_list = NULL; + unsigned char *inq_buff = NULL; + __u32 nphysicals = 0; + __u32 nlogicals = 0; + __u32 ndev_allocated = 0; + struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice; + int ncurrent = 0; + int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; + int i, nmsa2xxx_enclosures, ndevs_to_allocate; + int bus, target, lun; + DECLARE_BITMAP(lunzerobits, HPSA_MAX_TARGETS_PER_CTLR); + + currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_SCSI_DEVS_PER_HBA, + GFP_KERNEL); + physdev_list = kzalloc(reportlunsize, GFP_KERNEL); + logdev_list = kzalloc(reportlunsize, GFP_KERNEL); + inq_buff = kmalloc(OBDR_TAPE_INQ_SIZE, GFP_KERNEL); + tmpdevice = kzalloc(sizeof(*tmpdevice), GFP_KERNEL); + + if (!currentsd || !physdev_list || !logdev_list || + !inq_buff || !tmpdevice) { + dev_err(&h->pdev->dev, "out of memory\n"); + goto out; + } + memset(lunzerobits, 0, sizeof(lunzerobits)); + + if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, + logdev_list, &nlogicals)) + goto out; + + /* We might see up to 32 MSA2xxx enclosures, actually 8 of them + * but each of them 4 times through different paths. The plus 1 + * is for the RAID controller. 
+ */ + ndevs_to_allocate = nphysicals + nlogicals + MAX_MSA2XXX_ENCLOSURES + 1; + + /* Allocate the per device structures */ + for (i = 0; i < ndevs_to_allocate; i++) { + currentsd[i] = kzalloc(sizeof(*currentsd[i]), GFP_KERNEL); + if (!currentsd[i]) { + dev_warn(&h->pdev->dev, "out of memory at %s:%d\n", + __FILE__, __LINE__); + goto out; + } + ndev_allocated++; + } + + /* adjust our table of devices */ + nmsa2xxx_enclosures = 0; + for (i = 0; i < nphysicals + nlogicals + 1; i++) { + __u8 *lunaddrbytes; + + /* Figure out where the LUN ID info is coming from */ + if (i < nphysicals) + lunaddrbytes = &physdev_list->LUN[i][0]; + else + if (i < nphysicals + nlogicals) + lunaddrbytes = + &logdev_list->LUN[i-nphysicals][0]; + else /* jam in the RAID controller at the end */ + lunaddrbytes = RAID_CTLR_LUNID; + + /* skip masked physical devices. */ + if (lunaddrbytes[3] & 0xC0 && i < nphysicals) + continue; + + /* Get device type, vendor, model, device id */ + if (hpsa_update_device_info(h, lunaddrbytes, tmpdevice)) + continue; /* skip it if we can't talk to it. */ + figure_bus_target_lun(h, lunaddrbytes, &bus, &target, &lun, + tmpdevice); + this_device = currentsd[ncurrent]; + + /* + * For the msa2xxx boxes, we have to insert a LUN 0 which + * doesn't show up in CCISS_REPORT_PHYSICAL data, but there + * is nonetheless an enclosure device there. We have to + * present that otherwise linux won't find anything if + * there is no lun 0. + */ + if (add_msa2xxx_enclosure_device(h, tmpdevice, this_device, + lunaddrbytes, bus, target, lun, lunzerobits, + &nmsa2xxx_enclosures)) { + ncurrent++; + this_device = currentsd[ncurrent]; + } + + *this_device = *tmpdevice; + hpsa_set_bus_target_lun(this_device, bus, target, lun); + + switch (this_device->devtype) { + case TYPE_ROM: { + /* We don't *really* support actual CD-ROM devices, + * just "One Button Disaster Recovery" tape drive + * which temporarily pretends to be a CD-ROM drive. + * So we check that the device is really an OBDR tape + * device by checking for "$DR-10" in bytes 43-48 of + * the inquiry data. + */ + char obdr_sig[7]; +#define OBDR_TAPE_SIG "$DR-10" + strncpy(obdr_sig, &inq_buff[43], 6); + obdr_sig[6] = '\0'; + if (strncmp(obdr_sig, OBDR_TAPE_SIG, 6) != 0) + /* Not OBDR device, ignore it. */ + break; + } + ncurrent++; + break; + case TYPE_DISK: + if (i < nphysicals) + break; + ncurrent++; + break; + case TYPE_TAPE: + case TYPE_MEDIUM_CHANGER: + ncurrent++; + break; + case TYPE_RAID: + /* Only present the Smartarray HBA as a RAID controller. + * If it's a RAID controller other than the HBA itself + * (an external RAID controller, MSA500 or similar) + * don't present it. + */ + if (!is_hba_lunid(lunaddrbytes)) + break; + ncurrent++; + break; + default: + break; + } + if (ncurrent >= HPSA_MAX_SCSI_DEVS_PER_HBA) + break; + } + adjust_hpsa_scsi_table(h, hostno, currentsd, ncurrent); +out: + kfree(tmpdevice); + for (i = 0; i < ndev_allocated; i++) + kfree(currentsd[i]); + kfree(currentsd); + kfree(inq_buff); + kfree(physdev_list); + kfree(logdev_list); + return; +} + +/* hpsa_scatter_gather takes a struct scsi_cmnd, (cmd), and does the pci + * dma mapping and fills in the scatter gather entries of the + * hpsa command, cp. 
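+ * Each scatterlist element from scsi_dma_map() becomes one SG descriptor: + * the 64-bit DMA address is split into 32-bit lower/upper halves, and + * Ext stays 0 since SG chaining is not used.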
+ */ +static int hpsa_scatter_gather(struct pci_dev *pdev, + struct CommandList *cp, + struct scsi_cmnd *cmd) +{ + unsigned int len; + struct scatterlist *sg; + __u64 addr64; + int use_sg, i; + + BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES); + + use_sg = scsi_dma_map(cmd); + if (use_sg < 0) + return use_sg; + + if (!use_sg) + goto sglist_finished; + + scsi_for_each_sg(cmd, sg, use_sg, i) { + addr64 = (__u64) sg_dma_address(sg); + len = sg_dma_len(sg); + cp->SG[i].Addr.lower = + (__u32) (addr64 & (__u64) 0x00000000FFFFFFFF); + cp->SG[i].Addr.upper = + (__u32) ((addr64 >> 32) & (__u64) 0x00000000FFFFFFFF); + cp->SG[i].Len = len; + cp->SG[i].Ext = 0; /* we are not chaining */ + } + +sglist_finished: + + cp->Header.SGList = (__u8) use_sg; /* no. SGs contig in this cmd */ + cp->Header.SGTotal = (__u16) use_sg; /* total sgs in this cmd list */ + return 0; +} + + +static int hpsa_scsi_queue_command(struct scsi_cmnd *cmd, + void (*done)(struct scsi_cmnd *)) +{ + struct ctlr_info *h; + struct hpsa_scsi_dev_t *dev; + unsigned char scsi3addr[8]; + struct CommandList *c; + unsigned long flags; + + /* Get the ptr to our adapter structure out of cmd->host. */ + h = sdev_to_hba(cmd->device); + dev = cmd->device->hostdata; + if (!dev) { + cmd->result = DID_NO_CONNECT << 16; + done(cmd); + return 0; + } + memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); + + /* Need a lock as this is being allocated from the pool */ + spin_lock_irqsave(&h->lock, flags); + c = cmd_alloc(h); + spin_unlock_irqrestore(&h->lock, flags); + if (c == NULL) { /* trouble... */ + dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); + return SCSI_MLQUEUE_HOST_BUSY; + } + + /* Fill in the command list header */ + + cmd->scsi_done = done; /* save this for use by completion code */ + + /* save c in case we have to abort it */ + cmd->host_scribble = (unsigned char *) c; + + c->cmd_type = CMD_SCSI; + c->scsi_cmd = cmd; + c->Header.ReplyQueue = 0; /* unused in simple mode */ + memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8); + c->Header.Tag.lower = c->busaddr; /* Use k. address of cmd as tag */ + + /* Fill in the request block... */ + + c->Request.Timeout = 0; + memset(c->Request.CDB, 0, sizeof(c->Request.CDB)); + BUG_ON(cmd->cmd_len > sizeof(c->Request.CDB)); + c->Request.CDBLen = cmd->cmd_len; + memcpy(c->Request.CDB, cmd->cmnd, cmd->cmd_len); + c->Request.Type.Type = TYPE_CMD; + c->Request.Type.Attribute = ATTR_SIMPLE; + switch (cmd->sc_data_direction) { + case DMA_TO_DEVICE: + c->Request.Type.Direction = XFER_WRITE; + break; + case DMA_FROM_DEVICE: + c->Request.Type.Direction = XFER_READ; + break; + case DMA_NONE: + c->Request.Type.Direction = XFER_NONE; + break; + case DMA_BIDIRECTIONAL: + /* This can happen if a buggy application does a scsi passthru + * and sets both inlen and outlen to non-zero. ( see + * ../scsi/scsi_ioctl.c:scsi_ioctl_send_command() ) + */ + + c->Request.Type.Direction = XFER_RSVD; + /* This is technically wrong, and hpsa controllers should + * reject it with CMD_INVALID, which is the most correct + * response, but non-fibre backends appear to let it + * slide by, and give the same results as if this field + * were set correctly. Either way is acceptable for + * our purposes here. 
+ */ + + break; + + default: + dev_err(&h->pdev->dev, "unknown data direction: %d\n", + cmd->sc_data_direction); + BUG(); + break; + } + + if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */ + cmd_free(h, c); + return SCSI_MLQUEUE_HOST_BUSY; + } + enqueue_cmd_and_start_io(h, c); + /* the cmd'll come back via intr handler in complete_scsi_command() */ + return 0; +} + +static void hpsa_unregister_scsi(struct ctlr_info *h) +{ + /* we are being forcibly unloaded, and may not refuse. */ + scsi_remove_host(h->scsi_host); + scsi_host_put(h->scsi_host); + h->scsi_host = NULL; +} + +static int hpsa_register_scsi(struct ctlr_info *h) +{ + int rc; + + hpsa_update_scsi_devices(h, -1); + rc = hpsa_scsi_detect(h); + if (rc != 0) + dev_err(&h->pdev->dev, "hpsa_register_scsi: failed" + " hpsa_scsi_detect(), rc is %d\n", rc); + return rc; +} + +static int wait_for_device_to_become_ready(struct ctlr_info *h, + unsigned char lunaddr[]) +{ + int rc = 0; + int count = 0; + int waittime = 1; /* seconds */ + struct CommandList *c; + + c = cmd_special_alloc(h); + if (!c) { + dev_warn(&h->pdev->dev, "out of memory in " + "wait_for_device_to_become_ready.\n"); + return IO_ERROR; + } + + /* Send test unit ready until device ready, or give up. */ + while (count < HPSA_TUR_RETRY_LIMIT) { + + /* Wait for a bit. do this first, because if we send + * the TUR right away, the reset will just abort it. + */ + msleep(1000 * waittime); + count++; + + /* Increase wait time with each try, up to a point. */ + if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) + waittime = waittime * 2; + + /* Send the Test Unit Ready */ + fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, lunaddr, TYPE_CMD); + hpsa_scsi_do_simple_cmd_core(h, c); + /* no unmap needed here because no data xfer. */ + + if (c->err_info->CommandStatus == CMD_SUCCESS) + break; + + if (c->err_info->CommandStatus == CMD_TARGET_STATUS && + c->err_info->ScsiStatus == SAM_STAT_CHECK_CONDITION && + (c->err_info->SenseInfo[2] == NO_SENSE || + c->err_info->SenseInfo[2] == UNIT_ATTENTION)) + break; + + dev_warn(&h->pdev->dev, "waiting %d secs " + "for device to become ready.\n", waittime); + rc = 1; /* device not ready. */ + } + + if (rc) + dev_warn(&h->pdev->dev, "giving up on device.\n"); + else + dev_warn(&h->pdev->dev, "device is ready.\n"); + + cmd_special_free(h, c); + return rc; +} + +/* Need at least one of these error handlers to keep ../scsi/hosts.c from + * complaining. Doing a host- or bus-reset can't do anything good here. + */ +static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd) +{ + int rc; + struct ctlr_info *h; + struct hpsa_scsi_dev_t *dev; + + /* find the controller to which the command to be aborted was sent */ + h = sdev_to_hba(scsicmd->device); + if (h == NULL) /* paranoia */ + return FAILED; + dev_warn(&h->pdev->dev, "resetting drive\n"); + + dev = scsicmd->device->hostdata; + if (!dev) { + dev_err(&h->pdev->dev, "hpsa_eh_device_reset_handler: " + "device lookup failed.\n"); + return FAILED; + } + /* send a reset to the SCSI LUN which the command was sent to */ + rc = hpsa_send_reset(h, dev->scsi3addr); + if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0) + return SUCCESS; + + dev_warn(&h->pdev->dev, "resetting device failed.\n"); + return FAILED; +} + +/* + * For operations that cannot sleep, a command block is allocated at init, + * and managed by cmd_alloc() and cmd_free() using a simple bitmap to track + * which ones are free or in use. Lock must be held when calling this. + * cmd_free() is the complement. 
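+ * find_first_zero_bit() locates a free slot in cmd_pool_bits and + * test_and_set_bit() claims it (retrying if another caller raced in); + * the command and its error buffer then come from the preallocated + * DMA-consistent pools at the same index.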
+ */ +static struct CommandList *cmd_alloc(struct ctlr_info *h) +{ + struct CommandList *c; + int i; + union u64bit temp64; + dma_addr_t cmd_dma_handle, err_dma_handle; + + do { + i = find_first_zero_bit(h->cmd_pool_bits, h->nr_cmds); + if (i == h->nr_cmds) + return NULL; + } while (test_and_set_bit + (i & (BITS_PER_LONG - 1), + h->cmd_pool_bits + (i / BITS_PER_LONG)) != 0); + c = h->cmd_pool + i; + memset(c, 0, sizeof(*c)); + cmd_dma_handle = h->cmd_pool_dhandle + + i * sizeof(*c); + c->err_info = h->errinfo_pool + i; + memset(c->err_info, 0, sizeof(*c->err_info)); + err_dma_handle = h->errinfo_pool_dhandle + + i * sizeof(*c->err_info); + h->nr_allocs++; + + c->cmdindex = i; + + INIT_HLIST_NODE(&c->list); + c->busaddr = (__u32) cmd_dma_handle; + temp64.val = (__u64) err_dma_handle; + c->ErrDesc.Addr.lower = temp64.val32.lower; + c->ErrDesc.Addr.upper = temp64.val32.upper; + c->ErrDesc.Len = sizeof(*c->err_info); + + c->h = h; + return c; +} + +/* For operations that can wait for kmalloc to possibly sleep, + * this routine can be called. Lock need not be held to call + * cmd_special_alloc. cmd_special_free() is the complement. + */ +static struct CommandList *cmd_special_alloc(struct ctlr_info *h) +{ + struct CommandList *c; + union u64bit temp64; + dma_addr_t cmd_dma_handle, err_dma_handle; + + c = pci_alloc_consistent(h->pdev, sizeof(*c), &cmd_dma_handle); + if (c == NULL) + return NULL; + memset(c, 0, sizeof(*c)); + + c->cmdindex = -1; + + c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), + &err_dma_handle); + + if (c->err_info == NULL) { + pci_free_consistent(h->pdev, + sizeof(*c), c, cmd_dma_handle); + return NULL; + } + memset(c->err_info, 0, sizeof(*c->err_info)); + + INIT_HLIST_NODE(&c->list); + c->busaddr = (__u32) cmd_dma_handle; + temp64.val = (__u64) err_dma_handle; + c->ErrDesc.Addr.lower = temp64.val32.lower; + c->ErrDesc.Addr.upper = temp64.val32.upper; + c->ErrDesc.Len = sizeof(*c->err_info); + + c->h = h; + return c; +} + +static void cmd_free(struct ctlr_info *h, struct CommandList *c) +{ + int i; + + i = c - h->cmd_pool; + clear_bit(i & (BITS_PER_LONG - 1), + h->cmd_pool_bits + (i / BITS_PER_LONG)); + h->nr_frees++; +} + +static void cmd_special_free(struct ctlr_info *h, struct CommandList *c) +{ + union u64bit temp64; + + temp64.val32.lower = c->ErrDesc.Addr.lower; + temp64.val32.upper = c->