Diffstat (limited to 'drivers/scsi/hpsa.c')
-rw-r--r--  drivers/scsi/hpsa.c | 3011
1 file changed, 2753 insertions(+), 258 deletions(-)
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c index 891c86b6625..31184b35370 100644 --- a/drivers/scsi/hpsa.c +++ b/drivers/scsi/hpsa.c @@ -1,6 +1,6 @@  /*   *    Disk Array driver for HP Smart Array SAS controllers - *    Copyright 2000, 2009 Hewlett-Packard Development Company, L.P. + *    Copyright 2000, 2014 Hewlett-Packard Development Company, L.P.   *   *    This program is free software; you can redistribute it and/or modify   *    it under the terms of the GNU General Public License as published by @@ -29,7 +29,6 @@  #include <linux/delay.h>  #include <linux/fs.h>  #include <linux/timer.h> -#include <linux/seq_file.h>  #include <linux/init.h>  #include <linux/spinlock.h>  #include <linux/compat.h> @@ -48,13 +47,14 @@  #include <linux/string.h>  #include <linux/bitmap.h>  #include <linux/atomic.h> -#include <linux/kthread.h>  #include <linux/jiffies.h> +#include <linux/percpu.h> +#include <asm/div64.h>  #include "hpsa_cmd.h"  #include "hpsa.h"  /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */ -#define HPSA_DRIVER_VERSION "3.4.0-1" +#define HPSA_DRIVER_VERSION "3.4.4-1"  #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"  #define HPSA "hpsa" @@ -96,11 +96,9 @@ static const struct pci_device_id hpsa_pci_device_id[] = {  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3351},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3352},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3353}, -	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x334D},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3354},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3355},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSF,     0x103C, 0x3356}, -	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1920},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1921},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1922},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSH,     0x103C, 0x1923}, @@ -118,9 +116,20 @@ static const struct pci_device_id hpsa_pci_device_id[] = {  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C3},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C4},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C5}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C6},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C7},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C8},  	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21C9}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CA}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CB}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CC}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CD}, +	{PCI_VENDOR_ID_HP,     PCI_DEVICE_ID_HP_CISSI,     0x103C, 0x21CE}, +	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0076}, +	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0087}, +	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x007D}, +	{PCI_VENDOR_ID_HP_3PAR, 0x0075, 0x1590, 0x0088}, +	{PCI_VENDOR_ID_HP, 0x333f, 0x103c, 0x333f},  	{PCI_VENDOR_ID_HP,     PCI_ANY_ID,	PCI_ANY_ID, PCI_ANY_ID,  		PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},  	{0,} @@ -144,7 +153,6 @@ static struct board_type products[] = {  	{0x3351103C, "Smart Array P420", &SA5_access},  	{0x3352103C, "Smart Array P421", &SA5_access},  	{0x3353103C, "Smart Array P822", &SA5_access}, 
-	{0x334D103C, "Smart Array P822se", &SA5_access},  	{0x3354103C, "Smart Array P420i", &SA5_access},  	{0x3355103C, "Smart Array P220i", &SA5_access},  	{0x3356103C, "Smart Array P721m", &SA5_access}, @@ -164,22 +172,30 @@ static struct board_type products[] = {  	{0x21C3103C, "Smart Array", &SA5_access},  	{0x21C4103C, "Smart Array", &SA5_access},  	{0x21C5103C, "Smart Array", &SA5_access}, +	{0x21C6103C, "Smart Array", &SA5_access},  	{0x21C7103C, "Smart Array", &SA5_access},  	{0x21C8103C, "Smart Array", &SA5_access},  	{0x21C9103C, "Smart Array", &SA5_access}, +	{0x21CA103C, "Smart Array", &SA5_access}, +	{0x21CB103C, "Smart Array", &SA5_access}, +	{0x21CC103C, "Smart Array", &SA5_access}, +	{0x21CD103C, "Smart Array", &SA5_access}, +	{0x21CE103C, "Smart Array", &SA5_access}, +	{0x00761590, "HP Storage P1224 Array Controller", &SA5_access}, +	{0x00871590, "HP Storage P1224e Array Controller", &SA5_access}, +	{0x007D1590, "HP Storage P1228 Array Controller", &SA5_access}, +	{0x00881590, "HP Storage P1228e Array Controller", &SA5_access}, +	{0x333f103c, "HP StorageWorks 1210m Array Controller", &SA5_access},  	{0xFFFF103C, "Unknown Smart Array", &SA5_access},  };  static int number_of_controllers; -static struct list_head hpsa_ctlr_list = LIST_HEAD_INIT(hpsa_ctlr_list); -static spinlock_t lockup_detector_lock; -static struct task_struct *hpsa_lockup_detector; -  static irqreturn_t do_hpsa_intr_intx(int irq, void *dev_id);  static irqreturn_t do_hpsa_intr_msi(int irq, void *dev_id);  static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg); -static void start_io(struct ctlr_info *h); +static void lock_and_start_io(struct ctlr_info *h); +static void start_io(struct ctlr_info *h, unsigned long *flags);  #ifdef CONFIG_COMPAT  static int hpsa_compat_ioctl(struct scsi_device *dev, int cmd, void *arg); @@ -190,8 +206,9 @@ static void cmd_special_free(struct ctlr_info *h, struct CommandList *c);  static struct CommandList *cmd_alloc(struct ctlr_info *h);  static struct CommandList *cmd_special_alloc(struct ctlr_info *h);  static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, -	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, +	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,  	int cmd_type); +#define VPD_PAGE (1 << 8)  static int hpsa_scsi_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);  static void hpsa_scan_start(struct Scsi_Host *); @@ -212,7 +229,7 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,  	struct CommandList *c);  /* performant mode helper functions */  static void calc_bucket_map(int *bucket, int num_buckets, -	int nsgs, int *bucket_map); +	int nsgs, int min_blocks, int *bucket_map);  static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h);  static inline u32 next_command(struct ctlr_info *h, u8 q);  static int hpsa_find_cfg_addrs(struct pci_dev *pdev, void __iomem *vaddr, @@ -224,8 +241,14 @@ static int hpsa_lookup_board_id(struct pci_dev *pdev, u32 *board_id);  static int hpsa_wait_for_board_state(struct pci_dev *pdev, void __iomem *vaddr,  				     int wait_for_ready);  static inline void finish_cmd(struct CommandList *c); +static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h);  #define BOARD_NOT_READY 0  #define BOARD_READY 1 +static void hpsa_drain_accel_commands(struct ctlr_info *h); +static void hpsa_flush_cache(struct ctlr_info *h); +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, +	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, +	u8 
*scsi3addr);  static inline struct ctlr_info *sdev_to_hba(struct scsi_device *sdev)  { @@ -288,6 +311,55 @@ static int check_for_busy(struct ctlr_info *h, struct CommandList *c)  	return 1;  } +static ssize_t host_store_hp_ssd_smart_path_status(struct device *dev, +					 struct device_attribute *attr, +					 const char *buf, size_t count) +{ +	int status, len; +	struct ctlr_info *h; +	struct Scsi_Host *shost = class_to_shost(dev); +	char tmpbuf[10]; + +	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) +		return -EACCES; +	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; +	strncpy(tmpbuf, buf, len); +	tmpbuf[len] = '\0'; +	if (sscanf(tmpbuf, "%d", &status) != 1) +		return -EINVAL; +	h = shost_to_hba(shost); +	h->acciopath_status = !!status; +	dev_warn(&h->pdev->dev, +		"hpsa: HP SSD Smart Path %s via sysfs update.\n", +		h->acciopath_status ? "enabled" : "disabled"); +	return count; +} + +static ssize_t host_store_raid_offload_debug(struct device *dev, +					 struct device_attribute *attr, +					 const char *buf, size_t count) +{ +	int debug_level, len; +	struct ctlr_info *h; +	struct Scsi_Host *shost = class_to_shost(dev); +	char tmpbuf[10]; + +	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) +		return -EACCES; +	len = count > sizeof(tmpbuf) - 1 ? sizeof(tmpbuf) - 1 : count; +	strncpy(tmpbuf, buf, len); +	tmpbuf[len] = '\0'; +	if (sscanf(tmpbuf, "%d", &debug_level) != 1) +		return -EINVAL; +	if (debug_level < 0) +		debug_level = 0; +	h = shost_to_hba(shost); +	h->raid_offload_debug = debug_level; +	dev_warn(&h->pdev->dev, "hpsa: Set raid_offload_debug level = %d\n", +		h->raid_offload_debug); +	return count; +} +  static ssize_t host_store_rescan(struct device *dev,  				 struct device_attribute *attr,  				 const char *buf, size_t count) @@ -335,6 +407,17 @@ static ssize_t host_show_transport_mode(struct device *dev,  			"performant" : "simple");  } +static ssize_t host_show_hp_ssd_smart_path_status(struct device *dev, +	struct device_attribute *attr, char *buf) +{ +	struct ctlr_info *h; +	struct Scsi_Host *shost = class_to_shost(dev); + +	h = shost_to_hba(shost); +	return snprintf(buf, 30, "HP SSD Smart Path %s\n", +		(h->acciopath_status == 1) ?  
"enabled" : "disabled"); +} +  /* List of controllers which cannot be hard reset on kexec with reset_devices */  static u32 unresettable_controller[] = {  	0x324a103C, /* Smart Array P712m */ @@ -424,6 +507,13 @@ static inline int is_logical_dev_addr_mode(unsigned char scsi3addr[])  static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",  	"1(ADM)", "UNKNOWN"  }; +#define HPSA_RAID_0	0 +#define HPSA_RAID_4	1 +#define HPSA_RAID_1	2	/* also used for RAID 10 */ +#define HPSA_RAID_5	3	/* also used for RAID 50 */ +#define HPSA_RAID_51	4 +#define HPSA_RAID_6	5	/* also used for RAID 60 */ +#define HPSA_RAID_ADM	6	/* also used for RAID 1+0 ADM */  #define RAID_UNKNOWN (ARRAY_SIZE(raid_label) - 1)  static ssize_t raid_level_show(struct device *dev, @@ -512,10 +602,39 @@ static ssize_t unique_id_show(struct device *dev,  			sn[12], sn[13], sn[14], sn[15]);  } +static ssize_t host_show_hp_ssd_smart_path_enabled(struct device *dev, +	     struct device_attribute *attr, char *buf) +{ +	struct ctlr_info *h; +	struct scsi_device *sdev; +	struct hpsa_scsi_dev_t *hdev; +	unsigned long flags; +	int offload_enabled; + +	sdev = to_scsi_device(dev); +	h = sdev_to_hba(sdev); +	spin_lock_irqsave(&h->lock, flags); +	hdev = sdev->hostdata; +	if (!hdev) { +		spin_unlock_irqrestore(&h->lock, flags); +		return -ENODEV; +	} +	offload_enabled = hdev->offload_enabled; +	spin_unlock_irqrestore(&h->lock, flags); +	return snprintf(buf, 20, "%d\n", offload_enabled); +} +  static DEVICE_ATTR(raid_level, S_IRUGO, raid_level_show, NULL);  static DEVICE_ATTR(lunid, S_IRUGO, lunid_show, NULL);  static DEVICE_ATTR(unique_id, S_IRUGO, unique_id_show, NULL);  static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan); +static DEVICE_ATTR(hp_ssd_smart_path_enabled, S_IRUGO, +			host_show_hp_ssd_smart_path_enabled, NULL); +static DEVICE_ATTR(hp_ssd_smart_path_status, S_IWUSR|S_IRUGO|S_IROTH, +		host_show_hp_ssd_smart_path_status, +		host_store_hp_ssd_smart_path_status); +static DEVICE_ATTR(raid_offload_debug, S_IWUSR, NULL, +			host_store_raid_offload_debug);  static DEVICE_ATTR(firmware_revision, S_IRUGO,  	host_show_firmware_revision, NULL);  static DEVICE_ATTR(commands_outstanding, S_IRUGO, @@ -529,6 +648,7 @@ static struct device_attribute *hpsa_sdev_attrs[] = {  	&dev_attr_raid_level,  	&dev_attr_lunid,  	&dev_attr_unique_id, +	&dev_attr_hp_ssd_smart_path_enabled,  	NULL,  }; @@ -538,6 +658,8 @@ static struct device_attribute *hpsa_shost_attrs[] = {  	&dev_attr_commands_outstanding,  	&dev_attr_transport_mode,  	&dev_attr_resettable, +	&dev_attr_hp_ssd_smart_path_status, +	&dev_attr_raid_offload_debug,  	NULL,  }; @@ -562,6 +684,7 @@ static struct scsi_host_template hpsa_driver_template = {  	.sdev_attrs = hpsa_sdev_attrs,  	.shost_attrs = hpsa_shost_attrs,  	.max_sectors = 8192, +	.no_write_same = 1,  }; @@ -574,9 +697,12 @@ static inline void addQ(struct list_head *list, struct CommandList *c)  static inline u32 next_command(struct ctlr_info *h, u8 q)  {  	u32 a; -	struct reply_pool *rq = &h->reply_queue[q]; +	struct reply_queue_buffer *rq = &h->reply_queue[q];  	unsigned long flags; +	if (h->transMethod & CFGTBL_Trans_io_accel1) +		return h->access.command_completed(h, q); +  	if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))  		return h->access.command_completed(h, q); @@ -597,6 +723,32 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)  	return a;  } +/* + * There are some special bits in the bus address of the + * command that we have to set for the controller to know + * how to 
process the command: + * + * Normal performant mode: + * bit 0: 1 means performant mode, 0 means simple mode. + * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 0) + * + * ioaccel1 mode: + * bit 0 = "performant mode" bit. + * bits 1-3 = block fetch table entry + * bits 4-6 = command type (== 110) + * (command type is needed because ioaccel1 mode + * commands are submitted through the same register as normal + * mode commands, so this is how the controller knows whether + * the command is normal mode or ioaccel1 mode.) + * + * ioaccel2 mode: + * bit 0 = "performant mode" bit. + * bits 1-4 = block fetch table entry (note extra bit) + * bits 4-6 = not needed, because ioaccel2 mode has + * a separate special register for submitting commands. + */ +  /* set_performant_mode: Modify the tag for cciss performant   * set bit 0 for pull model, bits 3-1 for block fetch   * register number @@ -605,12 +757,47 @@ static void set_performant_mode(struct ctlr_info *h, struct CommandList *c)  {  	if (likely(h->transMethod & CFGTBL_Trans_Performant)) {  		c->busaddr |= 1 | (h->blockFetchTable[c->Header.SGList] << 1); -		if (likely(h->msix_vector)) +		if (likely(h->msix_vector > 0))  			c->Header.ReplyQueue =  				raw_smp_processor_id() % h->nreply_queues;  	}  } +static void set_ioaccel1_performant_mode(struct ctlr_info *h, +						struct CommandList *c) +{ +	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; + +	/* Tell the controller to post the reply to the queue for this +	 * processor.  This seems to give the best I/O throughput. +	 */ +	cp->ReplyQueue = smp_processor_id() % h->nreply_queues; +	/* Set the bits in the address sent down to include: +	 *  - performant mode bit (bit 0) +	 *  - pull count (bits 1-3) +	 *  - command type (bits 4-6) +	 */ +	c->busaddr |= 1 | (h->ioaccel1_blockFetchTable[c->Header.SGList] << 1) | +					IOACCEL1_BUSADDR_CMDTYPE; +} + +static void set_ioaccel2_performant_mode(struct ctlr_info *h, +						struct CommandList *c) +{ +	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; + +	/* Tell the controller to post the reply to the queue for this +	 * processor.  This seems to give the best I/O throughput. +	 */ +	cp->reply_queue = smp_processor_id() % h->nreply_queues; +	/* Set the bits in the address sent down to include: +	 *  - performant mode bit not used in ioaccel mode 2 +	 *  - pull count (bits 0-3) +	 *  - command type isn't needed for ioaccel2 +	 */ +	c->busaddr |= (h->ioaccel2_blockFetchTable[cp->sg_count]); +} +  static int is_firmware_flash_cmd(u8 *cdb)  {  	return cdb[0] == BMIC_WRITE && cdb[6] == BMIC_FLASH_FIRMWARE; @@ -645,13 +832,22 @@ static void enqueue_cmd_and_start_io(struct ctlr_info *h,  {  	unsigned long flags; -	set_performant_mode(h, c); +	switch (c->cmd_type) { +	case CMD_IOACCEL1: +		set_ioaccel1_performant_mode(h, c); +		break; +	case CMD_IOACCEL2: +		set_ioaccel2_performant_mode(h, c); +		break; +	default: +		set_performant_mode(h, c); +	}  	dial_down_lockup_detection_during_fw_flash(h, c);  	spin_lock_irqsave(&h->lock, flags);  	addQ(&h->reqQ, c);  	h->Qdepth++; +	start_io(h, &flags);  	spin_unlock_irqrestore(&h->lock, flags); -	start_io(h);  }  static inline void removeQ(struct CommandList *c) @@ -789,6 +985,14 @@ static void hpsa_scsi_update_entry(struct ctlr_info *h, int hostno,  	/* Raid level changed. */  	h->dev[entry]->raid_level = new_entry->raid_level; + +	/* Raid offload parameters changed. 
*/ +	h->dev[entry]->offload_config = new_entry->offload_config; +	h->dev[entry]->offload_enabled = new_entry->offload_enabled; +	h->dev[entry]->ioaccel_handle = new_entry->ioaccel_handle; +	h->dev[entry]->offload_to_mirror = new_entry->offload_to_mirror; +	h->dev[entry]->raid_map = new_entry->raid_map; +  	dev_info(&h->pdev->dev, "%s device c%db%dt%dl%d updated.\n",  		scsi_device_type(new_entry->devtype), hostno, new_entry->bus,  		new_entry->target, new_entry->lun); @@ -909,6 +1113,10 @@ static inline int device_updated(struct hpsa_scsi_dev_t *dev1,  	 */  	if (dev1->raid_level != dev2->raid_level)  		return 1; +	if (dev1->offload_config != dev2->offload_config) +		return 1; +	if (dev1->offload_enabled != dev2->offload_enabled) +		return 1;  	return 0;  } @@ -939,6 +1147,9 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,  					return DEVICE_UPDATED;  				return DEVICE_SAME;  			} else { +				/* Keep offline devices offline */ +				if (needle->volume_offline) +					return DEVICE_NOT_FOUND;  				return DEVICE_CHANGED;  			}  		} @@ -947,6 +1158,110 @@ static int hpsa_scsi_find_entry(struct hpsa_scsi_dev_t *needle,  	return DEVICE_NOT_FOUND;  } +static void hpsa_monitor_offline_device(struct ctlr_info *h, +					unsigned char scsi3addr[]) +{ +	struct offline_device_entry *device; +	unsigned long flags; + +	/* Check to see if device is already on the list */ +	spin_lock_irqsave(&h->offline_device_lock, flags); +	list_for_each_entry(device, &h->offline_device_list, offline_list) { +		if (memcmp(device->scsi3addr, scsi3addr, +			sizeof(device->scsi3addr)) == 0) { +			spin_unlock_irqrestore(&h->offline_device_lock, flags); +			return; +		} +	} +	spin_unlock_irqrestore(&h->offline_device_lock, flags); + +	/* Device is not on the list, add it. 
*/ +	device = kmalloc(sizeof(*device), GFP_KERNEL); +	if (!device) { +		dev_warn(&h->pdev->dev, "out of memory in %s\n", __func__); +		return; +	} +	memcpy(device->scsi3addr, scsi3addr, sizeof(device->scsi3addr)); +	spin_lock_irqsave(&h->offline_device_lock, flags); +	list_add_tail(&device->offline_list, &h->offline_device_list); +	spin_unlock_irqrestore(&h->offline_device_lock, flags); +} + +/* Print a message explaining various offline volume states */ +static void hpsa_show_volume_status(struct ctlr_info *h, +	struct hpsa_scsi_dev_t *sd) +{ +	if (sd->volume_offline == HPSA_VPD_LV_STATUS_UNSUPPORTED) +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume status is not available through vital product data pages.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +	switch (sd->volume_offline) { +	case HPSA_LV_OK: +		break; +	case HPSA_LV_UNDERGOING_ERASE: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is undergoing background erase process.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_UNDERGOING_RPI: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is undergoing rapid parity initialization process.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_PENDING_RPI: +		dev_info(&h->pdev->dev, +				"C%d:B%d:T%d:L%d Volume is queued for rapid parity initialization process.\n", +				h->scsi_host->host_no, +				sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_ENCRYPTED_NO_KEY: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because key is not present.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is not encrypted and cannot be accessed because controller is in encryption-only mode.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_UNDERGOING_ENCRYPTION: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is undergoing encryption process.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is undergoing encryption re-keying process.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is encrypted and cannot be accessed because controller does not have encryption enabled.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_PENDING_ENCRYPTION: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is pending migration to encrypted state, but process has not started.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	case HPSA_LV_PENDING_ENCRYPTION_REKEYING: +		dev_info(&h->pdev->dev, +			"C%d:B%d:T%d:L%d Volume is encrypted and is pending encryption rekeying.\n", +			h->scsi_host->host_no, +			sd->bus, sd->target, sd->lun); +		break; +	} +} +  static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,  	struct hpsa_scsi_dev_t *sd[], int nsds)  { @@ -1011,6 +1326,20 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,  	for (i = 0; i < nsds; i++) {  		if (!sd[i]) /* if already added above. 
*/  			continue; + +		/* Don't add devices which are NOT READY, FORMAT IN PROGRESS +		 * as the SCSI mid-layer does not handle such devices well. +		 * It relentlessly loops sending TUR at 3Hz, then READ(10) +		 * at 160Hz, and prevents the system from coming up. +		 */ +		if (sd[i]->volume_offline) { +			hpsa_show_volume_status(h, sd[i]); +			dev_info(&h->pdev->dev, "c%db%dt%dl%d: temporarily offline\n", +				h->scsi_host->host_no, +				sd[i]->bus, sd[i]->target, sd[i]->lun); +			continue; +		} +  		device_change = hpsa_scsi_find_entry(sd[i], h->dev,  					h->ndevices, &entry);  		if (device_change == DEVICE_NOT_FOUND) { @@ -1029,6 +1358,17 @@ static void adjust_hpsa_scsi_table(struct ctlr_info *h, int hostno,  	}  	spin_unlock_irqrestore(&h->devlock, flags); +	/* Monitor devices which are in one of several NOT READY states to be +	 * brought online later. This must be done without holding h->devlock, +	 * so don't touch h->dev[] +	 */ +	for (i = 0; i < nsds; i++) { +		if (!sd[i]) /* if already added above. */ +			continue; +		if (sd[i]->volume_offline) +			hpsa_monitor_offline_device(h, sd[i]->scsi3addr); +	} +  	/* Don't notify scsi mid layer of any changes the first time through  	 * (or if there are no changes) scsi_scan_host will do it later the  	 * first time through. @@ -1194,11 +1534,156 @@ static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,  	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);  } + +/* Decode the various types of errors on ioaccel2 path. + * Return 1 for any error that should generate a RAID path retry. + * Return 0 for errors that don't require a RAID path retry. + */ +static int handle_ioaccel_mode2_error(struct ctlr_info *h, +					struct CommandList *c, +					struct scsi_cmnd *cmd, +					struct io_accel2_cmd *c2) +{ +	int data_len; +	int retry = 0; + +	switch (c2->error_data.serv_response) { +	case IOACCEL2_SERV_RESPONSE_COMPLETE: +		switch (c2->error_data.status) { +		case IOACCEL2_STATUS_SR_TASK_COMP_GOOD: +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_CHK_COND: +			dev_warn(&h->pdev->dev, +				"%s: task complete with check condition.\n", +				"HP SSD Smart Path"); +			cmd->result |= SAM_STAT_CHECK_CONDITION; +			if (c2->error_data.data_present != +					IOACCEL2_SENSE_DATA_PRESENT) { +				memset(cmd->sense_buffer, 0, +					SCSI_SENSE_BUFFERSIZE); +				break; +			} +			/* copy the sense data */ +			data_len = c2->error_data.sense_data_len; +			if (data_len > SCSI_SENSE_BUFFERSIZE) +				data_len = SCSI_SENSE_BUFFERSIZE; +			if (data_len > sizeof(c2->error_data.sense_data_buff)) +				data_len = +					sizeof(c2->error_data.sense_data_buff); +			memcpy(cmd->sense_buffer, +				c2->error_data.sense_data_buff, data_len); +			retry = 1; +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_BUSY: +			dev_warn(&h->pdev->dev, +				"%s: task complete with BUSY status.\n", +				"HP SSD Smart Path"); +			retry = 1; +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_RES_CON: +			dev_warn(&h->pdev->dev, +				"%s: task complete with reservation conflict.\n", +				"HP SSD Smart Path"); +			retry = 1; +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_SET_FULL: +			/* Make scsi midlayer do unlimited retries */ +			cmd->result = DID_IMM_RETRY << 16; +			break; +		case IOACCEL2_STATUS_SR_TASK_COMP_ABORTED: +			dev_warn(&h->pdev->dev, +				"%s: task complete with aborted status.\n", +				"HP SSD Smart Path"); +			retry = 1; +			break; +		default: +			dev_warn(&h->pdev->dev, +				"%s: task complete with unrecognized status: 0x%02x\n", +				"HP SSD Smart Path", 
c2->error_data.status); +			retry = 1; +			break; +		} +		break; +	case IOACCEL2_SERV_RESPONSE_FAILURE: +		/* don't expect to get here. */ +		dev_warn(&h->pdev->dev, +			"unexpected delivery or target failure, status = 0x%02x\n", +			c2->error_data.status); +		retry = 1; +		break; +	case IOACCEL2_SERV_RESPONSE_TMF_COMPLETE: +		break; +	case IOACCEL2_SERV_RESPONSE_TMF_SUCCESS: +		break; +	case IOACCEL2_SERV_RESPONSE_TMF_REJECTED: +		dev_warn(&h->pdev->dev, "task management function rejected.\n"); +		retry = 1; +		break; +	case IOACCEL2_SERV_RESPONSE_TMF_WRONG_LUN: +		dev_warn(&h->pdev->dev, "task management function invalid LUN\n"); +		break; +	default: +		dev_warn(&h->pdev->dev, +			"%s: Unrecognized server response: 0x%02x\n", +			"HP SSD Smart Path", +			c2->error_data.serv_response); +		retry = 1; +		break; +	} + +	return retry;	/* retry on raid path? */ +} + +static void process_ioaccel2_completion(struct ctlr_info *h, +		struct CommandList *c, struct scsi_cmnd *cmd, +		struct hpsa_scsi_dev_t *dev) +{ +	struct io_accel2_cmd *c2 = &h->ioaccel2_cmd_pool[c->cmdindex]; +	int raid_retry = 0; + +	/* check for good status */ +	if (likely(c2->error_data.serv_response == 0 && +			c2->error_data.status == 0)) { +		cmd_free(h, c); +		cmd->scsi_done(cmd); +		return; +	} + +	/* Any RAID offload error results in retry which will use +	 * the normal I/O path so the controller can handle whatever's +	 * wrong. +	 */ +	if (is_logical_dev_addr_mode(dev->scsi3addr) && +		c2->error_data.serv_response == +			IOACCEL2_SERV_RESPONSE_FAILURE) { +		dev->offload_enabled = 0; +		h->drv_req_rescan = 1;	/* schedule controller for a rescan */ +		cmd->result = DID_SOFT_ERROR << 16; +		cmd_free(h, c); +		cmd->scsi_done(cmd); +		return; +	} +	raid_retry = handle_ioaccel_mode2_error(h, c, cmd, c2); +	/* If error found, disable Smart Path, schedule a rescan, +	 * and force a retry on the standard path. +	 */ +	if (raid_retry) { +		dev_warn(&h->pdev->dev, "%s: Retrying on standard path.\n", +			"HP SSD Smart Path"); +		dev->offload_enabled = 0; /* Disable Smart Path */ +		h->drv_req_rescan = 1;	  /* schedule controller rescan */ +		cmd->result = DID_SOFT_ERROR << 16; +	} +	cmd_free(h, c); +	cmd->scsi_done(cmd); +} +  static void complete_scsi_command(struct CommandList *cp)  {  	struct scsi_cmnd *cmd;  	struct ctlr_info *h;  	struct ErrorInfo *ei; +	struct hpsa_scsi_dev_t *dev;  	unsigned char sense_key;  	unsigned char asc;      /* additional sense code */ @@ -1208,13 +1693,19 @@ static void complete_scsi_command(struct CommandList *cp)  	ei = cp->err_info;  	cmd = (struct scsi_cmnd *) cp->scsi_cmd;  	h = cp->h; +	dev = cmd->device->hostdata;  	scsi_dma_unmap(cmd); /* undo the DMA mappings */ -	if (cp->Header.SGTotal > h->max_cmd_sg_entries) +	if ((cp->cmd_type == CMD_SCSI) && +		(cp->Header.SGTotal > h->max_cmd_sg_entries))  		hpsa_unmap_sg_chain_block(h, cp);  	cmd->result = (DID_OK << 16); 		/* host byte */  	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */ + +	if (cp->cmd_type == CMD_IOACCEL2) +		return process_ioaccel2_completion(h, cp, cmd, dev); +  	cmd->result |= ei->ScsiStatus;  	/* copy the sense data whether we need to or not. */ @@ -1234,6 +1725,32 @@ static void complete_scsi_command(struct CommandList *cp)  		return;  	} +	/* For I/O accelerator commands, copy over some fields to the normal +	 * CISS header used below for error handling. 
+	 */ +	if (cp->cmd_type == CMD_IOACCEL1) { +		struct io_accel1_cmd *c = &h->ioaccel_cmd_pool[cp->cmdindex]; +		cp->Header.SGList = cp->Header.SGTotal = scsi_sg_count(cmd); +		cp->Request.CDBLen = c->io_flags & IOACCEL1_IOFLAGS_CDBLEN_MASK; +		cp->Header.Tag.lower = c->Tag.lower; +		cp->Header.Tag.upper = c->Tag.upper; +		memcpy(cp->Header.LUN.LunAddrBytes, c->CISS_LUN, 8); +		memcpy(cp->Request.CDB, c->CDB, cp->Request.CDBLen); + +		/* Any RAID offload error results in retry which will use +		 * the normal I/O path so the controller can handle whatever's +		 * wrong. +		 */ +		if (is_logical_dev_addr_mode(dev->scsi3addr)) { +			if (ei->CommandStatus == CMD_IOACCEL_DISABLED) +				dev->offload_enabled = 0; +			cmd->result = DID_SOFT_ERROR << 16; +			cmd_free(h, cp); +			cmd->scsi_done(cmd); +			return; +		} +	} +  	/* an error has occurred */  	switch (ei->CommandStatus) { @@ -1248,10 +1765,8 @@ static void complete_scsi_command(struct CommandList *cp)  		}  		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) { -			if (check_for_unit_attention(h, cp)) { -				cmd->result = DID_SOFT_ERROR << 16; +			if (check_for_unit_attention(h, cp))  				break; -			}  			if (sense_key == ILLEGAL_REQUEST) {  				/*  				 * SCSI REPORT_LUNS is commonly unsupported on @@ -1289,7 +1804,7 @@ static void complete_scsi_command(struct CommandList *cp)  					"has check condition: aborted command: "  					"ASC: 0x%x, ASCQ: 0x%x\n",  					cp, asc, ascq); -				cmd->result = DID_SOFT_ERROR << 16; +				cmd->result |= DID_SOFT_ERROR << 16;  				break;  			}  			/* Must be some other type of check condition */ @@ -1398,6 +1913,14 @@ static void complete_scsi_command(struct CommandList *cp)  		cmd->result = DID_ERROR << 16;  		dev_warn(&h->pdev->dev, "Command unabortable\n");  		break; +	case CMD_IOACCEL_DISABLED: +		/* This only handles the direct pass-through case since RAID +		 * offload is handled above.  Just attempt a retry. +		 */ +		cmd->result = DID_SOFT_ERROR << 16; +		dev_warn(&h->pdev->dev, +				"cp %p had HP SSD Smart Path error\n", cp); +		break;  	default:  		cmd->result = DID_ERROR << 16;  		dev_warn(&h->pdev->dev, "cp %p returned unknown status %x\n", @@ -1447,6 +1970,7 @@ static int hpsa_map_one(struct pci_dev *pdev,  	cp->SG[0].Addr.upper =  	  (u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);  	cp->SG[0].Len = buflen; +	cp->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining */  	cp->Header.SGList = (u8) 1;   /* no. SGs contig in this cmd */  	cp->Header.SGTotal = (u16) 1; /* total sgs in this cmd list */  	return 0; @@ -1462,20 +1986,26 @@ static inline void hpsa_scsi_do_simple_cmd_core(struct ctlr_info *h,  	wait_for_completion(&wait);  } +static u32 lockup_detected(struct ctlr_info *h) +{ +	int cpu; +	u32 rc, *lockup_detected; + +	cpu = get_cpu(); +	lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); +	rc = *lockup_detected; +	put_cpu(); +	return rc; +} +  static void hpsa_scsi_do_simple_cmd_core_if_no_lockup(struct ctlr_info *h,  	struct CommandList *c)  { -	unsigned long flags; -  	/* If controller lockup detected, fake a hardware error. 
*/ -	spin_lock_irqsave(&h->lock, flags); -	if (unlikely(h->lockup_detected)) { -		spin_unlock_irqrestore(&h->lock, flags); +	if (unlikely(lockup_detected(h)))  		c->err_info->CommandStatus = CMD_HARDWARE_ERR; -	} else { -		spin_unlock_irqrestore(&h->lock, flags); +	else  		hpsa_scsi_do_simple_cmd_core(h, c); -	}  }  #define MAX_DRIVER_CMD_RETRIES 25 @@ -1499,17 +2029,37 @@ static void hpsa_scsi_do_simple_cmd_with_retry(struct ctlr_info *h,  	hpsa_pci_unmap(h->pdev, c, 1, data_direction);  } -static void hpsa_scsi_interpret_error(struct CommandList *cp) +static void hpsa_print_cmd(struct ctlr_info *h, char *txt, +				struct CommandList *c)  { -	struct ErrorInfo *ei; +	const u8 *cdb = c->Request.CDB; +	const u8 *lun = c->Header.LUN.LunAddrBytes; + +	dev_warn(&h->pdev->dev, "%s: LUN:%02x%02x%02x%02x%02x%02x%02x%02x" +	" CDB:%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", +		txt, lun[0], lun[1], lun[2], lun[3], +		lun[4], lun[5], lun[6], lun[7], +		cdb[0], cdb[1], cdb[2], cdb[3], +		cdb[4], cdb[5], cdb[6], cdb[7], +		cdb[8], cdb[9], cdb[10], cdb[11], +		cdb[12], cdb[13], cdb[14], cdb[15]); +} + +static void hpsa_scsi_interpret_error(struct ctlr_info *h, +			struct CommandList *cp) +{ +	const struct ErrorInfo *ei = cp->err_info;  	struct device *d = &cp->h->pdev->dev; +	const u8 *sd = ei->SenseInfo; -	ei = cp->err_info;  	switch (ei->CommandStatus) {  	case CMD_TARGET_STATUS: -		dev_warn(d, "cmd %p has completed with errors\n", cp); -		dev_warn(d, "cmd %p has SCSI Status = %x\n", cp, -				ei->ScsiStatus); +		hpsa_print_cmd(h, "SCSI status", cp); +		if (ei->ScsiStatus == SAM_STAT_CHECK_CONDITION) +			dev_warn(d, "SCSI Status = 02, Sense key = %02x, ASC = %02x, ASCQ = %02x\n", +				sd[2] & 0x0f, sd[12], sd[13]); +		else +			dev_warn(d, "SCSI Status = %02x\n", ei->ScsiStatus);  		if (ei->ScsiStatus == 0)  			dev_warn(d, "SCSI status is abnormally zero.  "  			"(probably indicates selection timeout " @@ -1517,54 +2067,51 @@ static void hpsa_scsi_interpret_error(struct CommandList *cp)  			"firmware bug, circa July, 2001.)\n");  		break;  	case CMD_DATA_UNDERRUN: /* let mid layer handle it. */ -			dev_info(d, "UNDERRUN\n");  		break;  	case CMD_DATA_OVERRUN: -		dev_warn(d, "cp %p has completed with data overrun\n", cp); +		hpsa_print_cmd(h, "overrun condition", cp);  		break;  	case CMD_INVALID: {  		/* controller unfortunately reports SCSI passthru's  		 * to non-existent targets as invalid commands.  		 
*/ -		dev_warn(d, "cp %p is reported invalid (probably means " -			"target device no longer present)\n", cp); -		/* print_bytes((unsigned char *) cp, sizeof(*cp), 1, 0); -		print_cmd(cp);  */ +		hpsa_print_cmd(h, "invalid command", cp); +		dev_warn(d, "probably means device no longer present\n");  		}  		break;  	case CMD_PROTOCOL_ERR: -		dev_warn(d, "cp %p has protocol error \n", cp); +		hpsa_print_cmd(h, "protocol error", cp);  		break;  	case CMD_HARDWARE_ERR: -		/* cmd->result = DID_ERROR << 16; */ -		dev_warn(d, "cp %p had hardware error\n", cp); +		hpsa_print_cmd(h, "hardware error", cp);  		break;  	case CMD_CONNECTION_LOST: -		dev_warn(d, "cp %p had connection lost\n", cp); +		hpsa_print_cmd(h, "connection lost", cp);  		break;  	case CMD_ABORTED: -		dev_warn(d, "cp %p was aborted\n", cp); +		hpsa_print_cmd(h, "aborted", cp);  		break;  	case CMD_ABORT_FAILED: -		dev_warn(d, "cp %p reports abort failed\n", cp); +		hpsa_print_cmd(h, "abort failed", cp);  		break;  	case CMD_UNSOLICITED_ABORT: -		dev_warn(d, "cp %p aborted due to an unsolicited abort\n", cp); +		hpsa_print_cmd(h, "unsolicited abort", cp);  		break;  	case CMD_TIMEOUT: -		dev_warn(d, "cp %p timed out\n", cp); +		hpsa_print_cmd(h, "timed out", cp);  		break;  	case CMD_UNABORTABLE: -		dev_warn(d, "Command unabortable\n"); +		hpsa_print_cmd(h, "unabortable", cp);  		break;  	default: -		dev_warn(d, "cp %p returned unknown status %x\n", cp, +		hpsa_print_cmd(h, "unknown status", cp); +		dev_warn(d, "Unknown command status %x\n",  				ei->CommandStatus);  	}  }  static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr, -			unsigned char page, unsigned char *buf, +			u16 page, unsigned char *buf,  			unsigned char bufsize)  {  	int rc = IO_OK; @@ -1586,7 +2133,7 @@ static int hpsa_scsi_do_inquiry(struct ctlr_info *h, unsigned char *scsi3addr,  	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE);  	ei = c->err_info;  	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { -		hpsa_scsi_interpret_error(c); +		hpsa_scsi_interpret_error(h, c);  		rc = -1;  	}  out: @@ -1594,7 +2141,39 @@ out:  	return rc;  } -static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr) +static int hpsa_bmic_ctrl_mode_sense(struct ctlr_info *h, +		unsigned char *scsi3addr, unsigned char page, +		struct bmic_controller_parameters *buf, size_t bufsize) +{ +	int rc = IO_OK; +	struct CommandList *c; +	struct ErrorInfo *ei; + +	c = cmd_special_alloc(h); + +	if (c == NULL) {			/* trouble... */ +		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); +		return -ENOMEM; +	} + +	if (fill_cmd(c, BMIC_SENSE_CONTROLLER_PARAMETERS, h, buf, bufsize, +			page, scsi3addr, TYPE_CMD)) { +		rc = -1; +		goto out; +	} +	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); +	ei = c->err_info; +	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { +		hpsa_scsi_interpret_error(h, c); +		rc = -1; +	} +out: +	cmd_special_free(h, c); +	return rc; +	} + +static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr, +	u8 reset_type)  {  	int rc = IO_OK;  	struct CommandList *c; @@ -1608,14 +2187,15 @@ static int hpsa_send_reset(struct ctlr_info *h, unsigned char *scsi3addr)  	}  	/* fill_cmd can't fail here, no data buffer to map. 
*/ -	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, -			NULL, 0, 0, scsi3addr, TYPE_MSG); +	(void) fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, +			scsi3addr, TYPE_MSG); +	c->Request.CDB[1] = reset_type; /* fill_cmd defaults to LUN reset */  	hpsa_scsi_do_simple_cmd_core(h, c);  	/* no unmap needed here because no data xfer. */  	ei = c->err_info;  	if (ei->CommandStatus != 0) { -		hpsa_scsi_interpret_error(c); +		hpsa_scsi_interpret_error(h, c);  		rc = -1;  	}  	cmd_special_free(h, c); @@ -1632,7 +2212,7 @@ static void hpsa_get_raid_level(struct ctlr_info *h,  	buf = kzalloc(64, GFP_KERNEL);  	if (!buf)  		return; -	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0xC1, buf, 64); +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0xC1, buf, 64);  	if (rc == 0)  		*raid_level = buf[8];  	if (*raid_level > RAID_UNKNOWN) @@ -1641,6 +2221,204 @@ static void hpsa_get_raid_level(struct ctlr_info *h,  	return;  } +#define HPSA_MAP_DEBUG +#ifdef HPSA_MAP_DEBUG +static void hpsa_debug_map_buff(struct ctlr_info *h, int rc, +				struct raid_map_data *map_buff) +{ +	struct raid_map_disk_data *dd = &map_buff->data[0]; +	int map, row, col; +	u16 map_cnt, row_cnt, disks_per_row; + +	if (rc != 0) +		return; + +	/* Show details only if debugging has been activated. */ +	if (h->raid_offload_debug < 2) +		return; + +	dev_info(&h->pdev->dev, "structure_size = %u\n", +				le32_to_cpu(map_buff->structure_size)); +	dev_info(&h->pdev->dev, "volume_blk_size = %u\n", +			le32_to_cpu(map_buff->volume_blk_size)); +	dev_info(&h->pdev->dev, "volume_blk_cnt = 0x%llx\n", +			le64_to_cpu(map_buff->volume_blk_cnt)); +	dev_info(&h->pdev->dev, "physicalBlockShift = %u\n", +			map_buff->phys_blk_shift); +	dev_info(&h->pdev->dev, "parity_rotation_shift = %u\n", +			map_buff->parity_rotation_shift); +	dev_info(&h->pdev->dev, "strip_size = %u\n", +			le16_to_cpu(map_buff->strip_size)); +	dev_info(&h->pdev->dev, "disk_starting_blk = 0x%llx\n", +			le64_to_cpu(map_buff->disk_starting_blk)); +	dev_info(&h->pdev->dev, "disk_blk_cnt = 0x%llx\n", +			le64_to_cpu(map_buff->disk_blk_cnt)); +	dev_info(&h->pdev->dev, "data_disks_per_row = %u\n", +			le16_to_cpu(map_buff->data_disks_per_row)); +	dev_info(&h->pdev->dev, "metadata_disks_per_row = %u\n", +			le16_to_cpu(map_buff->metadata_disks_per_row)); +	dev_info(&h->pdev->dev, "row_cnt = %u\n", +			le16_to_cpu(map_buff->row_cnt)); +	dev_info(&h->pdev->dev, "layout_map_count = %u\n", +			le16_to_cpu(map_buff->layout_map_count)); +	dev_info(&h->pdev->dev, "flags = %u\n", +			le16_to_cpu(map_buff->flags)); +	if (map_buff->flags & RAID_MAP_FLAG_ENCRYPT_ON) +		dev_info(&h->pdev->dev, "encryption = ON\n"); +	else +		dev_info(&h->pdev->dev, "encryption = OFF\n"); +	dev_info(&h->pdev->dev, "dekindex = %u\n", +			le16_to_cpu(map_buff->dekindex)); + +	map_cnt = le16_to_cpu(map_buff->layout_map_count); +	for (map = 0; map < map_cnt; map++) { +		dev_info(&h->pdev->dev, "Map%u:\n", map); +		row_cnt = le16_to_cpu(map_buff->row_cnt); +		for (row = 0; row < row_cnt; row++) { +			dev_info(&h->pdev->dev, "  Row%u:\n", row); +			disks_per_row = +				le16_to_cpu(map_buff->data_disks_per_row); +			for (col = 0; col < disks_per_row; col++, dd++) +				dev_info(&h->pdev->dev, +					"    D%02u: h=0x%04x xor=%u,%u\n", +					col, dd->ioaccel_handle, +					dd->xor_mult[0], dd->xor_mult[1]); +			disks_per_row = +				le16_to_cpu(map_buff->metadata_disks_per_row); +			for (col = 0; col < disks_per_row; col++, dd++) +				dev_info(&h->pdev->dev, +					"    M%02u: h=0x%04x xor=%u,%u\n", +					col, dd->ioaccel_handle, +				
	dd->xor_mult[0], dd->xor_mult[1]); +		} +	} +} +#else +static void hpsa_debug_map_buff(__attribute__((unused)) struct ctlr_info *h, +			__attribute__((unused)) int rc, +			__attribute__((unused)) struct raid_map_data *map_buff) +{ +} +#endif + +static int hpsa_get_raid_map(struct ctlr_info *h, +	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ +	int rc = 0; +	struct CommandList *c; +	struct ErrorInfo *ei; + +	c = cmd_special_alloc(h); +	if (c == NULL) { +		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n"); +		return -ENOMEM; +	} +	if (fill_cmd(c, HPSA_GET_RAID_MAP, h, &this_device->raid_map, +			sizeof(this_device->raid_map), 0, +			scsi3addr, TYPE_CMD)) { +		dev_warn(&h->pdev->dev, "Out of memory in hpsa_get_raid_map()\n"); +		cmd_special_free(h, c); +		return -ENOMEM; +	} +	hpsa_scsi_do_simple_cmd_with_retry(h, c, PCI_DMA_FROMDEVICE); +	ei = c->err_info; +	if (ei->CommandStatus != 0 && ei->CommandStatus != CMD_DATA_UNDERRUN) { +		hpsa_scsi_interpret_error(h, c); +		cmd_special_free(h, c); +		return -1; +	} +	cmd_special_free(h, c); + +	/* @todo in the future, dynamically allocate RAID map memory */ +	if (le32_to_cpu(this_device->raid_map.structure_size) > +				sizeof(this_device->raid_map)) { +		dev_warn(&h->pdev->dev, "RAID map size is too large!\n"); +		rc = -1; +	} +	hpsa_debug_map_buff(h, rc, &this_device->raid_map); +	return rc; +} + +static int hpsa_vpd_page_supported(struct ctlr_info *h, +	unsigned char scsi3addr[], u8 page) +{ +	int rc; +	int i; +	int pages; +	unsigned char *buf, bufsize; + +	buf = kzalloc(256, GFP_KERNEL); +	if (!buf) +		return 0; + +	/* Get the size of the page list first */ +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, +				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, +				buf, HPSA_VPD_HEADER_SZ); +	if (rc != 0) +		goto exit_unsupported; +	pages = buf[3]; +	if ((pages + HPSA_VPD_HEADER_SZ) <= 255) +		bufsize = pages + HPSA_VPD_HEADER_SZ; +	else +		bufsize = 255; + +	/* Get the whole VPD page list */ +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, +				VPD_PAGE | HPSA_VPD_SUPPORTED_PAGES, +				buf, bufsize); +	if (rc != 0) +		goto exit_unsupported; + +	pages = buf[3]; +	for (i = 1; i <= pages; i++) +		if (buf[3 + i] == page) +			goto exit_supported; +exit_unsupported: +	kfree(buf); +	return 0; +exit_supported: +	kfree(buf); +	return 1; +} + +static void hpsa_get_ioaccel_status(struct ctlr_info *h, +	unsigned char *scsi3addr, struct hpsa_scsi_dev_t *this_device) +{ +	int rc; +	unsigned char *buf; +	u8 ioaccel_status; + +	this_device->offload_config = 0; +	this_device->offload_enabled = 0; + +	buf = kzalloc(64, GFP_KERNEL); +	if (!buf) +		return; +	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_IOACCEL_STATUS)) +		goto out; +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, +			VPD_PAGE | HPSA_VPD_LV_IOACCEL_STATUS, buf, 64); +	if (rc != 0) +		goto out; + +#define IOACCEL_STATUS_BYTE 4 +#define OFFLOAD_CONFIGURED_BIT 0x01 +#define OFFLOAD_ENABLED_BIT 0x02 +	ioaccel_status = buf[IOACCEL_STATUS_BYTE]; +	this_device->offload_config = +		!!(ioaccel_status & OFFLOAD_CONFIGURED_BIT); +	if (this_device->offload_config) { +		this_device->offload_enabled = +			!!(ioaccel_status & OFFLOAD_ENABLED_BIT); +		if (hpsa_get_raid_map(h, scsi3addr, this_device)) +			this_device->offload_enabled = 0; +	} +out: +	kfree(buf); +	return; +} +  /* Get the device id from inquiry page 0x83 */  static int hpsa_get_device_id(struct ctlr_info *h, unsigned char *scsi3addr,  	unsigned char *device_id, int buflen) @@ -1652,8 +2430,8 @@ static int hpsa_get_device_id(struct ctlr_info 
*h, unsigned char *scsi3addr,  		buflen = 16;  	buf = kzalloc(64, GFP_KERNEL);  	if (!buf) -		return -1; -	rc = hpsa_scsi_do_inquiry(h, scsi3addr, 0x83, buf, 64); +		return -ENOMEM; +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | 0x83, buf, 64);  	if (rc == 0)  		memcpy(device_id, &buf[8], buflen);  	kfree(buf); @@ -1687,8 +2465,16 @@ static int hpsa_scsi_do_report_luns(struct ctlr_info *h, int logical,  	ei = c->err_info;  	if (ei->CommandStatus != 0 &&  	    ei->CommandStatus != CMD_DATA_UNDERRUN) { -		hpsa_scsi_interpret_error(c); +		hpsa_scsi_interpret_error(h, c);  		rc = -1; +	} else { +		if (buf->extended_response_flag != extended_response) { +			dev_err(&h->pdev->dev, +				"report luns requested format %u, got %u\n", +				extended_response, +				buf->extended_response_flag); +			rc = -1; +		}  	}  out:  	cmd_special_free(h, c); @@ -1716,6 +2502,111 @@ static inline void hpsa_set_bus_target_lun(struct hpsa_scsi_dev_t *device,  	device->lun = lun;  } +/* Use VPD inquiry to get details of volume status */ +static int hpsa_get_volume_status(struct ctlr_info *h, +					unsigned char scsi3addr[]) +{ +	int rc; +	int status; +	int size; +	unsigned char *buf; + +	buf = kzalloc(64, GFP_KERNEL); +	if (!buf) +		return HPSA_VPD_LV_STATUS_UNSUPPORTED; + +	/* Does controller have VPD for logical volume status? */ +	if (!hpsa_vpd_page_supported(h, scsi3addr, HPSA_VPD_LV_STATUS)) +		goto exit_failed; + +	/* Get the size of the VPD return buffer */ +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, +					buf, HPSA_VPD_HEADER_SZ); +	if (rc != 0) +		goto exit_failed; +	size = buf[3]; + +	/* Now get the whole VPD buffer */ +	rc = hpsa_scsi_do_inquiry(h, scsi3addr, VPD_PAGE | HPSA_VPD_LV_STATUS, +					buf, size + HPSA_VPD_HEADER_SZ); +	if (rc != 0) +		goto exit_failed; +	status = buf[4]; /* status byte */ + +	kfree(buf); +	return status; +exit_failed: +	kfree(buf); +	return HPSA_VPD_LV_STATUS_UNSUPPORTED; +} + +/* Determine offline status of a volume. + * Return either: + *  0 (not offline) + *  0xff (offline for unknown reasons) + *  # (integer code indicating one of several NOT READY states + *     describing why a volume is to be kept offline) + */ +static int hpsa_volume_offline(struct ctlr_info *h, +					unsigned char scsi3addr[]) +{ +	struct CommandList *c; +	unsigned char *sense, sense_key, asc, ascq; +	int ldstat = 0; +	u16 cmd_status; +	u8 scsi_status; +#define ASC_LUN_NOT_READY 0x04 +#define ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS 0x04 +#define ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ 0x02 + +	c = cmd_alloc(h); +	if (!c) +		return 0; +	(void) fill_cmd(c, TEST_UNIT_READY, h, NULL, 0, 0, scsi3addr, TYPE_CMD); +	hpsa_scsi_do_simple_cmd_core(h, c); +	sense = c->err_info->SenseInfo; +	sense_key = sense[2]; +	asc = sense[12]; +	ascq = sense[13]; +	cmd_status = c->err_info->CommandStatus; +	scsi_status = c->err_info->ScsiStatus; +	cmd_free(h, c); +	/* Is the volume 'not ready'? 
*/ +	if (cmd_status != CMD_TARGET_STATUS || +		scsi_status != SAM_STAT_CHECK_CONDITION || +		sense_key != NOT_READY || +		asc != ASC_LUN_NOT_READY)  { +		return 0; +	} + +	/* Determine the reason for not ready state */ +	ldstat = hpsa_get_volume_status(h, scsi3addr); + +	/* Keep volume offline in certain cases: */ +	switch (ldstat) { +	case HPSA_LV_UNDERGOING_ERASE: +	case HPSA_LV_UNDERGOING_RPI: +	case HPSA_LV_PENDING_RPI: +	case HPSA_LV_ENCRYPTED_NO_KEY: +	case HPSA_LV_PLAINTEXT_IN_ENCRYPT_ONLY_CONTROLLER: +	case HPSA_LV_UNDERGOING_ENCRYPTION: +	case HPSA_LV_UNDERGOING_ENCRYPTION_REKEYING: +	case HPSA_LV_ENCRYPTED_IN_NON_ENCRYPTED_CONTROLLER: +		return ldstat; +	case HPSA_VPD_LV_STATUS_UNSUPPORTED: +		/* If VPD status page isn't available, +		 * use ASC/ASCQ to determine state +		 */ +		if ((ascq == ASCQ_LUN_NOT_READY_FORMAT_IN_PROGRESS) || +			(ascq == ASCQ_LUN_NOT_READY_INITIALIZING_CMD_REQ)) +			return ldstat; +		break; +	default: +		break; +	} +	return 0; +} +  static int hpsa_update_device_info(struct ctlr_info *h,  	unsigned char scsi3addr[], struct hpsa_scsi_dev_t *this_device,  	unsigned char *is_OBDR_device) @@ -1754,10 +2645,22 @@ static int hpsa_update_device_info(struct ctlr_info *h,  		sizeof(this_device->device_id));  	if (this_device->devtype == TYPE_DISK && -		is_logical_dev_addr_mode(scsi3addr)) +		is_logical_dev_addr_mode(scsi3addr)) { +		int volume_offline; +  		hpsa_get_raid_level(h, scsi3addr, &this_device->raid_level); -	else +		if (h->fw_support & MISC_FW_RAID_OFFLOAD_BASIC) +			hpsa_get_ioaccel_status(h, scsi3addr, this_device); +		volume_offline = hpsa_volume_offline(h, scsi3addr); +		if (volume_offline < 0 || volume_offline > 0xff) +			volume_offline = HPSA_VPD_LV_STATUS_UNSUPPORTED; +		this_device->volume_offline = volume_offline & 0xff; +	} else {  		this_device->raid_level = RAID_UNKNOWN; +		this_device->offload_config = 0; +		this_device->offload_enabled = 0; +		this_device->volume_offline = 0; +	}  	if (is_OBDR_device) {  		/* See if this is a One-Button-Disaster-Recovery device @@ -1783,6 +2686,7 @@ static unsigned char *ext_target_model[] = {  	"MSA2312",  	"MSA2324",  	"P2000 G3 SAS", +	"MSA 2040 SAS",  	NULL,  }; @@ -1886,6 +2790,101 @@ static int add_ext_target_dev(struct ctlr_info *h,  }  /* + * Get address of physical disk used for an ioaccel2 mode command: + *	1. Extract ioaccel2 handle from the command. + *	2. Find a matching ioaccel2 handle from list of physical disks. + *	3. Return: + *		1 and set scsi3addr to address of matching physical + *		0 if no matching physical disk was found. + */ +static int hpsa_get_pdisk_of_ioaccel2(struct ctlr_info *h, +	struct CommandList *ioaccel2_cmd_to_abort, unsigned char *scsi3addr) +{ +	struct ReportExtendedLUNdata *physicals = NULL; +	int responsesize = 24;	/* size of physical extended response */ +	int extended = 2;	/* flag forces reporting 'other dev info'. 
*/ +	int reportsize = sizeof(*physicals) + HPSA_MAX_PHYS_LUN * responsesize; +	u32 nphysicals = 0;	/* number of reported physical devs */ +	int found = 0;		/* found match (1) or not (0) */ +	u32 find;		/* handle we need to match */ +	int i; +	struct scsi_cmnd *scmd;	/* scsi command within request being aborted */ +	struct hpsa_scsi_dev_t *d; /* device of request being aborted */ +	struct io_accel2_cmd *c2a; /* ioaccel2 command to abort */ +	u32 it_nexus;		/* 4 byte device handle for the ioaccel2 cmd */ +	u32 scsi_nexus;		/* 4 byte device handle for the ioaccel2 cmd */ + +	if (ioaccel2_cmd_to_abort->cmd_type != CMD_IOACCEL2) +		return 0; /* no match */ + +	/* point to the ioaccel2 device handle */ +	c2a = &h->ioaccel2_cmd_pool[ioaccel2_cmd_to_abort->cmdindex]; +	if (c2a == NULL) +		return 0; /* no match */ + +	scmd = (struct scsi_cmnd *) ioaccel2_cmd_to_abort->scsi_cmd; +	if (scmd == NULL) +		return 0; /* no match */ + +	d = scmd->device->hostdata; +	if (d == NULL) +		return 0; /* no match */ + +	it_nexus = cpu_to_le32((u32) d->ioaccel_handle); +	scsi_nexus = cpu_to_le32((u32) c2a->scsi_nexus); +	find = c2a->scsi_nexus; + +	if (h->raid_offload_debug > 0) +		dev_info(&h->pdev->dev, +			"%s: scsi_nexus:0x%08x device id: 0x%02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x %02x%02x%02x%02x\n", +			__func__, scsi_nexus, +			d->device_id[0], d->device_id[1], d->device_id[2], +			d->device_id[3], d->device_id[4], d->device_id[5], +			d->device_id[6], d->device_id[7], d->device_id[8], +			d->device_id[9], d->device_id[10], d->device_id[11], +			d->device_id[12], d->device_id[13], d->device_id[14], +			d->device_id[15]); + +	/* Get the list of physical devices */ +	physicals = kzalloc(reportsize, GFP_KERNEL); +	if (physicals == NULL) +		return 0; +	if (hpsa_scsi_do_report_phys_luns(h, (struct ReportLUNdata *) physicals, +		reportsize, extended)) { +		dev_err(&h->pdev->dev, +			"Can't lookup %s device handle: report physical LUNs failed.\n", +			"HP SSD Smart Path"); +		kfree(physicals); +		return 0; +	} +	nphysicals = be32_to_cpu(*((__be32 *)physicals->LUNListLength)) / +							responsesize; + +	/* find ioaccel2 handle in list of physicals: */ +	for (i = 0; i < nphysicals; i++) { +		struct ext_report_lun_entry *entry = &physicals->LUN[i]; + +		/* handle is in bytes 28-31 of each lun */ +		if (entry->ioaccel_handle != find) +			continue; /* didn't match */ +		found = 1; +		memcpy(scsi3addr, entry->lunid, 8); +		if (h->raid_offload_debug > 0) +			dev_info(&h->pdev->dev, +				"%s: Searched h=0x%08x, Found h=0x%08x, scsiaddr 0x%8phN\n", +				__func__, find, +				entry->ioaccel_handle, scsi3addr); +		break; /* found it */ +	} + +	kfree(physicals); +	if (found) +		return 1; +	else +		return 0; + +} +/*   * Do CISS_REPORT_PHYS and CISS_REPORT_LOG.  Data is returned in physdev,   * logdev.  The number of luns in physdev and logdev are returned in   * *nphysicals and *nlogicals, respectively. 
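Note on the VPD plumbing used throughout the hunks above: fill_cmd()'s page_code parameter is widened to u16 and a VPD_PAGE flag (1 << 8) is defined, so callers such as hpsa_get_raid_level(), hpsa_get_volume_status() and hpsa_get_ioaccel_status() request a SCSI VPD page via "VPD_PAGE | page" instead of a standard inquiry. The fill_cmd() hunk that decodes the flag lies outside this section; the sketch below shows the conventional SPC mapping of such a flag onto the 6-byte INQUIRY CDB (EVPD bit in byte 1, page number in byte 2). The helper name is illustrative, not the driver's own.

/* Hypothetical decode of a "VPD_PAGE | page" u16 page_code into an
 * INQUIRY CDB; assumes the SPC-defined layout, not hpsa's actual code.
 */
#define VPD_PAGE (1 << 8)

static void build_inquiry_cdb(u16 page_code, u8 buflen, u8 cdb[6])
{
	cdb[0] = 0x12;				/* INQUIRY opcode */
	if (page_code & VPD_PAGE) {
		cdb[1] = 0x01;			/* EVPD=1: return a VPD page */
		cdb[2] = page_code & 0xff;	/* VPD page number, e.g. 0x83 */
	} else {
		cdb[1] = 0;			/* standard inquiry data */
		cdb[2] = 0;
	}
	cdb[3] = 0;
	cdb[4] = buflen;			/* allocation length */
	cdb[5] = 0;				/* control byte */
}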
@@ -1893,14 +2892,26 @@ static int add_ext_target_dev(struct ctlr_info *h,   */  static int hpsa_gather_lun_info(struct ctlr_info *h,  	int reportlunsize, -	struct ReportLUNdata *physdev, u32 *nphysicals, +	struct ReportLUNdata *physdev, u32 *nphysicals, int *physical_mode,  	struct ReportLUNdata *logdev, u32 *nlogicals)  { -	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, 0)) { +	int physical_entry_size = 8; + +	*physical_mode = 0; + +	/* For I/O accelerator mode we need to read physical device handles */ +	if (h->transMethod & CFGTBL_Trans_io_accel1 || +		h->transMethod & CFGTBL_Trans_io_accel2) { +		*physical_mode = HPSA_REPORT_PHYS_EXTENDED; +		physical_entry_size = 24; +	} +	if (hpsa_scsi_do_report_phys_luns(h, physdev, reportlunsize, +							*physical_mode)) {  		dev_err(&h->pdev->dev, "report physical LUNs failed.\n");  		return -1;  	} -	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / 8; +	*nphysicals = be32_to_cpu(*((__be32 *)physdev->LUNListLength)) / +							physical_entry_size;  	if (*nphysicals > HPSA_MAX_PHYS_LUN) {  		dev_warn(&h->pdev->dev, "maximum physical LUNs (%d) exceeded."  			"  %d LUNs ignored.\n", HPSA_MAX_PHYS_LUN, @@ -1931,7 +2942,8 @@ static int hpsa_gather_lun_info(struct ctlr_info *h,  }  u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i, -	int nphysicals, int nlogicals, struct ReportLUNdata *physdev_list, +	int nphysicals, int nlogicals, +	struct ReportExtendedLUNdata *physdev_list,  	struct ReportLUNdata *logdev_list)  {  	/* Helper function, figure out where the LUN ID info is coming from @@ -1946,7 +2958,8 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,  		return RAID_CTLR_LUNID;  	if (i < logicals_start) -		return &physdev_list->LUN[i - (raid_ctlr_position == 0)][0]; +		return &physdev_list->LUN[i - +				(raid_ctlr_position == 0)].lunid[0];  	if (i < last_device)  		return &logdev_list->LUN[i - nphysicals - @@ -1955,6 +2968,29 @@ u8 *figure_lunaddrbytes(struct ctlr_info *h, int raid_ctlr_position, int i,  	return NULL;  } +static int hpsa_hba_mode_enabled(struct ctlr_info *h) +{ +	int rc; +	int hba_mode_enabled; +	struct bmic_controller_parameters *ctlr_params; +	ctlr_params = kzalloc(sizeof(struct bmic_controller_parameters), +		GFP_KERNEL); + +	if (!ctlr_params) +		return -ENOMEM; +	rc = hpsa_bmic_ctrl_mode_sense(h, RAID_CTLR_LUNID, 0, ctlr_params, +		sizeof(struct bmic_controller_parameters)); +	if (rc) { +		kfree(ctlr_params); +		return rc; +	} + +	hba_mode_enabled = +		((ctlr_params->nvram_flags & HBA_MODE_ENABLED_FLAG) != 0); +	kfree(ctlr_params); +	return hba_mode_enabled; +} +  static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  {  	/* the idea here is we could get notified @@ -1967,16 +3003,18 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  	 * tell which devices we already know about, vs. new  	 * devices, vs.  disappearing devices.  	 
*/ -	struct ReportLUNdata *physdev_list = NULL; +	struct ReportExtendedLUNdata *physdev_list = NULL;  	struct ReportLUNdata *logdev_list = NULL;  	u32 nphysicals = 0;  	u32 nlogicals = 0; +	int physical_mode = 0;  	u32 ndev_allocated = 0;  	struct hpsa_scsi_dev_t **currentsd, *this_device, *tmpdevice;  	int ncurrent = 0; -	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 8; +	int reportlunsize = sizeof(*physdev_list) + HPSA_MAX_PHYS_LUN * 24;  	int i, n_ext_target_devs, ndevs_to_allocate;  	int raid_ctlr_position; +	int rescan_hba_mode;  	DECLARE_BITMAP(lunzerobits, MAX_EXT_TARGETS);  	currentsd = kzalloc(sizeof(*currentsd) * HPSA_MAX_DEVICES, GFP_KERNEL); @@ -1990,8 +3028,20 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  	}  	memset(lunzerobits, 0, sizeof(lunzerobits)); -	if (hpsa_gather_lun_info(h, reportlunsize, physdev_list, &nphysicals, -			logdev_list, &nlogicals)) +	rescan_hba_mode = hpsa_hba_mode_enabled(h); +	if (rescan_hba_mode < 0) +		goto out; + +	if (!h->hba_mode_enabled && rescan_hba_mode) +		dev_warn(&h->pdev->dev, "HBA mode enabled\n"); +	else if (h->hba_mode_enabled && !rescan_hba_mode) +		dev_warn(&h->pdev->dev, "HBA mode disabled\n"); + +	h->hba_mode_enabled = rescan_hba_mode; + +	if (hpsa_gather_lun_info(h, reportlunsize, +			(struct ReportLUNdata *) physdev_list, &nphysicals, +			&physical_mode, logdev_list, &nlogicals))  		goto out;  	/* We might see up to the maximum number of logical and physical disks @@ -2018,7 +3068,7 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  		ndev_allocated++;  	} -	if (unlikely(is_scsi_rev_5(h))) +	if (is_scsi_rev_5(h))  		raid_ctlr_position = 0;  	else  		raid_ctlr_position = nphysicals + nlogicals; @@ -2072,9 +3122,28 @@ static void hpsa_update_scsi_devices(struct ctlr_info *h, int hostno)  				ncurrent++;  			break;  		case TYPE_DISK: -			if (i < nphysicals) +			if (h->hba_mode_enabled) { +				/* never use raid mapper in HBA mode */ +				this_device->offload_enabled = 0; +				ncurrent++;  				break; -			ncurrent++; +			} else if (h->acciopath_status) { +				if (i >= nphysicals) { +					ncurrent++; +					break; +				} +			} else { +				if (i < nphysicals) +					break; +				ncurrent++; +				break; +			} +			if (physical_mode == HPSA_REPORT_PHYS_EXTENDED) { +				memcpy(&this_device->ioaccel_handle, +					&lunaddrbytes[20], +					sizeof(this_device->ioaccel_handle)); +				ncurrent++; +			}  			break;  		case TYPE_TAPE:  		case TYPE_MEDIUM_CHANGER: @@ -2144,7 +3213,7 @@ static int hpsa_scatter_gather(struct ctlr_info *h,  		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);  		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);  		curr_sg->Len = len; -		curr_sg->Ext = 0;  /* we are not chaining */ +		curr_sg->Ext = (i < scsi_sg_count(cmd) - 1) ? 
0 : HPSA_SG_LAST;  		curr_sg++;  	} @@ -2168,6 +3237,726 @@ sglist_finished:  	return 0;  } +#define IO_ACCEL_INELIGIBLE (1) +static int fixup_ioaccel_cdb(u8 *cdb, int *cdb_len) +{ +	int is_write = 0; +	u32 block; +	u32 block_cnt; + +	/* Perform some CDB fixups if needed using 10 byte reads/writes only */ +	switch (cdb[0]) { +	case WRITE_6: +	case WRITE_12: +		is_write = 1; +	case READ_6: +	case READ_12: +		if (*cdb_len == 6) { +			block = (((u32) cdb[2]) << 8) | cdb[3]; +			block_cnt = cdb[4]; +		} else { +			BUG_ON(*cdb_len != 12); +			block = (((u32) cdb[2]) << 24) | +				(((u32) cdb[3]) << 16) | +				(((u32) cdb[4]) << 8) | +				cdb[5]; +			block_cnt = +				(((u32) cdb[6]) << 24) | +				(((u32) cdb[7]) << 16) | +				(((u32) cdb[8]) << 8) | +				cdb[9]; +		} +		if (block_cnt > 0xffff) +			return IO_ACCEL_INELIGIBLE; + +		cdb[0] = is_write ? WRITE_10 : READ_10; +		cdb[1] = 0; +		cdb[2] = (u8) (block >> 24); +		cdb[3] = (u8) (block >> 16); +		cdb[4] = (u8) (block >> 8); +		cdb[5] = (u8) (block); +		cdb[6] = 0; +		cdb[7] = (u8) (block_cnt >> 8); +		cdb[8] = (u8) (block_cnt); +		cdb[9] = 0; +		*cdb_len = 10; +		break; +	} +	return 0; +} + +static int hpsa_scsi_ioaccel1_queue_command(struct ctlr_info *h, +	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, +	u8 *scsi3addr) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[c->cmdindex]; +	unsigned int len; +	unsigned int total_len = 0; +	struct scatterlist *sg; +	u64 addr64; +	int use_sg, i; +	struct SGDescriptor *curr_sg; +	u32 control = IOACCEL1_CONTROL_SIMPLEQUEUE; + +	/* TODO: implement chaining support */ +	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) +		return IO_ACCEL_INELIGIBLE; + +	BUG_ON(cmd->cmd_len > IOACCEL1_IOFLAGS_CDBLEN_MAX); + +	if (fixup_ioaccel_cdb(cdb, &cdb_len)) +		return IO_ACCEL_INELIGIBLE; + +	c->cmd_type = CMD_IOACCEL1; + +	/* Adjust the DMA address to point to the accelerated command buffer */ +	c->busaddr = (u32) h->ioaccel_cmd_pool_dhandle + +				(c->cmdindex * sizeof(*cp)); +	BUG_ON(c->busaddr & 0x0000007F); + +	use_sg = scsi_dma_map(cmd); +	if (use_sg < 0) +		return use_sg; + +	if (use_sg) { +		curr_sg = cp->SG; +		scsi_for_each_sg(cmd, sg, use_sg, i) { +			addr64 = (u64) sg_dma_address(sg); +			len  = sg_dma_len(sg); +			total_len += len; +			curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL); +			curr_sg->Addr.upper = +				(u32) ((addr64 >> 32) & 0x0FFFFFFFFULL); +			curr_sg->Len = len; + +			if (i == (scsi_sg_count(cmd) - 1)) +				curr_sg->Ext = HPSA_SG_LAST; +			else +				curr_sg->Ext = 0;  /* we are not chaining */ +			curr_sg++; +		} + +		switch (cmd->sc_data_direction) { +		case DMA_TO_DEVICE: +			control |= IOACCEL1_CONTROL_DATA_OUT; +			break; +		case DMA_FROM_DEVICE: +			control |= IOACCEL1_CONTROL_DATA_IN; +			break; +		case DMA_NONE: +			control |= IOACCEL1_CONTROL_NODATAXFER; +			break; +		default: +			dev_err(&h->pdev->dev, "unknown data direction: %d\n", +			cmd->sc_data_direction); +			BUG(); +			break; +		} +	} else { +		control |= IOACCEL1_CONTROL_NODATAXFER; +	} + +	c->Header.SGList = use_sg; +	/* Fill out the command structure to submit */ +	cp->dev_handle = ioaccel_handle & 0xFFFF; +	cp->transfer_len = total_len; +	cp->io_flags = IOACCEL1_IOFLAGS_IO_REQ | +			(cdb_len & IOACCEL1_IOFLAGS_CDBLEN_MASK); +	cp->control = control; +	memcpy(cp->CDB, cdb, cdb_len); +	memcpy(cp->CISS_LUN, scsi3addr, 8); +	/* Tag was already set at init time. 
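 *
 * [Editor's note] A worked example of the fixup_ioaccel_cdb() rewrite
 * used just above (values are illustrative only):
 *
 *	u8 cdb[16] = { READ_6, 0x00, 0x12, 0x34, 0x08, 0x00 };
 *	int cdb_len = 6;
 *	fixup_ioaccel_cdb(cdb, &cdb_len);	// returns 0 (eligible)
 *
 * decodes block = 0x1234 and block_cnt = 8, then rewrites the CDB to
 * the 10-byte form { READ_10, 0, 0x00, 0x00, 0x12, 0x34, 0, 0x00,
 * 0x08, 0 } with cdb_len = 10, since the accelerated path issues
 * reads and writes only as 10-byte (or, for large LBAs, 16-byte) CDBs.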
*/ +	enqueue_cmd_and_start_io(h, c); +	return 0; +} + +/* + * Queue a command directly to a device behind the controller using the + * I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_direct_map(struct ctlr_info *h, +	struct CommandList *c) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; + +	return hpsa_scsi_ioaccel_queue_command(h, c, dev->ioaccel_handle, +		cmd->cmnd, cmd->cmd_len, dev->scsi3addr); +} + +/* + * Set encryption parameters for the ioaccel2 request + */ +static void set_encrypt_ioaccel2(struct ctlr_info *h, +	struct CommandList *c, struct io_accel2_cmd *cp) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; +	struct raid_map_data *map = &dev->raid_map; +	u64 first_block; + +	BUG_ON(!(dev->offload_config && dev->offload_enabled)); + +	/* Are we doing encryption on this device? */ +	if (!(map->flags & RAID_MAP_FLAG_ENCRYPT_ON)) +		return; +	/* Set the data encryption key index. */ +	cp->dekindex = map->dekindex; + +	/* Set the encryption enable flag, encoded into direction field. */ +	cp->direction |= IOACCEL2_DIRECTION_ENCRYPT_MASK; + +	/* Set encryption tweak values based on logical block address. +	 * If block size is 512, tweak value is LBA. +	 * For other block sizes, tweak is (LBA * block size) / 512. +	 */ +	switch (cmd->cmnd[0]) { +	/* Required? 6-byte cdbs eliminated by fixup_ioaccel_cdb */ +	case WRITE_6: +	case READ_6: +		if (map->volume_blk_size == 512) { +			cp->tweak_lower = +				(((u32) cmd->cmnd[2]) << 8) | +					cmd->cmnd[3]; +			cp->tweak_upper = 0; +		} else { +			first_block = +				(((u64) cmd->cmnd[2]) << 8) | +					cmd->cmnd[3]; +			first_block = (first_block * map->volume_blk_size)/512; +			cp->tweak_lower = (u32)first_block; +			cp->tweak_upper = (u32)(first_block >> 32); +		} +		break; +	case WRITE_10: +	case READ_10: +		if (map->volume_blk_size == 512) { +			cp->tweak_lower = +				(((u32) cmd->cmnd[2]) << 24) | +				(((u32) cmd->cmnd[3]) << 16) | +				(((u32) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +			cp->tweak_upper = 0; +		} else { +			first_block = +				(((u64) cmd->cmnd[2]) << 24) | +				(((u64) cmd->cmnd[3]) << 16) | +				(((u64) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +			first_block = (first_block * map->volume_blk_size)/512; +			cp->tweak_lower = (u32)first_block; +			cp->tweak_upper = (u32)(first_block >> 32); +		} +		break; +	/* Required? 
12-byte cdbs eliminated by fixup_ioaccel_cdb */ +	case WRITE_12: +	case READ_12: +		if (map->volume_blk_size == 512) { +			cp->tweak_lower = +				(((u32) cmd->cmnd[2]) << 24) | +				(((u32) cmd->cmnd[3]) << 16) | +				(((u32) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +			cp->tweak_upper = 0; +		} else { +			first_block = +				(((u64) cmd->cmnd[2]) << 24) | +				(((u64) cmd->cmnd[3]) << 16) | +				(((u64) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +			first_block = (first_block * map->volume_blk_size)/512; +			cp->tweak_lower = (u32)first_block; +			cp->tweak_upper = (u32)(first_block >> 32); +		} +		break; +	case WRITE_16: +	case READ_16: +		if (map->volume_blk_size == 512) { +			cp->tweak_lower = +				(((u32) cmd->cmnd[6]) << 24) | +				(((u32) cmd->cmnd[7]) << 16) | +				(((u32) cmd->cmnd[8]) << 8) | +					cmd->cmnd[9]; +			cp->tweak_upper = +				(((u32) cmd->cmnd[2]) << 24) | +				(((u32) cmd->cmnd[3]) << 16) | +				(((u32) cmd->cmnd[4]) << 8) | +					cmd->cmnd[5]; +		} else { +			first_block = +				(((u64) cmd->cmnd[2]) << 56) | +				(((u64) cmd->cmnd[3]) << 48) | +				(((u64) cmd->cmnd[4]) << 40) | +				(((u64) cmd->cmnd[5]) << 32) | +				(((u64) cmd->cmnd[6]) << 24) | +				(((u64) cmd->cmnd[7]) << 16) | +				(((u64) cmd->cmnd[8]) << 8) | +					cmd->cmnd[9]; +			first_block = (first_block * map->volume_blk_size)/512; +			cp->tweak_lower = (u32)first_block; +			cp->tweak_upper = (u32)(first_block >> 32); +		} +		break; +	default: +		dev_err(&h->pdev->dev, +			"ERROR: %s: IOACCEL request CDB size not supported for encryption\n", +			__func__); +		BUG(); +		break; +	} +} + +static int hpsa_scsi_ioaccel2_queue_command(struct ctlr_info *h, +	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, +	u8 *scsi3addr) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct io_accel2_cmd *cp = &h->ioaccel2_cmd_pool[c->cmdindex]; +	struct ioaccel2_sg_element *curr_sg; +	int use_sg, i; +	struct scatterlist *sg; +	u64 addr64; +	u32 len; +	u32 total_len = 0; + +	if (scsi_sg_count(cmd) > h->ioaccel_maxsg) +		return IO_ACCEL_INELIGIBLE; + +	if (fixup_ioaccel_cdb(cdb, &cdb_len)) +		return IO_ACCEL_INELIGIBLE; +	c->cmd_type = CMD_IOACCEL2; +	/* Adjust the DMA address to point to the accelerated command buffer */ +	c->busaddr = (u32) h->ioaccel2_cmd_pool_dhandle + +				(c->cmdindex * sizeof(*cp)); +	BUG_ON(c->busaddr & 0x0000007F); + +	memset(cp, 0, sizeof(*cp)); +	cp->IU_type = IOACCEL2_IU_TYPE; + +	use_sg = scsi_dma_map(cmd); +	if (use_sg < 0) +		return use_sg; + +	if (use_sg) { +		BUG_ON(use_sg > IOACCEL2_MAXSGENTRIES); +		curr_sg = cp->sg; +		scsi_for_each_sg(cmd, sg, use_sg, i) { +			addr64 = (u64) sg_dma_address(sg); +			len  = sg_dma_len(sg); +			total_len += len; +			curr_sg->address = cpu_to_le64(addr64); +			curr_sg->length = cpu_to_le32(len); +			curr_sg->reserved[0] = 0; +			curr_sg->reserved[1] = 0; +			curr_sg->reserved[2] = 0; +			curr_sg->chain_indicator = 0; +			curr_sg++; +		} + +		switch (cmd->sc_data_direction) { +		case DMA_TO_DEVICE: +			cp->direction &= ~IOACCEL2_DIRECTION_MASK; +			cp->direction |= IOACCEL2_DIR_DATA_OUT; +			break; +		case DMA_FROM_DEVICE: +			cp->direction &= ~IOACCEL2_DIRECTION_MASK; +			cp->direction |= IOACCEL2_DIR_DATA_IN; +			break; +		case DMA_NONE: +			cp->direction &= ~IOACCEL2_DIRECTION_MASK; +			cp->direction |= IOACCEL2_DIR_NO_DATA; +			break; +		default: +			dev_err(&h->pdev->dev, "unknown data direction: %d\n", +				cmd->sc_data_direction); +			BUG(); +			break; +		} +	} else { +		cp->direction &= ~IOACCEL2_DIRECTION_MASK; +		cp->direction |= 
IOACCEL2_DIR_NO_DATA; +	} + +	/* Set encryption parameters, if necessary */ +	set_encrypt_ioaccel2(h, c, cp); + +	cp->scsi_nexus = ioaccel_handle; +	cp->Tag = (c->cmdindex << DIRECT_LOOKUP_SHIFT) | +				DIRECT_LOOKUP_BIT; +	memcpy(cp->cdb, cdb, sizeof(cp->cdb)); + +	/* fill in sg elements */ +	cp->sg_count = (u8) use_sg; + +	cp->data_len = cpu_to_le32(total_len); +	cp->err_ptr = cpu_to_le64(c->busaddr + +			offsetof(struct io_accel2_cmd, error_data)); +	cp->err_len = cpu_to_le32((u32) sizeof(cp->error_data)); + +	enqueue_cmd_and_start_io(h, c); +	return 0; +} + +/* + * Queue a command to the correct I/O accelerator path. + */ +static int hpsa_scsi_ioaccel_queue_command(struct ctlr_info *h, +	struct CommandList *c, u32 ioaccel_handle, u8 *cdb, int cdb_len, +	u8 *scsi3addr) +{ +	if (h->transMethod & CFGTBL_Trans_io_accel1) +		return hpsa_scsi_ioaccel1_queue_command(h, c, ioaccel_handle, +						cdb, cdb_len, scsi3addr); +	else +		return hpsa_scsi_ioaccel2_queue_command(h, c, ioaccel_handle, +						cdb, cdb_len, scsi3addr); +} + +static void raid_map_helper(struct raid_map_data *map, +		int offload_to_mirror, u32 *map_index, u32 *current_group) +{ +	if (offload_to_mirror == 0)  { +		/* use physical disk in the first mirrored group. */ +		*map_index %= map->data_disks_per_row; +		return; +	} +	do { +		/* determine mirror group that *map_index indicates */ +		*current_group = *map_index / map->data_disks_per_row; +		if (offload_to_mirror == *current_group) +			continue; +		if (*current_group < (map->layout_map_count - 1)) { +			/* select map index from next group */ +			*map_index += map->data_disks_per_row; +			(*current_group)++; +		} else { +			/* select map index from first group */ +			*map_index %= map->data_disks_per_row; +			*current_group = 0; +		} +	} while (offload_to_mirror != *current_group); +} + +/* + * Attempt to perform offload RAID mapping for a logical volume I/O. 
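 *
 * [Editor's note] A worked example of the mapping arithmetic below,
 * with assumed illustrative parameters: RAID 0, data_disks_per_row = 3,
 * strip_size = 128, row_cnt = 1, parity_rotation_shift = 0, and a
 * READ_10 with first_block = 1000, block_cnt = 8:
 *
 *	blocks_per_row   = 3 * 128 = 384
 *	first_row        = 1000 / 384 = 2	(last_row is also 2)
 *	first_row_offset = 1000 - 2 * 384 = 232	(last_row_offset = 239)
 *	first_column     = 232 / 128 = 1	(last_column is also 1)
 *	map_index        = (0 * 3) + 1 = 1
 *	disk_block       = disk_starting_blk + 2 * 128 + (232 - 128)
 *	                 = disk_starting_blk + 360
 *
 * so the request is sent via the handle in map->data[1] as an 8-block
 * read at offset 360 from that drive's starting block; any request
 * spanning a row or strip boundary is bounced back with
 * IO_ACCEL_INELIGIBLE and takes the normal RAID path instead.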
+ */ +static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h, +	struct CommandList *c) +{ +	struct scsi_cmnd *cmd = c->scsi_cmd; +	struct hpsa_scsi_dev_t *dev = cmd->device->hostdata; +	struct raid_map_data *map = &dev->raid_map; +	struct raid_map_disk_data *dd = &map->data[0]; +	int is_write = 0; +	u32 map_index; +	u64 first_block, last_block; +	u32 block_cnt; +	u32 blocks_per_row; +	u64 first_row, last_row; +	u32 first_row_offset, last_row_offset; +	u32 first_column, last_column; +	u64 r0_first_row, r0_last_row; +	u32 r5or6_blocks_per_row; +	u64 r5or6_first_row, r5or6_last_row; +	u32 r5or6_first_row_offset, r5or6_last_row_offset; +	u32 r5or6_first_column, r5or6_last_column; +	u32 total_disks_per_row; +	u32 stripesize; +	u32 first_group, last_group, current_group; +	u32 map_row; +	u32 disk_handle; +	u64 disk_block; +	u32 disk_block_cnt; +	u8 cdb[16]; +	u8 cdb_len; +#if BITS_PER_LONG == 32 +	u64 tmpdiv; +#endif +	int offload_to_mirror; + +	BUG_ON(!(dev->offload_config && dev->offload_enabled)); + +	/* check for valid opcode, get LBA and block count */ +	switch (cmd->cmnd[0]) { +	case WRITE_6: +		is_write = 1; +	case READ_6: +		first_block = +			(((u64) cmd->cmnd[2]) << 8) | +			cmd->cmnd[3]; +		block_cnt = cmd->cmnd[4]; +		break; +	case WRITE_10: +		is_write = 1; +	case READ_10: +		first_block = +			(((u64) cmd->cmnd[2]) << 24) | +			(((u64) cmd->cmnd[3]) << 16) | +			(((u64) cmd->cmnd[4]) << 8) | +			cmd->cmnd[5]; +		block_cnt = +			(((u32) cmd->cmnd[7]) << 8) | +			cmd->cmnd[8]; +		break; +	case WRITE_12: +		is_write = 1; +	case READ_12: +		first_block = +			(((u64) cmd->cmnd[2]) << 24) | +			(((u64) cmd->cmnd[3]) << 16) | +			(((u64) cmd->cmnd[4]) << 8) | +			cmd->cmnd[5]; +		block_cnt = +			(((u32) cmd->cmnd[6]) << 24) | +			(((u32) cmd->cmnd[7]) << 16) | +			(((u32) cmd->cmnd[8]) << 8) | +		cmd->cmnd[9]; +		break; +	case WRITE_16: +		is_write = 1; +	case READ_16: +		first_block = +			(((u64) cmd->cmnd[2]) << 56) | +			(((u64) cmd->cmnd[3]) << 48) | +			(((u64) cmd->cmnd[4]) << 40) | +			(((u64) cmd->cmnd[5]) << 32) | +			(((u64) cmd->cmnd[6]) << 24) | +			(((u64) cmd->cmnd[7]) << 16) | +			(((u64) cmd->cmnd[8]) << 8) | +			cmd->cmnd[9]; +		block_cnt = +			(((u32) cmd->cmnd[10]) << 24) | +			(((u32) cmd->cmnd[11]) << 16) | +			(((u32) cmd->cmnd[12]) << 8) | +			cmd->cmnd[13]; +		break; +	default: +		return IO_ACCEL_INELIGIBLE; /* process via normal I/O path */ +	} +	BUG_ON(block_cnt == 0); +	last_block = first_block + block_cnt - 1; + +	/* check for write to non-RAID-0 */ +	if (is_write && dev->raid_level != 0) +		return IO_ACCEL_INELIGIBLE; + +	/* check for invalid block or wraparound */ +	if (last_block >= map->volume_blk_cnt || last_block < first_block) +		return IO_ACCEL_INELIGIBLE; + +	/* calculate stripe information for the request */ +	blocks_per_row = map->data_disks_per_row * map->strip_size; +#if BITS_PER_LONG == 32 +	tmpdiv = first_block; +	(void) do_div(tmpdiv, blocks_per_row); +	first_row = tmpdiv; +	tmpdiv = last_block; +	(void) do_div(tmpdiv, blocks_per_row); +	last_row = tmpdiv; +	first_row_offset = (u32) (first_block - (first_row * blocks_per_row)); +	last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); +	tmpdiv = first_row_offset; +	(void) do_div(tmpdiv,  map->strip_size); +	first_column = tmpdiv; +	tmpdiv = last_row_offset; +	(void) do_div(tmpdiv, map->strip_size); +	last_column = tmpdiv; +#else +	first_row = first_block / blocks_per_row; +	last_row = last_block / blocks_per_row; +	first_row_offset = (u32) (first_block - (first_row * 
blocks_per_row)); +	last_row_offset = (u32) (last_block - (last_row * blocks_per_row)); +	first_column = first_row_offset / map->strip_size; +	last_column = last_row_offset / map->strip_size; +#endif + +	/* if this isn't a single row/column then give to the controller */ +	if ((first_row != last_row) || (first_column != last_column)) +		return IO_ACCEL_INELIGIBLE; + +	/* proceeding with driver mapping */ +	total_disks_per_row = map->data_disks_per_row + +				map->metadata_disks_per_row; +	map_row = ((u32)(first_row >> map->parity_rotation_shift)) % +				map->row_cnt; +	map_index = (map_row * total_disks_per_row) + first_column; + +	switch (dev->raid_level) { +	case HPSA_RAID_0: +		break; /* nothing special to do */ +	case HPSA_RAID_1: +		/* Handles load balance across RAID 1 members. +		 * (2-drive R1 and R10 with even # of drives.) +		 * Appropriate for SSDs, not optimal for HDDs +		 */ +		BUG_ON(map->layout_map_count != 2); +		if (dev->offload_to_mirror) +			map_index += map->data_disks_per_row; +		dev->offload_to_mirror = !dev->offload_to_mirror; +		break; +	case HPSA_RAID_ADM: +		/* Handles N-way mirrors (R1-ADM) +		 * and R10 with # of drives divisible by 3. +		 */ +		BUG_ON(map->layout_map_count != 3); + +		offload_to_mirror = dev->offload_to_mirror; +		raid_map_helper(map, offload_to_mirror, +				&map_index, &current_group); +		/* set mirror group to use next time */ +		offload_to_mirror = +			(offload_to_mirror >= map->layout_map_count - 1) +			? 0 : offload_to_mirror + 1; +		/* FIXME: remove after debug/dev */ +		BUG_ON(offload_to_mirror >= map->layout_map_count); +		dev_warn(&h->pdev->dev, +			"DEBUG: Using physical disk map index %d from mirror group %d\n", +			map_index, offload_to_mirror); +		dev->offload_to_mirror = offload_to_mirror; +		/* Avoid direct use of dev->offload_to_mirror within this +		 * function since multiple threads might simultaneously +		 * increment it beyond the range of dev->layout_map_count - 1. 
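 *
 * [Editor's note] For a 3-way mirror (layout_map_count == 3) the
 * update above cycles offload_to_mirror through 0 -> 1 -> 2 -> 0,
 * and raid_map_helper() advances map_index by data_disks_per_row
 * for each group; e.g. with data_disks_per_row == 2 and a base
 * map_index of 0, successive requests land on map indices 0, 2
 * and 4, one physical drive per mirror group.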
+		 */ +		break; +	case HPSA_RAID_5: +	case HPSA_RAID_6: +		if (map->layout_map_count <= 1) +			break; + +		/* Verify first and last block are in same RAID group */ +		r5or6_blocks_per_row = +			map->strip_size * map->data_disks_per_row; +		BUG_ON(r5or6_blocks_per_row == 0); +		stripesize = r5or6_blocks_per_row * map->layout_map_count; +#if BITS_PER_LONG == 32 +		tmpdiv = first_block; +		first_group = do_div(tmpdiv, stripesize); +		tmpdiv = first_group; +		(void) do_div(tmpdiv, r5or6_blocks_per_row); +		first_group = tmpdiv; +		tmpdiv = last_block; +		last_group = do_div(tmpdiv, stripesize); +		tmpdiv = last_group; +		(void) do_div(tmpdiv, r5or6_blocks_per_row); +		last_group = tmpdiv; +#else +		first_group = (first_block % stripesize) / r5or6_blocks_per_row; +		last_group = (last_block % stripesize) / r5or6_blocks_per_row; +#endif +		if (first_group != last_group) +			return IO_ACCEL_INELIGIBLE; + +		/* Verify request is in a single row of RAID 5/6 */ +#if BITS_PER_LONG == 32 +		tmpdiv = first_block; +		(void) do_div(tmpdiv, stripesize); +		first_row = r5or6_first_row = r0_first_row = tmpdiv; +		tmpdiv = last_block; +		(void) do_div(tmpdiv, stripesize); +		r5or6_last_row = r0_last_row = tmpdiv; +#else +		first_row = r5or6_first_row = r0_first_row = +						first_block / stripesize; +		r5or6_last_row = r0_last_row = last_block / stripesize; +#endif +		if (r5or6_first_row != r5or6_last_row) +			return IO_ACCEL_INELIGIBLE; + + +		/* Verify request is in a single column */ +#if BITS_PER_LONG == 32 +		tmpdiv = first_block; +		first_row_offset = do_div(tmpdiv, stripesize); +		tmpdiv = first_row_offset; +		first_row_offset = (u32) do_div(tmpdiv, r5or6_blocks_per_row); +		r5or6_first_row_offset = first_row_offset; +		tmpdiv = last_block; +		r5or6_last_row_offset = do_div(tmpdiv, stripesize); +		tmpdiv = r5or6_last_row_offset; +		r5or6_last_row_offset = do_div(tmpdiv, r5or6_blocks_per_row); +		tmpdiv = r5or6_first_row_offset; +		(void) do_div(tmpdiv, map->strip_size); +		first_column = r5or6_first_column = tmpdiv; +		tmpdiv = r5or6_last_row_offset; +		(void) do_div(tmpdiv, map->strip_size); +		r5or6_last_column = tmpdiv; +#else +		first_row_offset = r5or6_first_row_offset = +			(u32)((first_block % stripesize) % +						r5or6_blocks_per_row); + +		r5or6_last_row_offset = +			(u32)((last_block % stripesize) % +						r5or6_blocks_per_row); + +		first_column = r5or6_first_column = +			r5or6_first_row_offset / map->strip_size; +		r5or6_last_column = +			r5or6_last_row_offset / map->strip_size; +#endif +		if (r5or6_first_column != r5or6_last_column) +			return IO_ACCEL_INELIGIBLE; + +		/* Request is eligible */ +		map_row = ((u32)(first_row >> map->parity_rotation_shift)) % +			map->row_cnt; + +		map_index = (first_group * +			(map->row_cnt * total_disks_per_row)) + +			(map_row * total_disks_per_row) + first_column; +		break; +	default: +		return IO_ACCEL_INELIGIBLE; +	} + +	disk_handle = dd[map_index].ioaccel_handle; +	disk_block = map->disk_starting_blk + (first_row * map->strip_size) + +			(first_row_offset - (first_column * map->strip_size)); +	disk_block_cnt = block_cnt; + +	/* handle differing logical/physical block sizes */ +	if (map->phys_blk_shift) { +		disk_block <<= map->phys_blk_shift; +		disk_block_cnt <<= map->phys_blk_shift; +	} +	BUG_ON(disk_block_cnt > 0xffff); + +	/* build the new CDB for the physical disk I/O */ +	if (disk_block > 0xffffffff) { +		cdb[0] = is_write ? 
WRITE_16 : READ_16; +		cdb[1] = 0; +		cdb[2] = (u8) (disk_block >> 56); +		cdb[3] = (u8) (disk_block >> 48); +		cdb[4] = (u8) (disk_block >> 40); +		cdb[5] = (u8) (disk_block >> 32); +		cdb[6] = (u8) (disk_block >> 24); +		cdb[7] = (u8) (disk_block >> 16); +		cdb[8] = (u8) (disk_block >> 8); +		cdb[9] = (u8) (disk_block); +		cdb[10] = (u8) (disk_block_cnt >> 24); +		cdb[11] = (u8) (disk_block_cnt >> 16); +		cdb[12] = (u8) (disk_block_cnt >> 8); +		cdb[13] = (u8) (disk_block_cnt); +		cdb[14] = 0; +		cdb[15] = 0; +		cdb_len = 16; +	} else { +		cdb[0] = is_write ? WRITE_10 : READ_10; +		cdb[1] = 0; +		cdb[2] = (u8) (disk_block >> 24); +		cdb[3] = (u8) (disk_block >> 16); +		cdb[4] = (u8) (disk_block >> 8); +		cdb[5] = (u8) (disk_block); +		cdb[6] = 0; +		cdb[7] = (u8) (disk_block_cnt >> 8); +		cdb[8] = (u8) (disk_block_cnt); +		cdb[9] = 0; +		cdb_len = 10; +	} +	return hpsa_scsi_ioaccel_queue_command(h, c, disk_handle, cdb, cdb_len, +						dev->scsi3addr); +}  static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  	void (*done)(struct scsi_cmnd *)) @@ -2176,7 +3965,7 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  	struct hpsa_scsi_dev_t *dev;  	unsigned char scsi3addr[8];  	struct CommandList *c; -	unsigned long flags; +	int rc = 0;  	/* Get the ptr to our adapter structure out of cmd->host. */  	h = sdev_to_hba(cmd->device); @@ -2188,14 +3977,11 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  	}  	memcpy(scsi3addr, dev->scsi3addr, sizeof(scsi3addr)); -	spin_lock_irqsave(&h->lock, flags); -	if (unlikely(h->lockup_detected)) { -		spin_unlock_irqrestore(&h->lock, flags); +	if (unlikely(lockup_detected(h))) {  		cmd->result = DID_ERROR << 16;  		done(cmd);  		return 0;  	} -	spin_unlock_irqrestore(&h->lock, flags);  	c = cmd_alloc(h);  	if (c == NULL) {			/* trouble... */  		dev_err(&h->pdev->dev, "cmd_alloc returned NULL!\n"); @@ -2211,6 +3997,32 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  	c->cmd_type = CMD_SCSI;  	c->scsi_cmd = cmd; + +	/* Call alternate submit routine for I/O accelerated commands. +	 * Retries always go down the normal I/O path. +	 */ +	if (likely(cmd->retries == 0 && +		cmd->request->cmd_type == REQ_TYPE_FS && +		h->acciopath_status)) { +		if (dev->offload_enabled) { +			rc = hpsa_scsi_ioaccel_raid_map(h, c); +			if (rc == 0) +				return 0; /* Sent on ioaccel path */ +			if (rc < 0) {   /* scsi_dma_map failed. */ +				cmd_free(h, c); +				return SCSI_MLQUEUE_HOST_BUSY; +			} +		} else if (dev->ioaccel_handle) { +			rc = hpsa_scsi_ioaccel_direct_map(h, c); +			if (rc == 0) +				return 0; /* Sent on direct map path */ +			if (rc < 0) {   /* scsi_dma_map failed. */ +				cmd_free(h, c); +				return SCSI_MLQUEUE_HOST_BUSY; +			} +		} +	} +  	c->Header.ReplyQueue = 0;  /* unused in simple mode */  	memcpy(&c->Header.LUN.LunAddrBytes[0], &scsi3addr[0], 8);  	c->Header.Tag.lower = (c->cmdindex << DIRECT_LOOKUP_SHIFT); @@ -2270,11 +4082,35 @@ static int hpsa_scsi_queue_command_lck(struct scsi_cmnd *cmd,  static DEF_SCSI_QCMD(hpsa_scsi_queue_command) +static int do_not_scan_if_controller_locked_up(struct ctlr_info *h) +{ +	unsigned long flags; + +	/* +	 * Don't let rescans be initiated on a controller known +	 * to be locked up.  If the controller locks up *during* +	 * a rescan, that thread is probably hosed, but at least +	 * we can prevent new rescan threads from piling up on a +	 * locked up controller. 
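 *
 * [Editor's note] hpsa_scan_start() below calls this helper twice:
 * once on entry, and again after it has marked a scan as in
 * progress, so a lockup detected while waiting for a previous scan
 * still leaves scan_finished set and wakes anyone sleeping on
 * scan_wait_queue.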
+	 */ +	if (unlikely(lockup_detected(h))) { +		spin_lock_irqsave(&h->scan_lock, flags); +		h->scan_finished = 1; +		wake_up_all(&h->scan_wait_queue); +		spin_unlock_irqrestore(&h->scan_lock, flags); +		return 1; +	} +	return 0; +} +  static void hpsa_scan_start(struct Scsi_Host *sh)  {  	struct ctlr_info *h = shost_to_hba(sh);  	unsigned long flags; +	if (do_not_scan_if_controller_locked_up(h)) +		return; +  	/* wait until any scan already in progress is finished. */  	while (1) {  		spin_lock_irqsave(&h->scan_lock, flags); @@ -2291,6 +4127,9 @@ static void hpsa_scan_start(struct Scsi_Host *sh)  	h->scan_finished = 0; /* mark scan as in progress */  	spin_unlock_irqrestore(&h->scan_lock, flags); +	if (do_not_scan_if_controller_locked_up(h)) +		return; +  	hpsa_update_scsi_devices(h, h->scsi_host->host_no);  	spin_lock_irqsave(&h->scan_lock, flags); @@ -2354,7 +4193,10 @@ static int hpsa_register_scsi(struct ctlr_info *h)  	sh->max_lun = HPSA_MAX_LUN;  	sh->max_id = HPSA_MAX_LUN;  	sh->can_queue = h->nr_cmds; -	sh->cmd_per_lun = h->nr_cmds; +	if (h->hba_mode_enabled) +		sh->cmd_per_lun = 7; +	else +		sh->cmd_per_lun = h->nr_cmds;  	sh->sg_tablesize = h->maxsgentries;  	h->scsi_host = sh;  	sh->hostdata[0] = (unsigned long) h; @@ -2380,7 +4222,7 @@ static int hpsa_register_scsi(struct ctlr_info *h)  static int wait_for_device_to_become_ready(struct ctlr_info *h,  	unsigned char lunaddr[])  { -	int rc = 0; +	int rc;  	int count = 0;  	int waittime = 1; /* seconds */  	struct CommandList *c; @@ -2400,6 +4242,7 @@ static int wait_for_device_to_become_ready(struct ctlr_info *h,  		 */  		msleep(1000 * waittime);  		count++; +		rc = 0; /* Device ready. */  		/* Increase wait time with each try, up to a point. */  		if (waittime < HPSA_MAX_WAIT_INTERVAL_SECS) @@ -2456,7 +4299,7 @@ static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd)  	dev_warn(&h->pdev->dev, "resetting device %d:%d:%d:%d\n",  		h->scsi_host->host_no, dev->bus, dev->target, dev->lun);  	/* send a reset to the SCSI LUN which the command was sent to */ -	rc = hpsa_send_reset(h, dev->scsi3addr); +	rc = hpsa_send_reset(h, dev->scsi3addr, HPSA_RESET_TYPE_LUN);  	if (rc == 0 && wait_for_device_to_become_ready(h, dev->scsi3addr) == 0)  		return SUCCESS; @@ -2479,12 +4322,36 @@ static void swizzle_abort_tag(u8 *tag)  	tag[7] = original_tag[4];  } +static void hpsa_get_tag(struct ctlr_info *h, +	struct CommandList *c, u32 *taglower, u32 *tagupper) +{ +	if (c->cmd_type == CMD_IOACCEL1) { +		struct io_accel1_cmd *cm1 = (struct io_accel1_cmd *) +			&h->ioaccel_cmd_pool[c->cmdindex]; +		*tagupper = cm1->Tag.upper; +		*taglower = cm1->Tag.lower; +		return; +	} +	if (c->cmd_type == CMD_IOACCEL2) { +		struct io_accel2_cmd *cm2 = (struct io_accel2_cmd *) +			&h->ioaccel2_cmd_pool[c->cmdindex]; +		/* upper tag not used in ioaccel2 mode */ +		memset(tagupper, 0, sizeof(*tagupper)); +		*taglower = cm2->Tag; +		return; +	} +	*tagupper = c->Header.Tag.upper; +	*taglower = c->Header.Tag.lower; +} + +  static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,  	struct CommandList *abort, int swizzle)  {  	int rc = IO_OK;  	struct CommandList *c;  	struct ErrorInfo *ei; +	u32 tagupper, taglower;  	c = cmd_special_alloc(h);  	if (c == NULL) {	/* trouble... 
*/ @@ -2498,8 +4365,9 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,  	if (swizzle)  		swizzle_abort_tag(&c->Request.CDB[4]);  	hpsa_scsi_do_simple_cmd_core(h, c); +	hpsa_get_tag(h, abort, &taglower, &tagupper);  	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: do_simple_cmd_core completed.\n", -		__func__, abort->Header.Tag.upper, abort->Header.Tag.lower); +		__func__, tagupper, taglower);  	/* no unmap needed here because no data xfer. */  	ei = c->err_info; @@ -2511,15 +4379,14 @@ static int hpsa_send_abort(struct ctlr_info *h, unsigned char *scsi3addr,  		break;  	default:  		dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: interpreting error.\n", -			__func__, abort->Header.Tag.upper, -			abort->Header.Tag.lower); -		hpsa_scsi_interpret_error(c); +			__func__, tagupper, taglower); +		hpsa_scsi_interpret_error(h, c);  		rc = -1;  		break;  	}  	cmd_special_free(h, c); -	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", __func__, -		abort->Header.Tag.upper, abort->Header.Tag.lower); +	dev_dbg(&h->pdev->dev, "%s: Tag:0x%08x:%08x: Finished.\n", +		__func__, tagupper, taglower);  	return rc;  } @@ -2573,6 +4440,83 @@ static struct CommandList *hpsa_find_cmd_in_queue_by_tag(struct ctlr_info *h,  	return NULL;  } +/* ioaccel2 path firmware cannot handle abort task requests. + * Change abort requests to physical target reset, and send to the + * address of the physical disk used for the ioaccel 2 command. + * Return 0 on success (IO_OK) + *	 -1 on failure + */ + +static int hpsa_send_reset_as_abort_ioaccel2(struct ctlr_info *h, +	unsigned char *scsi3addr, struct CommandList *abort) +{ +	int rc = IO_OK; +	struct scsi_cmnd *scmd; /* scsi command within request being aborted */ +	struct hpsa_scsi_dev_t *dev; /* device to which scsi cmd was sent */ +	unsigned char phys_scsi3addr[8]; /* addr of phys disk with volume */ +	unsigned char *psa = &phys_scsi3addr[0]; + +	/* Get a pointer to the hpsa logical device. */ +	scmd = (struct scsi_cmnd *) abort->scsi_cmd; +	dev = (struct hpsa_scsi_dev_t *)(scmd->device->hostdata); +	if (dev == NULL) { +		dev_warn(&h->pdev->dev, +			"Cannot abort: no device pointer for command.\n"); +			return -1; /* not abortable */ +	} + +	if (h->raid_offload_debug > 0) +		dev_info(&h->pdev->dev, +			"Reset as abort: Abort requested on C%d:B%d:T%d:L%d scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", +			h->scsi_host->host_no, dev->bus, dev->target, dev->lun, +			scsi3addr[0], scsi3addr[1], scsi3addr[2], scsi3addr[3], +			scsi3addr[4], scsi3addr[5], scsi3addr[6], scsi3addr[7]); + +	if (!dev->offload_enabled) { +		dev_warn(&h->pdev->dev, +			"Can't abort: device is not operating in HP SSD Smart Path mode.\n"); +		return -1; /* not abortable */ +	} + +	/* Incoming scsi3addr is logical addr. We need physical disk addr. 
*/ +	if (!hpsa_get_pdisk_of_ioaccel2(h, abort, psa)) { +		dev_warn(&h->pdev->dev, "Can't abort: Failed lookup of physical address.\n"); +		return -1; /* not abortable */ +	} + +	/* send the reset */ +	if (h->raid_offload_debug > 0) +		dev_info(&h->pdev->dev, +			"Reset as abort: Resetting physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", +			psa[0], psa[1], psa[2], psa[3], +			psa[4], psa[5], psa[6], psa[7]); +	rc = hpsa_send_reset(h, psa, HPSA_RESET_TYPE_TARGET); +	if (rc != 0) { +		dev_warn(&h->pdev->dev, +			"Reset as abort: Failed on physical device at scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", +			psa[0], psa[1], psa[2], psa[3], +			psa[4], psa[5], psa[6], psa[7]); +		return rc; /* failed to reset */ +	} + +	/* wait for device to recover */ +	if (wait_for_device_to_become_ready(h, psa) != 0) { +		dev_warn(&h->pdev->dev, +			"Reset as abort: Failed: Device never recovered from reset: 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", +			psa[0], psa[1], psa[2], psa[3], +			psa[4], psa[5], psa[6], psa[7]); +		return -1;  /* failed to recover */ +	} + +	/* device recovered */ +	dev_info(&h->pdev->dev, +		"Reset as abort: Device recovered from reset: scsi3addr 0x%02x%02x%02x%02x%02x%02x%02x%02x\n", +		psa[0], psa[1], psa[2], psa[3], +		psa[4], psa[5], psa[6], psa[7]); + +	return rc; /* success */ +} +  /* Some Smart Arrays need the abort tag swizzled, and some don't.  It's hard to   * tell which kind we're dealing with, so we send the abort both ways.  There   * shouldn't be any collisions between swizzled and unswizzled tags due to the @@ -2586,6 +4530,14 @@ static int hpsa_send_abort_both_ways(struct ctlr_info *h,  	struct CommandList *c;  	int rc = 0, rc2 = 0; +	/* I/O accelerator mode 2 commands should be aborted via the +	 * accelerated path, since the RAID path is unaware of these commands, +	 * but the underlying firmware can't handle an abort TMF. +	 * Change the abort to a physical device reset. +	 */ +	if (abort->cmd_type == CMD_IOACCEL2) +		return hpsa_send_reset_as_abort_ioaccel2(h, scsi3addr, abort); +  	/* we do not expect to find the swizzled tag in our queue, but  	 * check anyway just to be sure the assumptions which make this  	 * the case haven't become wrong. @@ -2624,6 +4576,7 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)  	struct scsi_cmnd *as;	/* ptr to scsi cmd inside aborted command. */  	char msg[256];		/* For debug messaging. 
*/  	int ml = 0; +	u32 tagupper, taglower;  	/* Find the controller of the command to be aborted */  	h = sdev_to_hba(sc->device); @@ -2656,9 +4609,8 @@ static int hpsa_eh_abort_handler(struct scsi_cmnd *sc)  				msg);  		return FAILED;  	} - -	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", -		abort->Header.Tag.upper, abort->Header.Tag.lower); +	hpsa_get_tag(h, abort, &taglower, &tagupper); +	ml += sprintf(msg+ml, "Tag:0x%08x:%08x ", tagupper, taglower);  	as  = (struct scsi_cmnd *) abort->scsi_cmd;  	if (as != NULL)  		ml += sprintf(msg+ml, "Command:0x%x SN:0x%lx ", @@ -2784,6 +4736,7 @@ static struct CommandList *cmd_special_alloc(struct ctlr_info *h)  		return NULL;  	memset(c, 0, sizeof(*c)); +	c->cmd_type = CMD_SCSI;  	c->cmdindex = -1;  	c->err_info = pci_alloc_consistent(h->pdev, sizeof(*c->err_info), @@ -2997,7 +4950,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)  		buff = kmalloc(iocommand.buf_size, GFP_KERNEL);  		if (buff == NULL)  			return -EFAULT; -		if (iocommand.Request.Type.Direction == XFER_WRITE) { +		if (iocommand.Request.Type.Direction & XFER_WRITE) {  			/* Copy the data into the buffer we created */  			if (copy_from_user(buff, iocommand.buf,  				iocommand.buf_size)) { @@ -3046,7 +4999,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)  		c->SG[0].Addr.lower = temp64.val32.lower;  		c->SG[0].Addr.upper = temp64.val32.upper;  		c->SG[0].Len = iocommand.buf_size; -		c->SG[0].Ext = 0; /* we are not chaining*/ +		c->SG[0].Ext = HPSA_SG_LAST; /* we are not chaining*/  	}  	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c);  	if (iocommand.buf_size > 0) @@ -3060,7 +5013,7 @@ static int hpsa_passthru_ioctl(struct ctlr_info *h, void __user *argp)  		rc = -EFAULT;  		goto out;  	} -	if (iocommand.Request.Type.Direction == XFER_READ && +	if ((iocommand.Request.Type.Direction & XFER_READ) &&  		iocommand.buf_size > 0) {  		/* Copy the data out of the buffer we created */  		if (copy_to_user(iocommand.buf, buff, iocommand.buf_size)) { @@ -3137,7 +5090,7 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)  			status = -ENOMEM;  			goto cleanup1;  		} -		if (ioc->Request.Type.Direction == XFER_WRITE) { +		if (ioc->Request.Type.Direction & XFER_WRITE) {  			if (copy_from_user(buff[sg_used], data_ptr, sz)) {  				status = -ENOMEM;  				goto cleanup1; @@ -3171,13 +5124,12 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)  				hpsa_pci_unmap(h->pdev, c, i,  					PCI_DMA_BIDIRECTIONAL);  				status = -ENOMEM; -				goto cleanup1; +				goto cleanup0;  			}  			c->SG[i].Addr.lower = temp64.val32.lower;  			c->SG[i].Addr.upper = temp64.val32.upper;  			c->SG[i].Len = buff_size[i]; -			/* we are not chaining */ -			c->SG[i].Ext = 0; +			c->SG[i].Ext = i < sg_used - 1 ? 
0 : HPSA_SG_LAST;  		}  	}  	hpsa_scsi_do_simple_cmd_core_if_no_lockup(h, c); @@ -3187,24 +5139,23 @@ static int hpsa_big_passthru_ioctl(struct ctlr_info *h, void __user *argp)  	/* Copy the error information out */  	memcpy(&ioc->error_info, c->err_info, sizeof(ioc->error_info));  	if (copy_to_user(argp, ioc, sizeof(*ioc))) { -		cmd_special_free(h, c);  		status = -EFAULT; -		goto cleanup1; +		goto cleanup0;  	} -	if (ioc->Request.Type.Direction == XFER_READ && ioc->buf_size > 0) { +	if ((ioc->Request.Type.Direction & XFER_READ) && ioc->buf_size > 0) {  		/* Copy the data out of the buffer we created */  		BYTE __user *ptr = ioc->buf;  		for (i = 0; i < sg_used; i++) {  			if (copy_to_user(ptr, buff[i], buff_size[i])) { -				cmd_special_free(h, c);  				status = -EFAULT; -				goto cleanup1; +				goto cleanup0;  			}  			ptr += buff_size[i];  		}  	} -	cmd_special_free(h, c);  	status = 0; +cleanup0: +	cmd_special_free(h, c);  cleanup1:  	if (buff) {  		for (i = 0; i < sg_used; i++) @@ -3223,6 +5174,36 @@ static void check_ioctl_unit_attention(struct ctlr_info *h,  			c->err_info->ScsiStatus != SAM_STAT_CHECK_CONDITION)  		(void) check_for_unit_attention(h, c);  } + +static int increment_passthru_count(struct ctlr_info *h) +{ +	unsigned long flags; + +	spin_lock_irqsave(&h->passthru_count_lock, flags); +	if (h->passthru_count >= HPSA_MAX_CONCURRENT_PASSTHRUS) { +		spin_unlock_irqrestore(&h->passthru_count_lock, flags); +		return -1; +	} +	h->passthru_count++; +	spin_unlock_irqrestore(&h->passthru_count_lock, flags); +	return 0; +} + +static void decrement_passthru_count(struct ctlr_info *h) +{ +	unsigned long flags; + +	spin_lock_irqsave(&h->passthru_count_lock, flags); +	if (h->passthru_count <= 0) { +		spin_unlock_irqrestore(&h->passthru_count_lock, flags); +		/* not expecting to get here. 
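 *
 * [Editor's note] Reaching this branch would mean the increment and
 * decrement calls have become unbalanced, i.e. some ioctl path
 * returned without its matching decrement; the count is left
 * untouched here so it cannot wrap below zero.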
*/ +		dev_warn(&h->pdev->dev, "Bug detected, passthru_count seems to be incorrect.\n"); +		return; +	} +	h->passthru_count--; +	spin_unlock_irqrestore(&h->passthru_count_lock, flags); +} +  /*   * ioctl   */ @@ -3230,6 +5211,7 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)  {  	struct ctlr_info *h;  	void __user *argp = (void __user *)arg; +	int rc;  	h = sdev_to_hba(dev); @@ -3244,9 +5226,17 @@ static int hpsa_ioctl(struct scsi_device *dev, int cmd, void *arg)  	case CCISS_GETDRIVVER:  		return hpsa_getdrivver_ioctl(h, argp);  	case CCISS_PASSTHRU: -		return hpsa_passthru_ioctl(h, argp); +		if (increment_passthru_count(h)) +			return -EAGAIN; +		rc = hpsa_passthru_ioctl(h, argp); +		decrement_passthru_count(h); +		return rc;  	case CCISS_BIG_PASSTHRU: -		return hpsa_big_passthru_ioctl(h, argp); +		if (increment_passthru_count(h)) +			return -EAGAIN; +		rc = hpsa_big_passthru_ioctl(h, argp); +		decrement_passthru_count(h); +		return rc;  	default:  		return -ENOTTY;  	} @@ -3274,7 +5264,7 @@ static int hpsa_send_host_reset(struct ctlr_info *h, unsigned char *scsi3addr,  }  static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h, -	void *buff, size_t size, u8 page_code, unsigned char *scsi3addr, +	void *buff, size_t size, u16 page_code, unsigned char *scsi3addr,  	int cmd_type)  {  	int pci_dir = XFER_NONE; @@ -3297,9 +5287,9 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,  		switch (cmd) {  		case HPSA_INQUIRY:  			/* are we trying to read a vital product page */ -			if (page_code != 0) { +			if (page_code & VPD_PAGE) {  				c->Request.CDB[1] = 0x01; -				c->Request.CDB[2] = page_code; +				c->Request.CDB[2] = (page_code & 0xff);  			}  			c->Request.CDBLen = 6;  			c->Request.Type.Attribute = ATTR_SIMPLE; @@ -3339,6 +5329,28 @@ static int fill_cmd(struct CommandList *c, u8 cmd, struct ctlr_info *h,  			c->Request.Type.Direction = XFER_NONE;  			c->Request.Timeout = 0;  			break; +		case HPSA_GET_RAID_MAP: +			c->Request.CDBLen = 12; +			c->Request.Type.Attribute = ATTR_SIMPLE; +			c->Request.Type.Direction = XFER_READ; +			c->Request.Timeout = 0; +			c->Request.CDB[0] = HPSA_CISS_READ; +			c->Request.CDB[1] = cmd; +			c->Request.CDB[6] = (size >> 24) & 0xFF; /* MSB */ +			c->Request.CDB[7] = (size >> 16) & 0xFF; +			c->Request.CDB[8] = (size >> 8) & 0xFF; +			c->Request.CDB[9] = size & 0xFF; +			break; +		case BMIC_SENSE_CONTROLLER_PARAMETERS: +			c->Request.CDBLen = 10; +			c->Request.Type.Attribute = ATTR_SIMPLE; +			c->Request.Type.Direction = XFER_READ; +			c->Request.Timeout = 0; +			c->Request.CDB[0] = BMIC_READ; +			c->Request.CDB[6] = BMIC_SENSE_CONTROLLER_PARAMETERS; +			c->Request.CDB[7] = (size >> 16) & 0xFF; +			c->Request.CDB[8] = (size >> 8) & 0xFF; +			break;  		default:  			dev_warn(&h->pdev->dev, "unknown command 0x%c\n", cmd);  			BUG(); @@ -3434,20 +5446,21 @@ static void __iomem *remap_pci_mem(ulong base, ulong size)  /* Takes cmds off the submission queue and sends them to the hardware,   * then puts them on the queue of cmds waiting for completion. 
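 *
 * [Editor's note] start_io() now drops and retakes h->lock around
 * each submit_command() so the hardware is never poked under the
 * spinlock, which is why it takes the caller's flags by pointer.
 * Callers that do not already hold the lock use the
 * lock_and_start_io() wrapper added below, which does:
 *
 *	spin_lock_irqsave(&h->lock, flags);
 *	start_io(h, &flags);
 *	spin_unlock_irqrestore(&h->lock, flags);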
+ * Assumes h->lock is held   */ -static void start_io(struct ctlr_info *h) +static void start_io(struct ctlr_info *h, unsigned long *flags)  {  	struct CommandList *c; -	unsigned long flags; -	spin_lock_irqsave(&h->lock, flags);  	while (!list_empty(&h->reqQ)) {  		c = list_entry(h->reqQ.next, struct CommandList, list);  		/* can't do anything if fifo is full */  		if ((h->access.fifo_full(h))) { +			h->fifo_recently_full = 1;  			dev_warn(&h->pdev->dev, "fifo full\n");  			break;  		} +		h->fifo_recently_full = 0;  		/* Get the first entry from the Request Q */  		removeQ(c); @@ -3461,14 +5474,20 @@ static void start_io(struct ctlr_info *h)  		 * condition.  		 */  		h->commands_outstanding++; -		if (h->commands_outstanding > h->max_outstanding) -			h->max_outstanding = h->commands_outstanding;  		/* Tell the controller execute command */ -		spin_unlock_irqrestore(&h->lock, flags); +		spin_unlock_irqrestore(&h->lock, *flags);  		h->access.submit_command(h, c); -		spin_lock_irqsave(&h->lock, flags); +		spin_lock_irqsave(&h->lock, *flags);  	} +} + +static void lock_and_start_io(struct ctlr_info *h) +{ +	unsigned long flags; + +	spin_lock_irqsave(&h->lock, flags); +	start_io(h, &flags);  	spin_unlock_irqrestore(&h->lock, flags);  } @@ -3501,15 +5520,42 @@ static inline int bad_tag(struct ctlr_info *h, u32 tag_index,  static inline void finish_cmd(struct CommandList *c)  {  	unsigned long flags; +	int io_may_be_stalled = 0; +	struct ctlr_info *h = c->h; -	spin_lock_irqsave(&c->h->lock, flags); +	spin_lock_irqsave(&h->lock, flags);  	removeQ(c); -	spin_unlock_irqrestore(&c->h->lock, flags); + +	/* +	 * Check for possibly stalled i/o. +	 * +	 * If a fifo_full condition is encountered, requests will back up +	 * in h->reqQ.  This queue is only emptied out by start_io which is +	 * only called when a new i/o request comes in.  If no i/o's are +	 * forthcoming, the i/o's in h->reqQ can get stuck.  So we call +	 * start_io from here if we detect such a danger. +	 * +	 * Normally, we shouldn't hit this case, but pounding on the +	 * CCISS_PASSTHRU ioctl can provoke it.  Only call start_io if +	 * commands_outstanding is low.  We want to avoid calling +	 * start_io from in here as much as possible, and esp. don't +	 * want to get in a cycle where we call start_io every time +	 * through here. +	 */ +	if (unlikely(h->fifo_recently_full) && +		h->commands_outstanding < 5) +		io_may_be_stalled = 1; + +	spin_unlock_irqrestore(&h->lock, flags); +  	dial_up_lockup_detection_on_fw_flash_complete(c->h, c); -	if (likely(c->cmd_type == CMD_SCSI)) +	if (likely(c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_SCSI +			|| c->cmd_type == CMD_IOACCEL2))  		complete_scsi_command(c);  	else if (c->cmd_type == CMD_IOCTL_PEND)  		complete(c->waiting); +	if (unlikely(io_may_be_stalled)) +		lock_and_start_io(h);  }  static inline u32 hpsa_tag_contains_index(u32 tag) @@ -3785,6 +5831,13 @@ static int hpsa_controller_hard_reset(struct pci_dev *pdev,  		 */  		dev_info(&pdev->dev, "using doorbell to reset controller\n");  		writel(use_doorbell, vaddr + SA5_DOORBELL); + +		/* PMC hardware guys tell us we need a 10 second delay after +		 * doorbell reset and before any attempt to talk to the board +		 * at all to ensure that this actually works and doesn't fall +		 * over in some weird corner cases. 
+		 */ +		msleep(10000);  	} else { /* Try to do it the PCI power state way */  		/* Quoting from the Open CISS Specification: "The Power @@ -3981,16 +6034,6 @@ static int hpsa_kdump_hard_reset_controller(struct pci_dev *pdev)  	   need a little pause here */  	msleep(HPSA_POST_RESET_PAUSE_MSECS); -	/* Wait for board to become not ready, then ready. */ -	dev_info(&pdev->dev, "Waiting for board to reset.\n"); -	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY); -	if (rc) { -		dev_warn(&pdev->dev, -			"failed waiting for board to reset." -			" Will try soft reset.\n"); -		rc = -ENOTSUPP; /* Not expected, but try soft reset later */ -		goto unmap_cfgtable; -	}  	rc = hpsa_wait_for_board_state(pdev, vaddr, BOARD_READY);  	if (rc) {  		dev_warn(&pdev->dev, @@ -4114,21 +6157,26 @@ static void hpsa_interrupt_mode(struct ctlr_info *h)  		goto default_int_mode;  	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {  		dev_info(&h->pdev->dev, "MSIX\n"); +		h->msix_vector = MAX_REPLY_QUEUES; +		if (h->msix_vector > num_online_cpus()) +			h->msix_vector = num_online_cpus();  		err = pci_enable_msix(h->pdev, hpsa_msix_entries, -						MAX_REPLY_QUEUES); -		if (!err) { -			for (i = 0; i < MAX_REPLY_QUEUES; i++) -				h->intr[i] = hpsa_msix_entries[i].vector; -			h->msix_vector = 1; -			return; -		} +				      h->msix_vector);  		if (err > 0) {  			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "  			       "available\n", err); -			goto default_int_mode; +			h->msix_vector = err; +			err = pci_enable_msix(h->pdev, hpsa_msix_entries, +					      h->msix_vector); +		} +		if (!err) { +			for (i = 0; i < h->msix_vector; i++) +				h->intr[i] = hpsa_msix_entries[i].vector; +			return;  		} else {  			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",  			       err); +			h->msix_vector = 0;  			goto default_int_mode;  		}  	} @@ -4281,6 +6329,7 @@ static void hpsa_find_board_params(struct ctlr_info *h)  	hpsa_get_max_perf_mode_cmds(h);  	h->nr_cmds = h->max_commands - 4; /* Allow room for some ioctls */  	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements)); +	h->fw_support = readl(&(h->cfgtable->misc_fw_support));  	/*  	 * Limit in-command s/g elements to 32 save dma'able memory.  	 
* Howvever spec says if 0, use 31 @@ -4297,6 +6346,10 @@ static void hpsa_find_board_params(struct ctlr_info *h)  	/* Find out what task management functions are supported and cache */  	h->TMFSupportFlags = readl(&(h->cfgtable->TMFSupportFlags)); +	if (!(HPSATMF_PHYS_TASK_ABORT & h->TMFSupportFlags)) +		dev_warn(&h->pdev->dev, "Physical aborts not supported\n"); +	if (!(HPSATMF_LOG_TASK_ABORT & h->TMFSupportFlags)) +		dev_warn(&h->pdev->dev, "Logical aborts not supported\n");  }  static inline bool hpsa_CISS_signature_present(struct ctlr_info *h) @@ -4308,16 +6361,17 @@ static inline bool hpsa_CISS_signature_present(struct ctlr_info *h)  	return true;  } -/* Need to enable prefetch in the SCSI core for 6400 in x86 */ -static inline void hpsa_enable_scsi_prefetch(struct ctlr_info *h) +static inline void hpsa_set_driver_support_bits(struct ctlr_info *h)  { -#ifdef CONFIG_X86 -	u32 prefetch; +	u32 driver_support; -	prefetch = readl(&(h->cfgtable->SCSI_Prefetch)); -	prefetch |= 0x100; -	writel(prefetch, &(h->cfgtable->SCSI_Prefetch)); +#ifdef CONFIG_X86 +	/* Need to enable prefetch in the SCSI core for 6400 in x86 */ +	driver_support = readl(&(h->cfgtable->driver_support)); +	driver_support |= ENABLE_SCSI_PREFETCH;  #endif +	driver_support |= ENABLE_UNIT_ATTN; +	writel(driver_support, &(h->cfgtable->driver_support));  }  /* Disable DMA prefetch for the P600.  Otherwise an ASIC bug may result @@ -4334,6 +6388,23 @@ static inline void hpsa_p600_dma_prefetch_quirk(struct ctlr_info *h)  	writel(dma_prefetch, h->vaddr + I2O_DMA1_CFG);  } +static void hpsa_wait_for_clear_event_notify_ack(struct ctlr_info *h) +{ +	int i; +	u32 doorbell_value; +	unsigned long flags; +	/* wait until the clear_event_notify bit 6 is cleared by controller. */ +	for (i = 0; i < MAX_CONFIG_WAIT; i++) { +		spin_lock_irqsave(&h->lock, flags); +		doorbell_value = readl(h->vaddr + SA5_DOORBELL); +		spin_unlock_irqrestore(&h->lock, flags); +		if (!(doorbell_value & DOORBELL_CLEAR_EVENTS)) +			break; +		/* delay and try again */ +		msleep(20); +	} +} +  static void hpsa_wait_for_mode_change_ack(struct ctlr_info *h)  {  	int i; @@ -4364,18 +6435,20 @@ static int hpsa_enter_simple_mode(struct ctlr_info *h)  		return -ENOTSUPP;  	h->max_commands = readl(&(h->cfgtable->CmdsOutMax)); +  	/* Update the field, and then ring the doorbell */  	writel(CFGTBL_Trans_Simple, &(h->cfgtable->HostWrite.TransportRequest)); +	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi);  	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);  	hpsa_wait_for_mode_change_ack(h);  	print_cfg_table(&h->pdev->dev, h->cfgtable); -	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) { -		dev_warn(&h->pdev->dev, -			"unable to get board into simple mode\n"); -		return -ENODEV; -	} +	if (!(readl(&(h->cfgtable->TransportActive)) & CFGTBL_Trans_Simple)) +		goto error;  	h->transMethod = CFGTBL_Trans_Simple;  	return 0; +error: +	dev_warn(&h->pdev->dev, "unable to get board into simple mode\n"); +	return -ENODEV;  }  static int hpsa_pci_init(struct ctlr_info *h) @@ -4427,7 +6500,7 @@ static int hpsa_pci_init(struct ctlr_info *h)  		err = -ENODEV;  		goto err_out_free_res;  	} -	hpsa_enable_scsi_prefetch(h); +	hpsa_set_driver_support_bits(h);  	hpsa_p600_dma_prefetch_quirk(h);  	err = hpsa_enter_simple_mode(h);  	if (err) @@ -4521,11 +6594,30 @@ static void hpsa_free_cmd_pool(struct ctlr_info *h)  		pci_free_consistent(h->pdev,  			    h->nr_cmds * sizeof(struct CommandList),  			    h->cmd_pool, h->cmd_pool_dhandle); +	if (h->ioaccel2_cmd_pool) +		
pci_free_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), +			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle);  	if (h->errinfo_pool)  		pci_free_consistent(h->pdev,  			    h->nr_cmds * sizeof(struct ErrorInfo),  			    h->errinfo_pool,  			    h->errinfo_pool_dhandle); +	if (h->ioaccel_cmd_pool) +		pci_free_consistent(h->pdev, +			h->nr_cmds * sizeof(struct io_accel1_cmd), +			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); +} + +static void hpsa_irq_affinity_hints(struct ctlr_info *h) +{ +	int i, cpu, rc; + +	cpu = cpumask_first(cpu_online_mask); +	for (i = 0; i < h->msix_vector; i++) { +		rc = irq_set_affinity_hint(h->intr[i], get_cpu_mask(cpu)); +		cpu = cpumask_next(cpu, cpu_online_mask); +	}  }  static int hpsa_request_irq(struct ctlr_info *h, @@ -4541,15 +6633,16 @@ static int hpsa_request_irq(struct ctlr_info *h,  	for (i = 0; i < MAX_REPLY_QUEUES; i++)  		h->q[i] = (u8) i; -	if (h->intr_mode == PERF_MODE_INT && h->msix_vector) { +	if (h->intr_mode == PERF_MODE_INT && h->msix_vector > 0) {  		/* If performant mode and MSI-X, use multiple reply queues */ -		for (i = 0; i < MAX_REPLY_QUEUES; i++) +		for (i = 0; i < h->msix_vector; i++)  			rc = request_irq(h->intr[i], msixhandler,  					0, h->devname,  					&h->q[i]); +		hpsa_irq_affinity_hints(h);  	} else {  		/* Use single reply pool */ -		if (h->msix_vector || h->msi_vector) { +		if (h->msix_vector > 0 || h->msi_vector) {  			rc = request_irq(h->intr[h->intr_mode],  				msixhandler, 0, h->devname,  				&h->q[h->intr_mode]); @@ -4598,12 +6691,15 @@ static void free_irqs(struct ctlr_info *h)  	if (!h->msix_vector || h->intr_mode != PERF_MODE_INT) {  		/* Single reply queue, only one irq to free */  		i = h->intr_mode; +		irq_set_affinity_hint(h->intr[i], NULL);  		free_irq(h->intr[i], &h->q[i]);  		return;  	} -	for (i = 0; i < MAX_REPLY_QUEUES; i++) +	for (i = 0; i < h->msix_vector; i++) { +		irq_set_affinity_hint(h->intr[i], NULL);  		free_irq(h->intr[i], &h->q[i]); +	}  }  static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h) @@ -4620,14 +6716,28 @@ static void hpsa_free_irqs_and_disable_msix(struct ctlr_info *h)  #endif /* CONFIG_PCI_MSI */  } +static void hpsa_free_reply_queues(struct ctlr_info *h) +{ +	int i; + +	for (i = 0; i < h->nreply_queues; i++) { +		if (!h->reply_queue[i].head) +			continue; +		pci_free_consistent(h->pdev, h->reply_queue_size, +			h->reply_queue[i].head, h->reply_queue[i].busaddr); +		h->reply_queue[i].head = NULL; +		h->reply_queue[i].busaddr = 0; +	} +} +  static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)  {  	hpsa_free_irqs_and_disable_msix(h);  	hpsa_free_sg_chain_blocks(h);  	hpsa_free_cmd_pool(h); +	kfree(h->ioaccel1_blockFetchTable);  	kfree(h->blockFetchTable); -	pci_free_consistent(h->pdev, h->reply_pool_size, -		h->reply_pool, h->reply_pool_dhandle); +	hpsa_free_reply_queues(h);  	if (h->vaddr)  		iounmap(h->vaddr);  	if (h->transtable) @@ -4638,16 +6748,6 @@ static void hpsa_undo_allocations_after_kdump_soft_reset(struct ctlr_info *h)  	kfree(h);  } -static void remove_ctlr_from_lockup_detector_list(struct ctlr_info *h) -{ -	assert_spin_locked(&lockup_detector_lock); -	if (!hpsa_lockup_detector) -		return; -	if (h->lockup_detected) -		return; /* already stopped the lockup detector */ -	list_del(&h->lockup_list); -} -  /* Called when controller lockup detected. 
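 *
 * [Editor's note] The lockup state now lives in the per-cpu variable
 * h->lockup_detected (see set_lockup_detected_for_all_cpus() below).
 * The lockup_detected(h) predicate used throughout this patch is
 * defined outside this hunk; a minimal sketch, assuming it simply
 * samples the current cpu's copy, would be:
 *
 *	static u32 lockup_detected(struct ctlr_info *h)
 *	{
 *		int cpu;
 *		u32 rc, *lockup_detected;
 *
 *		cpu = get_cpu();
 *		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu);
 *		rc = *lockup_detected;
 *		put_cpu();
 *		return rc;
 *	}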
*/  static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)  { @@ -4662,18 +6762,38 @@ static void fail_all_cmds_on_list(struct ctlr_info *h, struct list_head *list)  	}  } +static void set_lockup_detected_for_all_cpus(struct ctlr_info *h, u32 value) +{ +	int i, cpu; + +	cpu = cpumask_first(cpu_online_mask); +	for (i = 0; i < num_online_cpus(); i++) { +		u32 *lockup_detected; +		lockup_detected = per_cpu_ptr(h->lockup_detected, cpu); +		*lockup_detected = value; +		cpu = cpumask_next(cpu, cpu_online_mask); +	} +	wmb(); /* be sure the per-cpu variables are out to memory */ +} +  static void controller_lockup_detected(struct ctlr_info *h)  {  	unsigned long flags; +	u32 lockup_detected; -	assert_spin_locked(&lockup_detector_lock); -	remove_ctlr_from_lockup_detector_list(h);  	h->access.set_intr_mask(h, HPSA_INTR_OFF);  	spin_lock_irqsave(&h->lock, flags); -	h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); +	lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET); +	if (!lockup_detected) { +		/* no heartbeat, but controller gave us a zero. */ +		dev_warn(&h->pdev->dev, +			"lockup detected but scratchpad register is zero\n"); +		lockup_detected = 0xffffffff; +	} +	set_lockup_detected_for_all_cpus(h, lockup_detected);  	spin_unlock_irqrestore(&h->lock, flags);  	dev_warn(&h->pdev->dev, "Controller lockup detected: 0x%08x\n", -			h->lockup_detected); +			lockup_detected);  	pci_disable_device(h->pdev);  	spin_lock_irqsave(&h->lock, flags);  	fail_all_cmds_on_list(h, &h->cmpQ); @@ -4687,7 +6807,6 @@ static void detect_controller_lockup(struct ctlr_info *h)  	u32 heartbeat;  	unsigned long flags; -	assert_spin_locked(&lockup_detector_lock);  	now = get_jiffies_64();  	/* If we've received an interrupt recently, we're ok. */  	if (time_after64(h->last_intr_timestamp + @@ -4717,68 +6836,117 @@ static void detect_controller_lockup(struct ctlr_info *h)  	h->last_heartbeat_timestamp = now;  } -static int detect_controller_lockup_thread(void *notused) +static void hpsa_ack_ctlr_events(struct ctlr_info *h)  { -	struct ctlr_info *h; -	unsigned long flags; - -	while (1) { -		struct list_head *this, *tmp; - -		schedule_timeout_interruptible(HEARTBEAT_SAMPLE_INTERVAL); -		if (kthread_should_stop()) -			break; -		spin_lock_irqsave(&lockup_detector_lock, flags); -		list_for_each_safe(this, tmp, &hpsa_ctlr_list) { -			h = list_entry(this, struct ctlr_info, lockup_list); -			detect_controller_lockup(h); -		} -		spin_unlock_irqrestore(&lockup_detector_lock, flags); +	int i; +	char *event_type; + +	/* Clear the driver-requested rescan flag */ +	h->drv_req_rescan = 0; + +	/* Ask the controller to clear the events we're handling. 
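 *
 * [Editor's note] The acknowledge handshake implemented below is:
 *
 *	writel(h->events, &(h->cfgtable->clear_event_notify));	// echo events
 *	writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL);	// ring bit 6
 *	hpsa_wait_for_clear_event_notify_ack(h);	// wait for bit 6 to clear
 *
 * For HP SSD Smart Path state/config change events the driver first
 * blocks new SCSI requests, turns off offload_enabled on every device
 * and drains in-flight accelerated commands before acknowledging.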
*/ +	if ((h->transMethod & (CFGTBL_Trans_io_accel1 +			| CFGTBL_Trans_io_accel2)) && +		(h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE || +		 h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE)) { + +		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_STATE_CHANGE) +			event_type = "state change"; +		if (h->events & HPSA_EVENT_NOTIFY_ACCEL_IO_PATH_CONFIG_CHANGE) +			event_type = "configuration change"; +		/* Stop sending new RAID offload reqs via the IO accelerator */ +		scsi_block_requests(h->scsi_host); +		for (i = 0; i < h->ndevices; i++) +			h->dev[i]->offload_enabled = 0; +		hpsa_drain_accel_commands(h); +		/* Set 'accelerator path config change' bit */ +		dev_warn(&h->pdev->dev, +			"Acknowledging event: 0x%08x (HP SSD Smart Path %s)\n", +			h->events, event_type); +		writel(h->events, &(h->cfgtable->clear_event_notify)); +		/* Set the "clear event notify field update" bit 6 */ +		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); +		/* Wait until ctlr clears 'clear event notify field', bit 6 */ +		hpsa_wait_for_clear_event_notify_ack(h); +		scsi_unblock_requests(h->scsi_host); +	} else { +		/* Acknowledge controller notification events. */ +		writel(h->events, &(h->cfgtable->clear_event_notify)); +		writel(DOORBELL_CLEAR_EVENTS, h->vaddr + SA5_DOORBELL); +		hpsa_wait_for_clear_event_notify_ack(h); +#if 0 +		writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); +		hpsa_wait_for_mode_change_ack(h); +#endif  	} -	return 0; +	return;  } -static void add_ctlr_to_lockup_detector_list(struct ctlr_info *h) +/* Check a register on the controller to see if there are configuration + * changes (added/changed/removed logical drives, etc.) which mean that + * we should rescan the controller for devices. + * Also check flag for driver-initiated rescan. 
+ */ +static int hpsa_ctlr_needs_rescan(struct ctlr_info *h)  { -	unsigned long flags; +	if (h->drv_req_rescan) +		return 1; -	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; -	spin_lock_irqsave(&lockup_detector_lock, flags); -	list_add_tail(&h->lockup_list, &hpsa_ctlr_list); -	spin_unlock_irqrestore(&lockup_detector_lock, flags); +	if (!(h->fw_support & MISC_FW_EVENT_NOTIFY)) +		return 0; + +	h->events = readl(&(h->cfgtable->event_notify)); +	return h->events & RESCAN_REQUIRED_EVENT_BITS;  } -static void start_controller_lockup_detector(struct ctlr_info *h) +/* + * Check if any of the offline devices have become ready + */ +static int hpsa_offline_devices_ready(struct ctlr_info *h)  { -	/* Start the lockup detector thread if not already started */ -	if (!hpsa_lockup_detector) { -		spin_lock_init(&lockup_detector_lock); -		hpsa_lockup_detector = -			kthread_run(detect_controller_lockup_thread, -						NULL, HPSA); -	} -	if (!hpsa_lockup_detector) { -		dev_warn(&h->pdev->dev, -			"Could not start lockup detector thread\n"); -		return; +	unsigned long flags; +	struct offline_device_entry *d; +	struct list_head *this, *tmp; + +	spin_lock_irqsave(&h->offline_device_lock, flags); +	list_for_each_safe(this, tmp, &h->offline_device_list) { +		d = list_entry(this, struct offline_device_entry, +				offline_list); +		spin_unlock_irqrestore(&h->offline_device_lock, flags); +		if (!hpsa_volume_offline(h, d->scsi3addr)) +			return 1; +		spin_lock_irqsave(&h->offline_device_lock, flags);  	} -	add_ctlr_to_lockup_detector_list(h); +	spin_unlock_irqrestore(&h->offline_device_lock, flags); +	return 0;  } -static void stop_controller_lockup_detector(struct ctlr_info *h) + +static void hpsa_monitor_ctlr_worker(struct work_struct *work)  {  	unsigned long flags; +	struct ctlr_info *h = container_of(to_delayed_work(work), +					struct ctlr_info, monitor_ctlr_work); +	detect_controller_lockup(h); +	if (lockup_detected(h)) +		return; + +	if (hpsa_ctlr_needs_rescan(h) || hpsa_offline_devices_ready(h)) { +		scsi_host_get(h->scsi_host); +		h->drv_req_rescan = 0; +		hpsa_ack_ctlr_events(h); +		hpsa_scan_start(h->scsi_host); +		scsi_host_put(h->scsi_host); +	} -	spin_lock_irqsave(&lockup_detector_lock, flags); -	remove_ctlr_from_lockup_detector_list(h); -	/* If the list of ctlr's to monitor is empty, stop the thread */ -	if (list_empty(&hpsa_ctlr_list)) { -		spin_unlock_irqrestore(&lockup_detector_lock, flags); -		kthread_stop(hpsa_lockup_detector); -		spin_lock_irqsave(&lockup_detector_lock, flags); -		hpsa_lockup_detector = NULL; +	spin_lock_irqsave(&h->lock, flags); +	if (h->remove_in_progress) { +		spin_unlock_irqrestore(&h->lock, flags); +		return;  	} -	spin_unlock_irqrestore(&lockup_detector_lock, flags); +	schedule_delayed_work(&h->monitor_ctlr_work, +				h->heartbeat_sample_interval); +	spin_unlock_irqrestore(&h->lock, flags);  }  static int hpsa_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) @@ -4810,7 +6978,6 @@ reinit_after_soft_reset:  	 * the 5 lower bits of the address are used by the hardware. and by  	 * the driver.  See comments in hpsa.h for more info.  	 */ -#define COMMANDLIST_ALIGNMENT 32  	BUILD_BUG_ON(sizeof(struct CommandList) % COMMANDLIST_ALIGNMENT);  	h = kzalloc(sizeof(*h), GFP_KERNEL);  	if (!h) @@ -4820,8 +6987,18 @@ reinit_after_soft_reset:  	h->intr_mode = hpsa_simple_mode ? 
SIMPLE_MODE_INT : PERF_MODE_INT;  	INIT_LIST_HEAD(&h->cmpQ);  	INIT_LIST_HEAD(&h->reqQ); +	INIT_LIST_HEAD(&h->offline_device_list);  	spin_lock_init(&h->lock); +	spin_lock_init(&h->offline_device_lock);  	spin_lock_init(&h->scan_lock); +	spin_lock_init(&h->passthru_count_lock); + +	/* Allocate and clear per-cpu variable lockup_detected */ +	h->lockup_detected = alloc_percpu(u32); +	if (!h->lockup_detected) +		goto clean1; +	set_lockup_detected_for_all_cpus(h, 0); +  	rc = hpsa_pci_init(h);  	if (rc != 0)  		goto clean1; @@ -4861,6 +7038,7 @@ reinit_after_soft_reset:  	pci_set_drvdata(pdev, h);  	h->ndevices = 0; +	h->hba_mode_enabled = 0;  	h->scsi_host = NULL;  	spin_lock_init(&h->devlock);  	hpsa_put_ctlr_into_performant_mode(h); @@ -4920,13 +7098,23 @@ reinit_after_soft_reset:  		goto reinit_after_soft_reset;  	} +		/* Enable Accelerated IO path at driver layer */ +		h->acciopath_status = 1; + +	h->drv_req_rescan = 0; +  	/* Turn the interrupts on so we can service requests */  	h->access.set_intr_mask(h, HPSA_INTR_ON);  	hpsa_hba_inquiry(h);  	hpsa_register_scsi(h);	/* hook ourselves into SCSI subsystem */ -	start_controller_lockup_detector(h); -	return 1; + +	/* Monitor the controller for firmware lockups */ +	h->heartbeat_sample_interval = HEARTBEAT_SAMPLE_INTERVAL; +	INIT_DELAYED_WORK(&h->monitor_ctlr_work, hpsa_monitor_ctlr_worker); +	schedule_delayed_work(&h->monitor_ctlr_work, +				h->heartbeat_sample_interval); +	return 0;  clean4:  	hpsa_free_sg_chain_blocks(h); @@ -4934,6 +7122,8 @@ clean4:  	free_irqs(h);  clean2:  clean1: +	if (h->lockup_detected) +		free_percpu(h->lockup_detected);  	kfree(h);  	return rc;  } @@ -4943,6 +7133,9 @@ static void hpsa_flush_cache(struct ctlr_info *h)  	char *flush_buf;  	struct CommandList *c; +	/* Don't bother trying to flush the cache if locked up */ +	if (unlikely(lockup_detected(h))) +		return;  	flush_buf = kzalloc(4, GFP_KERNEL);  	if (!flush_buf)  		return; @@ -4991,13 +7184,20 @@ static void hpsa_free_device_info(struct ctlr_info *h)  static void hpsa_remove_one(struct pci_dev *pdev)  {  	struct ctlr_info *h; +	unsigned long flags;  	if (pci_get_drvdata(pdev) == NULL) {  		dev_err(&pdev->dev, "unable to remove device\n");  		return;  	}  	h = pci_get_drvdata(pdev); -	stop_controller_lockup_detector(h); + +	/* Get rid of any controller monitoring work items */ +	spin_lock_irqsave(&h->lock, flags); +	h->remove_in_progress = 1; +	cancel_delayed_work(&h->monitor_ctlr_work); +	spin_unlock_irqrestore(&h->lock, flags); +  	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */  	hpsa_shutdown(pdev);  	iounmap(h->vaddr); @@ -5011,14 +7211,15 @@ static void hpsa_remove_one(struct pci_dev *pdev)  	pci_free_consistent(h->pdev,  		h->nr_cmds * sizeof(struct ErrorInfo),  		h->errinfo_pool, h->errinfo_pool_dhandle); -	pci_free_consistent(h->pdev, h->reply_pool_size, -		h->reply_pool, h->reply_pool_dhandle); +	hpsa_free_reply_queues(h);  	kfree(h->cmd_pool_bits);  	kfree(h->blockFetchTable); +	kfree(h->ioaccel1_blockFetchTable); +	kfree(h->ioaccel2_blockFetchTable);  	kfree(h->hba_inquiry_data);  	pci_disable_device(pdev);  	pci_release_regions(pdev); -	pci_set_drvdata(pdev, NULL); +	free_percpu(h->lockup_detected);  	kfree(h);  } @@ -5056,20 +7257,17 @@ static struct pci_driver hpsa_pci_driver = {   * bits of the command address.   
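 * (Worked example: with buckets {5, 6, 8, 10, 12, 20, 28,
 * SG_ENTRIES_IN_CMD + 4}, a command whose computed size is 7 blocks
 * maps to bucket index 2, since 8 is the smallest entry >= 7;
 * calc_bucket_map() below caches one such index for every possible
 * SG count in bucket_map[].)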
*/  static void  calc_bucket_map(int bucket[], int num_buckets, -	int nsgs, int *bucket_map) +	int nsgs, int min_blocks, int *bucket_map)  {  	int i, j, b, size; -	/* even a command with 0 SGs requires 4 blocks */ -#define MINIMUM_TRANSFER_BLOCKS 4 -#define NUM_BUCKETS 8  	/* Note, bucket_map must have nsgs+1 entries. */  	for (i = 0; i <= nsgs; i++) {  		/* Compute size of a command with i SG entries */ -		size = i + MINIMUM_TRANSFER_BLOCKS; +		size = i + min_blocks;  		b = num_buckets; /* Assume the biggest bucket */  		/* Find the bucket that is just big enough */ -		for (j = 0; j < 8; j++) { +		for (j = 0; j < num_buckets; j++) {  			if (bucket[j] >= size) {  				b = j;  				break; @@ -5080,10 +7278,16 @@ static void  calc_bucket_map(int bucket[], int num_buckets,  	}  } -static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags) +static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 trans_support)  {  	int i;  	unsigned long register_value; +	unsigned long transMethod = CFGTBL_Trans_Performant | +			(trans_support & CFGTBL_Trans_use_short_tags) | +				CFGTBL_Trans_enable_directed_msix | +			(trans_support & (CFGTBL_Trans_io_accel1 | +				CFGTBL_Trans_io_accel2)); +	struct access_method access = SA5_performant_access;  	/* This is a bit complicated.  There are 8 registers on  	 * the controller which we write to to tell it 8 different @@ -5103,6 +7307,16 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)  	 * sizes for small commands, and fewer sizes for larger commands.  	 */  	int bft[8] = {5, 6, 8, 10, 12, 20, 28, SG_ENTRIES_IN_CMD + 4}; +#define MIN_IOACCEL2_BFT_ENTRY 5 +#define HPSA_IOACCEL2_HEADER_SZ 4 +	int bft2[16] = {MIN_IOACCEL2_BFT_ENTRY, 6, 7, 8, 9, 10, 11, 12, +			13, 14, 15, 16, 17, 18, 19, +			HPSA_IOACCEL2_HEADER_SZ + IOACCEL2_MAXSGENTRIES}; +	BUILD_BUG_ON(ARRAY_SIZE(bft2) != 16); +	BUILD_BUG_ON(ARRAY_SIZE(bft) != 8); +	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) > +				 16 * MIN_IOACCEL2_BFT_ENTRY); +	BUILD_BUG_ON(sizeof(struct ioaccel2_sg_element) != 16);  	BUILD_BUG_ON(28 > SG_ENTRIES_IN_CMD + 4);  	/*  5 = 1 s/g entry or 4k  	 *  6 = 2 s/g entry or 8k @@ -5110,12 +7324,20 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)  	 * 10 = 6 s/g entry or 24k  	 */ +	/* If the controller supports either ioaccel method then +	 * we can also use the RAID stack submit path that does not +	 * perform the superfluous readl() after each command submission. +	 */ +	if (trans_support & (CFGTBL_Trans_io_accel1 | CFGTBL_Trans_io_accel2)) +		access = SA5_performant_access_no_read; +  	/* Controller spec: zero out this buffer. 
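 	 * (Each reply queue now has its own DMA allocation, so the loop
 	 * below zeroes h->nreply_queues regions of h->reply_queue_size
 	 * bytes each, replacing the old single-pool memset.)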
*/ -	memset(h->reply_pool, 0, h->reply_pool_size); +	for (i = 0; i < h->nreply_queues; i++) +		memset(h->reply_queue[i].head, 0, h->reply_queue_size);  	bft[7] = SG_ENTRIES_IN_CMD + 4;  	calc_bucket_map(bft, ARRAY_SIZE(bft), -				SG_ENTRIES_IN_CMD, h->blockFetchTable); +				SG_ENTRIES_IN_CMD, 4, h->blockFetchTable);  	for (i = 0; i < 8; i++)  		writel(bft[i], &h->transtable->BlockFetch[i]); @@ -5127,14 +7349,26 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)  	for (i = 0; i < h->nreply_queues; i++) {  		writel(0, &h->transtable->RepQAddr[i].upper); -		writel(h->reply_pool_dhandle + -			(h->max_commands * sizeof(u64) * i), +		writel(h->reply_queue[i].busaddr,  			&h->transtable->RepQAddr[i].lower);  	} -	writel(CFGTBL_Trans_Performant | use_short_tags | -		CFGTBL_Trans_enable_directed_msix, -		&(h->cfgtable->HostWrite.TransportRequest)); +	writel(0, &h->cfgtable->HostWrite.command_pool_addr_hi); +	writel(transMethod, &(h->cfgtable->HostWrite.TransportRequest)); +	/* +	 * enable outbound interrupt coalescing in accelerator mode; +	 */ +	if (trans_support & CFGTBL_Trans_io_accel1) { +		access = SA5_ioaccel_mode1_access; +		writel(10, &h->cfgtable->HostWrite.CoalIntDelay); +		writel(4, &h->cfgtable->HostWrite.CoalIntCount); +	} else { +		if (trans_support & CFGTBL_Trans_io_accel2) { +			access = SA5_ioaccel_mode2_access; +			writel(10, &h->cfgtable->HostWrite.CoalIntDelay); +			writel(4, &h->cfgtable->HostWrite.CoalIntCount); +		} +	}  	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL);  	hpsa_wait_for_mode_change_ack(h);  	register_value = readl(&(h->cfgtable->TransportActive)); @@ -5144,13 +7378,160 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)  		return;  	}  	/* Change the access methods to the performant access methods */ -	h->access = SA5_performant_access; -	h->transMethod = CFGTBL_Trans_Performant; +	h->access = access; +	h->transMethod = transMethod; + +	if (!((trans_support & CFGTBL_Trans_io_accel1) || +		(trans_support & CFGTBL_Trans_io_accel2))) +		return; + +	if (trans_support & CFGTBL_Trans_io_accel1) { +		/* Set up I/O accelerator mode */ +		for (i = 0; i < h->nreply_queues; i++) { +			writel(i, h->vaddr + IOACCEL_MODE1_REPLY_QUEUE_INDEX); +			h->reply_queue[i].current_entry = +				readl(h->vaddr + IOACCEL_MODE1_PRODUCER_INDEX); +		} +		bft[7] = h->ioaccel_maxsg + 8; +		calc_bucket_map(bft, ARRAY_SIZE(bft), h->ioaccel_maxsg, 8, +				h->ioaccel1_blockFetchTable); + +		/* initialize all reply queue entries to unused */ +		for (i = 0; i < h->nreply_queues; i++) +			memset(h->reply_queue[i].head, +				(u8) IOACCEL_MODE1_REPLY_UNUSED, +				h->reply_queue_size); + +		/* set all the constant fields in the accelerator command +		 * frames once at init time to save CPU cycles later. 
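+		 * (Per-slot invariants set here, visible in the loop below:
+		 * the err_info bus address and length for slot i, the SG-list
+		 * offset, and the tag (i << DIRECT_LOOKUP_SHIFT) |
+		 * DIRECT_LOOKUP_BIT, none of which change per I/O.)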
+		 */ +		for (i = 0; i < h->nr_cmds; i++) { +			struct io_accel1_cmd *cp = &h->ioaccel_cmd_pool[i]; + +			cp->function = IOACCEL1_FUNCTION_SCSIIO; +			cp->err_info = (u32) (h->errinfo_pool_dhandle + +					(i * sizeof(struct ErrorInfo))); +			cp->err_info_len = sizeof(struct ErrorInfo); +			cp->sgl_offset = IOACCEL1_SGLOFFSET; +			cp->host_context_flags = IOACCEL1_HCFLAGS_CISS_FORMAT; +			cp->timeout_sec = 0; +			cp->ReplyQueue = 0; +			cp->Tag.lower = (i << DIRECT_LOOKUP_SHIFT) | +						DIRECT_LOOKUP_BIT; +			cp->Tag.upper = 0; +			cp->host_addr.lower = +				(u32) (h->ioaccel_cmd_pool_dhandle + +					(i * sizeof(struct io_accel1_cmd))); +			cp->host_addr.upper = 0; +		} +	} else if (trans_support & CFGTBL_Trans_io_accel2) { +		u64 cfg_offset, cfg_base_addr_index; +		u32 bft2_offset, cfg_base_addr; +		int rc; + +		rc = hpsa_find_cfg_addrs(h->pdev, h->vaddr, &cfg_base_addr, +			&cfg_base_addr_index, &cfg_offset); +		BUILD_BUG_ON(offsetof(struct io_accel2_cmd, sg) != 64); +		bft2[15] = h->ioaccel_maxsg + HPSA_IOACCEL2_HEADER_SZ; +		calc_bucket_map(bft2, ARRAY_SIZE(bft2), h->ioaccel_maxsg, +				4, h->ioaccel2_blockFetchTable); +		bft2_offset = readl(&h->cfgtable->io_accel_request_size_offset); +		BUILD_BUG_ON(offsetof(struct CfgTable, +				io_accel_request_size_offset) != 0xb8); +		h->ioaccel2_bft2_regs = +			remap_pci_mem(pci_resource_start(h->pdev, +					cfg_base_addr_index) + +					cfg_offset + bft2_offset, +					ARRAY_SIZE(bft2) * +					sizeof(*h->ioaccel2_bft2_regs)); +		for (i = 0; i < ARRAY_SIZE(bft2); i++) +			writel(bft2[i], &h->ioaccel2_bft2_regs[i]); +	} +	writel(CFGTBL_ChangeReq, h->vaddr + SA5_DOORBELL); +	hpsa_wait_for_mode_change_ack(h); +} + +static int hpsa_alloc_ioaccel_cmd_and_bft(struct ctlr_info *h) +{ +	h->ioaccel_maxsg = +		readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); +	if (h->ioaccel_maxsg > IOACCEL1_MAXSGENTRIES) +		h->ioaccel_maxsg = IOACCEL1_MAXSGENTRIES; + +	/* Command structures must be aligned on a 128-byte boundary +	 * because the 7 lower bits of the address are used by the +	 * hardware. 
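+	 * (Example: pool entry i lives at pool_base + i * sizeof(struct
+	 * io_accel1_cmd); keeping that size a multiple of 128 keeps bits
+	 * 0-6 of every entry's bus address clear for the hardware, which
+	 * the BUILD_BUG_ON below enforces at compile time.)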
+	 */ +	BUILD_BUG_ON(sizeof(struct io_accel1_cmd) % +			IOACCEL1_COMMANDLIST_ALIGNMENT); +	h->ioaccel_cmd_pool = +		pci_alloc_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), +			&(h->ioaccel_cmd_pool_dhandle)); + +	h->ioaccel1_blockFetchTable = +		kmalloc(((h->ioaccel_maxsg + 1) * +				sizeof(u32)), GFP_KERNEL); + +	if ((h->ioaccel_cmd_pool == NULL) || +		(h->ioaccel1_blockFetchTable == NULL)) +		goto clean_up; + +	memset(h->ioaccel_cmd_pool, 0, +		h->nr_cmds * sizeof(*h->ioaccel_cmd_pool)); +	return 0; + +clean_up: +	if (h->ioaccel_cmd_pool) +		pci_free_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel_cmd_pool), +			h->ioaccel_cmd_pool, h->ioaccel_cmd_pool_dhandle); +	kfree(h->ioaccel1_blockFetchTable); +	return 1; +} + +static int ioaccel2_alloc_cmds_and_bft(struct ctlr_info *h) +{ +	/* Allocate ioaccel2 mode command blocks and block fetch table */ + +	h->ioaccel_maxsg = +		readl(&(h->cfgtable->io_accel_max_embedded_sg_count)); +	if (h->ioaccel_maxsg > IOACCEL2_MAXSGENTRIES) +		h->ioaccel_maxsg = IOACCEL2_MAXSGENTRIES; + +	BUILD_BUG_ON(sizeof(struct io_accel2_cmd) % +			IOACCEL2_COMMANDLIST_ALIGNMENT); +	h->ioaccel2_cmd_pool = +		pci_alloc_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), +			&(h->ioaccel2_cmd_pool_dhandle)); + +	h->ioaccel2_blockFetchTable = +		kmalloc(((h->ioaccel_maxsg + 1) * +				sizeof(u32)), GFP_KERNEL); + +	if ((h->ioaccel2_cmd_pool == NULL) || +		(h->ioaccel2_blockFetchTable == NULL)) +		goto clean_up; + +	memset(h->ioaccel2_cmd_pool, 0, +		h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool)); +	return 0; + +clean_up: +	if (h->ioaccel2_cmd_pool) +		pci_free_consistent(h->pdev, +			h->nr_cmds * sizeof(*h->ioaccel2_cmd_pool), +			h->ioaccel2_cmd_pool, h->ioaccel2_cmd_pool_dhandle); +	kfree(h->ioaccel2_blockFetchTable); +	return 1;  }  static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)  {  	u32 trans_support; +	unsigned long transMethod = CFGTBL_Trans_Performant | +					CFGTBL_Trans_use_short_tags;  	int i;  	if (hpsa_simple_mode) @@ -5160,15 +7541,32 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)  	if (!(trans_support & PERFORMANT_MODE))  		return; -	h->nreply_queues = h->msix_vector ? MAX_REPLY_QUEUES : 1; +	/* Check for I/O accelerator mode support */ +	if (trans_support & CFGTBL_Trans_io_accel1) { +		transMethod |= CFGTBL_Trans_io_accel1 | +				CFGTBL_Trans_enable_directed_msix; +		if (hpsa_alloc_ioaccel_cmd_and_bft(h)) +			goto clean_up; +	} else { +		if (trans_support & CFGTBL_Trans_io_accel2) { +				transMethod |= CFGTBL_Trans_io_accel2 | +				CFGTBL_Trans_enable_directed_msix; +		if (ioaccel2_alloc_cmds_and_bft(h)) +			goto clean_up; +		} +	} + +	h->nreply_queues = h->msix_vector > 0 ? 
h->msix_vector : 1;  	hpsa_get_max_perf_mode_cmds(h);  	/* Performant mode ring buffer and supporting data structures */ -	h->reply_pool_size = h->max_commands * sizeof(u64) * h->nreply_queues; -	h->reply_pool = pci_alloc_consistent(h->pdev, h->reply_pool_size, -				&(h->reply_pool_dhandle)); +	h->reply_queue_size = h->max_commands * sizeof(u64);  	for (i = 0; i < h->nreply_queues; i++) { -		h->reply_queue[i].head = &h->reply_pool[h->max_commands * i]; +		h->reply_queue[i].head = pci_alloc_consistent(h->pdev, +						h->reply_queue_size, +						&(h->reply_queue[i].busaddr)); +		if (!h->reply_queue[i].head) +			goto clean_up;  		h->reply_queue[i].size = h->max_commands;  		h->reply_queue[i].wraparound = 1;  /* spec: init to 1 */  		h->reply_queue[i].current_entry = 0; @@ -5177,23 +7575,42 @@ static void hpsa_put_ctlr_into_performant_mode(struct ctlr_info *h)  	/* Need a block fetch table for performant mode */  	h->blockFetchTable = kmalloc(((SG_ENTRIES_IN_CMD + 1) *  				sizeof(u32)), GFP_KERNEL); - -	if ((h->reply_pool == NULL) -		|| (h->blockFetchTable == NULL)) +	if (!h->blockFetchTable)  		goto clean_up; -	hpsa_enter_performant_mode(h, -		trans_support & CFGTBL_Trans_use_short_tags); - +	hpsa_enter_performant_mode(h, trans_support);  	return;  clean_up: -	if (h->reply_pool) -		pci_free_consistent(h->pdev, h->reply_pool_size, -			h->reply_pool, h->reply_pool_dhandle); +	hpsa_free_reply_queues(h);  	kfree(h->blockFetchTable);  } +static int is_accelerated_cmd(struct CommandList *c) +{ +	return c->cmd_type == CMD_IOACCEL1 || c->cmd_type == CMD_IOACCEL2; +} + +static void hpsa_drain_accel_commands(struct ctlr_info *h) +{ +	struct CommandList *c = NULL; +	unsigned long flags; +	int accel_cmds_out; + +	do { /* wait for all outstanding commands to drain out */ +		accel_cmds_out = 0; +		spin_lock_irqsave(&h->lock, flags); +		list_for_each_entry(c, &h->cmpQ, list) +			accel_cmds_out += is_accelerated_cmd(c); +		list_for_each_entry(c, &h->reqQ, list) +			accel_cmds_out += is_accelerated_cmd(c); +		spin_unlock_irqrestore(&h->lock, flags); +		if (accel_cmds_out <= 0) +			break; +		msleep(100); +	} while (1); +} +  /*   *  This is it.  Register the PCI driver information for the cards we control   *  the OS will call our registered routines when it finds one of our cards. 
@@ -5208,5 +7625,83 @@ static void __exit hpsa_cleanup(void)
 	pci_unregister_driver(&hpsa_pci_driver);
 }
 
+static void __attribute__((unused)) verify_offsets(void)
+{
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct raid_map_data, member) != offset)
+
+	VERIFY_OFFSET(structure_size, 0);
+	VERIFY_OFFSET(volume_blk_size, 4);
+	VERIFY_OFFSET(volume_blk_cnt, 8);
+	VERIFY_OFFSET(phys_blk_shift, 16);
+	VERIFY_OFFSET(parity_rotation_shift, 17);
+	VERIFY_OFFSET(strip_size, 18);
+	VERIFY_OFFSET(disk_starting_blk, 20);
+	VERIFY_OFFSET(disk_blk_cnt, 28);
+	VERIFY_OFFSET(data_disks_per_row, 36);
+	VERIFY_OFFSET(metadata_disks_per_row, 38);
+	VERIFY_OFFSET(row_cnt, 40);
+	VERIFY_OFFSET(layout_map_count, 42);
+	VERIFY_OFFSET(flags, 44);
+	VERIFY_OFFSET(dekindex, 46);
+	/* VERIFY_OFFSET(reserved, 48) */
+	VERIFY_OFFSET(data, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct io_accel2_cmd, member) != offset)
+
+	VERIFY_OFFSET(IU_type, 0);
+	VERIFY_OFFSET(direction, 1);
+	VERIFY_OFFSET(reply_queue, 2);
+	/* VERIFY_OFFSET(reserved1, 3) */
+	VERIFY_OFFSET(scsi_nexus, 4);
+	VERIFY_OFFSET(Tag, 8);
+	VERIFY_OFFSET(cdb, 16);
+	VERIFY_OFFSET(cciss_lun, 32);
+	VERIFY_OFFSET(data_len, 40);
+	VERIFY_OFFSET(cmd_priority_task_attr, 44);
+	VERIFY_OFFSET(sg_count, 45);
+	/* VERIFY_OFFSET(reserved3) */
+	VERIFY_OFFSET(err_ptr, 48);
+	VERIFY_OFFSET(err_len, 56);
+	/* VERIFY_OFFSET(reserved4) */
+	VERIFY_OFFSET(sg, 64);
+
+#undef VERIFY_OFFSET
+
+#define VERIFY_OFFSET(member, offset) \
+	BUILD_BUG_ON(offsetof(struct io_accel1_cmd, member) != offset)
+
+	VERIFY_OFFSET(dev_handle, 0x00);
+	VERIFY_OFFSET(reserved1, 0x02);
+	VERIFY_OFFSET(function, 0x03);
+	VERIFY_OFFSET(reserved2, 0x04);
+	VERIFY_OFFSET(err_info, 0x0C);
+	VERIFY_OFFSET(reserved3, 0x10);
+	VERIFY_OFFSET(err_info_len, 0x12);
+	VERIFY_OFFSET(reserved4, 0x13);
+	VERIFY_OFFSET(sgl_offset, 0x14);
+	VERIFY_OFFSET(reserved5, 0x15);
+	VERIFY_OFFSET(transfer_len, 0x1C);
+	VERIFY_OFFSET(reserved6, 0x20);
+	VERIFY_OFFSET(io_flags, 0x24);
+	VERIFY_OFFSET(reserved7, 0x26);
+	VERIFY_OFFSET(LUN, 0x34);
+	VERIFY_OFFSET(control, 0x3C);
+	VERIFY_OFFSET(CDB, 0x40);
+	VERIFY_OFFSET(reserved8, 0x50);
+	VERIFY_OFFSET(host_context_flags, 0x60);
+	VERIFY_OFFSET(timeout_sec, 0x62);
+	VERIFY_OFFSET(ReplyQueue, 0x64);
+	VERIFY_OFFSET(reserved9, 0x65);
+	VERIFY_OFFSET(Tag, 0x68);
+	VERIFY_OFFSET(host_addr, 0x70);
+	VERIFY_OFFSET(CISS_LUN, 0x78);
+	VERIFY_OFFSET(SG, 0x78 + 8);
+#undef VERIFY_OFFSET
+}
+
 module_init(hpsa_init);
 module_exit(hpsa_cleanup);
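The verify_offsets() function above pins firmware-visible structure layouts at
compile time. A minimal self-contained sketch of the same idiom follows; the
struct demo_frame and its members are invented for illustration and are not
part of hpsa, and plain C11 _Static_assert stands in for the kernel's
BUILD_BUG_ON:

#include <stddef.h>

/* Hypothetical on-wire frame whose layout firmware would mandate. */
struct demo_frame {
	unsigned int opcode;		/* expected at offset 0 */
	unsigned int reserved;		/* explicit pad, offset 4 */
	unsigned long long addr;	/* expected at offset 8 */
};

/* Same role as the driver's BUILD_BUG_ON-based VERIFY_OFFSET macro. */
#define VERIFY_OFFSET(member, offset) \
	_Static_assert(offsetof(struct demo_frame, member) == (offset), \
		"struct demo_frame." #member " moved")

VERIFY_OFFSET(opcode, 0);
VERIFY_OFFSET(reserved, 4);
VERIFY_OFFSET(addr, 8);
#undef VERIFY_OFFSET

int main(void) { return 0; }	/* compiles only if every offset holds */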
