author    Nick Cheng <nick.cheng@areca.com.tw>    2007-09-13 17:26:40 +0800
committer James Bottomley <jejb@mulgrave.localdomain>    2007-10-12 14:48:27 -0400
commit    1a4f550a09f89e3a15eff1971bc9db977571b9f6 (patch)
tree      3ba5dea468391701664d4ca4c0b0e2a569521d89 /drivers/scsi/arcmsr/arcmsr_hba.c
parent    bfd129445f23c037d9a440ebfa4109e11c220301 (diff)
[SCSI] arcmsr: 1.20.00.15: add SATA RAID plus other fixes
Description:
** support ARC1200/1201/1202 SATA RAID adapter, which is named ACB_ADAPTER_TYPE_B
** modify the arcmsr_pci_slot_reset function
** modify the arcmsr_pci_ers_disconnect_forepart function
** modify the arcmsr_pci_ers_need_reset_forepart function

Signed-off-by: Nick Cheng <nick.cheng@areca.com.tw>
Signed-off-by: James Bottomley <James.Bottomley@SteelEye.com>
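The heart of the change is a per-adapter-type dispatch: arcmsr_define_adapter_type() reads the PCI device ID and records ACB_ADAPTER_TYPE_A or ACB_ADAPTER_TYPE_B in the control block, and each hardware-touching helper in the diff below then switches on that field to pick the hba (type A, MessageUnit_A registers) or hbb (type B, doorbell-based MessageUnit_B) variant. The following is a minimal, self-contained C sketch of that pattern only; the struct, enum, and stub helpers are simplified stand-ins for illustration, not the driver's real definitions.

/* Illustrative sketch of the adapter-type dispatch introduced by this patch.
 * All names below are simplified stand-ins, not the real arcmsr structures. */
#include <stdio.h>
#include <stdint.h>

enum adapter_type { ACB_ADAPTER_TYPE_A, ACB_ADAPTER_TYPE_B };

struct adapter_cb {
	uint16_t dev_id;		/* PCI device ID read from config space */
	enum adapter_type adapter_type;
};

/* Mirrors arcmsr_define_adapter_type() in the diff: device ID 0x1201 selects
 * the new type-B path, everything else keeps the original type-A path. */
static void define_adapter_type(struct adapter_cb *acb)
{
	switch (acb->dev_id) {
	case 0x1201:
		acb->adapter_type = ACB_ADAPTER_TYPE_B;
		break;
	default:
		acb->adapter_type = ACB_ADAPTER_TYPE_A;
	}
}

/* Stubs standing in for the hba/hbb register accesses. */
static void flush_hba_cache(void) { puts("flush via MessageUnit_A inbound_msgaddr0"); }
static void flush_hbb_cache(void) { puts("flush via MessageUnit_B drv2iop doorbell"); }

/* Every operation (flush cache, stop background rebuild, abort all commands,
 * post a CCB, ...) is wrapped the same way in the patch: switch on
 * adapter_type and call the type-A or type-B variant. */
static void flush_adapter_cache(struct adapter_cb *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
		flush_hba_cache();
		break;
	case ACB_ADAPTER_TYPE_B:
		flush_hbb_cache();
		break;
	}
}

int main(void)
{
	struct adapter_cb acb = { .dev_id = 0x1201 };

	define_adapter_type(&acb);
	flush_adapter_cache(&acb);	/* takes the MessageUnit_B path */
	return 0;
}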
Diffstat (limited to 'drivers/scsi/arcmsr/arcmsr_hba.c')
-rw-r--r--    drivers/scsi/arcmsr/arcmsr_hba.c    2249
1 files changed, 1361 insertions, 888 deletions
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index 0ddfc21e9f7..d70398ac64d 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -9,7 +9,7 @@
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
** Web site: www.areca.com.tw
-** E-mail: erich@areca.com.tw
+** E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
@@ -71,33 +71,37 @@
#include <scsi/scsicam.h>
#include "arcmsr.h"
-MODULE_AUTHOR("Erich Chen <erich@areca.com.tw>");
+MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
-static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd);
+static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
+ struct scsi_cmnd *cmd);
+static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
- struct block_device *bdev, sector_t capacity, int *info);
-static int arcmsr_queue_command(struct scsi_cmnd * cmd,
- void (*done) (struct scsi_cmnd *));
+ struct block_device *bdev, sector_t capacity, int *info);
+static int arcmsr_queue_command(struct scsi_cmnd *cmd,
+ void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
+static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
-static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb);
-static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb);
+static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
+static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
pci_channel_state_t state);
static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
-static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth)
+static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
+ int queue_depth)
{
if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
queue_depth = ARCMSR_MAX_CMD_PERLUN;
@@ -123,17 +127,21 @@ static struct scsi_host_template arcmsr_scsi_host_template = {
.use_clustering = ENABLE_CLUSTERING,
.shost_attrs = arcmsr_host_attrs,
};
+#ifdef CONFIG_SCSI_ARCMSR_AER
static struct pci_error_handlers arcmsr_pci_error_handlers = {
.error_detected = arcmsr_pci_error_detected,
.slot_reset = arcmsr_pci_slot_reset,
};
-
+#endif
static struct pci_device_id arcmsr_device_id_table[] = {
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
@@ -153,20 +161,20 @@ static struct pci_driver arcmsr_pci_driver = {
.probe = arcmsr_probe,
.remove = arcmsr_remove,
.shutdown = arcmsr_shutdown,
+ #ifdef CONFIG_SCSI_ARCMSR_AER
.err_handler = &arcmsr_pci_error_handlers,
+ #endif
};
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
irqreturn_t handle_state;
- struct AdapterControlBlock *acb;
- unsigned long flags;
-
- acb = (struct AdapterControlBlock *)dev_id;
+ struct AdapterControlBlock *acb = dev_id;
- spin_lock_irqsave(acb->host->host_lock, flags);
+ spin_lock(acb->host->host_lock);
handle_state = arcmsr_interrupt(acb);
- spin_unlock_irqrestore(acb->host->host_lock, flags);
+ spin_unlock(acb->host->host_lock);
+
return handle_state;
}
@@ -198,68 +206,159 @@ static int arcmsr_bios_param(struct scsi_device *sdev,
return 0;
}
-static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
+static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
{
struct pci_dev *pdev = acb->pdev;
- struct MessageUnit __iomem *reg = acb->pmu;
- u32 ccb_phyaddr_hi32;
- void *dma_coherent;
- dma_addr_t dma_coherent_handle, dma_addr;
- struct CommandControlBlock *ccb_tmp;
- int i, j;
+ u16 dev_id;
+ pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
+ switch (dev_id) {
+ case 0x1201 : {
+ acb->adapter_type = ACB_ADAPTER_TYPE_B;
+ }
+ break;
+
+ default : acb->adapter_type = ACB_ADAPTER_TYPE_A;
+ }
+}
- dma_coherent = dma_alloc_coherent(&pdev->dev,
+static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
+{
+
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ struct pci_dev *pdev = acb->pdev;
+ void *dma_coherent;
+ dma_addr_t dma_coherent_handle, dma_addr;
+ struct CommandControlBlock *ccb_tmp;
+ uint32_t intmask_org;
+ int i, j;
+
+ acb->pmu = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+ if (!acb->pmu) {
+ printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
+ acb->host->host_no);
+ }
+
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
ARCMSR_MAX_FREECCB_NUM *
sizeof (struct CommandControlBlock) + 0x20,
&dma_coherent_handle, GFP_KERNEL);
- if (!dma_coherent)
- return -ENOMEM;
+ if (!dma_coherent)
+ return -ENOMEM;
- acb->dma_coherent = dma_coherent;
- acb->dma_coherent_handle = dma_coherent_handle;
+ acb->dma_coherent = dma_coherent;
+ acb->dma_coherent_handle = dma_coherent_handle;
- if (((unsigned long)dma_coherent & 0x1F)) {
- dma_coherent = dma_coherent +
- (0x20 - ((unsigned long)dma_coherent & 0x1F));
- dma_coherent_handle = dma_coherent_handle +
- (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
- }
+ if (((unsigned long)dma_coherent & 0x1F)) {
+ dma_coherent = dma_coherent +
+ (0x20 - ((unsigned long)dma_coherent & 0x1F));
+ dma_coherent_handle = dma_coherent_handle +
+ (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
+ }
- dma_addr = dma_coherent_handle;
- ccb_tmp = (struct CommandControlBlock *)dma_coherent;
- for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
- ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
- ccb_tmp->acb = acb;
- acb->pccb_pool[i] = ccb_tmp;
- list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
- dma_addr = dma_addr + sizeof (struct CommandControlBlock);
- ccb_tmp++;
- }
+ dma_addr = dma_coherent_handle;
+ ccb_tmp = (struct CommandControlBlock *)dma_coherent;
+ for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
+ ccb_tmp->acb = acb;
+ acb->pccb_pool[i] = ccb_tmp;
+ list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
+ dma_addr = dma_addr + sizeof(struct CommandControlBlock);
+ ccb_tmp++;
+ }
- acb->vir2phy_offset = (unsigned long)ccb_tmp -
- (unsigned long)dma_addr;
- for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
- for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
- acb->devstate[i][j] = ARECA_RAID_GOOD;
+ acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
+ for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
+ for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
+ acb->devstate[i][j] = ARECA_RAID_GONE;
- /*
- ** here we need to tell iop 331 our ccb_tmp.HighPart
- ** if ccb_tmp.HighPart is not zero
- */
- ccb_phyaddr_hi32 = (uint32_t) ((dma_coherent_handle >> 16) >> 16);
- if (ccb_phyaddr_hi32 != 0) {
- writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->message_rwbuffer[0]);
- writel(ccb_phyaddr_hi32, &reg->message_rwbuffer[1]);
- writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
- if (arcmsr_wait_msgint_ready(acb))
- printk(KERN_NOTICE "arcmsr%d: "
- "'set ccb high part physical address' timeout\n",
- acb->host->host_no);
- }
+ /*
+ ** here we need to tell iop 331 our ccb_tmp.HighPart
+ ** if ccb_tmp.HighPart is not zero
+ */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+
+ struct pci_dev *pdev = acb->pdev;
+ struct MessageUnit_B *reg;
+ void *mem_base0, *mem_base1;
+ void *dma_coherent;
+ dma_addr_t dma_coherent_handle, dma_addr;
+ uint32_t intmask_org;
+ struct CommandControlBlock *ccb_tmp;
+ int i, j;
+
+ dma_coherent = dma_alloc_coherent(&pdev->dev,
+ ((ARCMSR_MAX_FREECCB_NUM *
+ sizeof(struct CommandControlBlock) + 0x20) +
+ sizeof(struct MessageUnit_B)),
+ &dma_coherent_handle, GFP_KERNEL);
+ if (!dma_coherent)
+ return -ENOMEM;
- writel(readl(&reg->outbound_intmask) |
- ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
- &reg->outbound_intmask);
+ acb->dma_coherent = dma_coherent;
+ acb->dma_coherent_handle = dma_coherent_handle;
+
+ if (((unsigned long)dma_coherent & 0x1F)) {
+ dma_coherent = dma_coherent +
+ (0x20 - ((unsigned long)dma_coherent & 0x1F));
+ dma_coherent_handle = dma_coherent_handle +
+ (0x20 - ((unsigned long)dma_coherent_handle & 0x1F));
+ }
+
+ reg = (struct MessageUnit_B *)(dma_coherent +
+ ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
+
+ dma_addr = dma_coherent_handle;
+ ccb_tmp = (struct CommandControlBlock *)dma_coherent;
+ for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
+ ccb_tmp->cdb_shifted_phyaddr = dma_addr >> 5;
+ ccb_tmp->acb = acb;
+ acb->pccb_pool[i] = ccb_tmp;
+ list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
+ dma_addr = dma_addr + sizeof(struct CommandControlBlock);
+ ccb_tmp++;
+ }
+
+ reg = (struct MessageUnit_B *)(dma_coherent +
+ ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
+ acb->pmu = (struct MessageUnit_B *)reg;
+ mem_base0 = ioremap(pci_resource_start(pdev, 0),
+ pci_resource_len(pdev, 0));
+ mem_base1 = ioremap(pci_resource_start(pdev, 2),
+ pci_resource_len(pdev, 2));
+ reg->drv2iop_doorbell_reg = (uint32_t *)((char *)mem_base0 +
+ ARCMSR_DRV2IOP_DOORBELL);
+ reg->drv2iop_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 +
+ ARCMSR_DRV2IOP_DOORBELL_MASK);
+ reg->iop2drv_doorbell_reg = (uint32_t *)((char *)mem_base0 +
+ ARCMSR_IOP2DRV_DOORBELL);
+ reg->iop2drv_doorbell_mask_reg = (uint32_t *)((char *)mem_base0 +
+ ARCMSR_IOP2DRV_DOORBELL_MASK);
+ reg->ioctl_wbuffer_reg = (uint32_t *)((char *)mem_base1 +
+ ARCMSR_IOCTL_WBUFFER);
+ reg->ioctl_rbuffer_reg = (uint32_t *)((char *)mem_base1 +
+ ARCMSR_IOCTL_RBUFFER);
+ reg->msgcode_rwbuffer_reg = (uint32_t *)((char *)mem_base1 +
+ ARCMSR_MSGCODE_RWBUFFER);
+
+ acb->vir2phy_offset = (unsigned long)ccb_tmp -(unsigned long)dma_addr;
+ for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
+ for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
+ acb->devstate[i][j] = ARECA_RAID_GOOD;
+
+ /*
+ ** here we need to tell iop 331 our ccb_tmp.HighPart
+ ** if ccb_tmp.HighPart is not zero
+ */
+ intmask_org = arcmsr_disable_outbound_ints(acb);
+ }
+ break;
+ }
return 0;
}
@@ -310,16 +409,11 @@ static int arcmsr_probe(struct pci_dev *pdev,
host->unique_id = (bus << 8) | dev_fun;
host->irq = pdev->irq;
error = pci_request_regions(pdev, "arcmsr");
- if (error)
+ if (error) {
goto out_host_put;
-
- acb->pmu = ioremap(pci_resource_start(pdev, 0),
- pci_resource_len(pdev, 0));
- if (!acb->pmu) {
- printk(KERN_NOTICE "arcmsr%d: memory"
- " mapping region fail \n", acb->host->host_no);
- goto out_release_regions;
}
+ arcmsr_define_adapter_type(acb);
+
acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
ACB_F_MESSAGE_RQBUFFER_CLEARED |
ACB_F_MESSAGE_WQBUFFER_READED);
@@ -328,10 +422,10 @@ static int arcmsr_probe(struct pci_dev *pdev,
error = arcmsr_alloc_ccb_pool(acb);
if (error)
- goto out_iounmap;
+ goto out_release_regions;
error = request_irq(pdev->irq, arcmsr_do_interrupt,
- IRQF_DISABLED | IRQF_SHARED, "arcmsr", acb);
+ IRQF_SHARED, "arcmsr", acb);
if (error)
goto out_free_ccb_pool;
@@ -349,14 +443,15 @@ static int arcmsr_probe(struct pci_dev *pdev,
goto out_free_sysfs;
scsi_scan_host(host);
+ #ifdef CONFIG_SCSI_ARCMSR_AER
pci_enable_pcie_error_reporting(pdev);
+ #endif
return 0;
out_free_sysfs:
out_free_irq:
free_irq(pdev->irq, acb);
out_free_ccb_pool:
arcmsr_free_ccb_pool(acb);
- out_iounmap:
iounmap(acb->pmu);
out_release_regions:
pci_release_regions(pdev);
@@ -368,17 +463,84 @@ static int arcmsr_probe(struct pci_dev *pdev,
return error;
}
-static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ uint32_t Index;
+ uint8_t Retries = 0x00;
+
+ do {
+ for (Index = 0; Index < 100; Index++) {
+ if (readl(&reg->outbound_intstatus) &
+ ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
+ writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
+ &reg->outbound_intstatus);
+ return 0x00;
+ }
+ msleep(10);
+ }/*max 1 seconds*/
+
+ } while (Retries++ < 20);/*max 20 sec*/
+ return 0xff;
+}
+
+static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ uint32_t Index;
+ uint8_t Retries = 0x00;
+
+ do {
+ for (Index = 0; Index < 100; Index++) {
+ if (readl(reg->iop2drv_doorbell_reg)
+ & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
+ writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN
+ , reg->iop2drv_doorbell_reg);
+ return 0x00;
+ }
+ msleep(10);
+ }/*max 1 seconds*/
+
+ } while (Retries++ < 20);/*max 20 sec*/
+ return 0xff;
+}
+
+static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
- struct MessageUnit __iomem *reg = acb->pmu;
+ struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
- if (arcmsr_wait_msgint_ready(acb))
+ if (arcmsr_hba_wait_msgint_ready(acb))
printk(KERN_NOTICE
"arcmsr%d: wait 'abort all outstanding command' timeout \n"
, acb->host->host_no);
}
+static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+
+ writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
+ if (arcmsr_hbb_wait_msgint_ready(acb))
+ printk(KERN_NOTICE
+ "arcmsr%d: wait 'abort all outstanding command' timeout \n"
+ , acb->host->host_no);
+}
+
+static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ arcmsr_abort_hba_allcmd(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ arcmsr_abort_hbb_allcmd(acb);
+ }
+ }
+}
+
static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
struct scsi_cmnd *pcmd = ccb->pcmd;
@@ -400,28 +562,239 @@ static void arcmsr_ccb_complete(struct CommandControlBlock *ccb, int stand_flag)
pcmd->scsi_done(pcmd);
}
+static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ int retry_count = 30;
+
+ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
+ do {
+ if (!arcmsr_hba_wait_msgint_ready(acb))
+ break;
+ else {
+ retry_count--;
+ printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
+ timeout, retry count down = %d \n", acb->host->host_no, retry_count);
+ }
+ } while (retry_count != 0);
+}
+
+static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ int retry_count = 30;
+
+ writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell_reg);
+ do {
+ if (!arcmsr_hbb_wait_msgint_ready(acb))
+ break;
+ else {
+ retry_count--;
+ printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \
+ timeout,retry count down = %d \n", acb->host->host_no, retry_count);
+ }
+ } while (retry_count != 0);
+}
+
+static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ arcmsr_flush_hba_cache(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ arcmsr_flush_hbb_cache(acb);
+ }
+ }
+}
+
+static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
+{
+
+ struct scsi_cmnd *pcmd = ccb->pcmd;
+ struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
+
+ pcmd->result = DID_OK << 16;
+ if (sensebuffer) {
+ int sense_data_length =
+ sizeof(struct SENSE_DATA) < sizeof(pcmd->sense_buffer)
+ ? sizeof(struct SENSE_DATA) : sizeof(pcmd->sense_buffer);
+ memset(sensebuffer, 0, sizeof(pcmd->sense_buffer));
+ memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
+ sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
+ sensebuffer->Valid = 1;
+ }
+}
+
+static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
+{
+ u32 orig_mask = 0;
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A : {
+ struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ orig_mask = readl(&reg->outbound_intmask)|\
+ ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
+ writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
+ &reg->outbound_intmask);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B : {
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \
+ (~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
+ writel(0, reg->iop2drv_doorbell_mask_reg);
+ }
+ break;
+ }
+ return orig_mask;
+}
+
+static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, \
+ struct CommandControlBlock *ccb, uint32_t flag_ccb)
+{
+
+ uint8_t id, lun;
+ id = ccb->pcmd->device->id;
+ lun = ccb->pcmd->device->lun;
+ if (!(flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR)) {
+ if (acb->devstate[id][lun] == ARECA_RAID_GONE)
+ acb->devstate[id][lun] = ARECA_RAID_GOOD;
+ ccb->pcmd->result = DID_OK << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ } else {
+ switch (ccb->arcmsr_cdb.DeviceStatus) {
+ case ARCMSR_DEV_SELECT_TIMEOUT: {
+ acb->devstate[id][lun] = ARECA_RAID_GONE;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_ABORTED:
+
+ case ARCMSR_DEV_INIT_FAIL: {
+ acb->devstate[id][lun] = ARECA_RAID_GONE;
+ ccb->pcmd->result = DID_BAD_TARGET << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ case ARCMSR_DEV_CHECK_CONDITION: {
+ acb->devstate[id][lun] = ARECA_RAID_GOOD;
+ arcmsr_report_sense_info(ccb);
+ arcmsr_ccb_complete(ccb, 1);
+ }
+ break;
+
+ default:
+ printk(KERN_NOTICE
+ "arcmsr%d: scsi id = %d lun = %d"
+ " isr get command error done, "
+ "but got unknown DeviceStatus = 0x%x \n"
+ , acb->host->host_no
+ , id
+ , lun
+ , ccb->arcmsr_cdb.DeviceStatus);
+ acb->devstate[id][lun] = ARECA_RAID_GONE;
+ ccb->pcmd->result = DID_NO_CONNECT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ break;
+ }
+ }
+}
+
+static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, uint32_t flag_ccb)
+
+{
+ struct CommandControlBlock *ccb;
+
+ ccb = (struct CommandControlBlock *)(acb->vir2phy_offset + (flag_ccb << 5));
+ if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
+ if (ccb->startdone == ARCMSR_CCB_ABORTED) {
+ struct scsi_cmnd *abortcmd = ccb->pcmd;
+ if (abortcmd) {
+ abortcmd->result |= DID_ABORT << 16;
+ arcmsr_ccb_complete(ccb, 1);
+ printk(KERN_NOTICE "arcmsr%d: ccb ='0x%p' \
+ isr got aborted command \n", acb->host->host_no, ccb);
+ }
+ }
+ printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \
+ done acb = '0x%p'"
+ "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
+ " ccboutstandingcount = %d \n"
+ , acb->host->host_no
+ , acb
+ , ccb
+ , ccb->acb
+ , ccb->startdone
+ , atomic_read(&acb->ccboutstandingcount));
+ }
+ arcmsr_report_ccb_state(acb, ccb, flag_ccb);
+}
+
+static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
+{
+ int i = 0;
+ uint32_t flag_ccb;
+
+ switch (acb->adapter_type) {
+
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = \
+ (struct MessageUnit_A *)acb->pmu;
+ uint32_t outbound_intstatus;
+ outbound_intstatus = readl(&reg->outbound_intstatus) & \
+ acb->outbound_int_enable;
+ /*clear and abort all outbound posted Q*/
+ writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
+ while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) \
+ && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
+ arcmsr_drain_donequeue(acb, flag_ccb);
+ }
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ /*clear all outbound posted Q*/
+ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
+ if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
+ writel(0, &reg->done_qbuffer[i]);
+ arcmsr_drain_donequeue(acb, flag_ccb);
+ }
+ writel(0, &reg->post_qbuffer[i]);
+ }
+ reg->doneq_index = 0;
+ reg->postq_index = 0;
+ }
+ break;
+ }
+}
static void arcmsr_remove(struct pci_dev *pdev)
{
struct Scsi_Host *host = pci_get_drvdata(pdev);
struct AdapterControlBlock *acb =
(struct AdapterControlBlock *) host->hostdata;
- struct MessageUnit __iomem *reg = acb->pmu;
int poll_count = 0;
arcmsr_free_sysfs_attr(acb);
scsi_remove_host(host);
arcmsr_stop_adapter_bgrb(acb);
arcmsr_flush_adapter_cache(acb);
- writel(readl(&reg->outbound_intmask) |
- ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
- &reg->outbound_intmask);
+ arcmsr_disable_outbound_ints(acb);
acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
acb->acb_flags &= ~ACB_F_IOP_INITED;
- for (poll_count = 0; poll_count < 256; poll_count++) {
+ for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
if (!atomic_read(&acb->ccboutstandingcount))
break;
- arcmsr_interrupt(acb);
+ arcmsr_interrupt(acb);/* FIXME: need spinlock */
msleep(25);
}
@@ -429,8 +802,7 @@ static void arcmsr_remove(struct pci_dev *pdev)
int i;
arcmsr_abort_allcmd(acb);
- for (i = 0; i < ARCMSR_MAX_OUTSTANDING_CMD; i++)
- readl(&reg->outbound_queueport);
+ arcmsr_done4abort_postqueue(acb);
for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
struct CommandControlBlock *ccb = acb->pccb_pool[i];
if (ccb->startdone == ARCMSR_CCB_START) {
@@ -477,75 +849,32 @@ static void arcmsr_module_exit(void)
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);
-static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
-{
- struct MessageUnit __iomem *reg = acb->pmu;
- u32 orig_mask = readl(&reg->outbound_intmask);
-
- writel(orig_mask | ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
- &reg->outbound_intmask);
- return orig_mask;
-}
-
-static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
- u32 orig_mask)
+static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
+ u32 intmask_org)
{
- struct MessageUnit __iomem *reg = acb->pmu;
u32 mask;
- mask = orig_mask & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
- ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
- writel(mask, &reg->outbound_intmask);
-}
-
-static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
-{
- struct MessageUnit __iomem *reg = acb->pmu;
-
- writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
- if (arcmsr_wait_msgint_ready(acb))
- printk(KERN_NOTICE
- "arcmsr%d: wait 'flush adapter cache' timeout \n"
- , acb->host->host_no);
-}
+ switch (acb->adapter_type) {
-static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
-{
- struct scsi_cmnd *pcmd = ccb->pcmd;
- struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;
+ case ACB_ADAPTER_TYPE_A : {
+ struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
+ ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
+ writel(mask, &reg->outbound_intmask);
+ acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
+ }
+ break;
- pcmd->result = DID_OK << 16;
- if (sensebuffer) {
- int sense_data_length =
- sizeof (struct SENSE_DATA) < sizeof (pcmd->sense_buffer)
- ? sizeof (struct SENSE_DATA) : sizeof (pcmd->sense_buffer);
- memset(sensebuffer, 0, sizeof (pcmd->sense_buffer));
- memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
- sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
- sensebuffer->Valid = 1;
+ case ACB_ADAPTER_TYPE_B : {
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \
+ ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
+ writel(mask, reg->iop2drv_doorbell_mask_reg);
+ acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
+ }
}
}
-static uint8_t arcmsr_wait_msgint_ready(struct AdapterControlBlock *acb)
-{
- struct MessageUnit __iomem *reg = acb->pmu;
- uint32_t Index;
- uint8_t Retries = 0x00;
-
- do {
- for (Index = 0; Index < 100; Index++) {
- if (readl(&reg->outbound_intstatus)
- & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
- writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT
- , &reg->outbound_intstatus);
- return 0x00;
- }
- msleep_interruptible(10);
- }/*max 1 seconds*/
- } while (Retries++ < 20);/*max 20 sec*/
- return 0xff;
-}
-
static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
@@ -556,7 +885,7 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
int nseg;
ccb->pcmd = pcmd;
- memset(arcmsr_cdb, 0, sizeof (struct ARCMSR_CDB));
+ memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
arcmsr_cdb->Bus = 0;
arcmsr_cdb->TargetID = pcmd->device->id;
arcmsr_cdb->LUN = pcmd->device->lun;
@@ -609,52 +938,85 @@ static void arcmsr_build_ccb(struct AdapterControlBlock *acb,
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
- struct MessageUnit __iomem *reg = acb->pmu;
uint32_t cdb_shifted_phyaddr = ccb->cdb_shifted_phyaddr;
struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
-
atomic_inc(&acb->ccboutstandingcount);
ccb->startdone = ARCMSR_CCB_START;
- if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
- writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
+
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A *reg = (struct MessageUnit_A *)acb->pmu;
+
+ if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
+ writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
&reg->inbound_queueport);
- else
- writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
-}
+ else {
+ writel(cdb_shifted_phyaddr, &reg->inbound_queueport);
+ }
+ }
+ break;
-void arcmsr_post_Qbuffer(struct AdapterControlBlock *acb)
-{
- struct MessageUnit __iomem *reg = acb->pmu;
- struct QBUFFER __iomem *pwbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
- uint8_t __iomem *iop_data = (uint8_t __iomem *) pwbuffer->data;
- int32_t allxfer_len = 0;
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ uint32_t ending_index, index = reg->postq_index;
- if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
- acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
- while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
- && (allxfer_len < 124)) {
- writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
- acb->wqbuf_firstindex++;
- acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
- iop_data++;
- allxfer_len++;
+ ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
+ writel(0, &reg->post_qbuffer[ending_index]);
+ if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
+ writel(cdb_shifted_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
+ &reg->post_qbuffer[index]);
+ }
+ else {
+ writel(cdb_shifted_phyaddr, &reg->post_qbuffer[index]);
+ }
+ index++;
+ index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
+ reg->postq_index = index;
+ writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell_reg);
}
- writel(allxfer_len, &pwbuffer->data_len);
- writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK
- , &reg->inbound_doorbell);
+ break;
}
}
-static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
- struct MessageUnit __iomem *reg = acb->pmu;
-
+ struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
- if (arcmsr_wait_msgint_ready(acb))
+
+ if (arcmsr_hba_wait_msgint_ready(acb)) {
+ printk(KERN_NOTICE
+ "arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
+ , acb->host->host_no);
+ }
+}
+
+static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
+{
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
+ writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell_reg);
+
+ if (arcmsr_hbb_wait_msgint_ready(acb)) {
printk(KERN_NOTICE
"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
, acb->host->host_no);
+ }
+}
+
+static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ arcmsr_stop_hba_bgrb(acb);
+ }
+ break;
+
+ case ACB_ADAPTER_TYPE_B: {
+ arcmsr_stop_hbb_bgrb(acb);
+ }
+ break;
+ }
}
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
@@ -665,151 +1027,260 @@ static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
acb->dma_coherent_handle);
}
-static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
+void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
- struct MessageUnit __iomem *reg = acb->pmu;
- struct CommandControlBlock *ccb;
- uint32_t flag_ccb, outbound_intstatus, outbound_doorbell;
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
+ writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
+ }
+ break;
- outbound_intstatus = readl(&reg->outbound_intstatus)
- & acb->outbound_int_enable;
- writel(outbound_intstatus, &reg->outbound_intstatus);
- if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
- outbound_doorbell = readl(&reg->outbound_doorbell);
- writel(outbound_doorbell, &reg->outbound_doorbell);
- if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
- struct QBUFFER __iomem * prbuffer =
- (struct QBUFFER __iomem *) &reg->message_rbuffer;
- uint8_t __iomem * iop_data = (uint8_t __iomem *)prbuffer->data;
- int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
-
- rqbuf_lastindex = acb->rqbuf_lastindex;
- rqbuf_firstindex = acb->rqbuf_firstindex;
- iop_len = readl(&prbuffer->data_len);
- my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1)
- &(ARCMSR_MAX_QBUFFER - 1);
- if (my_empty_len >= iop_len) {
- while (iop_len > 0) {
- acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
- acb->rqbuf_lastindex++;
- acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
- iop_data++;
- iop_len--;
- }
- writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
- &reg->inbound_doorbell);
- } else
- acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
- }
- if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
- acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
- if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
- struct QBUFFER __iomem * pwbuffer =
- (struct QBUFFER __iomem *) &reg->message_wbuffer;
- uint8_t __iomem * iop_data = (uint8_t __iomem *) pwbuffer->data;
- int32_t allxfer_len = 0;
-
- acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
- while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex)
- && (allxfer_len < 124)) {
- writeb(acb->wqbuffer[acb->wqbuf_firstindex], iop_data);
- acb->wqbuf_firstindex++;
- acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
- iop_data++;
- allxfer_len++;
- }
- writel(allxfer_len, &pwbuffer->data_len);
- writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK,
- &reg->inbound_doorbell);
- }
- if (acb->wqbuf_firstindex == acb->wqbuf_lastindex)
- acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
+ case ACB_ADAPTER_TYPE_B: {
+ struct MessageUnit_B *reg = (struct MessageUnit_B *)acb->pmu;
+ writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell_reg);
}
+ break;
}
- if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
- int id, lun;
+}
+
+static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
+{
+ switch (acb->adapter_type) {
+ case ACB_ADAPTER_TYPE_A: {
+ struct MessageUnit_A __iomem *reg = (struct MessageUnit_A *)acb->pmu;
/*
- ****************************************************************
- ** areca cdb command done
- ****************************************************************
+ ** push inbound doorbell tell iop, driver data write ok
+ ** and wait reply on next hwinterrupt for next Qbuffer post
*/