Diffstat (limited to 'drivers/block/rsxx')
-rw-r--r--  drivers/block/rsxx/core.c       | 365
-rw-r--r--  drivers/block/rsxx/cregs.c      |  14
-rw-r--r--  drivers/block/rsxx/dev.c        |  43
-rw-r--r--  drivers/block/rsxx/dma.c        | 293
-rw-r--r--  drivers/block/rsxx/rsxx_priv.h  |  19
5 files changed, 549 insertions, 185 deletions
diff --git a/drivers/block/rsxx/core.c b/drivers/block/rsxx/core.c
index 5af21f2db29..a8de2eec6ff 100644
--- a/drivers/block/rsxx/core.c
+++ b/drivers/block/rsxx/core.c
@@ -31,6 +31,8 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/delay.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
 #include <linux/genhd.h>
 #include <linux/idr.h>
@@ -39,8 +41,9 @@
 #include "rsxx_cfg.h"
 
 #define NO_LEGACY 0
+#define SYNC_START_TIMEOUT (10 * 60) /* 10 minutes */
 
-MODULE_DESCRIPTION("IBM FlashSystem 70/80 PCIe SSD Device Driver");
+MODULE_DESCRIPTION("IBM Flash Adapter 900GB Full Height Device Driver");
 MODULE_AUTHOR("Joshua Morris/Philip Kelleher, IBM");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRIVER_VERSION);
@@ -49,9 +52,282 @@ static unsigned int force_legacy = NO_LEGACY;
 module_param(force_legacy, uint, 0444);
 MODULE_PARM_DESC(force_legacy, "Force the use of legacy type PCI interrupts");
 
+static unsigned int sync_start = 1;
+module_param(sync_start, uint, 0444);
+MODULE_PARM_DESC(sync_start, "On by Default: Driver load will not complete "
+                             "until the card startup has completed.");
+
 static DEFINE_IDA(rsxx_disk_ida);
 static DEFINE_SPINLOCK(rsxx_ida_lock);
 
+/* --------------------Debugfs Setup ------------------- */
+
+struct rsxx_cram {
+        u32 f_pos;
+        u32 offset;
+        void *i_private;
+};
+
+static int rsxx_attr_pci_regs_show(struct seq_file *m, void *p)
+{
+        struct rsxx_cardinfo *card = m->private;
+
+        seq_printf(m, "HWID            0x%08x\n",
+                                        ioread32(card->regmap + HWID));
+        seq_printf(m, "SCRATCH         0x%08x\n",
+                                        ioread32(card->regmap + SCRATCH));
+        seq_printf(m, "IER             0x%08x\n",
+                                        ioread32(card->regmap + IER));
+        seq_printf(m, "IPR             0x%08x\n",
+                                        ioread32(card->regmap + IPR));
+        seq_printf(m, "CREG_CMD        0x%08x\n",
+                                        ioread32(card->regmap + CREG_CMD));
+        seq_printf(m, "CREG_ADD        0x%08x\n",
+                                        ioread32(card->regmap + CREG_ADD));
+        seq_printf(m, "CREG_CNT        0x%08x\n",
+                                        ioread32(card->regmap + CREG_CNT));
+        seq_printf(m, "CREG_STAT       0x%08x\n",
+                                        ioread32(card->regmap + CREG_STAT));
+        seq_printf(m, "CREG_DATA0      0x%08x\n",
+                                        ioread32(card->regmap + CREG_DATA0));
+        seq_printf(m, "CREG_DATA1      0x%08x\n",
+                                        ioread32(card->regmap + CREG_DATA1));
+        seq_printf(m, "CREG_DATA2      0x%08x\n",
+                                        ioread32(card->regmap + CREG_DATA2));
+        seq_printf(m, "CREG_DATA3      0x%08x\n",
+                                        ioread32(card->regmap + CREG_DATA3));
+        seq_printf(m, "CREG_DATA4      0x%08x\n",
+                                        ioread32(card->regmap + CREG_DATA4));
+        seq_printf(m, "CREG_DATA5      0x%08x\n",
+                                        ioread32(card->regmap + CREG_DATA5));
+        seq_printf(m, "CREG_DATA6      0x%08x\n",
+                                        ioread32(card->regmap + CREG_DATA6));
+        seq_printf(m, "CREG_DATA7      0x%08x\n",
+                                        ioread32(card->regmap + CREG_DATA7));
+        seq_printf(m, "INTR_COAL       0x%08x\n",
+                                        ioread32(card->regmap + INTR_COAL));
+        seq_printf(m, "HW_ERROR        0x%08x\n",
+                                        ioread32(card->regmap + HW_ERROR));
+        seq_printf(m, "DEBUG0          0x%08x\n",
+                                        ioread32(card->regmap + PCI_DEBUG0));
+        seq_printf(m, "DEBUG1          0x%08x\n",
+                                        ioread32(card->regmap + PCI_DEBUG1));
+        seq_printf(m, "DEBUG2          0x%08x\n",
+                                        ioread32(card->regmap + PCI_DEBUG2));
+        seq_printf(m, "DEBUG3          0x%08x\n",
+                                        ioread32(card->regmap + PCI_DEBUG3));
+        seq_printf(m, "DEBUG4          0x%08x\n",
+                                        ioread32(card->regmap + PCI_DEBUG4));
+        seq_printf(m, "DEBUG5          0x%08x\n",
+                                        ioread32(card->regmap + PCI_DEBUG5));
+        seq_printf(m, "DEBUG6          0x%08x\n",
+                                        ioread32(card->regmap + PCI_DEBUG6));
+        seq_printf(m, "DEBUG7          0x%08x\n",
+                                        ioread32(card->regmap + PCI_DEBUG7));
+        seq_printf(m, "RECONFIG        0x%08x\n",
+                                        ioread32(card->regmap + PCI_RECONFIG));
+
+        return 0;
+}
+
+static int rsxx_attr_stats_show(struct seq_file *m, void *p)
+{
+        struct rsxx_cardinfo *card = m->private;
+        int i;
+
+        for (i = 0; i < card->n_targets; i++) {
+                seq_printf(m, "Ctrl %d CRC Errors       = %d\n",
+                                i, card->ctrl[i].stats.crc_errors);
+                seq_printf(m, "Ctrl %d Hard Errors      = %d\n",
+                                i, card->ctrl[i].stats.hard_errors);
+                seq_printf(m, "Ctrl %d Soft Errors      = %d\n",
+                                i, card->ctrl[i].stats.soft_errors);
+                seq_printf(m, "Ctrl %d Writes Issued    = %d\n",
+                                i, card->ctrl[i].stats.writes_issued);
+                seq_printf(m, "Ctrl %d Writes Failed    = %d\n",
+                                i, card->ctrl[i].stats.writes_failed);
+                seq_printf(m, "Ctrl %d Reads Issued     = %d\n",
+                                i, card->ctrl[i].stats.reads_issued);
+                seq_printf(m, "Ctrl %d Reads Failed     = %d\n",
+                                i, card->ctrl[i].stats.reads_failed);
+                seq_printf(m, "Ctrl %d Reads Retried    = %d\n",
+                                i, card->ctrl[i].stats.reads_retried);
+                seq_printf(m, "Ctrl %d Discards Issued  = %d\n",
+                                i, card->ctrl[i].stats.discards_issued);
+                seq_printf(m, "Ctrl %d Discards Failed  = %d\n",
+                                i, card->ctrl[i].stats.discards_failed);
+                seq_printf(m, "Ctrl %d DMA SW Errors    = %d\n",
+                                i, card->ctrl[i].stats.dma_sw_err);
+                seq_printf(m, "Ctrl %d DMA HW Faults    = %d\n",
+                                i, card->ctrl[i].stats.dma_hw_fault);
+                seq_printf(m, "Ctrl %d DMAs Cancelled   = %d\n",
+                                i, card->ctrl[i].stats.dma_cancelled);
+                seq_printf(m, "Ctrl %d SW Queue Depth   = %d\n",
+                                i, card->ctrl[i].stats.sw_q_depth);
+                seq_printf(m, "Ctrl %d HW Queue Depth   = %d\n",
+                        i, atomic_read(&card->ctrl[i].stats.hw_q_depth));
+        }
+
+        return 0;
+}
+
+static int rsxx_attr_stats_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, rsxx_attr_stats_show, inode->i_private);
+}
+
+static int rsxx_attr_pci_regs_open(struct inode *inode, struct file *file)
+{
+        return single_open(file, rsxx_attr_pci_regs_show, inode->i_private);
+}
+
+static ssize_t rsxx_cram_read(struct file *fp, char __user *ubuf,
+                              size_t cnt, loff_t *ppos)
+{
+        struct rsxx_cram *info = fp->private_data;
+        struct rsxx_cardinfo *card = info->i_private;
+        char *buf;
+        int st;
+
+        buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
+        if (!buf)
+                return -ENOMEM;
+
+        info->f_pos = (u32)*ppos + info->offset;
+
+        st = rsxx_creg_read(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
+        if (st)
+                return st;
+
+        st = copy_to_user(ubuf, buf, cnt);
+        if (st)
+                return st;
+
+        info->offset += cnt;
+
+        kfree(buf);
+
+        return cnt;
+}
+
+static ssize_t rsxx_cram_write(struct file *fp, const char __user *ubuf,
+                               size_t cnt, loff_t *ppos)
+{
+        struct rsxx_cram *info = fp->private_data;
+        struct rsxx_cardinfo *card = info->i_private;
+        char *buf;
+        int st;
+
+        buf = kzalloc(sizeof(*buf) * cnt, GFP_KERNEL);
+        if (!buf)
+                return -ENOMEM;
+
+        st = copy_from_user(buf, ubuf, cnt);
+        if (st)
+                return st;
+
+        info->f_pos = (u32)*ppos + info->offset;
+
+        st = rsxx_creg_write(card, CREG_ADD_CRAM + info->f_pos, cnt, buf, 1);
+        if (st)
+                return st;
+
+        info->offset += cnt;
+
+        kfree(buf);
+
+        return cnt;
+}
+
+static int rsxx_cram_open(struct inode *inode, struct file *file)
+{
+        struct rsxx_cram *info = kzalloc(sizeof(*info), GFP_KERNEL);
+        if (!info)
+                return -ENOMEM;
+
+        info->i_private = inode->i_private;
+        info->f_pos = file->f_pos;
+        file->private_data = info;
+
+        return 0;
+}
+
+static int rsxx_cram_release(struct inode *inode, struct file *file)
+{
+        struct rsxx_cram *info = file->private_data;
+
+        if (!info)
+                return 0;
+
+        kfree(info);
+        file->private_data = NULL;
+
+        return 0;
+}
+
+static const struct file_operations debugfs_cram_fops = {
+        .owner          = THIS_MODULE,
+        .open           = rsxx_cram_open,
+        .read           = rsxx_cram_read,
+        .write          = rsxx_cram_write,
+        .release        = rsxx_cram_release,
+};
+
+static const struct file_operations debugfs_stats_fops = {
+        .owner          = THIS_MODULE,
+        .open           = rsxx_attr_stats_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = single_release,
+};
+
+static const struct file_operations debugfs_pci_regs_fops = {
+        .owner          = THIS_MODULE,
+        .open           = rsxx_attr_pci_regs_open,
+        .read           = seq_read,
+        .llseek         = seq_lseek,
+        .release        = single_release,
+};
+
+static void rsxx_debugfs_dev_new(struct rsxx_cardinfo *card)
+{
+        struct dentry *debugfs_stats;
+        struct dentry *debugfs_pci_regs;
+        struct dentry *debugfs_cram;
+
+        card->debugfs_dir = debugfs_create_dir(card->gendisk->disk_name, NULL);
+        if (IS_ERR_OR_NULL(card->debugfs_dir))
+                goto failed_debugfs_dir;
+
+        debugfs_stats = debugfs_create_file("stats", S_IRUGO,
+                                            card->debugfs_dir, card,
+                                            &debugfs_stats_fops);
+        if (IS_ERR_OR_NULL(debugfs_stats))
+                goto failed_debugfs_stats;
+
+        debugfs_pci_regs = debugfs_create_file("pci_regs", S_IRUGO,
+                                               card->debugfs_dir, card,
+                                               &debugfs_pci_regs_fops);
+        if (IS_ERR_OR_NULL(debugfs_pci_regs))
+                goto failed_debugfs_pci_regs;
+
+        debugfs_cram = debugfs_create_file("cram", S_IRUGO | S_IWUSR,
+                                           card->debugfs_dir, card,
+                                           &debugfs_cram_fops);
+        if (IS_ERR_OR_NULL(debugfs_cram))
+                goto failed_debugfs_cram;
+
+        return;
+failed_debugfs_cram:
+        debugfs_remove(debugfs_pci_regs);
+failed_debugfs_pci_regs:
+        debugfs_remove(debugfs_stats);
+failed_debugfs_stats:
+        debugfs_remove(card->debugfs_dir);
+failed_debugfs_dir:
+        card->debugfs_dir = NULL;
+}
+
 /*----------------- Interrupt Control & Handling -------------------*/
 
 static void rsxx_mask_interrupts(struct rsxx_cardinfo *card)
@@ -163,12 +439,13 @@ static irqreturn_t rsxx_isr(int irq, void *pdata)
        }
 
        if (isr & CR_INTR_CREG) {
-               schedule_work(&card->creg_ctrl.done_work);
+               queue_work(card->creg_ctrl.creg_wq,
+                          &card->creg_ctrl.done_work);
                handled++;
        }
 
        if (isr & CR_INTR_EVENT) {
-               schedule_work(&card->event_work);
+               queue_work(card->event_wq, &card->event_work);
                rsxx_disable_ier_and_isr(card, CR_INTR_EVENT);
                handled++;
        }
@@ -329,7 +606,7 @@ static int rsxx_eeh_frozen(struct pci_dev *dev)
        int i;
        int st;
 
-       dev_warn(&dev->dev, "IBM FlashSystem PCI: preparing for slot reset.\n");
+       dev_warn(&dev->dev, "IBM Flash Adapter PCI: preparing for slot reset.\n");
 
        card->eeh_state = 1;
        rsxx_mask_interrupts(card);
@@ -367,15 +644,27 @@ static void rsxx_eeh_failure(struct pci_dev *dev)
 {
        struct rsxx_cardinfo *card = pci_get_drvdata(dev);
        int i;
+       int cnt = 0;
 
-       dev_err(&dev->dev, "IBM FlashSystem PCI: disabling failed card.\n");
+       dev_err(&dev->dev, "IBM Flash Adapter PCI: disabling failed card.\n");
 
        card->eeh_state = 1;
+       card->halt = 1;
+
+       for (i = 0; i < card->n_targets; i++) {
+               spin_lock_bh(&card->ctrl[i].queue_lock);
+               cnt = rsxx_cleanup_dma_queue(&card->ctrl[i],
+                                            &card->ctrl[i].queue,
+                                            COMPLETE_DMA);
+               spin_unlock_bh(&card->ctrl[i].queue_lock);
 
-       for (i = 0; i < card->n_targets; i++)
-               del_timer_sync(&card->ctrl[i].activity_timer);
+               cnt += rsxx_dma_cancel(&card->ctrl[i]);
 
-       rsxx_eeh_cancel_dmas(card);
+               if (cnt)
+                       dev_info(CARD_TO_DEV(card),
+                               "Freed %d queued DMAs on channel %d\n",
+                               cnt, card->ctrl[i].id);
+       }
 }
 
 static int rsxx_eeh_fifo_flush_poll(struct rsxx_cardinfo *card)
@@ -432,7 +721,7 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
        int st;
 
        dev_warn(&dev->dev,
-               "IBM FlashSystem PCI: recovering from slot reset.\n");
+               "IBM Flash Adapter PCI: recovering from slot reset.\n");
 
        st = pci_enable_device(dev);
        if (st)
@@ -460,10 +749,6 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
 
        card->eeh_state = 0;
 
-       st = rsxx_eeh_remap_dmas(card);
-       if (st)
-               goto failed_remap_dmas;
-
        spin_lock_irqsave(&card->irq_lock, flags);
        if (card->n_targets & RSXX_MAX_TARGETS)
                rsxx_enable_ier_and_isr(card, CR_INTR_ALL_G);
@@ -485,12 +770,11 @@ static pci_ers_result_t rsxx_slot_reset(struct pci_dev *dev)
                        &card->ctrl[i].issue_dma_work);
        }
 
-       dev_info(&dev->dev, "IBM FlashSystem PCI: recovery complete.\n");
+       dev_info(&dev->dev, "IBM Flash Adapter PCI: recovery complete.\n");
 
        return PCI_ERS_RESULT_RECOVERED;
 
 failed_hw_buffers_init:
-failed_remap_dmas:
        for (i = 0; i < card->n_targets; i++) {
                if (card->ctrl[i].status.buf)
                        pci_free_consistent(card->dev,
@@ -528,6 +812,7 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 {
        struct rsxx_cardinfo *card;
        int st;
+       unsigned int sync_timeout;
 
        dev_info(&dev->dev, "PCI-Flash SSD discovered\n");
 
@@ -610,7 +895,11 @@ static int rsxx_pci_probe(struct pci_dev *dev,
        }
 
        /************* Setup Processor Command Interface *************/
-       rsxx_creg_setup(card);
+       st = rsxx_creg_setup(card);
+       if (st) {
+               dev_err(CARD_TO_DEV(card), "Failed to setup creg interface.\n");
+               goto failed_creg_setup;
+       }
 
        spin_lock_irq(&card->irq_lock);
        rsxx_enable_ier_and_isr(card, CR_INTR_CREG);
@@ -650,6 +939,12 @@ static int rsxx_pci_probe(struct pci_dev *dev,
        }
 
        /************* Setup Card Event Handler *************/
+       card->event_wq = create_singlethread_workqueue(DRIVER_NAME"_event");
+       if (!card->event_wq) {
+               dev_err(CARD_TO_DEV(card), "Failed card event setup.\n");
+               goto failed_event_handler;
+       }
+
        INIT_WORK(&card->event_work, card_event_handler);
 
        st = rsxx_setup_dev(card);
@@ -676,6 +971,33 @@ static int rsxx_pci_probe(struct pci_dev *dev,
                if (st)
                        dev_crit(CARD_TO_DEV(card),
                                "Failed issuing card startup\n");
+               if (sync_start) {
+                       sync_timeout = SYNC_START_TIMEOUT;
+
+                       dev_info(CARD_TO_DEV(card),
+                                "Waiting for card to startup\n");
+
+                       do {
+                               ssleep(1);
+                               sync_timeout--;
+
+                               rsxx_get_card_state(card, &card->state);
+                       } while (sync_timeout &&
+                               (card->state == CARD_STATE_STARTING));
+
+                       if (card->state == CARD_STATE_STARTING) {
+                               dev_warn(CARD_TO_DEV(card),
+                                        "Card startup timed out\n");
+                               card->size8 = 0;
+                       } else {
+                               dev_info(CARD_TO_DEV(card),
+                                       "card state: %s\n",
+                                       rsxx_card_state_to_str(card->state));
+                               st = rsxx_get_card_size8(card, &card->size8);
+                               if (st)
+                                       card->size8 = 0;
+                       }
+               }
        } else if (card->state == CARD_STATE_GOOD ||
                   card->state == CARD_STATE_RD_ONLY_FAULT) {
                st = rsxx_get_card_size8(card, &card->size8);
@@ -685,12 +1007,21 @@ static int rsxx_pci_probe(struct pci_dev *dev,
 
        rsxx_attach_dev(card);
 
+       /************* Setup Debugfs *************/
+       rsxx_debugfs_dev_new(card);
+
        return 0;
 
 failed_create_dev:
+       destroy_workqueue(card->event_wq);
+       card->event_wq = NULL;
+failed_event_handler:
        rsxx_dma_destroy(card);
 failed_dma_setup:
 failed_compatiblity_check:
+       destroy_workqueue(card->creg_ctrl.creg_wq);
+       card->creg_ctrl.creg_wq = NULL;
+failed_creg_setup:
        spin_lock_irq(&card->irq_lock);
        rsxx_disable_ier_and_isr(card, CR_INTR_ALL);
        spin_unlock_irq(&card->irq_lock);
@@ -756,6 +1087,8 @@ static void rsxx_pci_remove(struct pci_dev *dev)
        /* Prevent work_structs from re-queuing themselves. */
        card->halt = 1;
 
+       debugfs_remove_recursive(card->debugfs_dir);
+
        free_irq(dev->irq, card);
 
        if (!force_legacy)
diff --git a/drivers/block/rsxx/cregs.c b/drivers/block/rsxx/cregs.c
index 4b5c020a0a6..926dce9c452 100644
--- a/drivers/block/rsxx/cregs.c
+++ b/drivers/block/rsxx/cregs.c
@@ -431,6 +431,15 @@ static int __issue_creg_rw(struct rsxx_cardinfo *card,
        *hw_stat = completion.creg_status;
 
        if (completion.st) {
+               /*
+                * This read is needed to verify that there has not been any
+                * extreme errors that might have occurred, i.e. EEH. The
+                * function iowrite32 will not detect EEH errors, so it is
+                * necessary that we recover if such an error is the reason
+                * for the timeout. This is a dummy read.
+                */
+               ioread32(card->regmap + SCRATCH);
+
                dev_warn(CARD_TO_DEV(card),
                        "creg command failed(%d x%08x)\n",
                        completion.st, addr);
@@ -727,6 +736,11 @@ int rsxx_creg_setup(struct rsxx_cardinfo *card)
 {
        card->creg_ctrl.active_cmd = NULL;
 
+       card->creg_ctrl.creg_wq =
+                       create_singlethread_workqueue(DRIVER_NAME"_creg");
+       if (!card->creg_ctrl.creg_wq)
+               return -ENOMEM;
+
        INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
        mutex_init(&card->creg_ctrl.reset_lock);
        INIT_LIST_HEAD(&card->creg_ctrl.queue);
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 4346d17d294..2839d37e5af 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -155,7 +155,8 @@ static void bio_dma_done_cb(struct rsxx_cardinfo *card,
                atomic_set(&meta->error, 1);
 
        if (atomic_dec_and_test(&meta->pending_dmas)) {
-               disk_stats_complete(card, meta->bio, meta->start_time);
+               if (!card->eeh_state && card->gendisk)
+                       disk_stats_complete(card, meta->bio, meta->start_time);
 
                bio_endio(meta->bio, atomic_read(&meta->error) ? -EIO : 0);
                kmem_cache_free(bio_meta_pool, meta);
@@ -170,6 +171,12 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 
        might_sleep();
 
+       if (!card)
+               goto req_err;
+
+       if (bio_end_sector(bio) > get_capacity(card->gendisk))
+               goto req_err;
+
        if (unlikely(card->halt)) {
                st = -EFAULT;
                goto req_err;
@@ -180,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
                goto req_err;
        }
 
-       if (bio->bi_size == 0) {
+       if (bio->bi_iter.bi_size == 0) {
                dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
                goto req_err;
        }
@@ -196,11 +203,12 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
        atomic_set(&bio_meta->pending_dmas, 0);
        bio_meta->start_time = jiffies;
 
-       disk_stats_start(card, bio);
+       if (!unlikely(card->halt))
+               disk_stats_start(card, bio);
 
        dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
                bio_data_dir(bio) ? 'W' : 'R', bio_meta,
-               (u64)bio->bi_sector << 9, bio->bi_size);
+               (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
 
        st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
                                bio_dma_done_cb, bio_meta);
@@ -225,24 +233,6 @@ static bool rsxx_discard_supported(struct rsxx_cardinfo *card)
        return (pci_rev >= RSXX_DISCARD_SUPPORT);
 }
 
-static unsigned short rsxx_get_logical_block_size(
-                                       struct rsxx_cardinfo *card)
-{
-       u32 capabilities = 0;
-       int st;
-
-       st = rsxx_get_card_capabilities(card, &capabilities);
-       if (st)
-               dev_warn(CARD_TO_DEV(card),
-                       "Failed reading card capabilities register\n");
-
-       /* Earlier firmware did not have support for 512 byte accesses */
-       if (capabilities & CARD_CAP_SUBPAGE_WRITES)
-               return 512;
-       else
-               return RSXX_HW_BLK_SIZE;
-}
-
 int rsxx_attach_dev(struct rsxx_cardinfo *card)
 {
        mutex_lock(&card->dev_lock);
@@ -305,13 +295,15 @@ int rsxx_setup_dev(struct rsxx_cardinfo *card)
                return -ENOMEM;
        }
 
-       blk_size = rsxx_get_logical_block_size(card);
+       if (card->config_valid) {
+               blk_size = card->config.data.block_size;
+               blk_queue_dma_alignment(card->queue, blk_size - 1);
+               blk_queue_logical_block_size(card->queue, blk_size);
+       }
 
        blk_queue_make_request(card->queue, rsxx_make_request);
        blk_queue_bounce_limit(card->queue, BLK_BOUNCE_ANY);
-       blk_queue_dma_alignment(card->queue, blk_size - 1);
        blk_queue_max_hw_sectors(card->queue, blkdev_max_hw_sectors);
-       blk_queue_logical_block_size(card->queue, blk_size);
        blk_queue_physical_block_size(card->queue, RSXX_HW_BLK_SIZE);
 
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, card->queue);
@@ -347,6 +339,7 @@ void rsxx_destroy_dev(struct rsxx_cardinfo *card)
        card->gendisk = NULL;
 
        blk_cleanup_queue(card->queue);
+       card->queue->queuedata = NULL;
        unregister_blkdev(card->major, DRIVER_NAME);
 }
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index 0607513cfb4..cf8cd293abb 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -221,6 +221,21 @@ static void dma_intr_coal_auto_tune(struct rsxx_cardinfo *card)
 }
 
 /*----------------- RSXX DMA Handling -------------------*/
+static void rsxx_free_dma(struct rsxx_dma_ctrl *ctrl, struct rsxx_dma *dma)
+{
+       if (dma->cmd != HW_CMD_BLK_DISCARD) {
+               if (!pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+                       pci_unmap_page(ctrl->card->dev, dma->dma_addr,
+                                      get_dma_size(dma),
+                                      dma->cmd == HW_CMD_BLK_WRITE ?
+                                      PCI_DMA_TODEVICE :
+                                      PCI_DMA_FROMDEVICE);
+               }
+       }
+
+       kmem_cache_free(rsxx_dma_pool, dma);
+}
+
 static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
                                  struct rsxx_dma *dma,
                                  unsigned int status)
@@ -232,17 +247,29 @@ static void rsxx_complete_dma(struct rsxx_dma_ctrl *ctrl,
        if (status & DMA_CANCELLED)
                ctrl->stats.dma_cancelled++;
 
-       if (dma->dma_addr)
-               pci_unmap_page(ctrl->card->dev, dma->dma_addr,
-                              get_dma_size(dma),
-                              dma->cmd == HW_CMD_BLK_WRITE ?
-                              PCI_DMA_TODEVICE :
-                              PCI_DMA_FROMDEVICE);
-
        if (dma->cb)
                dma->cb(ctrl->card, dma->cb_data, status ? 1 : 0);
 
-       kmem_cache_free(rsxx_dma_pool, dma);
+       rsxx_free_dma(ctrl, dma);
+}
+
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+                          struct list_head *q, unsigned int done)
+{
+       struct rsxx_dma *dma;
+       struct rsxx_dma *tmp;
+       int cnt = 0;
+
+       list_for_each_entry_safe(dma, tmp, q, list) {
+               list_del(&dma->list);
+               if (done & COMPLETE_DMA)
+                       rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+               else
+                       rsxx_free_dma(ctrl, dma);
+               cnt++;
+       }
+
+       return cnt;
 }
 
 static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
@@ -252,9 +279,10 @@ static void rsxx_requeue_dma(struct rsxx_dma_ctrl *ctrl,
         * Requeued DMAs go to the front of the queue so they are issued
         * first.
         */
-       spin_lock(&ctrl->queue_lock);
+       spin_lock_bh(&ctrl->queue_lock);
+       ctrl->stats.sw_q_depth++;
        list_add(&dma->list, &ctrl->queue);
-       spin_unlock(&ctrl->queue_lock);
+       spin_unlock_bh(&ctrl->queue_lock);
 }
 
 static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
@@ -329,6 +357,7 @@ static void rsxx_handle_dma_error(struct rsxx_dma_ctrl *ctrl,
 static void dma_engine_stalled(unsigned long data)
 {
        struct rsxx_dma_ctrl *ctrl = (struct rsxx_dma_ctrl *)data;
+       int cnt;
 
        if (atomic_read(&ctrl->stats.hw_q_depth) == 0 ||
            unlikely(ctrl->card->eeh_state))
@@ -349,18 +378,29 @@ static void dma_engine_stalled(unsigned long data)
                        "DMA channel %d has stalled, faulting interface.\n",
                        ctrl->id);
                ctrl->card->dma_fault = 1;
+
+               /* Clean up the DMA queue */
+               spin_lock(&ctrl->queue_lock);
+               cnt = rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
+               spin_unlock(&ctrl->queue_lock);
+
+               cnt += rsxx_dma_cancel(ctrl);
+
+               if (cnt)
+                       dev_info(CARD_TO_DEV(ctrl->card),
+                               "Freed %d queued DMAs on channel %d\n",
+                               cnt, ctrl->id);
        }
 }
 
-static void rsxx_issue_dmas(struct work_struct *work)
+static void rsxx_issue_dmas(struct rsxx_dma_ctrl *ctrl)
 {
-       struct rsxx_dma_ctrl *ctrl;
        struct rsxx_dma *dma;
        int tag;
        int cmds_pending = 0;
        struct hw_cmd *hw_cmd_buf;
+       int dir;
 
-       ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
        hw_cmd_buf = ctrl->cmd.buf;
 
        if (unlikely(ctrl->card->halt) ||
@@ -368,22 +408,22 @@
                return;
 
        while (1) {
-               spin_lock(&ctrl->queue_lock);
+               spin_lock_bh(&ctrl->queue_lock);
                if (list_empty(&ctrl->queue)) {
-                       spin_unlock(&ctrl->queue_lock);
+                       spin_unlock_bh(&ctrl->queue_lock);
                        break;
                }
-               spin_unlock(&ctrl->queue_lock);
+               spin_unlock_bh(&ctrl->queue_lock);
 
                tag = pop_tracker(ctrl->trackers);
                if (tag == -1)
                        break;
 
-               spin_lock(&ctrl->queue_lock);
+               spin_lock_bh(&ctrl->queue_lock);
                dma = list_entry(ctrl->queue.next, struct rsxx_dma, list);
                list_del(&dma->list);
                ctrl->stats.sw_q_depth--;
-               spin_unlock(&ctrl->queue_lock);
+               spin_unlock_bh(&ctrl->queue_lock);
 
                /*
                 * This will catch any DMAs that slipped in right before the
@@ -396,6 +436,31 @@
                        continue;
                }
 
+               if (dma->cmd != HW_CMD_BLK_DISCARD) {
+                       if (dma->cmd == HW_CMD_BLK_WRITE)
+                               dir = PCI_DMA_TODEVICE;
+                       else
+                               dir = PCI_DMA_FROMDEVICE;
+
+                       /*
+                        * The function pci_map_page is placed here because we
+                        * can only, by design, issue up to 255 commands to the
+                        * hardware at one time per DMA channel. So the maximum
+                        * amount of mapped memory would be 255 * 4 channels *
+                        * 4096 Bytes which is less than 2GB, the limit of a x8
+                        * Non-HWWD PCIe slot. This way the pci_map_page
+                        * function should never fail because of a lack of
+                        * mappable memory.
+                        */
+                       dma->dma_addr = pci_map_page(ctrl->card->dev, dma->page,
+                                       dma->pg_off, dma->sub_page.cnt << 9, dir);
+                       if (pci_dma_mapping_error(ctrl->card->dev, dma->dma_addr)) {
+                               push_tracker(ctrl->trackers, tag);
+                               rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+                               continue;
+                       }
+               }
+
                set_tracker_dma(ctrl->trackers, tag, dma);
                hw_cmd_buf[ctrl->cmd.idx].command  = dma->cmd;
                hw_cmd_buf[ctrl->cmd.idx].tag      = tag;
@@ -440,9 +505,8 @@
        }
 }
 
-static void rsxx_dma_done(struct work_struct *work)
+static void rsxx_dma_done(struct rsxx_dma_ctrl *ctrl)
 {
-       struct rsxx_dma_ctrl *ctrl;
        struct rsxx_dma *dma;
        unsigned long flags;
        u16 count;
@@ -450,7 +514,6 @@
        u8 tag;
        struct hw_status *hw_st_buf;
 
-       ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
        hw_st_buf = ctrl->status.buf;
 
        if (unlikely(ctrl->card->halt) ||
@@ -520,33 +583,32 @@
                rsxx_enable_ier(ctrl->card, CR_INTR_DMA(ctrl->id));
        spin_unlock_irqrestore(&ctrl->card->irq_lock, flags);
 
-       spin_lock(&ctrl->queue_lock);
+       spin_lock_bh(&ctrl->queue_lock);
        if (ctrl->stats.sw_q_depth)
                queue_work(ctrl->issue_wq, &ctrl->issue_dma_work);
-       spin_unlock(&ctrl->queue_lock);
+       spin_unlock_bh(&ctrl->queue_lock);
 }
 
-static int rsxx_cleanup_dma_queue(struct rsxx_cardinfo *card,
-                                 struct list_head *q)
+static void rsxx_schedule_issue(struct work_struct *work)
 {
-       struct rsxx_dma *dma;
-       struct rsxx_dma *tmp;
-       int cnt = 0;
+       struct rsxx_dma_ctrl *ctrl;
 
-       list_for_each_entry_safe(dma, tmp, q, list) {
-               list_del(&dma->list);
+       ctrl = container_of(work, struct rsxx_dma_ctrl, issue_dma_work);
 
-               if (dma->dma_addr)
-                       pci_unmap_page(card->dev, dma->dma_addr,
-                                      get_dma_size(dma),
-                                      (dma->cmd == HW_CMD_BLK_WRITE) ?
-                                      PCI_DMA_TODEVICE :
-                                      PCI_DMA_FROMDEVICE);
-               kmem_cache_free(rsxx_dma_pool, dma);
-               cnt++;
-       }
+       mutex_lock(&ctrl->work_lock);
+       rsxx_issue_dmas(ctrl);
+       mutex_unlock(&ctrl->work_lock);
+}
 
-       return cnt;
+static void rsxx_schedule_done(struct work_struct *work)
+{
+       struct rsxx_dma_ctrl *ctrl;
+
+       ctrl = container_of(work, struct rsxx_dma_ctrl, dma_done_work);
+
+       mutex_lock(&ctrl->work_lock);
+       rsxx_dma_done(ctrl);
+       mutex_unlock(&ctrl->work_lock);
 }
 
 static int rsxx_queue_discard(struct rsxx_cardinfo *card,
@@ -595,14 +657,6 @@ static int rsxx_queue_dma(struct rsxx_cardinfo *card,
        if (!dma)
                return -ENOMEM;
 
-       dma->dma_addr = pci_map_page(card->dev, page, pg_off, dma_len,
-                                    dir ? PCI_DMA_TODEVICE :
-                                    PCI_DMA_FROMDEVICE);
-       if (!dma->dma_addr) {
-               kmem_cache_free(rsxx_dma_pool, dma);
-               return -ENOMEM;
-       }
-
        dma->cmd          = dir ? HW_CMD_BLK_WRITE : HW_CMD_BLK_READ;
        dma->laddr        = laddr;
        dma->sub_page.off = (dma_off >> 9);
@@ -630,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                           void *cb_data)
 {
        struct list_head dma_list[RSXX_MAX_TARGETS];
-       struct bio_vec *bvec;
+       struct bio_vec bvec;
+       struct bvec_iter iter;
        unsigned long long addr8;
        unsigned int laddr;
        unsigned int bv_len;
@@ -642,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        int st;
        int i;
 
-       addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+       addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
        atomic_set(n_dmas, 0);
 
        for (i = 0; i < card->n_targets; i++) {
@@ -651,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
        }
 
        if (bio->bi_rw & REQ_DISCARD) {
-               bv_len = bio->bi_size;
+               bv_len = bio->bi_iter.bi_size;
 
                while (bv_len > 0) {
                        tgt = rsxx_get_dma_tgt(card, addr8);
@@ -668,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                        bv_len -= RSXX_HW_BLK_SIZE;
                }
        } else {
-               bio_for_each_segment(bvec, bio, i) {
-                       bv_len = bvec->bv_len;
-                       bv_off = bvec->bv_offset;
+               bio_for_each_segment(bvec, bio, iter) {
+                       bv_len = bvec.bv_len;
+                       bv_off = bvec.bv_offset;
 
                        while (bv_len > 0) {
                                tgt = rsxx_get_dma_tgt(card, addr8);
@@ -682,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                                st = rsxx_queue_dma(card, &dma_list[tgt],
                                                        bio_data_dir(bio),
                                                        dma_off, dma_len,
-                                                       laddr, bvec->bv_page,
+                                                       laddr, bvec.bv_page,
                                                        bv_off, cb, cb_data);
                                if (st)
                                        goto bvec_err;
@@ -698,10 +753,10 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 
        for (i = 0; i < card->n_targets; i++) {
                if (!list_empty(&dma_list[i])) {
-                       spin_lock(&card->ctrl[i].queue_lock);
+                       spin_lock_bh(&card->ctrl[i].queue_lock);
                        card->ctrl[i].stats.sw_q_depth += dma_cnt[i];
                        list_splice_tail(&dma_list[i], &card->ctrl[i].queue);
-                       spin_unlock(&card->ctrl[i].queue_lock);
+                       spin_unlock_bh(&card->ctrl[i].queue_lock);
 
                        queue_work(card->ctrl[i].issue_wq,
                                   &card->ctrl[i].issue_dma_work);
@@ -712,7 +767,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 
 bvec_err:
        for (i = 0; i < card->n_targets; i++)
-               rsxx_cleanup_dma_queue(card, &dma_list[i]);
+               rsxx_cleanup_dma_queue(&card->ctrl[i], &dma_list[i],
+                                      FREE_DMA);
 
        return st;
 }
@@ -780,6 +836,7 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
        spin_lock_init(&ctrl->trackers->lock);
 
        spin_lock_init(&ctrl->queue_lock);
+       mutex_init(&ctrl->work_lock);
        INIT_LIST_HEAD(&ctrl->queue);
 
        setup_timer(&ctrl->activity_timer, dma_engine_stalled,
@@ -793,8 +850,8 @@ static int rsxx_dma_ctrl_init(struct pci_dev *dev,
        if (!ctrl->done_wq)
                return -ENOMEM;
 
-       INIT_WORK(&ctrl->issue_dma_work, rsxx_issue_dmas);
-       INIT_WORK(&ctrl->dma_done_work, rsxx_dma_done);
+       INIT_WORK(&ctrl->issue_dma_work, rsxx_schedule_issue);
+       INIT_WORK(&ctrl->dma_done_work, rsxx_schedule_done);
 
        st = rsxx_hw_buffers_init(dev, ctrl);
        if (st)
@@ -918,13 +975,30 @@ failed_dma_setup:
        return st;
 }
 
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl)
+{
+       struct rsxx_dma *dma;
+       int i;
+       int cnt = 0;
+
+       /* Clean up issued DMAs */
+       for (i = 0; i < RSXX_MAX_OUTSTANDING_CMDS; i++) {
+               dma = get_tracker_dma(ctrl->trackers, i);
+               if (dma) {
+                       atomic_dec(&ctrl->stats.hw_q_depth);
+                       rsxx_complete_dma(ctrl, dma, DMA_CANCELLED);
+                       push_tracker(ctrl->trackers, i);
+                       cnt++;
+               }
+       }
+
+       return cnt;
+}
 
 void rsxx_dma_destroy(struct rsxx_cardinfo *card)
 {
        struct rsxx_dma_ctrl *ctrl;
-       struct rsxx_dma *dma;
-       int i, j;
-       int cnt = 0;
+       int i;
 
        for (i = 0; i < card->n_targets; i++) {
                ctrl = &card->ctrl[i];
@@ -943,33 +1017,11 @@ void rsxx_dma_destroy(struct rsxx_cardinfo *card)
                del_timer_sync(&ctrl->activity_timer);
 
                /* Clean up the DMA queue */
-               spin_lock(&ctrl->queue_lock);
-               cnt = rsxx_cleanup_dma_queue(card, &ctrl->queue);
-               spin_unlock(&ctrl->queue_lock);
+               spin_lock_bh(&ctrl->queue_lock);
+               rsxx_cleanup_dma_queue(ctrl, &ctrl->queue, COMPLETE_DMA);
+               spin_unlock_bh(&ctrl->queue_lock);
 
-               if (cnt)
-                       dev_info(CARD_TO_DEV(card),
-                               "Freed %d queued DMAs on channel %d\n",
-                               cnt, i);
-
-               /* Clean up issued DMAs */
-               for (j = 0; j < RSXX_MAX_OUTSTANDING_CMDS; j++) {
-                       dma = get_tracker_dma(ctrl->trackers, j);
-                       if (dma) {
-                               pci_unmap_page(card->dev, dma->dma_addr,
-                                              get_dma_size(dma),
-                                              (dma->cmd == HW_CMD_BLK_WRITE) ?
-                                              PCI_DMA_TODEVICE :
-                                              PCI_DMA_FROMDEVICE);
-                               kmem_cache_free(rsxx_dma_pool, dma);
-                               cnt++;
-                       }
-               }
-
-               if (cnt)
-                       dev_info(CARD_TO_DEV(card),
-                               "Freed %d pending DMAs on channel %d\n",
-                               cnt, i);
+               rsxx_dma_cancel(ctrl);
 
                vfree(ctrl->trackers);
 
@@ -1008,27 +1060,26 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
                        else
                                card->ctrl[i].stats.reads_issued--;
 
+                       if (dma->cmd != HW_CMD_BLK_DISCARD) {
+                               pci_unmap_page(card->dev, dma->dma_addr,
+                                              get_dma_size(dma),
+                                              dma->cmd == HW_CMD_BLK_WRITE ?
+                                              PCI_DMA_TODEVICE :
+                                              PCI_DMA_FROMDEVICE);
+                       }
+
                        list_add_tail(&dma->list, &issued_dmas[i]);
                        push_tracker(card->ctrl[i].trackers, j);
                        cnt++;
                }
 
-               spin_lock(&card->ctrl[i].queue_lock);
+               spin_lock_bh(&card->ctrl[i].queue_lock);
                list_splice(&issued_dmas[i], &card->ctrl[i].queue);
 
                atomic_sub(cnt, &card->ctrl[i].stats.hw_q_depth);
                card->ctrl[i].stats.sw_q_depth += cnt;
                card->ctrl[i].e_cnt = 0;
-
-               list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-                       if (dma->dma_addr)
-                               pci_unmap_page(card->dev, dma->dma_addr,
-                                              get_dma_size(dma),
-                                              dma->cmd == HW_CMD_BLK_WRITE ?
-                                              PCI_DMA_TODEVICE :
-                                              PCI_DMA_FROMDEVICE);
-               }
-               spin_unlock(&card->ctrl[i].queue_lock);
+               spin_unlock_bh(&card->ctrl[i].queue_lock);
        }
 
        kfree(issued_dmas);
@@ -1036,48 +1087,6 @@ int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card)
        return 0;
 }
 
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card)
-{
-       struct rsxx_dma *dma;
-       struct rsxx_dma *tmp;
-       int i;
-
-       for (i = 0; i < card->n_targets; i++) {
-               spin_lock(&card->ctrl[i].queue_lock);
-               list_for_each_entry_safe(dma, tmp, &card->ctrl[i].queue, list) {
-                       list_del(&dma->list);
-
-                       rsxx_complete_dma(&card->ctrl[i], dma, DMA_CANCELLED);
-               }
-               spin_unlock(&card->ctrl[i].queue_lock);
-       }
-}
-
-int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card)
-{
-       struct rsxx_dma *dma;
-       int i;
-
-       for (i = 0; i < card->n_targets; i++) {
-               spin_lock(&card->ctrl[i].queue_lock);
-               list_for_each_entry(dma, &card->ctrl[i].queue, list) {
-                       dma->dma_addr = pci_map_page(card->dev, dma->page,
-                                       dma->pg_off, get_dma_size(dma),
-                                       dma->cmd == HW_CMD_BLK_WRITE ?
-                                       PCI_DMA_TODEVICE :
-                                       PCI_DMA_FROMDEVICE);
-                       if (!dma->dma_addr) {
-                               spin_unlock(&card->ctrl[i].queue_lock);
-                               kmem_cache_free(rsxx_dma_pool, dma);
-                               return -ENOMEM;
-                       }
-               }
-               spin_unlock(&card->ctrl[i].queue_lock);
-       }
-
-       return 0;
-}
-
 int rsxx_dma_init(void)
 {
        rsxx_dma_pool = KMEM_CACHE(rsxx_dma, SLAB_HWCACHE_ALIGN);
diff --git a/drivers/block/rsxx/rsxx_priv.h b/drivers/block/rsxx/rsxx_priv.h
index 382e8bf5c03..6bbc64d0f69 100644
--- a/drivers/block/rsxx/rsxx_priv.h
+++ b/drivers/block/rsxx/rsxx_priv.h
@@ -39,6 +39,7 @@
 #include <linux/vmalloc.h>
 #include <linux/timer.h>
 #include <linux/ioctl.h>
+#include <linux/delay.h>
 
 #include "rsxx.h"
 #include "rsxx_cfg.h"
@@ -51,7 +52,7 @@ struct proc_cmd;
 #define RS70_PCI_REV_SUPPORTED 4
 
 #define DRIVER_NAME "rsxx"
-#define DRIVER_VERSION "4.0"
+#define DRIVER_VERSION "4.0.3.2516"
 
 /* Block size is 4096 */
 #define RSXX_HW_BLK_SHIFT              12
@@ -114,6 +115,7 @@ struct rsxx_dma_ctrl {
        struct timer_list               activity_timer;
        struct dma_tracker_list         *trackers;
        struct rsxx_dma_stats           stats;
+       struct mutex                    work_lock;
 };
 
 struct rsxx_cardinfo {
@@ -134,6 +136,7 @@ struct rsxx_cardinfo {
                spinlock_t              lock;
                bool                    active;
                struct creg_cmd         *active_cmd;
+               struct workqueue_struct *creg_wq;
                struct work_struct      done_work;
                struct list_head        queue;
                unsigned int            q_depth;
@@ -154,6 +157,7 @@ struct rsxx_cardinfo {
                int                     buf_len;
        } log;
 
+       struct workqueue_struct *event_wq;
        struct work_struct      event_work;
        unsigned int            state;
        u64                     size8;
@@ -181,6 +185,8 @@ struct rsxx_cardinfo {
 
        int                     n_targets;
        struct rsxx_dma_ctrl    *ctrl;
+
+       struct dentry           *debugfs_dir;
 };
 
 enum rsxx_pci_regmap {
@@ -283,6 +289,7 @@ enum rsxx_creg_addr {
        CREG_ADD_CAPABILITIES           = 0x80001050,
        CREG_ADD_LOG                    = 0x80002000,
        CREG_ADD_NUM_TARGETS            = 0x80003000,
+       CREG_ADD_CRAM                   = 0xA0000000,
        CREG_ADD_CONFIG                 = 0xB0000000,
 };
 
@@ -338,6 +345,11 @@ enum rsxx_creg_stat {
        CREG_STAT_TAG_MASK      = 0x0000ff00,
 };
 
+enum rsxx_dma_finish {
+       FREE_DMA        = 0x0,
+       COMPLETE_DMA    = 0x1,
+};
+
 static inline unsigned int CREG_DATA(int N)
 {
        return CREG_DATA0 + (N << 2);
@@ -372,6 +384,10 @@ typedef void (*rsxx_dma_cb)(struct rsxx_cardinfo *card,
 int rsxx_dma_setup(struct rsxx_cardinfo *card);
 void rsxx_dma_destroy(struct rsxx_cardinfo *card);
 int rsxx_dma_init(void);
+int rsxx_cleanup_dma_queue(struct rsxx_dma_ctrl *ctrl,
+                          struct list_head *q,
+                          unsigned int done);
+int rsxx_dma_cancel(struct rsxx_dma_ctrl *ctrl);
 void rsxx_dma_cleanup(void);
 void rsxx_dma_queue_reset(struct rsxx_cardinfo *card);
 int rsxx_dma_configure(struct rsxx_cardinfo *card);
@@ -382,7 +398,6 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
                       void *cb_data);
 int rsxx_hw_buffers_init(struct pci_dev *dev, struct rsxx_dma_ctrl *ctrl);
 int rsxx_eeh_save_issued_dmas(struct rsxx_cardinfo *card);
-void rsxx_eeh_cancel_dmas(struct rsxx_cardinfo *card);
 int rsxx_eeh_remap_dmas(struct rsxx_cardinfo *card);
 
 /***** cregs.c *****/
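
The new debugfs entries created by rsxx_debugfs_dev_new() are plain files under a
directory named after the gendisk. A minimal user-space sketch of reading the
"stats" counters follows; the path assumes debugfs is mounted at
/sys/kernel/debug and the disk came up as rsxx0, both of which may differ on a
given system:

/* read_rsxx_stats.c - dump the per-channel counters exposed via debugfs.
 * Build with: cc -o read_rsxx_stats read_rsxx_stats.c
 */
#include <stdio.h>

int main(void)
{
        char line[256];
        /* Assumed path: debugfs at /sys/kernel/debug, disk named rsxx0. */
        FILE *f = fopen("/sys/kernel/debug/rsxx0/stats", "r");

        if (!f) {
                perror("fopen");
                return 1;
        }

        /* One "Ctrl %d <counter> = %d" line per seq_printf() in the driver. */
        while (fgets(line, sizeof(line), f))
                fputs(line, stdout);

        fclose(f);
        return 0;
}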
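The sync_start path added to rsxx_pci_probe() is a plain bounded poll: sleep one
second per iteration, decrement a tick budget, and stop when the card leaves
CARD_STATE_STARTING or SYNC_START_TIMEOUT ticks elapse. The same shape, reduced
to a standalone user-space sketch (sleep() standing in for ssleep(), and a
hypothetical poll_state() standing in for rsxx_get_card_state()):

#include <stdio.h>
#include <unistd.h>

#define SYNC_START_TIMEOUT (10 * 60)    /* ten minutes of 1-second polls */

enum { CARD_STATE_STARTING, CARD_STATE_GOOD };

/* Hypothetical stand-in: pretend the card needs three polls to start. */
static int poll_state(void)
{
        static int polls;
        return (++polls < 3) ? CARD_STATE_STARTING : CARD_STATE_GOOD;
}

int main(void)
{
        unsigned int sync_timeout = SYNC_START_TIMEOUT;
        int state;

        do {
                sleep(1);
                sync_timeout--;
                state = poll_state();
        } while (sync_timeout && state == CARD_STATE_STARTING);

        puts(state == CARD_STATE_STARTING ? "startup timed out"
                                          : "card started");
        return 0;
}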
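On the mapping-budget comment in rsxx_issue_dmas(): moving pci_map_page() to
issue time works because the hardware bounds the number of live mappings. With
the comment's own numbers, 255 outstanding commands per channel, 4 channels,
and 4096-byte transfers, the worst case is 255 * 4 * 4096 = 4,177,920 bytes,
roughly 4 MiB of pinned memory at once, far inside the 2 GB bound the comment
cites, so the mapping should not fail for lack of mappable memory.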
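The rsxx_schedule_issue()/rsxx_schedule_done() wrappers keep separate
workqueues for the issue and completion paths but serialize their bodies per
channel with the new ctrl->work_lock mutex. The pattern in miniature, as a
user-space pthreads analog (not driver code; build with cc -pthread):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t work_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-ins for rsxx_issue_dmas()/rsxx_dma_done(): the two bodies may run
 * in either order, but never concurrently, because both take work_lock. */
static void *schedule_issue(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&work_lock);
        puts("issue path holds the channel");
        pthread_mutex_unlock(&work_lock);
        return NULL;
}

static void *schedule_done(void *unused)
{
        (void)unused;
        pthread_mutex_lock(&work_lock);
        puts("done path holds the channel");
        pthread_mutex_unlock(&work_lock);
        return NULL;
}

int main(void)
{
        pthread_t issue, done;

        pthread_create(&issue, NULL, schedule_issue, NULL);
        pthread_create(&done, NULL, schedule_done, NULL);
        pthread_join(issue, NULL);
        pthread_join(done, NULL);
        return 0;
}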
