Diffstat (limited to 'drivers/mmc/card/queue.c')
-rw-r--r--   drivers/mmc/card/queue.c | 470
1 file changed, 315 insertions(+), 155 deletions(-)
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 7731ddefdc1..3e049c13429 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -9,11 +9,13 @@
  * published by the Free Software Foundation.
  *
  */
+#include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/blkdev.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/scatterlist.h>
+#include <linux/dma-mapping.h>
 
 #include <linux/mmc/card.h>
 #include <linux/mmc/host.h>
@@ -21,21 +23,24 @@
 
 #define MMC_QUEUE_BOUNCESZ	65536
 
-#define MMC_QUEUE_SUSPENDED	(1 << 0)
-
 /*
  * Prepare a MMC request. This just filters out odd stuff.
  */
 static int mmc_prep_request(struct request_queue *q, struct request *req)
 {
+	struct mmc_queue *mq = q->queuedata;
+
 	/*
-	 * We only like normal block requests.
+	 * We only like normal block requests and discards.
 	 */
-	if (!blk_fs_request(req) && !blk_pc_request(req)) {
+	if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
 		blk_dump_rq_flags(req, "MMC bad request");
 		return BLKPREP_KILL;
 	}
 
+	if (mq && mmc_card_removed(mq->card))
+		return BLKPREP_KILL;
+
 	req->cmd_flags |= REQ_DONTPREP;
 
 	return BLKPREP_OK;
@@ -51,15 +56,40 @@ static int mmc_queue_thread(void *d)
 	down(&mq->thread_sem);
 	do {
 		struct request *req = NULL;
+		struct mmc_queue_req *tmp;
+		unsigned int cmd_flags = 0;
 
 		spin_lock_irq(q->queue_lock);
 		set_current_state(TASK_INTERRUPTIBLE);
-		if (!blk_queue_plugged(q))
-			req = elv_next_request(q);
-		mq->req = req;
+		req = blk_fetch_request(q);
+		mq->mqrq_cur->req = req;
 		spin_unlock_irq(q->queue_lock);
 
-		if (!req) {
+		if (req || mq->mqrq_prev->req) {
+			set_current_state(TASK_RUNNING);
+			cmd_flags = req ? req->cmd_flags : 0;
+			mq->issue_fn(mq, req);
+			if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
+				mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
+				continue; /* fetch again */
+			}
+
+			/*
+			 * Current request becomes previous request
+			 * and vice versa.
+			 * In case of special requests, current request
+			 * has been finished. Do not assign it to previous
+			 * request.
+			 */
+			if (cmd_flags & MMC_REQ_SPECIAL_MASK)
+				mq->mqrq_cur->req = NULL;
+
+			mq->mqrq_prev->brq.mrq.data = NULL;
+			mq->mqrq_prev->req = NULL;
+			tmp = mq->mqrq_prev;
+			mq->mqrq_prev = mq->mqrq_cur;
+			mq->mqrq_cur = tmp;
+		} else {
 			if (kthread_should_stop()) {
 				set_current_state(TASK_RUNNING);
 				break;
@@ -67,11 +97,7 @@ static int mmc_queue_thread(void *d)
 			up(&mq->thread_sem);
 			schedule();
 			down(&mq->thread_sem);
-			continue;
 		}
-		set_current_state(TASK_RUNNING);
-
-		mq->issue_fn(mq, req);
 	} while (1);
 
 	up(&mq->thread_sem);
@@ -84,56 +110,111 @@ static int mmc_queue_thread(void *d)
  * on any queue on this host, and attempt to issue it. This may
  * not be the queue we were asked to process.
  */
-static void mmc_request(struct request_queue *q)
+static void mmc_request_fn(struct request_queue *q)
 {
 	struct mmc_queue *mq = q->queuedata;
 	struct request *req;
-	int ret;
+	unsigned long flags;
+	struct mmc_context_info *cntx;
 
 	if (!mq) {
-		printk(KERN_ERR "MMC: killing requests for dead queue\n");
-		while ((req = elv_next_request(q)) != NULL) {
-			do {
-				ret = __blk_end_request(req, -EIO,
-							blk_rq_cur_bytes(req));
-			} while (ret);
+		while ((req = blk_fetch_request(q)) != NULL) {
+			req->cmd_flags |= REQ_QUIET;
+			__blk_end_request_all(req, -EIO);
 		}
 		return;
 	}
 
-	if (!mq->req)
+	cntx = &mq->card->host->context_info;
+	if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
+		/*
+		 * New MMC request arrived when MMC thread may be
+		 * blocked on the previous request to be complete
+		 * with no current request fetched
+		 */
+		spin_lock_irqsave(&cntx->lock, flags);
+		if (cntx->is_waiting_last_req) {
+			cntx->is_new_req = true;
+			wake_up_interruptible(&cntx->wait);
+		}
+		spin_unlock_irqrestore(&cntx->lock, flags);
+	} else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
 		wake_up_process(mq->thread);
 }
 
+static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
+{
+	struct scatterlist *sg;
+
+	sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
+	if (!sg)
+		*err = -ENOMEM;
+	else {
+		*err = 0;
+		sg_init_table(sg, sg_len);
+	}
+
+	return sg;
+}
+
+static void mmc_queue_setup_discard(struct request_queue *q,
+				    struct mmc_card *card)
+{
+	unsigned max_discard;
+
+	max_discard = mmc_calc_max_discard(card);
+	if (!max_discard)
+		return;
+
+	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+	q->limits.max_discard_sectors = max_discard;
+	if (card->erased_byte == 0 && !mmc_can_discard(card))
+		q->limits.discard_zeroes_data = 1;
+	q->limits.discard_granularity = card->pref_erase << 9;
+	/* granularity must not be greater than max. discard */
+	if (card->pref_erase > max_discard)
+		q->limits.discard_granularity = 0;
+	if (mmc_can_secure_erase_trim(card))
+		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
+}
+
 /**
  * mmc_init_queue - initialise a queue structure.
  * @mq: mmc queue
  * @card: mmc card to attach this queue
  * @lock: queue lock
+ * @subname: partition subname
  *
  * Initialise a MMC card request queue.
  */
-int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
+int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
+		   spinlock_t *lock, const char *subname)
 {
 	struct mmc_host *host = card->host;
 	u64 limit = BLK_BOUNCE_HIGH;
 	int ret;
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
 
 	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
-		limit = *mmc_dev(host)->dma_mask;
+		limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;
 
 	mq->card = card;
-	mq->queue = blk_init_queue(mmc_request, lock);
+	mq->queue = blk_init_queue(mmc_request_fn, lock);
 	if (!mq->queue)
 		return -ENOMEM;
 
+	mq->mqrq_cur = mqrq_cur;
+	mq->mqrq_prev = mqrq_prev;
 	mq->queue->queuedata = mq;
-	mq->req = NULL;
 
 	blk_queue_prep_rq(mq->queue, mmc_prep_request);
+	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
+	if (mmc_can_erase(card))
+		mmc_queue_setup_discard(mq->queue, card);
 
 #ifdef CONFIG_MMC_BLOCK_BOUNCE
-	if (host->max_hw_segs == 1) {
+	if (host->max_segs == 1) {
 		unsigned int bouncesz;
 
 		bouncesz = MMC_QUEUE_BOUNCESZ;
@@ -142,56 +223,75 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 			bouncesz = host->max_req_size;
 		if (bouncesz > host->max_seg_size)
 			bouncesz = host->max_seg_size;
+		if (bouncesz > (host->max_blk_count * 512))
+			bouncesz = host->max_blk_count * 512;
+
+		if (bouncesz > 512) {
+			mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_cur->bounce_buf) {
+				pr_warning("%s: unable to "
+					"allocate bounce cur buffer\n",
+					mmc_card_name(card));
+			}
+			mqrq_prev->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
+			if (!mqrq_prev->bounce_buf) {
+				pr_warning("%s: unable to "
+					"allocate bounce prev buffer\n",
+					mmc_card_name(card));
+				kfree(mqrq_cur->bounce_buf);
+				mqrq_cur->bounce_buf = NULL;
+			}
+		}
 
-		mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
-		if (!mq->bounce_buf) {
-			printk(KERN_WARNING "%s: unable to allocate "
-				"bounce buffer\n", mmc_card_name(card));
-		} else {
-			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_HIGH);
-			blk_queue_max_sectors(mq->queue, bouncesz / 512);
-			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
-			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
+		if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
+			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
+			blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
+			blk_queue_max_segments(mq->queue, bouncesz / 512);
 			blk_queue_max_segment_size(mq->queue, bouncesz);
 
-			mq->sg = kmalloc(sizeof(struct scatterlist),
-				GFP_KERNEL);
-			if (!mq->sg) {
-				ret = -ENOMEM;
+			mqrq_cur->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
 				goto cleanup_queue;
-			}
-			sg_init_table(mq->sg, 1);
 
-			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
-				bouncesz / 512, GFP_KERNEL);
-			if (!mq->bounce_sg) {
-				ret = -ENOMEM;
+			mqrq_cur->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->sg = mmc_alloc_sg(1, &ret);
+			if (ret)
+				goto cleanup_queue;
+
+			mqrq_prev->bounce_sg =
+				mmc_alloc_sg(bouncesz / 512, &ret);
+			if (ret)
 				goto cleanup_queue;
-			}
-			sg_init_table(mq->bounce_sg, bouncesz / 512);
 		}
 	}
 #endif
 
-	if (!mq->bounce_buf) {
+	if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
 		blk_queue_bounce_limit(mq->queue, limit);
-		blk_queue_max_sectors(mq->queue, host->max_req_size / 512);
-		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
-		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
+		blk_queue_max_hw_sectors(mq->queue,
+			min(host->max_blk_count, host->max_req_size / 512));
+		blk_queue_max_segments(mq->queue, host->max_segs);
 		blk_queue_max_segment_size(mq->queue, host->max_seg_size);
 
-		mq->sg = kmalloc(sizeof(struct scatterlist) *
-			host->max_phys_segs, GFP_KERNEL);
-		if (!mq->sg) {
-			ret = -ENOMEM;
+		mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
+			goto cleanup_queue;
+
+
+		mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
+		if (ret)
 			goto cleanup_queue;
-		}
-		sg_init_table(mq->sg, host->max_phys_segs);
 	}
 
-	init_MUTEX(&mq->thread_sem);
+	sema_init(&mq->thread_sem, 1);
+
+	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
+		host->index, subname ? subname : "");
 
-	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
 	if (IS_ERR(mq->thread)) {
 		ret = PTR_ERR(mq->thread);
 		goto free_bounce_sg;
@@ -199,16 +299,22 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock
 	return 0;
 
  free_bounce_sg:
-	if (mq->bounce_sg)
-		kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
 cleanup_queue:
-	if (mq->sg)
-		kfree(mq->sg);
-	mq->sg = NULL;
-	if (mq->bounce_buf)
-		kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
+
 	blk_cleanup_queue(mq->queue);
 	return ret;
 }
@@ -217,11 +323,8 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 {
 	struct request_queue *q = mq->queue;
 	unsigned long flags;
-
-	/* Mark that we should start throwing out stragglers */
-	spin_lock_irqsave(q->queue_lock, flags);
-	q->queuedata = NULL;
-	spin_unlock_irqrestore(q->queue_lock, flags);
+	struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
+	struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;
 
 	/* Make sure the queue isn't suspended, as that will deadlock */
 	mmc_queue_resume(mq);
@@ -229,23 +332,77 @@ void mmc_cleanup_queue(struct mmc_queue *mq)
 	/* Then terminate our worker thread */
 	kthread_stop(mq->thread);
 
-	if (mq->bounce_sg)
-		kfree(mq->bounce_sg);
-	mq->bounce_sg = NULL;
+	/* Empty the queue */
+	spin_lock_irqsave(q->queue_lock, flags);
+	q->queuedata = NULL;
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	kfree(mqrq_cur->bounce_sg);
+	mqrq_cur->bounce_sg = NULL;
 
-	kfree(mq->sg);
-	mq->sg = NULL;
+	kfree(mqrq_cur->sg);
+	mqrq_cur->sg = NULL;
 
-	if (mq->bounce_buf)
-		kfree(mq->bounce_buf);
-	mq->bounce_buf = NULL;
+	kfree(mqrq_cur->bounce_buf);
+	mqrq_cur->bounce_buf = NULL;
 
-	blk_cleanup_queue(mq->queue);
+	kfree(mqrq_prev->bounce_sg);
+	mqrq_prev->bounce_sg = NULL;
+
+	kfree(mqrq_prev->sg);
+	mqrq_prev->sg = NULL;
+
+	kfree(mqrq_prev->bounce_buf);
+	mqrq_prev->bounce_buf = NULL;
 
 	mq->card = NULL;
 }
 EXPORT_SYMBOL(mmc_cleanup_queue);
 
+int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+	int ret = 0;
+
+
+	mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_cur->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
+			mmc_card_name(card));
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
+	if (!mqrq_prev->packed) {
+		pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
+			mmc_card_name(card));
+		kfree(mqrq_cur->packed);
+		mqrq_cur->packed = NULL;
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	INIT_LIST_HEAD(&mqrq_cur->packed->list);
+	INIT_LIST_HEAD(&mqrq_prev->packed->list);
+
+out:
+	return ret;
+}
+
+void mmc_packed_clean(struct mmc_queue *mq)
+{
+	struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
+	struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
+
+	kfree(mqrq_cur->packed);
+	mqrq_cur->packed = NULL;
+	kfree(mqrq_prev->packed);
+	mqrq_prev->packed = NULL;
+}
+
 /**
  * mmc_queue_suspend - suspend a MMC request queue
  * @mq: MMC queue to suspend
@@ -290,106 +447,109 @@ void mmc_queue_resume(struct mmc_queue *mq)
 	}
 }
 
-static void copy_sg(struct scatterlist *dst, unsigned int dst_len,
-	struct scatterlist *src, unsigned int src_len)
+static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
+					    struct mmc_packed *packed,
+					    struct scatterlist *sg,
+					    enum mmc_packed_type cmd_type)
 {
-	unsigned int chunk;
-	char *dst_buf, *src_buf;
-	unsigned int dst_size, src_size;
-
-	dst_buf = NULL;
-	src_buf = NULL;
-	dst_size = 0;
-	src_size = 0;
-
-	while (src_len) {
-		BUG_ON(dst_len == 0);
-
-		if (dst_size == 0) {
-			dst_buf = sg_virt(dst);
-			dst_size = dst->length;
-		}
-
-		if (src_size == 0) {
-			src_buf = sg_virt(src);
-			src_size = src->length;
-		}
-
-		chunk = min(dst_size, src_size);
-
-		memcpy(dst_buf, src_buf, chunk);
-
-		dst_buf += chunk;
-		src_buf += chunk;
-		dst_size -= chunk;
-		src_size -= chunk;
+	struct scatterlist *__sg = sg;
+	unsigned int sg_len = 0;
+	struct request *req;
 
-		if (dst_size == 0) {
-			dst++;
-			dst_len--;
-		}
+	if (mmc_packed_wr(cmd_type)) {
+		unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
+		unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
+		unsigned int len, remain, offset = 0;
+		u8 *buf = (u8 *)packed->cmd_hdr;
+
+		remain = hdr_sz;
+		do {
+			len = min(remain, max_seg_sz);
+			sg_set_buf(__sg, buf + offset, len);
+			offset += len;
+			remain -= len;
+			(__sg++)->page_link &= ~0x02;
+			sg_len++;
+		} while (remain);
+	}
 
-		if (src_size == 0) {
-			src++;
-			src_len--;
-		}
+	list_for_each_entry(req, &packed->list, queuelist) {
+		sg_len += blk_rq_map_sg(mq->queue, req, __sg);
+		__sg = sg + (sg_len - 1);
+		(__sg++)->page_link &= ~0x02;
 	}
+	sg_mark_end(sg + (sg_len - 1));
+	return sg_len;
 }
 
-unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
+/*
+ * Prepare the sg list(s) to be handed of to the host driver
+ */
+unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
 {
 	unsigned int sg_len;
+	size_t buflen;
+	struct scatterlist *sg;
+	enum mmc_packed_type cmd_type;
+	int i;
+
+	cmd_type = mqrq->cmd_type;
+
+	if (!mqrq->bounce_buf) {
+		if (mmc_packed_cmd(cmd_type))
+			return mmc_queue_packed_map_sg(mq, mqrq->packed,
+						       mqrq->sg, cmd_type);
+		else
+			return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
+	}
 
-	if (!mq->bounce_buf)
-		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);
-
-	BUG_ON(!mq->bounce_sg);
-
-	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);
+	BUG_ON(!mqrq->bounce_sg);
 
-	mq->bounce_sg_len = sg_len;
+	if (mmc_packed_cmd(cmd_type))
+		sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
+						 mqrq->bounce_sg, cmd_type);
+	else
+		sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);
 
-	/*
-	 * Shortcut in the event we only get a single entry.
-	 */
-	if (sg_len == 1) {
-		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
-		return 1;
-	}
+	mqrq->bounce_sg_len = sg_len;
 
-	sg_init_one(mq->sg, mq->bounce_buf, 0);
+	buflen = 0;
+	for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
+		buflen += sg->length;
 
-	while (sg_len) {
-		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
-		sg_len--;
-	}
+	sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);
 
 	return 1;
 }
 
-void mmc_queue_bounce_pre(struct mmc_queue *mq)
+/*
+ * If writing, bounce the data to the buffer before the request
+ * is sent to the host driver
+ */
+void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;
 
-	if (mq->bounce_sg_len == 1)
-		return;
-	if (rq_data_dir(mq->req) != WRITE)
+	if (rq_data_dir(mqrq->req) != WRITE)
 		return;
 
-	copy_sg(mq->sg, 1, mq->bounce_sg, mq->bounce_sg_len);
+	sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
+		mqrq->bounce_buf, mqrq->sg[0].length);
 }
 
-void mmc_queue_bounce_post(struct mmc_queue *mq)
+/*
+ * If reading, bounce the data from the buffer after the request
+ * has been handled by the host driver
+ */
+void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
 {
-	if (!mq->bounce_buf)
+	if (!mqrq->bounce_buf)
 		return;
 
-	if (mq->bounce_sg_len == 1)
-		return;
-	if (rq_data_dir(mq->req) != READ)
+	if (rq_data_dir(mqrq->req) != READ)
 		return;
 
-	copy_sg(mq->bounce_sg, mq->bounce_sg_len, mq->sg, 1);
+	sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
		mqrq->bounce_buf, mqrq->sg[0].length);
 }
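
The central change in this diff is the move from a single in-flight request (mq->req) to the two mmc_queue_req slots mqrq_cur and mqrq_prev, which mmc_queue_thread() swaps after every issue so the next transfer can be prepared while the previous one completes. Below is a minimal, self-contained sketch of that two-slot rotation in plain C: struct slot, the incoming array, and the printf calls are hypothetical stand-ins for the kernel's struct mmc_queue_req, blk_fetch_request(), and mq->issue_fn(), and the loop simply exits where the kernel thread would sleep.

/*
 * Plain-C sketch of the mqrq_cur/mqrq_prev rotation in the diff above.
 * Everything here is an illustrative stand-in, not kernel API.
 */
#include <stdio.h>
#include <stddef.h>

struct slot {
	const char *req;	/* stands in for struct request * */
};

int main(void)
{
	struct slot slots[2] = { { NULL }, { NULL } };
	struct slot *cur = &slots[0], *prev = &slots[1], *tmp;
	const char *incoming[] = { "req A", "req B", "req C" };
	size_t i, n = sizeof(incoming) / sizeof(incoming[0]);

	for (i = 0; ; i++) {
		/* blk_fetch_request(): may return nothing */
		cur->req = (i < n) ? incoming[i] : NULL;

		/* nothing fetched and nothing still in flight:
		 * the kernel thread would sleep here; we stop */
		if (!cur->req && !prev->req)
			break;

		/* issue_fn() starts cur->req while prev->req completes */
		printf("issue: cur=%s, completing prev=%s\n",
		       cur->req ? cur->req : "none",
		       prev->req ? prev->req : "none");

		/* current request becomes previous request and vice versa:
		 * the old prev slot is cleared and reused for the next fetch,
		 * while the slot just issued keeps its request as prev */
		prev->req = NULL;
		tmp = prev;
		prev = cur;
		cur = tmp;
	}
	return 0;
}

Note the ordering: the old previous slot is emptied before the swap, so after the swap the just-issued request lives in mqrq_prev and a clean slot is ready for the next fetch, exactly as in the thread loop above.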
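The packed-write path added in mmc_queue_packed_map_sg() also contains one piece of arithmetic worth seeing in isolation: the packed command header is sliced into scatterlist entries no larger than the queue's maximum segment size. A standalone sketch of that chunking loop follows; the hdr_sz and max_seg_sz values are assumptions chosen for the example, where the kernel instead derives them from mmc_large_sector() (512 or 4096 bytes) and queue_max_segment_size(), and fills real scatterlist entries with sg_set_buf() rather than printing.

/*
 * Standalone sketch of the header chunking in mmc_queue_packed_map_sg().
 * The sizes below are example assumptions, not values from the kernel.
 */
#include <stdio.h>

int main(void)
{
	unsigned int hdr_sz = 4096;	/* e.g. large-sector card header */
	unsigned int max_seg_sz = 1536;	/* assumed host segment limit */
	unsigned int len, remain = hdr_sz, offset = 0, sg_len = 0;

	do {
		/* each scatterlist entry carries at most max_seg_sz bytes */
		len = remain < max_seg_sz ? remain : max_seg_sz;
		printf("sg[%u]: header offset %u, length %u\n",
		       sg_len, offset, len);
		offset += len;
		remain -= len;
		sg_len++;
	} while (remain);

	printf("header mapped into %u scatterlist segment(s)\n", sg_len);
	return 0;
}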
