Diffstat (limited to 'block/blk-tag.c')
-rw-r--r--  block/blk-tag.c  88
1 file changed, 39 insertions, 49 deletions
diff --git a/block/blk-tag.c b/block/blk-tag.c
index 32667beb03e..a185b86741e 100644
--- a/block/blk-tag.c
+++ b/block/blk-tag.c
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/bio.h>
 #include <linux/blkdev.h>
+#include <linux/slab.h>
 
 #include "blk.h"
 
@@ -26,19 +27,17 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt: the tag map to free
  *
- * Tries to free the specified @bqt@. Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and frees it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-	int retval;
-
-	retval = atomic_dec_and_test(&bqt->refcnt);
-	if (retval) {
-		BUG_ON(bqt->busy);
+	if (atomic_dec_and_test(&bqt->refcnt)) {
+		BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
+							bqt->max_depth);
 
 		kfree(bqt->tag_index);
 		bqt->tag_index = NULL;
@@ -48,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
 		kfree(bqt);
 	}
-
-	return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -67,33 +65,18 @@ void __blk_queue_free_tags(struct request_queue *q)
 	if (!bqt)
 		return;
 
-	__blk_free_tags(bqt);
+	blk_free_tags(bqt);
 
 	q->queue_tags = NULL;
 	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
 /**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt: the tag map to free
- *
- * For externally managed @bqt@ frees the map. Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-	if (unlikely(!__blk_free_tags(bqt)))
-		BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
-/**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
  *
  *  Notes:
- *	This is used to disabled tagged queuing to a device, yet leave
+ *	This is used to disable tagged queuing to a device, yet leave
  *	queue in function.
  **/
 void blk_queue_free_tags(struct request_queue *q)
@@ -147,7 +130,6 @@ static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
 	if (init_tag_map(q, tags, depth))
 		goto fail;
 
-	tags->busy = 0;
 	atomic_set(&tags->refcnt, 1);
 	return tags;
 fail:
@@ -158,7 +140,6 @@ fail:
 /**
  * blk_init_tags - initialize the tag info for an external tag map
  * @depth: the maximum queue depth supported
- * @tags: the tag to use
  **/
 struct blk_queue_tag *blk_init_tags(int depth)
 {
@@ -186,7 +167,8 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 		tags = __blk_queue_init_tags(q, depth);
 
 		if (!tags)
-			goto fail;
+			return -ENOMEM;
+
 	} else if (q->queue_tags) {
 		rc = blk_queue_resize_tags(q, depth);
 		if (rc)
@@ -203,9 +185,6 @@ int blk_queue_init_tags(struct request_queue *q, int depth,
 	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
 	INIT_LIST_HEAD(&q->tag_busy_list);
 	return 0;
-fail:
-	kfree(tags);
-	return -ENOMEM;
 }
 EXPORT_SYMBOL(blk_queue_init_tags);
 
@@ -271,7 +250,7 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
  * @rq: the request that has completed
  *
  *  Description:
- *    Typically called when end_that_request_first() returns 0, meaning
+ *    Typically called when end_that_request_first() returns %0, meaning
  *    all transfers have been done for a request. It's important to call
  *    this function before end_that_request_last(), as that will put the
  *    request back on the free list thus corrupting the internal tag list.
@@ -282,16 +261,9 @@ EXPORT_SYMBOL(blk_queue_resize_tags);
 void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
-	int tag = rq->tag;
-
-	BUG_ON(tag == -1);
+	unsigned tag = rq->tag; /* negative tags invalid */
 
-	if (unlikely(tag >= bqt->real_max_depth))
-		/*
-		 * This can happen after tag depth has been reduced.
-		 * FIXME: how about a warning or info message here?
-		 */
-		return;
+	BUG_ON(tag >= bqt->real_max_depth);
 
 	list_del_init(&rq->queuelist);
 	rq->cmd_flags &= ~REQ_QUEUED;
@@ -313,7 +285,6 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	 * unlock memory barrier semantics.
 	 */
 	clear_bit_unlock(tag, bqt->tag_map);
-	bqt->busy--;
 }
 EXPORT_SYMBOL(blk_queue_end_tag);
 
@@ -338,6 +309,7 @@ EXPORT_SYMBOL(blk_queue_end_tag);
 int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 {
 	struct blk_queue_tag *bqt = q->queue_tags;
+	unsigned max_depth;
 	int tag;
 
 	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
@@ -351,10 +323,29 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	/*
 	 * Protect against shared tag maps, as we may not have exclusive
 	 * access to the tag map.
+	 *
+	 * We reserve a few tags just for sync IO, since we don't want
+	 * to starve sync IO on behalf of flooding async IO.
 	 */
+	max_depth = bqt->max_depth;
+	if (!rq_is_sync(rq) && max_depth > 1) {
+		switch (max_depth) {
+		case 2:
+			max_depth = 1;
+			break;
+		case 3:
+			max_depth = 2;
+			break;
+		default:
+			max_depth -= 2;
+		}
+		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
+			return 1;
+	}
+
 	do {
-		tag = find_first_zero_bit(bqt->tag_map, bqt->max_depth);
-		if (tag >= bqt->max_depth)
+		tag = find_first_zero_bit(bqt->tag_map, max_depth);
+		if (tag >= max_depth)
 			return 1;
 
 	} while (test_and_set_bit_lock(tag, bqt->tag_map));
@@ -366,9 +357,8 @@ int blk_queue_start_tag(struct request_queue *q, struct request *rq)
 	rq->cmd_flags |= REQ_QUEUED;
 	rq->tag = tag;
 	bqt->tag_index[tag] = rq;
-	blkdev_dequeue_request(rq);
+	blk_start_request(rq);
 	list_add(&rq->queuelist, &q->tag_busy_list);
-	bqt->busy++;
 	return 0;
 }
 EXPORT_SYMBOL(blk_queue_start_tag);
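For readers skimming the last hunks above: the new sync-IO reservation in blk_queue_start_tag() reduces the tag depth that async requests are allowed to see, so sync requests always have at least one or two tags in reserve. The stand-alone sketch below reproduces just that clamp under the assumption that it is lifted out into a helper; the helper name async_tag_limit() and the main() driver are illustrative only and are not part of the kernel patch.

#include <stdio.h>

/*
 * Stand-alone rendering of the clamp the patched blk_queue_start_tag()
 * applies to the local max_depth for async requests.  In the kernel the
 * logic is inlined, not a separate function.
 */
static unsigned async_tag_limit(unsigned max_depth)
{
	/* A depth of 1 cannot be split, so it is left alone. */
	if (max_depth <= 1)
		return max_depth;

	switch (max_depth) {
	case 2:
		return 1;		/* reserve one of two tags for sync IO */
	case 3:
		return 2;		/* reserve one of three tags */
	default:
		return max_depth - 2;	/* otherwise keep two tags back */
	}
}

int main(void)
{
	unsigned depths[] = { 1, 2, 3, 4, 32, 256 };
	unsigned i;

	for (i = 0; i < sizeof(depths) / sizeof(depths[0]); i++)
		printf("max_depth %3u -> async limit %3u\n",
		       depths[i], async_tag_limit(depths[i]));
	return 0;
}

In the patch, the clamped value is used both to reject the request when q->in_flight[BLK_RW_ASYNC] already exceeds it and as the search bound passed to find_first_zero_bit(), while sync requests continue to search the full bqt->max_depth.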
