diff options
author | Ming Lei <tom.leiming@gmail.com> | 2014-06-01 00:43:36 +0800 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2014-06-03 21:04:38 -0600 |
commit | 1aecfe4887713838c79bc52f774609a57db4f988 (patch) | |
tree | f2b2baf54092829ab1fc3d97087ac6b45b89cc04 /block | |
parent | 3de0ef8d0d3350964720cad2a0a72984f1bb81ba (diff) |
blk-mq: move blk_mq_get_ctx/blk_mq_put_ctx to mq private header
The blk-mq tag code needs these helpers.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq.c | 22 | ||||
-rw-r--r-- | block/blk-mq.h | 22 |
2 files changed, 22 insertions, 22 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c index 0f5879c42dc..b9230c522c6 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -33,28 +33,6 @@ static LIST_HEAD(all_q_list); static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx); -static struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, - unsigned int cpu) -{ - return per_cpu_ptr(q->queue_ctx, cpu); -} - -/* - * This assumes per-cpu software queueing queues. They could be per-node - * as well, for instance. For now this is hardcoded as-is. Note that we don't - * care about preemption, since we know the ctx's are persistent. This does - * mean that we can't rely on ctx always matching the currently running CPU. - */ -static struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) -{ - return __blk_mq_get_ctx(q, get_cpu()); -} - -static void blk_mq_put_ctx(struct blk_mq_ctx *ctx) -{ - put_cpu(); -} - /* * Check if any of the ctx's have pending work in this hardware queue */ diff --git a/block/blk-mq.h b/block/blk-mq.h index de7b3bbd5bd..57a7968e47b 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h @@ -69,4 +69,26 @@ struct blk_align_bitmap { unsigned long depth; } ____cacheline_aligned_in_smp; +static inline struct blk_mq_ctx *__blk_mq_get_ctx(struct request_queue *q, + unsigned int cpu) +{ + return per_cpu_ptr(q->queue_ctx, cpu); +} + +/* + * This assumes per-cpu software queueing queues. They could be per-node + * as well, for instance. For now this is hardcoded as-is. Note that we don't + * care about preemption, since we know the ctx's are persistent. This does + * mean that we can't rely on ctx always matching the currently running CPU. + */ +static inline struct blk_mq_ctx *blk_mq_get_ctx(struct request_queue *q) +{ + return __blk_mq_get_ctx(q, get_cpu()); +} + +static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) +{ + put_cpu(); +} + #endif |