author     Jerome Marchand <jmarchan@redhat.com>     2009-04-22 14:01:49 +0200
committer  Jens Axboe <jens.axboe@oracle.com>        2009-04-24 08:54:21 +0200
commit     42dad7647aec49b3ad20dd0cb832b232a6ae514f
tree       b70d4cb7706f2647e65426e24f078ddf14d6e139 /block/blk-core.c
parent     097102c2d04974bdfcfa16a5f3062d499842139c
block: simplify I/O stat accounting
This simplifies the I/O stat accounting switching code and separates it
completely from the I/O scheduler switching code.

Requests are accounted according to the state of their request queue at
the time of request allocation, so the accounting decision travels with
the request itself (see the sketch below). There is consequently no
longer any need to flush the request queue when switching the I/O
accounting state.
Signed-off-by: Jerome Marchand <jmarchan@redhat.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
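
To illustrate the idea described above, here is a minimal, compilable
userspace sketch of the latch-at-allocation pattern the patch adopts.
All names here (struct queue, alloc_request, complete_request) are
illustrative stand-ins, not kernel API; only REQ_IO_STAT mirrors the
role of the real flag:

#include <stdio.h>
#include <stdbool.h>

#define REQ_IO_STAT (1u << 0)	/* stands in for the kernel's REQ_IO_STAT */

struct queue   { bool io_stat; };	  /* stands in for QUEUE_FLAG_IO_STAT */
struct request { unsigned int cmd_flags; };

/* Allocation: copy the queue's accounting state into the request, once. */
static void alloc_request(struct queue *q, struct request *rq)
{
	rq->cmd_flags = 0;
	if (q->io_stat)
		rq->cmd_flags |= REQ_IO_STAT;
}

/* Completion: consult only the request's own latched flags. */
static void complete_request(const struct request *rq)
{
	printf("request accounted: %s\n",
	       (rq->cmd_flags & REQ_IO_STAT) ? "yes" : "no");
}

int main(void)
{
	struct queue q = { .io_stat = true };
	struct request rq;

	alloc_request(&q, &rq);	/* queue state latched into rq->cmd_flags */
	q.io_stat = false;	/* accounting toggled off afterwards */
	complete_request(&rq);	/* prints "yes": rq keeps its latched state */
	return 0;
}

Because the flag is copied exactly once at allocation, a concurrent
toggle of the accounting state can never leave an in-flight request
half-accounted; requests finish under the policy they started with.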
Diffstat (limited to 'block/blk-core.c')
-rw-r--r--  block/blk-core.c  6
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/block/blk-core.c b/block/blk-core.c
index 07ab75403e1..2998fe3a237 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -643,7 +643,7 @@ static inline void blk_free_request(struct request_queue *q, struct request *rq)
 }
 
 static struct request *
-blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
+blk_alloc_request(struct request_queue *q, int flags, int priv, gfp_t gfp_mask)
 {
 	struct request *rq = mempool_alloc(q->rq.rq_pool, gfp_mask);
 
@@ -652,7 +652,7 @@ blk_alloc_request(struct request_queue *q, int rw, int priv, gfp_t gfp_mask)
 
 	blk_rq_init(q, rq);
 
-	rq->cmd_flags = rw | REQ_ALLOCED;
+	rq->cmd_flags = flags | REQ_ALLOCED;
 
 	if (priv) {
 		if (unlikely(elv_set_request(q, rq, gfp_mask))) {
@@ -792,6 +792,8 @@ static struct request *get_request(struct request_queue *q, int rw_flags,
 	if (priv)
 		rl->elvpriv++;
 
+	if (blk_queue_io_stat(q))
+		rw_flags |= REQ_IO_STAT;
 	spin_unlock_irq(q->queue_lock);
 
 	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
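
For context, the queue-side helper used in the hunk above and the
per-request test that accounting code performs look roughly like the
following, paraphrased from include/linux/blkdev.h of this kernel
generation (treat the exact text as approximate):

#define blk_queue_io_stat(q)	test_bit(QUEUE_FLAG_IO_STAT, &(q)->queue_flags)
#define blk_rq_io_stat(rq)	((rq)->cmd_flags & REQ_IO_STAT)

The sysfs knob /sys/block/<dev>/queue/iostats sets or clears
QUEUE_FLAG_IO_STAT on the queue, while completion-time accounting
consults blk_rq_io_stat(rq) rather than the queue flag. That split is
precisely why toggling the knob no longer requires flushing the queue.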