From 7749a8d423c483a51983b666613acda1a4dd9c1b Mon Sep 17 00:00:00 2001
From: Jens Axboe
Date: Wed, 13 Dec 2006 13:02:26 +0100
Subject: [PATCH] Propagate down request sync flag

We need to do this, otherwise the io schedulers don't get access
to the sync flag. Then they cannot tell the difference between a
regular write and an O_DIRECT write, which can cause a performance
loss.

Signed-off-by: Jens Axboe
---
 block/ll_rw_blk.c | 28 ++++++++++++++++++++--------
 1 file changed, 20 insertions(+), 8 deletions(-)

diff --git a/block/ll_rw_blk.c b/block/ll_rw_blk.c
index a541b42c08e..79807dbc306 100644
--- a/block/ll_rw_blk.c
+++ b/block/ll_rw_blk.c
@@ -2058,15 +2058,16 @@ static void freed_request(request_queue_t *q, int rw, int priv)
  * Returns NULL on failure, with queue_lock held.
  * Returns !NULL on success, with queue_lock *not held*.
  */
-static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
-				   gfp_t gfp_mask)
+static struct request *get_request(request_queue_t *q, int rw_flags,
+				   struct bio *bio, gfp_t gfp_mask)
 {
 	struct request *rq = NULL;
 	struct request_list *rl = &q->rq;
 	struct io_context *ioc = NULL;
+	const int rw = rw_flags & 0x01;
 	int may_queue, priv;
 
-	may_queue = elv_may_queue(q, rw);
+	may_queue = elv_may_queue(q, rw_flags);
 	if (may_queue == ELV_MQUEUE_NO)
 		goto rq_starved;
 
@@ -2114,7 +2115,7 @@ static struct request *get_request(request_queue_t *q, int rw, struct bio *bio,
 
 	spin_unlock_irq(q->queue_lock);
 
-	rq = blk_alloc_request(q, rw, priv, gfp_mask);
+	rq = blk_alloc_request(q, rw_flags, priv, gfp_mask);
 	if (unlikely(!rq)) {
 		/*
 		 * Allocation failed presumably due to memory. Undo anything
@@ -2162,12 +2163,13 @@ out:
  *
  * Called with q->queue_lock held, and returns with it unlocked.
  */
-static struct request *get_request_wait(request_queue_t *q, int rw,
+static struct request *get_request_wait(request_queue_t *q, int rw_flags,
 					struct bio *bio)
 {
+	const int rw = rw_flags & 0x01;
 	struct request *rq;
 
-	rq = get_request(q, rw, bio, GFP_NOIO);
+	rq = get_request(q, rw_flags, bio, GFP_NOIO);
 	while (!rq) {
 		DEFINE_WAIT(wait);
 		struct request_list *rl = &q->rq;
@@ -2175,7 +2177,7 @@ static struct request *get_request_wait(request_queue_t *q, int rw,
 		prepare_to_wait_exclusive(&rl->wait[rw], &wait,
 				TASK_UNINTERRUPTIBLE);
 
-		rq = get_request(q, rw, bio, GFP_NOIO);
+		rq = get_request(q, rw_flags, bio, GFP_NOIO);
 
 		if (!rq) {
 			struct io_context *ioc;
@@ -2910,6 +2912,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	int el_ret, nr_sectors, barrier, err;
 	const unsigned short prio = bio_prio(bio);
 	const int sync = bio_sync(bio);
+	int rw_flags;
 
 	nr_sectors = bio_sectors(bio);
 
@@ -2983,11 +2986,20 @@ static int __make_request(request_queue_t *q, struct bio *bio)
 	}
 
 get_rq:
+	/*
+	 * This sync check and mask will be re-done in init_request_from_bio(),
+	 * but we need to set it earlier to expose the sync flag to the
+	 * rq allocator and io schedulers.
+	 */
+	rw_flags = bio_data_dir(bio);
+	if (sync)
+		rw_flags |= REQ_RW_SYNC;
+
 	/*
 	 * Grab a free request. This is might sleep but can not fail.
 	 * Returns with the queue unlocked.
 	 */
-	req = get_request_wait(q, bio_data_dir(bio), bio);
+	req = get_request_wait(q, rw_flags, bio);
 
 	/*
 	 * After dropping the lock and possibly sleeping here, our request
--
cgit v1.2.3-18-g5258
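
To make the flag plumbing above concrete, here is a minimal standalone
sketch of the idea: the low bit of rw_flags carries the data direction
(recovered with the same `rw_flags & 0x01` mask the patch uses for the
request_list accounting), while a higher bit carries the sync hint that
now survives all the way down to the allocator and io scheduler. The
SKETCH_* constants and the bit position chosen for the sync flag are
illustrative stand-ins, not the kernel's real READ/WRITE/REQ_RW_SYNC
definitions.

	/*
	 * Illustrative userspace sketch only -- not part of the patch.
	 * Simplified stand-ins for the kernel's direction and flag bits.
	 */
	#include <stdio.h>

	#define SKETCH_READ	0		/* direction: low bit clear */
	#define SKETCH_WRITE	1		/* direction: low bit set */
	#define SKETCH_RW_SYNC	(1 << 4)	/* hypothetical sync modifier bit */

	/*
	 * What get_request() now does: recover the direction for its own
	 * bookkeeping while the full flag word stays visible to consumers.
	 */
	static void sketch_get_request(int rw_flags)
	{
		const int rw = rw_flags & 0x01;
		const int sync = !!(rw_flags & SKETCH_RW_SYNC);

		printf("dir=%s sync=%d (scheduler sees flags 0x%x)\n",
		       rw == SKETCH_WRITE ? "write" : "read", sync, rw_flags);
	}

	int main(void)
	{
		/* A regular buffered write: direction bit only. */
		sketch_get_request(SKETCH_WRITE);
		/* An O_DIRECT-style write: __make_request() ORs in the sync
		 * bit before allocation, so the elevator can tell the two
		 * apart instead of seeing a bare direction. */
		sketch_get_request(SKETCH_WRITE | SKETCH_RW_SYNC);
		return 0;
	}

Before the patch, only the masked direction reached elv_may_queue() and
blk_alloc_request(), so both calls above would have looked identical to
the io scheduler; passing rw_flags through unmasked is what lets it
treat sync writes differently.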