author     Richard Kennedy <richard@rsk.demon.co.uk>   2010-04-14 20:53:37 +0200
committer  Jens Axboe <jens.axboe@oracle.com>          2010-04-14 20:53:37 +0200
commit     c0d97e9ca2cfa66bdfd1ed8ecb5dcd230924d675
tree       caa22e17a137d1030ce2e71742f66076df19872b
parent     28baf44299e0480d66ebb3093de5d51deff04e9f
block: ensure jiffies wrap is handled correctly in blk_rq_timed_out_timer
blk_rq_timed_out_timer() relied on blk_add_timer() never returning a
timer value of zero, but commit 7838c15b8dd18e78a523513749e5b54bda07b0cb
removed the code that bumped this value when it was zero.
Therefore, when jiffies is near its wrap point we could get unlucky and
fail to set the timeout value correctly.
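To make the failure mode concrete, here is a small illustrative calculation
(not taken from the patch; the 32-bit unsigned long, HZ=1000 and the
30-second timeout are assumptions chosen for the example). A deadline
computed just before the wrap can legitimately come out as exactly zero,
which is the very value the old code used to mean "no timeout pending":

/* Illustrative only: a deadline of exactly 0 near the jiffies wrap,
 * assuming a 32-bit unsigned long, HZ=1000 and a 30-second timeout. */
unsigned long jiffies_now = 0xffffffffUL - 29999UL;	/* ~30s before the wrap */
unsigned long timeout     = 30 * 1000UL;		/* 30s at HZ=1000       */
unsigned long deadline    = jiffies_now + timeout;	/* wraps to exactly 0   */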
This patch uses a flag to indicate that the timeout value was set, and so
handles the jiffies wrap correctly. It also keeps all of the logic in one
function, so it should be easier to maintain in the future.
Signed-off-by: Richard Kennedy <richard@rsk.demon.co.uk>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
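As a rough userspace sketch of the flag-based selection the patch adopts
(assumptions: the helper name pick_next_deadline() and the plain '<'
comparison are made up for illustration; the real code uses time_after()
for wrap-safe ordering and runs under the queue lock), the next_set flag,
rather than a magic zero, records whether any deadline was seen:

#include <stdio.h>

/*
 * Sketch only: *next_set records whether any deadline was seen, so a
 * legitimate deadline of 0 is no longer confused with "nothing pending".
 * Names and the plain '<' comparison are illustrative, not kernel code.
 */
static unsigned long pick_next_deadline(const unsigned long *deadlines,
					int count, int *next_set)
{
	unsigned long next = 0;
	int i;

	*next_set = 0;
	for (i = 0; i < count; i++) {
		if (!*next_set || deadlines[i] < next) {
			next = deadlines[i];
			*next_set = 1;
		}
	}
	return next;
}

int main(void)
{
	/* One request's deadline landed exactly on the wrap point. */
	unsigned long deadlines[] = { 0UL, 42UL };
	int next_set;
	unsigned long next = pick_next_deadline(deadlines, 2, &next_set);

	if (next_set)
		printf("re-arm timer for deadline %lu\n", next);	/* prints 0 */
	else
		printf("no pending requests\n");
	return 0;
}

With the old "if (next)" test, the zero deadline above would have been
treated as "nothing pending" and the timeout timer would never have been
re-armed; with next_set it is re-armed as expected.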
 block/blk-timeout.c | 12 +++++-------
 1 file changed, 5 insertions(+), 7 deletions(-)
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 1ba7e0aca87..4f0c06c7a33 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -109,6 +109,7 @@ void blk_rq_timed_out_timer(unsigned long data)
 	struct request_queue *q = (struct request_queue *) data;
 	unsigned long flags, next = 0;
 	struct request *rq, *tmp;
+	int next_set = 0;
 
 	spin_lock_irqsave(q->queue_lock, flags);
 
@@ -122,16 +123,13 @@ void blk_rq_timed_out_timer(unsigned long data)
 			if (blk_mark_rq_complete(rq))
 				continue;
 			blk_rq_timed_out(rq);
-		} else if (!next || time_after(next, rq->deadline))
+		} else if (!next_set || time_after(next, rq->deadline)) {
 			next = rq->deadline;
+			next_set = 1;
+		}
 	}
 
-	/*
-	 * next can never be 0 here with the list non-empty, since we always
-	 * bump ->deadline to 1 so we can detect if the timer was ever added
-	 * or not. See comment in blk_add_timer()
-	 */
-	if (next)
+	if (next_set)
 		mod_timer(&q->timeout, round_jiffies_up(next));
 
 	spin_unlock_irqrestore(q->queue_lock, flags);