author | Wu Fengguang <fengguang.wu@intel.com> | 2008-11-25 09:08:39 +0100 |
---|---|---|
committer | Jens Axboe <jens.axboe@oracle.com> | 2008-12-29 08:28:43 +0100 |
commit | 7c239517d9f18427fc2e7ed259fb3b866595f5af (patch) | |
tree | cd2149cc2f2eb0faa83d38fe64e1228f7c703a85 /block/blk-sysfs.c | |
parent | 42364690992e592c05f85c76fda4055820b48c1b (diff) | |
block: don't take lock on changing ra_pages
There's no need to take queue_lock or kernel_lock when modifying
bdi->ra_pages. So remove them. Also remove the out-of-date comment in
queue_max_sectors_store().
Signed-off-by: Wu Fengguang <wfg@linux.intel.com>
Signed-off-by: Jens Axboe <jens.axboe@oracle.com>
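For illustration, here is a minimal sketch of how queue_ra_store() reads once this patch is applied, reconstructed from the hunk below; the `static ssize_t` linkage and the enclosing braces are assumptions taken from the hunk context rather than quoted from the tree:

```c
/* Sketch of queue_ra_store() after the patch, pieced together from the
 * diff context below; only the hunk lines themselves are verbatim. */
static ssize_t
queue_ra_store(struct request_queue *q, const char *page, size_t count)
{
	unsigned long ra_kb;
	ssize_t ret = queue_var_store(&ra_kb, page, count);

	/*
	 * A single word-sized store: readahead code that reads ra_pages
	 * sees either the old or the new value, so neither queue_lock
	 * nor kernel_lock is taken around it any more.
	 */
	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	return ret;
}
```

Note that only the ra_pages store goes lockless; queue_max_sectors_store() keeps its spin_lock_irq()/spin_unlock_irq() pair around the q->max_sectors update, and the patch merely drops the stale comment above it.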
Diffstat (limited to 'block/blk-sysfs.c')
-rw-r--r-- | block/blk-sysfs.c | 7 |
1 file changed, 1 insertion, 6 deletions
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index 21e275d7eed..a29cb788e40 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -88,9 +88,7 @@ queue_ra_store(struct request_queue *q, const char *page, size_t count)
 	unsigned long ra_kb;
 	ssize_t ret = queue_var_store(&ra_kb, page, count);
 
-	spin_lock_irq(q->queue_lock);
 	q->backing_dev_info.ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);
-	spin_unlock_irq(q->queue_lock);
 
 	return ret;
 }
@@ -117,10 +115,7 @@ queue_max_sectors_store(struct request_queue *q, const char *page, size_t count)
 
 	if (max_sectors_kb > max_hw_sectors_kb || max_sectors_kb < page_kb)
 		return -EINVAL;
-	/*
-	 * Take the queue lock to update the readahead and max_sectors
-	 * values synchronously:
-	 */
+
 	spin_lock_irq(q->queue_lock);
 	q->max_sectors = max_sectors_kb << 1;
 	spin_unlock_irq(q->queue_lock);
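As a side note on the `ra_kb >> (PAGE_CACHE_SHIFT - 10)` expression: the value written to the sysfs file is in KiB while ra_pages counts page-cache pages, so the shift divides by the page size in KiB. A tiny standalone illustration of the arithmetic, assuming the usual 4 KiB pages (PAGE_CACHE_SHIFT == 12) of this kernel era:

```c
#include <stdio.h>

/* Assumed value for a 4 KiB-page configuration of this era;
 * PAGE_CACHE_SHIFT - 10 == 2, i.e. divide KiB by 4 to get pages. */
#define PAGE_CACHE_SHIFT 12

int main(void)
{
	unsigned long ra_kb = 512;	/* e.g. the value written to read_ahead_kb */
	unsigned long ra_pages = ra_kb >> (PAGE_CACHE_SHIFT - 10);

	/* Prints: 512 KiB of readahead = 128 pages */
	printf("%lu KiB of readahead = %lu pages\n", ra_kb, ra_pages);
	return 0;
}
```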