| author | Tejun Heo <tj@kernel.org> | 2012-05-23 12:16:21 +0200 |
|---|---|---|
| committer | Jens Axboe <axboe@kernel.dk> | 2012-05-23 12:16:21 +0200 |
| commit | ff26eaadf4d914e397872b99885d45756104e9ae (patch) | |
| tree | 5020f3e4a14ab68f6d027366e719b7fb5193123c /block/blk-throttle.c | |
| parent | 0b7877d4eea3f93e3dd941999522bbd8c538cb53 (diff) | |
blkcg: tg_stats_alloc_lock is an irq lock
tg_stats_alloc_lock nests inside the queue lock and should always be held
with IRQs disabled. throtl_pd_{init|exit}() were using non-irqsafe
spinlock ops, which triggered an inverse-lock-ordering-via-IRQ warning
when RCU freeing of a blkg invoked throtl_pd_exit() without disabling IRQs.
Update both functions to use irq-safe operations.
Signed-off-by: Tejun Heo <tj@kernel.org>
Reported-by: Sasha Levin <sasha.levin@oracle.com>
LKML-Reference: <1335339396.16988.80.camel@lappy>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block/blk-throttle.c')
-rw-r--r-- | block/blk-throttle.c | 10 |
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 14dedecfc7e..5b065951204 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -219,6 +219,7 @@ alloc_stats:
 static void throtl_pd_init(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
 	RB_CLEAR_NODE(&tg->rb_node);
 	bio_list_init(&tg->bio_lists[0]);
@@ -235,19 +236,20 @@ static void throtl_pd_init(struct blkcg_gq *blkg)
 	 * but percpu allocator can't be called from IO path.  Queue tg on
 	 * tg_stats_alloc_list and allocate from work item.
 	 */
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_add(&tg->stats_alloc_node, &tg_stats_alloc_list);
 	queue_delayed_work(system_nrt_wq, &tg_stats_alloc_work, 0);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 }
 
 static void throtl_pd_exit(struct blkcg_gq *blkg)
 {
 	struct throtl_grp *tg = blkg_to_tg(blkg);
+	unsigned long flags;
 
-	spin_lock(&tg_stats_alloc_lock);
+	spin_lock_irqsave(&tg_stats_alloc_lock, flags);
 	list_del_init(&tg->stats_alloc_node);
-	spin_unlock(&tg_stats_alloc_lock);
+	spin_unlock_irqrestore(&tg_stats_alloc_lock, flags);
 
 	free_percpu(tg->stats_cpu);
 }
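
For context, here is a minimal sketch of the general locking pattern the patch applies; it is not part of the commit, and all names (demo_lock, demo_list, demo_item, demo_add, demo_remove) are hypothetical. The idea is that a lock which nests inside an IRQ-disabled lock (here, the queue lock) must be acquired with the irq-saving spinlock variants everywhere it is taken, otherwise the plain spin_lock()/spin_unlock() pair creates an inverse lock-ordering scenario.

```c
/*
 * Hypothetical sketch of the irq-safe locking pattern used by the patch.
 * demo_lock, demo_list and demo_item are illustrative names only; the
 * real code protects tg_stats_alloc_list with tg_stats_alloc_lock.
 */
#include <linux/spinlock.h>
#include <linux/list.h>

static DEFINE_SPINLOCK(demo_lock);	/* nests inside an irq-disabled lock */
static LIST_HEAD(demo_list);

struct demo_item {
	struct list_head node;
};

/*
 * Callers may already run with IRQs disabled (e.g. under the queue lock)
 * or be reached from an RCU free path, so the plain spin_lock() variant
 * is not safe here; save and restore the IRQ state instead.
 */
static void demo_add(struct demo_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);	/* disable IRQs, remember old state */
	list_add(&item->node, &demo_list);
	spin_unlock_irqrestore(&demo_lock, flags); /* restore previous IRQ state */
}

static void demo_remove(struct demo_item *item)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_del_init(&item->node);
	spin_unlock_irqrestore(&demo_lock, flags);
}
```

The irqsave variant is used rather than spin_lock_irq() because some callers, such as throtl_pd_init() running under the queue lock, already have IRQs disabled; unconditionally re-enabling IRQs on unlock would be incorrect there.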