author    Tejun Heo <tj@kernel.org>      2012-03-05 13:15:13 -0800
committer Jens Axboe <axboe@kernel.dk>  2012-03-06 21:27:23 +0100
commit    923adde1be1df57cebd80c563058e503376645e8
tree      7009edf644abdf6b91daaab3967ffa6a8b30633a /block
parent    5efd611351d1a847c72d74fb12ff4bd187c0cb2c
blkcg: clear all request_queues on blkcg policy [un]registrations
Keep track of all request_queues which have blkcg initialized and turn
on bypass and invoke blkcg_clear_queue() on all before making changes
to blkcg policies.

This is to prepare for moving blkg management into blkcg core.

Note that this uses more brute force than necessary.  Finer grained
shoot down will be implemented later and given that policy
[un]registration almost never happens on running systems (blk-throtl
can't be built as a module and cfq usually is the builtin default
iosched), this shouldn't be a problem for the time being.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Vivek Goyal <vgoyal@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
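[Editorial note] The mechanism is compact enough to sketch outside the kernel. Below is a minimal userspace analogue of the pattern, assuming pthreads in place of kernel locking; struct queue, bypass_start() and bypass_end() are illustrative stand-ins for request_queue, blkcg_bypass_start() and blkcg_bypass_end() in the diff below, and the loop comment marks where blkg_destroy_all() would shoot down per-queue state:

/*
 * Userspace sketch only, not kernel code: a mutex-protected global list
 * of live queues is walked to put every queue into bypass mode around a
 * policy [un]registration.
 */
#include <pthread.h>
#include <stdio.h>

struct queue {
	struct queue *next;	/* linkage on the global queue list */
	int bypass_depth;	/* nests, like blk_queue_bypass_start() */
};

static pthread_mutex_t all_q_mutex = PTHREAD_MUTEX_INITIALIZER;
static struct queue *all_q_list;	/* every initialized queue */

static void bypass_start(void)		/* cf. blkcg_bypass_start() */
{
	struct queue *q;

	pthread_mutex_lock(&all_q_mutex);
	for (q = all_q_list; q; q = q->next) {
		q->bypass_depth++;	/* requests now skip policy code */
		/* per-queue policy data would be destroyed here */
	}
}

static void bypass_end(void)		/* cf. blkcg_bypass_end() */
{
	struct queue *q;

	for (q = all_q_list; q; q = q->next)
		q->bypass_depth--;
	pthread_mutex_unlock(&all_q_mutex);
}

int main(void)
{
	struct queue q1 = { NULL, 0 }, q0 = { &q1, 0 };

	all_q_list = &q0;
	bypass_start();
	/* the policy table would be modified here, with every queue
	 * guaranteed not to be running policy code */
	printf("bypassed: depth=%d,%d\n", q0.bypass_depth, q1.bypass_depth);
	bypass_end();
	return 0;
}

Holding all_q_mutex across the whole [un]registration is what makes the brute-force approach safe: no queue can join or leave the list while every queue sits in bypass.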
Diffstat (limited to 'block')
-rw-r--r--    block/blk-cgroup.c    48
1 file changed, 47 insertions(+), 1 deletion(-)
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index b302ce1d662..266c0707d58 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -27,6 +27,9 @@
 static DEFINE_SPINLOCK(blkio_list_lock);
 static LIST_HEAD(blkio_list);
 
+static DEFINE_MUTEX(all_q_mutex);
+static LIST_HEAD(all_q_list);
+
 struct blkio_cgroup blkio_root_cgroup = { .weight = 2*BLKIO_WEIGHT_DEFAULT };
 EXPORT_SYMBOL_GPL(blkio_root_cgroup);
@@ -1472,9 +1475,20 @@ done:
  */
 int blkcg_init_queue(struct request_queue *q)
 {
+	int ret;
+
 	might_sleep();
 
-	return blk_throtl_init(q);
+	ret = blk_throtl_init(q);
+	if (ret)
+		return ret;
+
+	mutex_lock(&all_q_mutex);
+	INIT_LIST_HEAD(&q->all_q_node);
+	list_add_tail(&q->all_q_node, &all_q_list);
+	mutex_unlock(&all_q_mutex);
+
+	return 0;
 }
 
 /**
@@ -1498,6 +1512,10 @@ void blkcg_drain_queue(struct request_queue *q)
  */
 void blkcg_exit_queue(struct request_queue *q)
 {
+	mutex_lock(&all_q_mutex);
+	list_del_init(&q->all_q_node);
+	mutex_unlock(&all_q_mutex);
+
 	blk_throtl_exit(q);
 }
@@ -1543,8 +1561,33 @@ static void blkiocg_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 	}
 }
 
+static void blkcg_bypass_start(void)
+	__acquires(&all_q_mutex)
+{
+	struct request_queue *q;
+
+	mutex_lock(&all_q_mutex);
+
+	list_for_each_entry(q, &all_q_list, all_q_node) {
+		blk_queue_bypass_start(q);
+		blkg_destroy_all(q);
+	}
+}
+
+static void blkcg_bypass_end(void)
+	__releases(&all_q_mutex)
+{
+	struct request_queue *q;
+
+	list_for_each_entry(q, &all_q_list, all_q_node)
+		blk_queue_bypass_end(q);
+
+	mutex_unlock(&all_q_mutex);
+}
+
 void blkio_policy_register(struct blkio_policy_type *blkiop)
 {
+	blkcg_bypass_start();
 	spin_lock(&blkio_list_lock);
 
 	BUG_ON(blkio_policy[blkiop->plid]);
@@ -1552,11 +1595,13 @@ void blkio_policy_register(struct blkio_policy_type *blkiop)
 	list_add_tail(&blkiop->list, &blkio_list);
 
 	spin_unlock(&blkio_list_lock);
+	blkcg_bypass_end();
 }
 EXPORT_SYMBOL_GPL(blkio_policy_register);
 
 void blkio_policy_unregister(struct blkio_policy_type *blkiop)
 {
+	blkcg_bypass_start();
 	spin_lock(&blkio_list_lock);
 
 	BUG_ON(blkio_policy[blkiop->plid] != blkiop);
@@ -1564,5 +1609,6 @@ void blkio_policy_unregister(struct blkio_policy_type *blkiop)
 	list_del_init(&blkiop->list);
 
 	spin_unlock(&blkio_list_lock);
+	blkcg_bypass_end();
 }
 EXPORT_SYMBOL_GPL(blkio_policy_unregister);
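
[Editorial note] For context on when these paths actually run: as the commit message notes, policy [un]registration effectively happens only when an iosched module loads or unloads. A hypothetical caller might look like the sketch below; the policy name, the elided ops, and the BLKIO_POLICY_PROP plid choice are illustrative assumptions, not part of this patch:

/* Hypothetical policy module; name and ops contents are illustrative. */
#include <linux/module.h>
#include "blk-cgroup.h"

static struct blkio_policy_type blkio_policy_example = {
	/* .ops = { ...per-policy callbacks elided... }, */
	.plid = BLKIO_POLICY_PROP,	/* assumed plid for this sketch */
};

static int __init example_init(void)
{
	/* walks all_q_list under all_q_mutex, bypassing every queue */
	blkio_policy_register(&blkio_policy_example);
	return 0;
}

static void __exit example_exit(void)
{
	blkio_policy_unregister(&blkio_policy_example);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");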