Diffstat (limited to 'kernel/sched.c')
-rw-r--r--	kernel/sched.c	45
1 file changed, 22 insertions(+), 23 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index 04949089e76..a0eb0941fa8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -278,14 +278,12 @@ struct task_group {
 #endif
 };
 
-#define root_task_group init_task_group
-
 /* task_group_lock serializes the addition/removal of task groups */
 static DEFINE_SPINLOCK(task_group_lock);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
 
-# define INIT_TASK_GROUP_LOAD	NICE_0_LOAD
+# define ROOT_TASK_GROUP_LOAD	NICE_0_LOAD
 
 /*
  * A weight of 0 or 1 can cause arithmetics problems.
@@ -298,13 +296,13 @@ static DEFINE_SPINLOCK(task_group_lock);
 #define MIN_SHARES	2
 #define MAX_SHARES	(1UL << 18)
 
-static int init_task_group_load = INIT_TASK_GROUP_LOAD;
+static int root_task_group_load = ROOT_TASK_GROUP_LOAD;
 #endif
 
 /* Default task group.
  *	Every task in system belong to this group at bootup.
  */
-struct task_group init_task_group;
+struct task_group root_task_group;
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
@@ -743,7 +741,7 @@ sched_feat_write(struct file *filp, const char __user *ubuf,
 	buf[cnt] = 0;
 	cmp = strstrip(buf);
 
-	if (strncmp(buf, "NO_", 3) == 0) {
+	if (strncmp(cmp, "NO_", 3) == 0) {
 		neg = 1;
 		cmp += 3;
 	}
@@ -7848,7 +7846,7 @@ static void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
 	cfs_rq->tg = tg;
 
 	tg->se[cpu] = se;
-	/* se could be NULL for init_task_group */
+	/* se could be NULL for root_task_group */
 	if (!se)
 		return;
 
@@ -7908,18 +7906,18 @@ void __init sched_init(void)
 		ptr = (unsigned long)kzalloc(alloc_size, GFP_NOWAIT);
 
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.se = (struct sched_entity **)ptr;
+		root_task_group.se = (struct sched_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.cfs_rq = (struct cfs_rq **)ptr;
+		root_task_group.cfs_rq = (struct cfs_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 #ifdef CONFIG_RT_GROUP_SCHED
-		init_task_group.rt_se = (struct sched_rt_entity **)ptr;
+		root_task_group.rt_se = (struct sched_rt_entity **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
-		init_task_group.rt_rq = (struct rt_rq **)ptr;
+		root_task_group.rt_rq = (struct rt_rq **)ptr;
 		ptr += nr_cpu_ids * sizeof(void **);
 
 #endif /* CONFIG_RT_GROUP_SCHED */
@@ -7939,13 +7937,13 @@ void __init sched_init(void)
 			global_rt_period(), global_rt_runtime());
 
 #ifdef CONFIG_RT_GROUP_SCHED
-	init_rt_bandwidth(&init_task_group.rt_bandwidth,
+	init_rt_bandwidth(&root_task_group.rt_bandwidth,
 			global_rt_period(), global_rt_runtime());
 #endif /* CONFIG_RT_GROUP_SCHED */
 
 #ifdef CONFIG_CGROUP_SCHED
-	list_add(&init_task_group.list, &task_groups);
-	INIT_LIST_HEAD(&init_task_group.children);
+	list_add(&root_task_group.list, &task_groups);
+	INIT_LIST_HEAD(&root_task_group.children);
 	autogroup_init(&init_task);
 #endif /* CONFIG_CGROUP_SCHED */
 
@@ -7960,34 +7958,34 @@ void __init sched_init(void)
 		init_cfs_rq(&rq->cfs, rq);
 		init_rt_rq(&rq->rt, rq);
 #ifdef CONFIG_FAIR_GROUP_SCHED
-		init_task_group.shares = init_task_group_load;
+		root_task_group.shares = root_task_group_load;
 		INIT_LIST_HEAD(&rq->leaf_cfs_rq_list);
 		/*
-		 * How much cpu bandwidth does init_task_group get?
+		 * How much cpu bandwidth does root_task_group get?
 		 *
 		 * In case of task-groups formed thr' the cgroup filesystem, it
 		 * gets 100% of the cpu resources in the system. This overall
 		 * system cpu resource is divided among the tasks of
-		 * init_task_group and its child task-groups in a fair manner,
+		 * root_task_group and its child task-groups in a fair manner,
 		 * based on each entity's (task or task-group's) weight
 		 * (se->load.weight).
 		 *
-		 * In other words, if init_task_group has 10 tasks of weight
+		 * In other words, if root_task_group has 10 tasks of weight
 		 * 1024) and two child groups A0 and A1 (of weight 1024 each),
 		 * then A0's share of the cpu resource is:
 		 *
 		 *	A0's bandwidth = 1024 / (10*1024 + 1024 + 1024) = 8.33%
 		 *
-		 * We achieve this by letting init_task_group's tasks sit
-		 * directly in rq->cfs (i.e init_task_group->se[] = NULL).
+		 * We achieve this by letting root_task_group's tasks sit
+		 * directly in rq->cfs (i.e root_task_group->se[] = NULL).
 		 */
-		init_tg_cfs_entry(&init_task_group, &rq->cfs, NULL, i, NULL);
+		init_tg_cfs_entry(&root_task_group, &rq->cfs, NULL, i, NULL);
 #endif /* CONFIG_FAIR_GROUP_SCHED */
 
 		rq->rt.rt_runtime = def_rt_bandwidth.rt_runtime;
 #ifdef CONFIG_RT_GROUP_SCHED
 		INIT_LIST_HEAD(&rq->leaf_rt_rq_list);
-		init_tg_rt_entry(&init_task_group, &rq->rt, NULL, i, NULL);
+		init_tg_rt_entry(&root_task_group, &rq->rt, NULL, i, NULL);
 #endif
 
 		for (j = 0; j < CPU_LOAD_IDX_MAX; j++)
@@ -8379,6 +8377,7 @@ static void free_sched_group(struct task_group *tg)
 {
 	free_fair_sched_group(tg);
 	free_rt_sched_group(tg);
+	autogroup_free(tg);
 	kfree(tg);
 }
 
@@ -8812,7 +8811,7 @@ cpu_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cgrp)
 
 	if (!cgrp->parent) {
 		/* This is early initialization for the top cgroup */
-		return &init_task_group.css;
+		return &root_task_group.css;
 	}
 
 	parent = cgroup_tg(cgrp->parent);
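Everything above is a mechanical init_task_group -> root_task_group rename except for two functional changes: free_sched_group() now calls autogroup_free(), and the hunk at line 741 fixes a parsing bug in sched_feat_write(). There, the "NO_" prefix was tested against the raw copied buffer buf instead of the whitespace-stripped cmp returned by strstrip(), so a write with leading whitespace would never register as a negation. A minimal user-space sketch of that failure mode (trim() is only a stand-in for the kernel's strstrip(), and the feature name is just an example):

#include <stdio.h>
#include <string.h>
#include <ctype.h>

/* Stand-in for the kernel's strstrip(): trim leading and trailing
 * whitespace in place, returning a pointer past the leading part. */
static char *trim(char *s)
{
	char *end;

	while (isspace((unsigned char)*s))
		s++;
	end = s + strlen(s);
	while (end > s && isspace((unsigned char)end[-1]))
		*--end = '\0';
	return s;
}

int main(void)
{
	char buf[] = "  NO_GENTLE_FAIR_SLEEPERS\n";
	char *cmp = trim(buf);

	/* Pre-fix check: the untrimmed buffer still carries the leading
	 * spaces, so the prefix test misses the negation. */
	printf("strncmp(buf, \"NO_\", 3): %s\n",
	       strncmp(buf, "NO_", 3) == 0 ? "negation seen" : "negation missed");

	/* Post-fix check against the trimmed string succeeds. */
	printf("strncmp(cmp, \"NO_\", 3): %s\n",
	       strncmp(cmp, "NO_", 3) == 0 ? "negation seen" : "negation missed");
	return 0;
}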

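The block comment updated in sched_init() also encodes the root group's share arithmetic: because root_task_group's tasks sit directly in rq->cfs (its se[] entries stay NULL), each child group's sched_entity competes with those tasks as a peer, and any entity's bandwidth is its weight divided by the sum of all peer weights. A quick check of the 8.33% figure from that comment (the weights and task counts are taken from the example, not from live kernel data):

#include <stdio.h>

int main(void)
{
	/* Example from the sched_init() comment: ten root-level tasks
	 * of weight 1024 (NICE_0_LOAD) plus two child groups A0 and
	 * A1, each entered into rq->cfs at weight 1024. */
	const double nice_0_load = 1024.0;
	double total = 10 * nice_0_load		/* root_task_group's tasks */
		     + nice_0_load		/* group A0 */
		     + nice_0_load;		/* group A1 */

	/* A0's bandwidth = 1024 / 12288, i.e. about 8.33% */
	printf("A0's share: %.2f%%\n", 100.0 * nice_0_load / total);
	return 0;
}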