Diffstat (limited to 'kernel')
-rw-r--r--  kernel/Makefile      |   1
-rw-r--r--  kernel/cgroup.c      | 318
-rw-r--r--  kernel/cpuset.c      | 394
-rw-r--r--  kernel/fork.c        |  11
-rw-r--r--  kernel/kexec.c       |  18
-rw-r--r--  kernel/pid.c         |   1
-rw-r--r--  kernel/res_counter.c | 134
-rw-r--r--  kernel/sysctl.c      |   9

8 files changed, 631 insertions, 255 deletions
diff --git a/kernel/Makefile b/kernel/Makefile index 135a1b94344..685697c0a18 100644 --- a/kernel/Makefile +++ b/kernel/Makefile @@ -43,6 +43,7 @@ obj-$(CONFIG_CGROUP_DEBUG) += cgroup_debug.o obj-$(CONFIG_CPUSETS) += cpuset.o obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o obj-$(CONFIG_IKCONFIG) += configs.o +obj-$(CONFIG_RESOURCE_COUNTERS) += res_counter.o obj-$(CONFIG_STOP_MACHINE) += stop_machine.o obj-$(CONFIG_KPROBES_SANITY_TEST) += test_kprobes.o obj-$(CONFIG_AUDIT) += audit.o auditfilter.o diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 1a3c23936d4..4766bb65e4d 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -141,7 +141,7 @@ enum { ROOT_NOPREFIX, /* mounted subsystems have no named prefix */ }; -inline int cgroup_is_releasable(const struct cgroup *cgrp) +static int cgroup_is_releasable(const struct cgroup *cgrp) { const int bits = (1 << CGRP_RELEASABLE) | @@ -149,7 +149,7 @@ inline int cgroup_is_releasable(const struct cgroup *cgrp) return (cgrp->flags & bits) == bits; } -inline int notify_on_release(const struct cgroup *cgrp) +static int notify_on_release(const struct cgroup *cgrp) { return test_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); } @@ -489,7 +489,7 @@ static struct css_set *find_css_set( * Any task can increment and decrement the count field without lock. * So in general, code holding cgroup_mutex can't rely on the count * field not changing. However, if the count goes to zero, then only - * attach_task() can increment it again. Because a count of zero + * cgroup_attach_task() can increment it again. Because a count of zero * means that no tasks are currently attached, therefore there is no * way a task attached to that cgroup can fork (the other way to * increment the count). So code holding cgroup_mutex can safely @@ -520,17 +520,17 @@ static struct css_set *find_css_set( * The task_lock() exception * * The need for this exception arises from the action of - * attach_task(), which overwrites one tasks cgroup pointer with + * cgroup_attach_task(), which overwrites one tasks cgroup pointer with * another. It does so using cgroup_mutexe, however there are * several performance critical places that need to reference * task->cgroup without the expense of grabbing a system global * mutex. Therefore except as noted below, when dereferencing or, as - * in attach_task(), modifying a task'ss cgroup pointer we use + * in cgroup_attach_task(), modifying a task'ss cgroup pointer we use * task_lock(), which acts on a spinlock (task->alloc_lock) already in * the task_struct routinely used for such matters. * * P.S. One more locking exception. RCU is used to guard the - * update of a tasks cgroup pointer by attach_task() + * update of a tasks cgroup pointer by cgroup_attach_task() */ /** @@ -586,11 +586,27 @@ static struct inode *cgroup_new_inode(mode_t mode, struct super_block *sb) return inode; } +/* + * Call subsys's pre_destroy handler. + * This is called before css refcnt check. + */ + +static void cgroup_call_pre_destroy(struct cgroup *cgrp) +{ + struct cgroup_subsys *ss; + for_each_subsys(cgrp->root, ss) + if (ss->pre_destroy && cgrp->subsys[ss->subsys_id]) + ss->pre_destroy(ss, cgrp); + return; +} + + static void cgroup_diput(struct dentry *dentry, struct inode *inode) { /* is dentry a directory ? 
if so, kfree() associated cgroup */ if (S_ISDIR(inode->i_mode)) { struct cgroup *cgrp = dentry->d_fsdata; + struct cgroup_subsys *ss; BUG_ON(!(cgroup_is_removed(cgrp))); /* It's possible for external users to be holding css * reference counts on a cgroup; css_put() needs to @@ -599,6 +615,23 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode) * queue the cgroup to be handled by the release * agent */ synchronize_rcu(); + + mutex_lock(&cgroup_mutex); + /* + * Release the subsystem state objects. + */ + for_each_subsys(cgrp->root, ss) { + if (cgrp->subsys[ss->subsys_id]) + ss->destroy(ss, cgrp); + } + + cgrp->root->number_of_cgroups--; + mutex_unlock(&cgroup_mutex); + + /* Drop the active superblock reference that we took when we + * created the cgroup */ + deactivate_super(cgrp->root->sb); + kfree(cgrp); } iput(inode); @@ -1161,7 +1194,7 @@ static void get_first_subsys(const struct cgroup *cgrp, * Call holding cgroup_mutex. May take task_lock of * the task 'pid' during call. */ -static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) +int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) { int retval = 0; struct cgroup_subsys *ss; @@ -1181,9 +1214,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) for_each_subsys(root, ss) { if (ss->can_attach) { retval = ss->can_attach(ss, cgrp, tsk); - if (retval) { + if (retval) return retval; - } } } @@ -1192,9 +1224,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) * based on its final set of cgroups */ newcg = find_css_set(cg, cgrp); - if (!newcg) { + if (!newcg) return -ENOMEM; - } task_lock(tsk); if (tsk->flags & PF_EXITING) { @@ -1214,9 +1245,8 @@ static int attach_task(struct cgroup *cgrp, struct task_struct *tsk) write_unlock(&css_set_lock); for_each_subsys(root, ss) { - if (ss->attach) { + if (ss->attach) ss->attach(ss, cgrp, oldcgrp, tsk); - } } set_bit(CGRP_RELEASABLE, &oldcgrp->flags); synchronize_rcu(); @@ -1239,7 +1269,7 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) if (pid) { rcu_read_lock(); - tsk = find_task_by_pid(pid); + tsk = find_task_by_vpid(pid); if (!tsk || tsk->flags & PF_EXITING) { rcu_read_unlock(); return -ESRCH; @@ -1257,7 +1287,7 @@ static int attach_task_by_pid(struct cgroup *cgrp, char *pidbuf) get_task_struct(tsk); } - ret = attach_task(cgrp, tsk); + ret = cgroup_attach_task(cgrp, tsk); put_task_struct(tsk); return ret; } @@ -1329,9 +1359,14 @@ static ssize_t cgroup_common_file_write(struct cgroup *cgrp, goto out1; } buffer[nbytes] = 0; /* nul-terminate */ + strstrip(buffer); /* strip -just- trailing whitespace */ mutex_lock(&cgroup_mutex); + /* + * This was already checked for in cgroup_file_write(), but + * check again now we're holding cgroup_mutex. 
+ */ if (cgroup_is_removed(cgrp)) { retval = -ENODEV; goto out2; @@ -1349,24 +1384,9 @@ static ssize_t cgroup_common_file_write(struct cgroup *cgrp, clear_bit(CGRP_NOTIFY_ON_RELEASE, &cgrp->flags); break; case FILE_RELEASE_AGENT: - { - struct cgroupfs_root *root = cgrp->root; - /* Strip trailing newline */ - if (nbytes && (buffer[nbytes-1] == '\n')) { - buffer[nbytes-1] = 0; - } - if (nbytes < sizeof(root->release_agent_path)) { - /* We never write anything other than '\0' - * into the last char of release_agent_path, - * so it always remains a NUL-terminated - * string */ - strncpy(root->release_agent_path, buffer, nbytes); - root->release_agent_path[nbytes] = 0; - } else { - retval = -ENOSPC; - } + BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + strcpy(cgrp->root->release_agent_path, buffer); break; - } default: retval = -EINVAL; goto out2; @@ -1387,7 +1407,7 @@ static ssize_t cgroup_file_write(struct file *file, const char __user *buf, struct cftype *cft = __d_cft(file->f_dentry); struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); - if (!cft) + if (!cft || cgroup_is_removed(cgrp)) return -ENODEV; if (cft->write) return cft->write(cgrp, cft, file, buf, nbytes, ppos); @@ -1457,7 +1477,7 @@ static ssize_t cgroup_file_read(struct file *file, char __user *buf, struct cftype *cft = __d_cft(file->f_dentry); struct cgroup *cgrp = __d_cgrp(file->f_dentry->d_parent); - if (!cft) + if (!cft || cgroup_is_removed(cgrp)) return -ENODEV; if (cft->read) @@ -1675,6 +1695,29 @@ static void cgroup_advance_iter(struct cgroup *cgrp, it->task = cg->tasks.next; } +/* + * To reduce the fork() overhead for systems that are not actually + * using their cgroups capability, we don't maintain the lists running + * through each css_set to its tasks until we see the list actually + * used - in other words after the first call to cgroup_iter_start(). + * + * The tasklist_lock is not held here, as do_each_thread() and + * while_each_thread() are protected by RCU. + */ +void cgroup_enable_task_cg_lists(void) +{ + struct task_struct *p, *g; + write_lock(&css_set_lock); + use_task_css_set_links = 1; + do_each_thread(g, p) { + task_lock(p); + if (list_empty(&p->cg_list)) + list_add(&p->cg_list, &p->cgroups->tasks); + task_unlock(p); + } while_each_thread(g, p); + write_unlock(&css_set_lock); +} + void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) { /* @@ -1682,18 +1725,9 @@ void cgroup_iter_start(struct cgroup *cgrp, struct cgroup_iter *it) * we need to enable the list linking each css_set to its * tasks, and fix up all existing tasks. 
*/ - if (!use_task_css_set_links) { - struct task_struct *p, *g; - write_lock(&css_set_lock); - use_task_css_set_links = 1; - do_each_thread(g, p) { - task_lock(p); - if (list_empty(&p->cg_list)) - list_add(&p->cg_list, &p->cgroups->tasks); - task_unlock(p); - } while_each_thread(g, p); - write_unlock(&css_set_lock); - } + if (!use_task_css_set_links) + cgroup_enable_task_cg_lists(); + read_lock(&css_set_lock); it->cg_link = &cgrp->css_sets; cgroup_advance_iter(cgrp, it); @@ -1726,6 +1760,166 @@ void cgroup_iter_end(struct cgroup *cgrp, struct cgroup_iter *it) read_unlock(&css_set_lock); } +static inline int started_after_time(struct task_struct *t1, + struct timespec *time, + struct task_struct *t2) +{ + int start_diff = timespec_compare(&t1->start_time, time); + if (start_diff > 0) { + return 1; + } else if (start_diff < 0) { + return 0; + } else { + /* + * Arbitrarily, if two processes started at the same + * time, we'll say that the lower pointer value + * started first. Note that t2 may have exited by now + * so this may not be a valid pointer any longer, but + * that's fine - it still serves to distinguish + * between two tasks started (effectively) simultaneously. + */ + return t1 > t2; + } +} + +/* + * This function is a callback from heap_insert() and is used to order + * the heap. + * In this case we order the heap in descending task start time. + */ +static inline int started_after(void *p1, void *p2) +{ + struct task_struct *t1 = p1; + struct task_struct *t2 = p2; + return started_after_time(t1, &t2->start_time, t2); +} + +/** + * cgroup_scan_tasks - iterate though all the tasks in a cgroup + * @scan: struct cgroup_scanner containing arguments for the scan + * + * Arguments include pointers to callback functions test_task() and + * process_task(). + * Iterate through all the tasks in a cgroup, calling test_task() for each, + * and if it returns true, call process_task() for it also. + * The test_task pointer may be NULL, meaning always true (select all tasks). + * Effectively duplicates cgroup_iter_{start,next,end}() + * but does not lock css_set_lock for the call to process_task(). + * The struct cgroup_scanner may be embedded in any structure of the caller's + * creation. + * It is guaranteed that process_task() will act on every task that + * is a member of the cgroup for the duration of this call. This + * function may or may not call process_task() for tasks that exit + * or move to a different cgroup during the call, or are forked or + * move into the cgroup during the call. + * + * Note that test_task() may be called with locks held, and may in some + * situations be called multiple times for the same task, so it should + * be cheap. + * If the heap pointer in the struct cgroup_scanner is non-NULL, a heap has been + * pre-allocated and will be used for heap operations (and its "gt" member will + * be overwritten), else a temporary heap will be used (allocation of which + * may cause this function to fail). 
+ */ +int cgroup_scan_tasks(struct cgroup_scanner *scan) +{ + int retval, i; + struct cgroup_iter it; + struct task_struct *p, *dropped; + /* Never dereference latest_task, since it's not refcounted */ + struct task_struct *latest_task = NULL; + struct ptr_heap tmp_heap; + struct ptr_heap *heap; + struct timespec latest_time = { 0, 0 }; + + if (scan->heap) { + /* The caller supplied our heap and pre-allocated its memory */ + heap = scan->heap; + heap->gt = &started_after; + } else { + /* We need to allocate our own heap memory */ + heap = &tmp_heap; + retval = heap_init(heap, PAGE_SIZE, GFP_KERNEL, &started_after); + if (retval) + /* cannot allocate the heap */ + return retval; + } + + again: + /* + * Scan tasks in the cgroup, using the scanner's "test_task" callback + * to determine which are of interest, and using the scanner's + * "process_task" callback to process any of them that need an update. + * Since we don't want to hold any locks during the task updates, + * gather tasks to be processed in a heap structure. + * The heap is sorted by descending task start time. + * If the statically-sized heap fills up, we overflow tasks that + * started later, and in future iterations only consider tasks that + * started after the latest task in the previous pass. This + * guarantees forward progress and that we don't miss any tasks. + */ + heap->size = 0; + cgroup_iter_start(scan->cg, &it); + while ((p = cgroup_iter_next(scan->cg, &it))) { + /* + * Only affect tasks that qualify per the caller's callback, + * if he provided one + */ + if (scan->test_task && !scan->test_task(p, scan)) + continue; + /* + * Only process tasks that started after the last task + * we processed + */ + if (!started_after_time(p, &latest_time, latest_task)) + continue; + dropped = heap_insert(heap, p); + if (dropped == NULL) { + /* + * The new task was inserted; the heap wasn't + * previously full + */ + get_task_struct(p); + } else if (dropped != p) { + /* + * The new task was inserted, and pushed out a + * different task + */ + get_task_struct(p); + put_task_struct(dropped); + } + /* + * Else the new task was newer than anything already in + * the heap and wasn't inserted + */ + } + cgroup_iter_end(scan->cg, &it); + + if (heap->size) { + for (i = 0; i < heap->size; i++) { + struct task_struct *p = heap->ptrs[i]; + if (i == 0) { + latest_time = p->start_time; + latest_task = p; + } + /* Process the task per the caller's callback */ + scan->process_task(p, scan); + put_task_struct(p); + } + /* + * If we had to process any tasks at all, scan again + * in case some of them were in the middle of forking + * children that didn't get processed. + * Not the most efficient way to do it, but it avoids + * having to take callback_mutex in the fork path + */ + goto again; + } + if (heap == &tmp_heap) + heap_free(&tmp_heap); + return 0; +} + /* * Stuff for reading the 'tasks' file. * @@ -1761,7 +1955,7 @@ static int pid_array_load(pid_t *pidarray, int npids, struct cgroup *cgrp) while ((tsk = cgroup_iter_next(cgrp, &it))) { if (unlikely(n == npids)) break; - pidarray[n++] = task_pid_nr(tsk); + pidarray[n++] = task_pid_vnr(tsk); } cgroup_iter_end(cgrp, &it); return n; @@ -2126,9 +2320,8 @@ static inline int cgroup_has_css_refs(struct cgroup *cgrp) * matter, since it can only happen if the cgroup * has been deleted and hence no longer needs the * release agent to be called anyway. 
*/ - if (css && atomic_read(&css->refcnt)) { + if (css && atomic_read(&css->refcnt)) return 1; - } } return 0; } @@ -2138,7 +2331,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) struct cgroup *cgrp = dentry->d_fsdata; struct dentry *d; struct cgroup *parent; - struct cgroup_subsys *ss; struct super_block *sb; struct cgroupfs_root *root; @@ -2157,17 +2349,19 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) parent = cgrp->parent; root = cgrp->root; sb = root->sb; + /* + * Call pre_destroy handlers of subsys + */ + cgroup_call_pre_destroy(cgrp); + /* + * Notify subsyses that rmdir() request comes. + */ if (cgroup_has_css_refs(cgrp)) { mutex_unlock(&cgroup_mutex); return -EBUSY; } - for_each_subsys(root, ss) { - if (cgrp->subsys[ss->subsys_id]) - ss->destroy(ss, cgrp); - } - spin_lock(&release_list_lock); set_bit(CGRP_REMOVED, &cgrp->flags); if (!list_empty(&cgrp->release_list)) @@ -2182,15 +2376,11 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry) cgroup_d_remove_dir(d); dput(d); - root->number_of_cgroups--; set_bit(CGRP_RELEASABLE, &parent->flags); check_for_release(parent); mutex_unlock(&cgroup_mutex); - /* Drop the active superblock reference that we took when we - * created the cgroup */ - deactivate_super(sb); return 0; } @@ -2324,7 +2514,7 @@ out: * - Used for /proc/<pid>/cgroup. * - No need to task_lock(tsk) on this tsk->cgroup reference, as it * doesn't really matter if tsk->cgroup changes after we read it, - * and we take cgroup_mutex, keeping attach_task() from changing it + * and we take cgroup_mutex, keeping cgroup_attach_task() from changing it * anyway. No need to check that tsk->cgroup != NULL, thanks to * the_top_cgroup_hack in cgroup_exit(), which sets an exiting tasks * cgroup to top_cgroup. @@ -2435,7 +2625,7 @@ static struct file_operations proc_cgroupstats_operations = { * A pointer to the shared css_set was automatically copied in * fork.c by dup_task_struct(). However, we ignore that copy, since * it was not made under the protection of RCU or cgroup_mutex, so - * might no longer be a valid cgroup pointer. attach_task() might + * might no longer be a valid cgroup pointer. cgroup_attach_task() might * have already changed current->cgroups, allowing the previously * referenced cgroup group to be removed and freed. * @@ -2514,8 +2704,8 @@ void cgroup_post_fork(struct task_struct *child) * attach us to a different cgroup, decrementing the count on * the first cgroup that we never incremented. But in this case, * top_cgroup isn't going away, and either task has PF_EXITING set, - * which wards off any attach_task() attempts, or task is a failed - * fork, never visible to attach_task. + * which wards off any cgroup_attach_task() attempts, or task is a failed + * fork, never visible to cgroup_attach_task. * */ void cgroup_exit(struct task_struct *tsk, int run_callbacks) @@ -2655,7 +2845,7 @@ int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys) } /* All seems fine. 
Finish by moving the task into the new cgroup */ - ret = attach_task(child, tsk); + ret = cgroup_attach_task(child, tsk); mutex_unlock(&cgroup_mutex); out_release: diff --git a/kernel/cpuset.c b/kernel/cpuset.c index cfaf6419d81..67b2bfe2781 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c @@ -38,7 +38,6 @@ #include <linux/mount.h> #include <linux/namei.h> #include <linux/pagemap.h> -#include <linux/prio_heap.h> #include <linux/proc_fs.h> #include <linux/rcupdate.h> #include <linux/sched.h> @@ -56,6 +55,8 @@ #include <asm/atomic.h> #include <linux/mutex.h> #include <linux/kfifo.h> +#include <linux/workqueue.h> +#include <linux/cgroup.h> /* * Tracks how many cpusets are currently defined in system. @@ -64,7 +65,7 @@ */ int number_of_cpusets __read_mostly; -/* Retrieve the cpuset from a cgroup */ +/* Forward declare cgroup structures */ struct cgroup_subsys cpuset_subsys; struct cpuset; @@ -96,6 +97,9 @@ struct cpuset { /* partition number for rebuild_sched_domains() */ int pn; + + /* used for walking a cpuset heirarchy */ + struct list_head stack_list; }; /* Retrieve the cpuset for a cgroup */ @@ -111,7 +115,10 @@ static inline struct cpuset *task_cs(struct task_struct *task) return container_of(task_subsys_state(task, cpuset_subsys_id), struct cpuset, css); } - +struct cpuset_hotplug_scanner { + struct cgroup_scanner scan; + struct cgroup *to; +}; /* bits in struct cpuset flags field */ typedef enum { @@ -160,17 +167,17 @@ static inline int is_spread_slab(const struct cpuset *cs) * number, and avoid having to lock and reload mems_allowed unless * the cpuset they're using changes generation. * - * A single, global generation is needed because attach_task() could + * A single, global generation is needed because cpuset_attach_task() could * reattach a task to a different cpuset, which must not have its * generation numbers aliased with those of that tasks previous cpuset. * * Generations are needed for mems_allowed because one task cannot - * modify anothers memory placement. So we must enable every task, + * modify another's memory placement. So we must enable every task, * on every visit to __alloc_pages(), to efficiently check whether * its current->cpuset->mems_allowed has changed, requiring an update * of its current->mems_allowed. * - * Since cpuset_mems_generation is guarded by manage_mutex, + * Since writes to cpuset_mems_generation are guarded by the cgroup lock * there is no need to mark it atomic. */ static int cpuset_mems_generation; @@ -182,17 +189,20 @@ static struct cpuset top_cpuset = { }; /* - * We have two global cpuset mutexes below. They can nest. - * It is ok to first take manage_mutex, then nest callback_mutex. We also - * require taking task_lock() when dereferencing a tasks cpuset pointer. - * See "The task_lock() exception", at the end of this comment. + * There are two global mutexes guarding cpuset structures. The first + * is the main control groups cgroup_mutex, accessed via + * cgroup_lock()/cgroup_unlock(). The second is the cpuset-specific + * callback_mutex, below. They can nest. It is ok to first take + * cgroup_mutex, then nest callback_mutex. We also require taking + * task_lock() when dereferencing a task's cpuset pointer. See "The + * task_lock() exception", at the end of this comment. * * A task must hold both mutexes to modify cpusets. 
If a task - * holds manage_mutex, then it blocks others wanting that mutex, + * holds cgroup_mutex, then it blocks others wanting that mutex, * ensuring that it is the only task able to also acquire callback_mutex * and be able to modify cpusets. It can perform various checks on * the cpuset structure first, knowing nothing will change. It can - * also allocate memory while just holding manage_mutex. While it is + * also allocate memory while just holding cgroup_mutex. While it is * performing these checks, various callback routines can briefly * acquire callback_mutex to query cpusets. Once it is ready to make * the changes, it takes callback_mutex, blocking everyone else. @@ -208,60 +218,16 @@ static struct cpuset top_cpuset = { * The task_struct fields mems_allowed and mems_generation may only * be accessed in the context of that task, so require no locks. * - * Any task can increment and decrement the count field without lock. - * So in general, code holding manage_mutex or callback_mutex can't rely - * on the count field not changing. However, if the count goes to - * zero, then only attach_task(), which holds both mutexes, can - * increment it again. Because a count of zero means that no tasks - * are currently attached, therefore there is no way a task attached - * to that cpuset can fork (the other way to increment the count). - * So code holding manage_mutex or callback_mutex can safely assume that - * if the count is zero, it will stay zero. Similarly, if a task - * holds manage_mutex or callback_mutex on a cpuset with zero count, it - * knows that the cpuset won't be removed, as cpuset_rmdir() needs - * both of those mutexes. - * * The cpuset_common_file_write handler for operations that modify - * the cpuset hierarchy holds manage_mutex across the entire operation, + * the cpuset hierarchy holds cgroup_mutex across the entire operation, * single threading all such cpuset modifications across the system. * * The cpuset_common_file_read() handlers only hold callback_mutex across * small pieces of code, such as when reading out possibly multi-word * cpumasks and nodemasks. * - * The fork and exit callbacks cpuset_fork() and cpuset_exit(), don't - * (usually) take either mutex. These are the two most performance - * critical pieces of code here. The exception occurs on cpuset_exit(), - * when a task in a notify_on_release cpuset exits. Then manage_mutex - * is taken, and if the cpuset count is zero, a usermode call made - * to /sbin/cpuset_release_agent with the name of the cpuset (path - * relative to the root of cpuset file system) as the argument. - * - * A cpuset can only be deleted if both its 'count' of using tasks - * is zero, and its list of 'children' cpusets is empty. Since all - * tasks in the system use _some_ cpuset, and since there is always at - * least one task in the system (init), therefore, top_cpuset - * always has either children cpusets and/or using tasks. So we don't - * need a special hack to ensure that top_cpuset cannot be deleted. - * - * The above "Tale of Two Semaphores" would be complete, but for: - * - * The task_lock() exception - * - * The need for this exception arises from the action of attach_task(), - * which overwrites one tasks cpuset pointer with another. It does - * so using both mutexes, however there are several performance - * critical places that need to reference task->cpuset without the - * expense of grabbing a system global mutex. 
Therefore except as - * noted below, when dereferencing or, as in attach_task(), modifying - * a tasks cpuset pointer we use task_lock(), which acts on a spinlock - * (task->alloc_lock) already in the task_struct routinely used for - * such matters. - * - * P.S. One more locking exception. RCU is used to guard the - * update of a tasks cpuset pointer by attach_task() and the - * access of task->cpuset->mems_generation via that pointer in - * the routine cpuset_update_task_memory_state(). + * Accessing a task's cpuset should be done in accordance with the + * guidelines for accessing subsystem state in kernel/cgroup.c */ static DEFINE_MUTEX(callback_mutex); @@ -354,15 +320,14 @@ static void guarantee_online_mems(const struct cpuset *cs, nodemask_t *pmask) * Do not call this routine if in_interrupt(). * * Call without callback_mutex or task_lock() held. May be - * called with or without manage_mutex held. Thanks in part to - * 'the_top_cpuset_hack', the tasks cpuset pointer will never + * called with or without cgroup_mutex held. Thanks in part to + * 'the_top_cpuset_hack', the task's cpuset pointer will never * be NULL. This routine also might acquire callback_mutex and * current->mm->mmap_sem during call. * * Reading current->cpuset->mems_generation doesn't need task_lock * to guard the current->cpuset derefence, because it is guarded - * from concurrent freeing of current->cpuset by attach_task(), - * using RCU. + * from concurrent freeing of current->cpuset using RCU. * * The rcu_dereference() is technically probably not needed, * as I don't actually mind if I see a new cpuset pointer but @@ -424,7 +389,7 @@ void cpuset_update_task_memory_state(void) * * One cpuset is a subset of another if all its allowed CPUs and * Memory Nodes are a subset of the other, and its exclusive flags - * are only set if the other's are set. Call holding manage_mutex. + * are only set if the other's are set. Call holding cgroup_mutex. */ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) @@ -442,7 +407,7 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q) * If we replaced the flag and mask values of the current cpuset * (cur) with those values in the trial cpuset (trial), would * our various subset and exclusive rules still be valid? Presumes - * manage_mutex held. + * cgroup_mutex held. * * 'cur' is the address of an actual, in-use cpuset. Operations * such as list traversal that depend on the actual address of the @@ -476,7 +441,10 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial) if (!is_cpuset_subset(trial, par)) return -EACCES; - /* If either I or some sibling (!= me) is exclusive, we can't overlap */ + /* + * If either I or some sibling (!= me) is exclusive, we can't + * overlap + */ list_for_each_entry(cont, &par->css.cgroup->children, sibling) { c = cgroup_cs(cont); if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) && @@ -732,22 +700,50 @@ static inline int started_after(void *p1, void *p2) return started_after_time(t1, &t2->start_time, t2); } -/* - * Call with manage_mutex held. May take callback_mutex during call. +/** + * cpuset_test_cpumask - test a task's cpus_allowed versus its cpuset's + * @tsk: task to test + * @scan: struct cgroup_scanner contained in its struct cpuset_hotplug_scanner + * + * Call with cgroup_mutex held. May take callback_mutex during call. + * Called for each task in a cgroup by cgroup_scan_tasks(). 
+ * Return nonzero if this tasks's cpus_allowed mask should be changed (in other + * words, if its mask is not equal to its cpuset's mask). + */ +int cpuset_test_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) +{ + return !cpus_equal(tsk->cpus_allowed, + (cgroup_cs(scan->cg))->cpus_allowed); +} + +/** + * cpuset_change_cpumask - make a task's cpus_allowed the same as its cpuset's + * @tsk: task to test + * @scan: struct cgroup_scanner containing the cgroup of the task + * + * Called by cgroup_scan_tasks() for each task in a cgroup whose + * cpus_allowed mask needs to be changed. + * + * We don't need to re-check for the cgroup/cpuset membership, since we're + * holding cgroup_lock() at this point. */ +void cpuset_change_cpumask(struct task_struct *tsk, struct cgroup_scanner *scan) +{ + set_cpus_allowed(tsk, (cgroup_cs(scan->cg))->cpus_allowed); +} +/** + * update_cpumask - update the cpus_allowed mask of a cpuset and all tasks in it + * @cs: the cpuset to consider + * @buf: buffer of cpu numbers written to this cpuset + */ static int update_cpumask(struct cpuset *cs, char *buf) { struct cpuset trialcs; - int retval, i; - int is_load_balanced; - struct cgroup_iter it; - struct cgroup *cgrp = cs->css.cgroup; - struct task_struct *p, *dropped; - /* Never dereference latest_task, since it's not refcounted */ - struct task_struct *latest_task = NULL; + struct cgroup_scanner scan; struct ptr_heap heap; - struct timespec latest_time = { 0, 0 }; + int retval; + int is_load_balanced; /* top_cpuset.cpus_allowed tracks cpu_online_map; it's read-only */ if (cs == &top_cpuset) @@ -756,7 +752,7 @@ static int update_cpumask(struct cpuset *cs, char *buf) trialcs = *cs; /* - * An empty cpus_allowed is ok iff there are no tasks in the cpuset. + * An empty cpus_allowed is ok only if the cpuset has no tasks. * Since cpulist_parse() fails on an empty mask, we special case * that parsing. The validate_change() call ensures that cpusets * with tasks have cpus. @@ -777,6 +773,7 @@ static int update_cpumask(struct cpuset *cs, char *buf) /* Nothing to do if the cpus didn't change */ if (cpus_equal(cs->cpus_allowed, trialcs.cpus_allowed)) return 0; + retval = heap_init(&heap, PAGE_SIZE, GFP_KERNEL, &started_after); if (retval) return retval; @@ -787,62 +784,19 @@ static int update_cpumask(struct cpuset *cs, char *buf) cs->cpus_allowed = trialcs.cpus_allowed; mutex_unlock(&callback_mutex); - again: /* * Scan tasks in the cpuset, and update the cpumasks of any - * that need an update. Since we can't call set_cpus_allowed() - * while holding tasklist_lock, gather tasks to be processed - * in a heap structure. If the statically-sized heap fills up, - * overflow tasks that started later, and in future iterations - * only consider tasks that started after the latest task in - * the previous pass. This guarantees forward progress and - * that we don't miss any tasks + * that need an update. 
*/ - heap.size = 0; - cgroup_iter_start(cgrp, &it); - while ((p = cgroup_iter_next(cgrp, &it))) { - /* Only affect tasks that don't have the right cpus_allowed */ - if (cpus_equal(p->cpus_allowed, cs->cpus_allowed)) - continue; - /* - * Only process tasks that started after the last task - * we processed - */ - if (!started_after_time(p, &latest_time, latest_task)) - continue; - dropped = heap_insert(&heap, p); - if (dropped == NULL) { - get_task_struct(p); - } else if (dropped != p) { - get_task_struct(p); - put_task_struct(dropped); - } - } - cgroup_iter_end(cgrp, &it); - if (heap.size) { - for (i = 0; i < heap.size; i++) { - struct task_struct *p = heap.ptrs[i]; - if (i == 0) { - latest_time = p->start_time; - latest_task = p; - } - set_cpus_allowed(p, cs->cpus_allowed); - put_task_struct(p); - } - /* - * If we had to process any tasks at all, scan again - * in case some of them were in the middle of forking - * children that didn't notice the new cpumask - * restriction. Not the most efficient way to do it, - * but it avoids having to take callback_mutex in the - * fork path - */ - goto again; - } + scan.cg = cs->css.cgroup; + scan.test_task = cpuset_test_cpumask; + scan.process_task = cpuset_change_cpumask; + scan.heap = &heap; + cgroup_scan_tasks(&scan); heap_free(&heap); + if (is_load_balanced) rebuild_sched_domains(); - return 0; } @@ -854,11 +808,11 @@ static int update_cpumask(struct cpuset *cs, char *buf) * Temporarilly set tasks mems_allowed to target nodes of migration, * so that the migration code can allocate pages on these nodes. * - * Call holding manage_mutex, so our current->cpuset won't change - * during this call, as manage_mutex holds off any attach_task() + * Call holding cgroup_mutex, so current's cpuset won't change + * during this call, as manage_mutex holds off any cpuset_attach() * calls. Therefore we don't need to take task_lock around the * call to guarantee_online_mems(), as we know no one is changing - * our tasks cpuset. + * our task's cpuset. * * Hold callback_mutex around the two modifications of our tasks * mems_allowed to synchronize with cpuset_mems_allowed(). @@ -903,7 +857,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from, * the cpuset is marked 'memory_migrate', migrate the tasks * pages to the new memory. * - * Call with manage_mutex held. May take callback_mutex during call. + * Call with cgroup_mutex held. May take callback_mutex during call. * Will take tasklist_lock, scan tasklist for tasks in cpuset cs, * lock each |
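
For illustration only, not part of the patch: a minimal sketch of how a controller might drive the cgroup_scan_tasks() interface introduced above, modeled on the cpuset conversion in this diff. The controller, the callback names (my_test_nice, my_set_nice) and the renice policy are hypothetical; the struct cgroup_scanner fields, the NULL-heap convention and the locking expectations are taken from the patch itself.

#include <linux/cgroup.h>
#include <linux/sched.h>

/*
 * Hypothetical caller of cgroup_scan_tasks(). test_task() decides which
 * tasks are of interest; process_task() is then invoked for each of them
 * without css_set_lock held, so it may perform operations that sleep.
 */
static int my_test_nice(struct task_struct *p, struct cgroup_scanner *scan)
{
	/* Only pick tasks whose nice value still needs changing. */
	return task_nice(p) != 10;
}

static void my_set_nice(struct task_struct *p, struct cgroup_scanner *scan)
{
	set_user_nice(p, 10);
}

/* Call with cgroup_mutex held, as the cpuset code above does. */
static int my_update_all_tasks(struct cgroup *cgrp)
{
	struct cgroup_scanner scan;

	scan.cg = cgrp;
	scan.test_task = my_test_nice;		/* may be NULL: select all tasks */
	scan.process_task = my_set_nice;
	scan.heap = NULL;			/* let cgroup_scan_tasks() use a
						 * temporary heap; an allocation
						 * failure is returned to us */

	return cgroup_scan_tasks(&scan);
}

Passing a pre-allocated heap instead, as update_cpumask() does with heap_init(&heap, PAGE_SIZE, GFP_KERNEL, ...), avoids that per-call allocation; either way cgroup_scan_tasks() overwrites the heap's "gt" comparator with its own start-time ordering.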