Diffstat (limited to 'kernel')
-rw-r--r--  kernel/cpuset.c | 1188
-rw-r--r--  kernel/exit.c   |    2
-rw-r--r--  kernel/fork.c   |    3
3 files changed, 212 insertions(+), 981 deletions(-)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index a40a2c4384b..1133062395e 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -5,6 +5,7 @@
  *
  *  Copyright (C) 2003 BULL SA.
  *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
+ *  Copyright (C) 2006 Google, Inc
  *
  *  Portions derived from Patrick Mochel's sysfs code.
  *  sysfs is Copyright (c) 2001-3 Patrick Mochel
@@ -12,6 +13,7 @@
  *  2003-10-10 Written by Simon Derr.
  *  2003-10-22 Updates by Stephen Hemminger.
  *  2004 May-July Rework by Paul Jackson.
+ *  2006 Rework by Paul Menage to use generic cgroups
  *
  *  This file is subject to the terms and conditions of the GNU General Public
  *  License.  See the file COPYING in the main directory of the Linux
@@ -53,8 +55,6 @@
 #include <asm/atomic.h>
 #include <linux/mutex.h>
 
-#define CPUSET_SUPER_MAGIC		0x27e0eb
-
 /*
  * Tracks how many cpusets are currently defined in system.
  * When there is only one cpuset (the root cpuset) we can
@@ -62,6 +62,10 @@
  */
 int number_of_cpusets __read_mostly;
 
+/* Retrieve the cpuset from a cgroup */
+struct cgroup_subsys cpuset_subsys;
+struct cpuset;
+
 /* See "Frequency meter" comments, below. */
 
 struct fmeter {
@@ -72,24 +76,13 @@ struct fmeter {
 };
 
 struct cpuset {
+	struct cgroup_subsys_state css;
+
 	unsigned long flags;		/* "unsigned long" so bitops work */
 	cpumask_t cpus_allowed;		/* CPUs allowed to tasks in cpuset */
 	nodemask_t mems_allowed;	/* Memory Nodes allowed to tasks */
 
-	/*
-	 * Count is atomic so can incr (fork) or decr (exit) without a lock.
-	 */
-	atomic_t count;			/* count tasks using this cpuset */
-
-	/*
-	 * We link our 'sibling' struct into our parents 'children'.
-	 * Our children link their 'sibling' into our 'children'.
-	 */
-	struct list_head sibling;	/* my parents children */
-	struct list_head children;	/* my children */
-
-	struct cpuset *parent;		/* my parent */
-	struct dentry *dentry;		/* cpuset fs entry */
 
 	/*
 	 * Copy of global cpuset_mems_generation as of the most
@@ -100,13 +93,26 @@ struct cpuset {
 	struct fmeter fmeter;		/* memory_pressure filter */
 };
 
+/* Retrieve the cpuset for a cgroup */
+static inline struct cpuset *cgroup_cs(struct cgroup *cont)
+{
+	return container_of(cgroup_subsys_state(cont, cpuset_subsys_id),
+			    struct cpuset, css);
+}
+
+/* Retrieve the cpuset for a task */
+static inline struct cpuset *task_cs(struct task_struct *task)
+{
+	return container_of(task_subsys_state(task, cpuset_subsys_id),
+			    struct cpuset, css);
+}
+
+
 /* bits in struct cpuset flags field */
 typedef enum {
 	CS_CPU_EXCLUSIVE,
 	CS_MEM_EXCLUSIVE,
 	CS_MEMORY_MIGRATE,
-	CS_REMOVED,
-	CS_NOTIFY_ON_RELEASE,
 	CS_SPREAD_PAGE,
 	CS_SPREAD_SLAB,
 } cpuset_flagbits_t;
@@ -122,16 +128,6 @@ static inline int is_mem_exclusive(const struct cpuset *cs)
 	return test_bit(CS_MEM_EXCLUSIVE, &cs->flags);
 }
 
-static inline int is_removed(const struct cpuset *cs)
-{
-	return test_bit(CS_REMOVED, &cs->flags);
-}
-
-static inline int notify_on_release(const struct cpuset *cs)
-{
-	return test_bit(CS_NOTIFY_ON_RELEASE, &cs->flags);
-}
-
 static inline int is_memory_migrate(const struct cpuset *cs)
 {
 	return test_bit(CS_MEMORY_MIGRATE, &cs->flags);
@@ -172,14 +168,8 @@ static struct cpuset top_cpuset = {
 	.flags = ((1 << CS_CPU_EXCLUSIVE) | (1 << CS_MEM_EXCLUSIVE)),
 	.cpus_allowed = CPU_MASK_ALL,
 	.mems_allowed = NODE_MASK_ALL,
-	.count = ATOMIC_INIT(0),
-	.sibling = LIST_HEAD_INIT(top_cpuset.sibling),
-	.children = LIST_HEAD_INIT(top_cpuset.children),
 };
 
-static struct vfsmount *cpuset_mount;
-static struct super_block *cpuset_sb;
-
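The two container_of() accessors above are the heart of the conversion: a cpuset is no longer found via tsk->cpuset or dentry->d_fsdata, but by embedding a struct cgroup_subsys_state and walking back from it. A minimal sketch of that pattern, under the assumption that the reader wants it spelled out; example_state and css_to_example are illustrative names, not part of the patch:

	#include <linux/cgroup.h>	/* struct cgroup_subsys_state, container_of */

	/* Hypothetical subsystem state; mirrors how struct cpuset embeds css. */
	struct example_state {
		struct cgroup_subsys_state css;
		unsigned long flags;
	};

	/* Map a css handed out by the cgroup core back to the enclosing
	 * state, exactly as cgroup_cs() and task_cs() do above. */
	static inline struct example_state *
	css_to_example(struct cgroup_subsys_state *css)
	{
		return container_of(css, struct example_state, css);
	}
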
 /*
  * We have two global cpuset mutexes below.  They can nest.
  * It is ok to first take manage_mutex, then nest callback_mutex.  We also
@@ -263,297 +253,33 @@ static struct super_block *cpuset_sb;
  * the routine cpuset_update_task_memory_state().
  */
 
-static DEFINE_MUTEX(manage_mutex);
 static DEFINE_MUTEX(callback_mutex);
 
-/*
- * A couple of forward declarations required, due to cyclic reference loop:
- *  cpuset_mkdir -> cpuset_create -> cpuset_populate_dir -> cpuset_add_file
- *  -> cpuset_create_file -> cpuset_dir_inode_operations -> cpuset_mkdir.
- */
-
-static int cpuset_mkdir(struct inode *dir, struct dentry *dentry, int mode);
-static int cpuset_rmdir(struct inode *unused_dir, struct dentry *dentry);
-
-static struct backing_dev_info cpuset_backing_dev_info = {
-	.ra_pages = 0,		/* No readahead */
-	.capabilities	= BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK,
-};
-
-static struct inode *cpuset_new_inode(mode_t mode)
-{
-	struct inode *inode = new_inode(cpuset_sb);
-
-	if (inode) {
-		inode->i_mode = mode;
-		inode->i_uid = current->fsuid;
-		inode->i_gid = current->fsgid;
-		inode->i_blocks = 0;
-		inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
-		inode->i_mapping->backing_dev_info = &cpuset_backing_dev_info;
-	}
-	return inode;
-}
-
-static void cpuset_diput(struct dentry *dentry, struct inode *inode)
-{
-	/* is dentry a directory ? if so, kfree() associated cpuset */
-	if (S_ISDIR(inode->i_mode)) {
-		struct cpuset *cs = dentry->d_fsdata;
-		BUG_ON(!(is_removed(cs)));
-		kfree(cs);
-	}
-	iput(inode);
-}
-
-static struct dentry_operations cpuset_dops = {
-	.d_iput = cpuset_diput,
-};
-
-static struct dentry *cpuset_get_dentry(struct dentry *parent, const char *name)
-{
-	struct dentry *d = lookup_one_len(name, parent, strlen(name));
-	if (!IS_ERR(d))
-		d->d_op = &cpuset_dops;
-	return d;
-}
-
-static void remove_dir(struct dentry *d)
-{
-	struct dentry *parent = dget(d->d_parent);
-
-	d_delete(d);
-	simple_rmdir(parent->d_inode, d);
-	dput(parent);
-}
-
-/*
- * NOTE : the dentry must have been dget()'ed
- */
-static void cpuset_d_remove_dir(struct dentry *dentry)
-{
-	struct list_head *node;
-
-	spin_lock(&dcache_lock);
-	node = dentry->d_subdirs.next;
-	while (node != &dentry->d_subdirs) {
-		struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
-		list_del_init(node);
-		if (d->d_inode) {
-			d = dget_locked(d);
-			spin_unlock(&dcache_lock);
-			d_delete(d);
-			simple_unlink(dentry->d_inode, d);
-			dput(d);
-			spin_lock(&dcache_lock);
-		}
-		node = dentry->d_subdirs.next;
-	}
-	list_del_init(&dentry->d_u.d_child);
-	spin_unlock(&dcache_lock);
-	remove_dir(dentry);
-}
-
-static struct super_operations cpuset_ops = {
-	.statfs = simple_statfs,
-	.drop_inode = generic_delete_inode,
-};
-
-static int cpuset_fill_super(struct super_block *sb, void *unused_data,
-							int unused_silent)
-{
-	struct inode *inode;
-	struct dentry *root;
-
-	sb->s_blocksize = PAGE_CACHE_SIZE;
-	sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
-	sb->s_magic = CPUSET_SUPER_MAGIC;
-	sb->s_op = &cpuset_ops;
-	cpuset_sb = sb;
-
-	inode = cpuset_new_inode(S_IFDIR | S_IRUGO | S_IXUGO | S_IWUSR);
-	if (inode) {
-		inode->i_op = &simple_dir_inode_operations;
-		inode->i_fop = &simple_dir_operations;
-		/* directories start off with i_nlink == 2 (for "." entry) */
-		inc_nlink(inode);
-	} else {
-		return -ENOMEM;
-	}
-
-	root = d_alloc_root(inode);
-	if (!root) {
-		iput(inode);
-		return -ENOMEM;
-	}
-	sb->s_root = root;
-	return 0;
-}
-
+/* This is ugly, but preserves the userspace API for existing cpuset
+ * users.  If someone tries to mount the "cpuset" filesystem, we
+ * silently switch it to mount "cgroup" instead */
 static int cpuset_get_sb(struct file_system_type *fs_type,
 			 int flags, const char *unused_dev_name,
 			 void *data, struct vfsmount *mnt)
 {
-	return get_sb_single(fs_type, flags, data, cpuset_fill_super, mnt);
+	struct file_system_type *cgroup_fs = get_fs_type("cgroup");
+	int ret = -ENODEV;
+	if (cgroup_fs) {
+		char mountopts[] =
+			"cpuset,noprefix,"
+			"release_agent=/sbin/cpuset_release_agent";
+		ret = cgroup_fs->get_sb(cgroup_fs, flags,
+					unused_dev_name, mountopts, mnt);
+		put_filesystem(cgroup_fs);
+	}
+	return ret;
 }
 
 static struct file_system_type cpuset_fs_type = {
	.name = "cpuset",
 	.get_sb = cpuset_get_sb,
-	.kill_sb = kill_litter_super,
 };
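Seen from userspace, the effect of this shim is that a legacy "cpuset" mount yields a cgroup mount with the cpuset subsystem bound, no "cpuset." file-name prefix, and the old release agent path. A hedged userspace illustration of the equivalent explicit mount; the /dev/cpuset target is only the conventional mount point, not something the patch mandates:

	#include <sys/mount.h>

	/* Sketch: the cgroup mount that a legacy "mount -t cpuset"
	 * now quietly resolves to inside the kernel. */
	static int mount_cpuset_compat(void)
	{
		return mount("cpuset", "/dev/cpuset", "cgroup", 0,
			     "cpuset,noprefix,"
			     "release_agent=/sbin/cpuset_release_agent");
	}
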
 
-/* struct cftype:
- *
- * The files in the cpuset filesystem mostly have a very simple read/write
- * handling, some common function will take care of it. Nevertheless some cases
- * (read tasks) are special and therefore I define this structure for every
- * kind of file.
- *
- *
- * When reading/writing to a file:
- *	- the cpuset to use in file->f_path.dentry->d_parent->d_fsdata
- *	- the 'cftype' of the file is file->f_path.dentry->d_fsdata
- */
-
-struct cftype {
-	char *name;
-	int private;
-	int (*open) (struct inode *inode, struct file *file);
-	ssize_t (*read) (struct file *file, char __user *buf, size_t nbytes,
-							loff_t *ppos);
-	int (*write) (struct file *file, const char __user *buf, size_t nbytes,
-							loff_t *ppos);
-	int (*release) (struct inode *inode, struct file *file);
-};
-
-static inline struct cpuset *__d_cs(struct dentry *dentry)
-{
-	return dentry->d_fsdata;
-}
-
-static inline struct cftype *__d_cft(struct dentry *dentry)
-{
-	return dentry->d_fsdata;
-}
-
-/*
- * Call with manage_mutex held.  Writes path of cpuset into buf.
- * Returns 0 on success, -errno on error.
- */
-
-static int cpuset_path(const struct cpuset *cs, char *buf, int buflen)
-{
-	char *start;
-
-	start = buf + buflen;
-
-	*--start = '\0';
-	for (;;) {
-		int len = cs->dentry->d_name.len;
-		if ((start -= len) < buf)
-			return -ENAMETOOLONG;
-		memcpy(start, cs->dentry->d_name.name, len);
-		cs = cs->parent;
-		if (!cs)
-			break;
-		if (!cs->parent)
-			continue;
-		if (--start < buf)
-			return -ENAMETOOLONG;
-		*start = '/';
-	}
-	memmove(buf, start, buf + buflen - start);
-	return 0;
-}
-
-/*
- * Notify userspace when a cpuset is released, by running
- * /sbin/cpuset_release_agent with the name of the cpuset (path
- * relative to the root of cpuset file system) as the argument.
- *
- * Most likely, this user command will try to rmdir this cpuset.
- *
- * This races with the possibility that some other task will be
- * attached to this cpuset before it is removed, or that some other
- * user task will 'mkdir' a child cpuset of this cpuset.  That's ok.
- * The presumed 'rmdir' will fail quietly if this cpuset is no longer
- * unused, and this cpuset will be reprieved from its death sentence,
- * to continue to serve a useful existence.  Next time it's released,
- * we will get notified again, if it still has 'notify_on_release' set.
- *
- * The final arg to call_usermodehelper() is 0, which means don't
- * wait.  The separate /sbin/cpuset_release_agent task is forked by
- * call_usermodehelper(), then control in this thread returns here,
- * without waiting for the release agent task.  We don't bother to
- * wait because the caller of this routine has no use for the exit
- * status of the /sbin/cpuset_release_agent task, so no sense holding
- * our caller up for that.
- *
- * When we had only one cpuset mutex, we had to call this
- * without holding it, to avoid deadlock when call_usermodehelper()
- * allocated memory.  With two locks, we could now call this while
- * holding manage_mutex, but we still don't, so as to minimize
- * the time manage_mutex is held.
- */
-
-static void cpuset_release_agent(const char *pathbuf)
-{
-	char *argv[3], *envp[3];
-	int i;
-
-	if (!pathbuf)
-		return;
-
-	i = 0;
-	argv[i++] = "/sbin/cpuset_release_agent";
-	argv[i++] = (char *)pathbuf;
-	argv[i] = NULL;
-
-	i = 0;
-	/* minimal command environment */
-	envp[i++] = "HOME=/";
-	envp[i++] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
-	envp[i] = NULL;
-
-	call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
-	kfree(pathbuf);
-}
-
-/*
- * Either cs->count of using tasks transitioned to zero, or the
- * cs->children list of child cpusets just became empty.  If this
- * cs is notify_on_release() and now both the user count is zero and
- * the list of children is empty, prepare cpuset path in a kmalloc'd
- * buffer, to be returned via ppathbuf, so that the caller can invoke
- * cpuset_release_agent() with it later on, once manage_mutex is dropped.
- * Call here with manage_mutex held.
- *
- * This check_for_release() routine is responsible for kmalloc'ing
- * pathbuf.  The above cpuset_release_agent() is responsible for
- * kfree'ing pathbuf.  The caller of these routines is responsible
- * for providing a pathbuf pointer, initialized to NULL, then
- * calling check_for_release() with manage_mutex held and the address
- * of the pathbuf pointer, then dropping manage_mutex, then calling
- * cpuset_release_agent() with pathbuf, as set by check_for_release().
- */
-
-static void check_for_release(struct cpuset *cs, char **ppathbuf)
-{
-	if (notify_on_release(cs) && atomic_read(&cs->count) == 0 &&
-	    list_empty(&cs->children)) {
-		char *buf;
-
-		buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-		if (!buf)
-			return;
-		if (cpuset_path(cs, buf, PAGE_SIZE) < 0)
-			kfree(buf);
-		else
-			*ppathbuf = buf;
-	}
-}
-
 /*
  * Return in *pmask the portion of a cpusets's cpus_allowed that
  * are online.  If none are online, walk up the cpuset hierarchy
@@ -653,20 +379,19 @@ void cpuset_update_task_memory_state(void)
 	struct task_struct *tsk = current;
 	struct cpuset *cs;
 
-	if (tsk->cpuset == &top_cpuset) {
+	if (task_cs(tsk) == &top_cpuset) {
 		/* Don't need rcu for top_cpuset.  It's never freed. */
 		my_cpusets_mem_gen = top_cpuset.mems_generation;
 	} else {
 		rcu_read_lock();
-		cs = rcu_dereference(tsk->cpuset);
-		my_cpusets_mem_gen = cs->mems_generation;
+		my_cpusets_mem_gen = task_cs(current)->mems_generation;
 		rcu_read_unlock();
 	}
 
 	if (my_cpusets_mem_gen != tsk->cpuset_mems_generation) {
 		mutex_lock(&callback_mutex);
 		task_lock(tsk);
-		cs = tsk->cpuset;	/* Maybe changed when task not locked */
+		cs = task_cs(tsk); /* Maybe changed when task not locked */
 		guarantee_online_mems(cs, &tsk->mems_allowed);
 		tsk->cpuset_mems_generation = cs->mems_generation;
 		if (is_spread_page(cs))
@@ -721,11 +446,12 @@ static int is_cpuset_subset(const struct cpuset *p, const struct cpuset *q)
 
 static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 {
+	struct cgroup *cont;
 	struct cpuset *c, *par;
 
 	/* Each of our child cpusets must be a subset of us */
-	list_for_each_entry(c, &cur->children, sibling) {
-		if (!is_cpuset_subset(c, trial))
+	list_for_each_entry(cont, &cur->css.cgroup->children, sibling) {
+		if (!is_cpuset_subset(cgroup_cs(cont), trial))
 			return -EBUSY;
 	}
 
@@ -740,7 +466,8 @@ static int validate_change(const struct cpuset *cur, const struct cpuset *trial)
 		return -EACCES;
 
 	/* If either I or some sibling (!= me) is exclusive, we can't overlap */
-	list_for_each_entry(c, &par->children, sibling) {
+	list_for_each_entry(cont, &par->css.cgroup->children, sibling) {
+		c = cgroup_cs(cont);
 		if ((is_cpu_exclusive(trial) || is_cpu_exclusive(c)) &&
 		    c != cur &&
 		    cpus_intersects(trial->cpus_allowed, c->cpus_allowed))
@@ -783,7 +510,8 @@ static int update_cpumask(struct cpuset *cs, char *buf)
 	}
 	cpus_and(trialcs.cpus_allowed, trialcs.cpus_allowed, cpu_online_map);
 	/* cpus_allowed cannot be empty for a cpuset with attached tasks. */
-	if (atomic_read(&cs->count) && cpus_empty(trialcs.cpus_allowed))
+	if (cgroup_task_count(cs->css.cgroup) &&
+	    cpus_empty(trialcs.cpus_allowed))
 		return -ENOSPC;
 	retval = validate_change(cs, &trialcs);
 	if (retval < 0)
@@ -839,7 +567,7 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
 	do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
 
 	mutex_lock(&callback_mutex);
-	guarantee_online_mems(tsk->cpuset, &tsk->mems_allowed);
+	guarantee_online_mems(task_cs(tsk),&tsk->mems_allowed);
 	mutex_unlock(&callback_mutex);
 }
 
@@ -857,16 +585,19 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
  * their mempolicies to the cpusets new mems_allowed.
  */
 
+static void *cpuset_being_rebound;
+
 static int update_nodemask(struct cpuset *cs, char *buf)
 {
 	struct cpuset trialcs;
 	nodemask_t oldmem;
-	struct task_struct *g, *p;
+	struct task_struct *p;
 	struct mm_struct **mmarray;
 	int i, n, ntasks;
 	int migrate;
 	int fudge;
 	int retval;
+	struct cgroup_iter it;
 
 	/*
	 * top_cpuset.mems_allowed tracks node_stats[N_HIGH_MEMORY];
@@ -909,7 +640,8 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 		goto done;
 	}
 	/* mems_allowed cannot be empty for a cpuset with attached tasks. */
-	if (atomic_read(&cs->count) && nodes_empty(trialcs.mems_allowed)) {
+	if (cgroup_task_count(cs->css.cgroup) &&
+	    nodes_empty(trialcs.mems_allowed)) {
 		retval = -ENOSPC;
 		goto done;
 	}
@@ -922,7 +654,7 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	cs->mems_generation = cpuset_mems_generation++;
 	mutex_unlock(&callback_mutex);
 
-	set_cpuset_being_rebound(cs);		/* causes mpol_copy() rebind */
+	cpuset_being_rebound = cs;		/* causes mpol_copy() rebind */
 
 	fudge = 10;				/* spare mmarray[] slots */
 	fudge += cpus_weight(cs->cpus_allowed);	/* imagine one fork-bomb/cpu */
@@ -936,13 +668,13 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	 * enough mmarray[] w/o using GFP_ATOMIC.
 	 */
 	while (1) {
-		ntasks = atomic_read(&cs->count);	/* guess */
+		ntasks = cgroup_task_count(cs->css.cgroup);  /* guess */
 		ntasks += fudge;
 		mmarray = kmalloc(ntasks * sizeof(*mmarray), GFP_KERNEL);
 		if (!mmarray)
 			goto done;
 		read_lock(&tasklist_lock);		/* block fork */
-		if (atomic_read(&cs->count) <= ntasks)
+		if (cgroup_task_count(cs->css.cgroup) <= ntasks)
 			break;				/* got enough */
 		read_unlock(&tasklist_lock);		/* try again */
 		kfree(mmarray);
@@ -951,21 +683,21 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 	n = 0;
 
 	/* Load up mmarray[] with mm reference for each task in cpuset. */
-	do_each_thread(g, p) {
+	cgroup_iter_start(cs->css.cgroup, &it);
+	while ((p = cgroup_iter_next(cs->css.cgroup, &it))) {
 		struct mm_struct *mm;
 
 		if (n >= ntasks) {
 			printk(KERN_WARNING
 				"Cpuset mempolicy rebind incomplete.\n");
-			continue;
+			break;
 		}
-		if (p->cpuset != cs)
-			continue;
 		mm = get_task_mm(p);
 		if (!mm)
 			continue;
 		mmarray[n++] = mm;
-	} while_each_thread(g, p);
+	}
+	cgroup_iter_end(cs->css.cgroup, &it);
 	read_unlock(&tasklist_lock);
 
 	/*
@@ -993,12 +725,17 @@ static int update_nodemask(struct cpuset *cs, char *buf)
 
 	/* We're done rebinding vma's to this cpusets new mems_allowed. */
 	kfree(mmarray);
-	set_cpuset_being_rebound(NULL);
+	cpuset_being_rebound = NULL;
 	retval = 0;
 done:
 	return retval;
 }
 
+int current_cpuset_is_being_rebound(void)
+{
+	return task_cs(current) == cpuset_being_rebound;
+}
+
 /*
  * Call with manage_mutex held.
  */
@@ -1145,85 +882,34 @@ static int fmeter_getrate(struct fmeter *fmp)
 	return val;
 }
 
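Worth pausing on the update_nodemask() hunk above: the system-wide do_each_thread() scan is replaced by the cgroup core's per-group task iterator, which visits only tasks attached to the given cgroup. A minimal sketch of the idiom; cgrp and visit_task() are placeholder names, not part of the patch:

	#include <linux/cgroup.h>	/* cgroup_iter_start/next/end */
	#include <linux/sched.h>	/* struct task_struct */

	/* Sketch: walk the tasks of one cgroup, as update_nodemask()
	 * now does instead of scanning every thread in the system. */
	static void for_each_cgroup_task(struct cgroup *cgrp,
					 void (*visit_task)(struct task_struct *))
	{
		struct cgroup_iter it;
		struct task_struct *p;

		cgroup_iter_start(cgrp, &it);
		while ((p = cgroup_iter_next(cgrp, &it)))
			visit_task(p);		/* hypothetical per-task work */
		cgroup_iter_end(cgrp, &it);
	}
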
-/*
- * Attack task specified by pid in 'pidbuf' to cpuset 'cs', possibly
- * writing the path of the old cpuset in 'ppathbuf' if it needs to be
- * notified on release.
- *
- * Call holding manage_mutex.  May take callback_mutex and task_lock of
- * the task 'pid' during call.
- */
-
-static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
+static int cpuset_can_attach(struct cgroup_subsys *ss,
+			     struct cgroup *cont, struct task_struct *tsk)
 {
-	pid_t pid;
-	struct task_struct *tsk;
-	struct cpuset *oldcs;
-	cpumask_t cpus;
-	nodemask_t from, to;
-	struct mm_struct *mm;
-	int retval;
+	struct cpuset *cs = cgroup_cs(cont);
 
-	if (sscanf(pidbuf, "%d", &pid) != 1)
-		return -EIO;
 	if (cpus_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
 		return -ENOSPC;
 
-	if (pid) {
-		read_lock(&tasklist_lock);
-
-		tsk = find_task_by_pid(pid);
-		if (!tsk || tsk->flags & PF_EXITING) {
-			read_unlock(&tasklist_lock);
-			return -ESRCH;
-		}
-
-		get_task_struct(tsk);
-		read_unlock(&tasklist_lock);
-
-		if ((current->euid) && (current->euid != tsk->uid)
-		    && (current->euid != tsk->suid)) {
-			put_task_struct(tsk);
-			return -EACCES;
-		}
-	} else {
-		tsk = current;
-		get_task_struct(tsk);
-	}
+	return security_task_setscheduler(tsk, 0, NULL);
+}
 
-	retval = security_task_setscheduler(tsk, 0, NULL);
-	if (retval) {
-		put_task_struct(tsk);
-		return retval;
-	}
+static void cpuset_attach(struct cgroup_subsys *ss,
+			  struct cgroup *cont, struct cgroup *oldcont,
+			  struct task_struct *tsk)
+{
+	cpumask_t cpus;
+	nodemask_t from, to;
+	struct mm_struct *mm;
+	struct cpuset *cs = cgroup_cs(cont);
+	struct cpuset *oldcs = cgroup_cs(oldcont);
 
 	mutex_lock(&callback_mutex);
-
-	task_lock(tsk);
-	oldcs = tsk->cpuset;
-	/*
-	 * After getting 'oldcs' cpuset ptr, be sure still not exiting.
-	 * If 'oldcs' might be the top_cpuset due to the_top_cpuset_hack
-	 * then fail this attach_task(), to avoid breaking top_cpuset.count.
-	 */
-	if (tsk->flags & PF_EXITING) {
-		task_unlock(tsk);
-		mutex_unlock(&callback_mutex);
-		put_task_struct(tsk);
-		return -ESRCH;
-	}
-	atomic_inc(&cs->count);
-	rcu_assign_pointer(tsk->cpuset, cs);
-	task_unlock(tsk);
-
 	guarantee_online_cpus(cs, &cpus);
 	set_cpus_allowed(tsk, cpus);
+	mutex_unlock(&callback_mutex);
 
 	from = oldcs->mems_allowed;
 	to = cs->mems_allowed;
-
-	mutex_unlock(&callback_mutex);
-
 	mm = get_task_mm(tsk);
 	if (mm) {
 		mpol_rebind_mm(mm, &to);
@@ -1232,40 +918,31 @@ static int attach_task(struct cpuset *cs, char *pidbuf, char **ppathbuf)
 		mmput(mm);
 	}
 
-	put_task_struct(tsk);
-	synchronize_rcu();
-	if (atomic_dec_and_test(&oldcs->count))
-		check_for_release(oldcs, ppathbuf);
-	return 0;
 }
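cpuset_can_attach() vetoes a proposed move before it happens, while cpuset_attach() runs after the cgroup core has switched the task's css pointers; both are invoked by the core rather than by a cpuset-private write handler. They are wired up through the subsystem descriptor. The full cpuset_subsys definition (including create/destroy/populate callbacks) lies beyond this excerpt, so the following is only a rough sketch of that wiring, not a quotation of the patch:

	/* Sketch: how the attach callbacks plug into the descriptor. */
	struct cgroup_subsys cpuset_subsys = {
		.name = "cpuset",
		.can_attach = cpuset_can_attach,	/* may refuse the move */
		.attach = cpuset_attach,		/* apply cpus/mems after it */
		.subsys_id = cpuset_subsys_id,
		.early_init = 1,
	};
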
 
 /* The various types of files and directories in a cpuset file system */
 
 typedef enum {
-	FILE_ROOT,
-	FILE_DIR,
 	FILE_MEMORY_MIGRATE,
 	FILE_CPULIST,
 	FILE_MEMLIST,
 	FILE_CPU_EXCLUSIVE,
 	FILE_MEM_EXCLUSIVE,
-	FILE_NOTIFY_ON_RELEASE,
 	FILE_MEMORY_PRESSURE_ENABLED,
 	FILE_MEMORY_PRESSURE,
 	FILE_SPREAD_PAGE,
 	FILE_SPREAD_SLAB,
-	FILE_TASKLIST,
 } cpuset_filetype_t;
 
-static ssize_t cpuset_common_file_write(struct file *file,
+static ssize_t cpuset_common_file_write(struct cgroup *cont,
+					struct cftype *cft,
+					struct file *file,
 					const char __user *userbuf,
 					size_t nbytes, loff_t *unused_ppos)
 {
-	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
-	struct cftype *cft = __d_cft(file->f_path.dentry);
+	struct cpuset *cs = cgroup_cs(cont);
 	cpuset_filetype_t type = cft->private;
 	char *buffer;
-	char *pathbuf = NULL;
 	int retval = 0;
 
 	/* Crude upper limit on largest legitimate cpulist user might write. */
@@ -1282,9 +959,9 @@ static ssize_t cpuset_common_file_write(struct file *file,
 	}
 	buffer[nbytes] = 0;	/* nul-terminate */
 
-	mutex_lock(&manage_mutex);
+	cgroup_lock();
 
-	if (is_removed(cs)) {
+	if (cgroup_is_removed(cont)) {
 		retval = -ENODEV;
 		goto out2;
 	}
@@ -1302,9 +979,6 @@ static ssize_t cpuset_common_file_write(struct file *file,
 	case FILE_MEM_EXCLUSIVE:
 		retval = update_flag(CS_MEM_EXCLUSIVE, cs, buffer);
 		break;
-	case FILE_NOTIFY_ON_RELEASE:
-		retval = update_flag(CS_NOTIFY_ON_RELEASE, cs, buffer);
-		break;
 	case FILE_MEMORY_MIGRATE:
 		retval = update_flag(CS_MEMORY_MIGRATE, cs, buffer);
 		break;
@@ -1322,9 +996,6 @@ static ssize_t cpuset_common_file_write(struct file *file,
 		retval = update_flag(CS_SPREAD_SLAB, cs, buffer);
 		cs->mems_generation = cpuset_mems_generation++;
 		break;
-	case FILE_TASKLIST:
-		retval = attach_task(cs, buffer, &pathbuf);
-		break;
 	default:
 		retval = -EINVAL;
 		goto out2;
@@ -1333,30 +1004,12 @@ static ssize_t cpuset_common_file_write(struct file *file,
 	if (retval == 0)
 		retval = nbytes;
 out2:
-	mutex_unlock(&manage_mutex);
-	cpuset_release_agent(pathbuf);
+	cgroup_unlock();
 out1:
 	kfree(buffer);
 	return retval;
 }
 
-static ssize_t cpuset_file_write(struct file *file, const char __user *buf,
-						size_t nbytes, loff_t *ppos)
-{
-	ssize_t retval = 0;
-	struct cftype *cft = __d_cft(file->f_path.dentry);
-	if (!cft)
-		return -ENODEV;
-
-	/* special function ? */
-	if (cft->write)
-		retval = cft->write(file, buf, nbytes, ppos);
-	else
-		retval = cpuset_common_file_write(file, buf, nbytes, ppos);
-
-	return retval;
-}
-
 /*
  * These ascii lists should be read in a single call, by using a user
  * buffer large enough to hold the entire map.  If read in smaller
@@ -1391,11 +1044,13 @@ static int cpuset_sprintf_memlist(char *page, struct cpuset *cs)
 	return nodelist_scnprintf(page, PAGE_SIZE, mask);
 }
 
-static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
-				size_t nbytes, loff_t *ppos)
+static ssize_t cpuset_common_file_read(struct cgroup *cont,
+				       struct cftype *cft,
+				       struct file *file,
+				       char __user *buf,
+				       size_t nbytes, loff_t *ppos)
 {
-	struct cftype *cft = __d_cft(file->f_path.dentry);
-	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
+	struct cpuset *cs = cgroup_cs(cont);
 	cpuset_filetype_t type = cft->private;
 	char *page;
 	ssize_t retval = 0;
@@ -1419,9 +1074,6 @@ static ssize_t cpuset_common_file_read(struct file *file, char __user *buf,
 	case FILE_MEM_EXCLUSIVE:
 		*s++ = is_mem_exclusive(cs) ? '1' : '0';
 		break;
-	case FILE_NOTIFY_ON_RELEASE:
-		*s++ = notify_on_release(cs) ? '1' : '0';
-		break;
 	case FILE_MEMORY_MIGRATE:
 		*s++ = is_memory_migrate(cs) ? '1' : '0';
 		break;
@@ -1449,390 +1101,141 @@ out:
 	return retval;
 }
 
-static ssize_t cpuset_file_read(struct file *file, char __user *buf, size_t nbytes,
-						loff_t *ppos)
-{
-	ssize_t retval = 0;
-	struct cftype *cft = __d_cft(file->f_path.dentry);
-	if (!cft)
-		return -ENODEV;
-
-	/* special function ? */
-	if (cft->read)
-		retval = cft->read(file, buf, nbytes, ppos);
-	else
-		retval = cpuset_common_file_read(file, buf, nbytes, ppos);
-
-	return retval;
-}
-
-static int cpuset_file_open(struct inode *inode, struct file *file)
-{
-	int err;
-	struct cftype *cft;
-
-	err = generic_file_open(inode, file);
-	if (err)
-		return err;
-
-	cft = __d_cft(file->f_path.dentry);
-	if (!cft)
-		return -ENODEV;
-	if (cft->open)
-		err = cft->open(inode, file);
-	else
-		err = 0;
-
-	return err;
-}
-
-static int cpuset_file_release(struct inode *inode, struct file *file)
-{
-	struct cftype *cft = __d_cft(file->f_path.dentry);
-	if (cft->release)
-		return cft->release(inode, file);
-	return 0;
-}
-
-/*
- * cpuset_rename - Only allow simple rename of directories in place.
- */
-static int cpuset_rename(struct inode *old_dir, struct dentry *old_dentry,
-			 struct inode *new_dir, struct dentry *new_dentry)
-{
-	if (!S_ISDIR(old_dentry->d_inode->i_mode))
-		return -ENOTDIR;
-	if (new_dentry->d_inode)
-		return -EEXIST;
-	if (old_dir != new_dir)
-		return -EIO;
-	return simple_rename(old_dir, old_dentry, new_dir, new_dentry);
-}
-
-static const struct file_operations cpuset_file_operations = {
-	.read = cpuset_file_read,
-	.write = cpuset_file_write,
-	.llseek = generic_file_llseek,
-	.open = cpuset_file_open,
-	.release = cpuset_file_release,
-};
-
-static const struct inode_operations cpuset_dir_inode_operations = {
-	.lookup = simple_lookup,
-	.mkdir = cpuset_mkdir,
-	.rmdir = cpuset_rmdir,
-	.rename = cpuset_rename,
-};
-
-static int cpuset_create_file(struct dentry *dentry, int mode)
-{
-	struct inode *inode;
-
-	if (!dentry)
-		return -ENOENT;
-	if (dentry->d_inode)
-		return -EEXIST;
-
-	inode = cpuset_new_inode(mode);
-	if (!inode)
-		return -ENOMEM;
-
-	if (S_ISDIR(mode)) {
-		inode->i_op = &cpuset_dir_inode_operations;
-		inode->i_fop = &simple_dir_operations;
-
-		/* start off with i_nlink == 2 (for "." entry) */
-		inc_nlink(inode);
-	} else if (S_ISREG(mode)) {
-		inode->i_size = 0;
-		inode->i_fop = &cpuset_file_operations;
-	}
-
-	d_instantiate(dentry, inode);
-	dget(dentry);	/* Extra count - pin the dentry in core */
-	return 0;
-}
-
-/*
- *	cpuset_create_dir - create a directory for an object.
- *	cs:	the cpuset we create the directory for.
- *		It must have a valid ->parent field
- *		And we are going to fill its ->dentry field.
- *	name:	The name to give to the cpuset directory. Will be copied.
- *	mode:	mode to set on new directory.
- */
-
-static int cpuset_create_dir(struct cpuset *cs, const char *name, int mode)
-{
-	struct dentry *dentry = NULL;
-	struct dentry *parent;
-	int error = 0;
-
-	parent = cs->parent->dentry;
-	dentry = cpuset_get_dentry(parent, name);
-	if (IS_ERR(dentry))
-		return PTR_ERR(dentry);
-	error = cpuset_create_file(dentry, S_IFDIR | mode);
-	if (!error) {
-		dentry->d_fsdata = cs;
-		inc_nlink(parent->d_inode);
-		cs->dentry = dentry;
-	}
-	dput(dentry);
-
-	return error;
-}
-
-static int cpuset_add_file(struct dentry *dir, const struct cftype *cft)
-{
-	struct dentry *dentry;
-	int error;
-
-	mutex_lock(&dir->d_inode->i_mutex);
-	dentry = cpuset_get_dentry(dir, cft->name);
-	if (!IS_ERR(dentry)) {
-		error = cpuset_create_file(dentry, 0644 | S_IFREG);
-		if (!error)
-			dentry->d_fsdata = (void *)cft;
-		dput(dentry);
-	} else
-		error = PTR_ERR(dentry);
-	mutex_unlock(&dir->d_inode->i_mutex);
-	return error;
-}
-
-/*
- * Stuff for reading the 'tasks' file.
- *
- * Reading this file can return large amounts of data if a cpuset has
- * *lots* of attached tasks.  So it may need several calls to read(),
- * but we cannot guarantee that the information we produce is correct
- * unless we produce it entirely atomically.
- *
- * Upon tasks file open(), a struct ctr_struct is allocated, that
- * will have a pointer to an array (also allocated here).  The struct
- * ctr_struct * is stored in file->private_data.  Its resources will
- * be freed by release() when the file is closed.  The array is used
- * to sprintf the PIDs and then used by read().
- */
-
-/* cpusets_tasks_read array */
-
-struct ctr_struct {
-	char *buf;
-	int bufsz;
-};
-
-/*
- * Load into 'pidarray' up to 'npids' of the tasks using cpuset 'cs'.
- * Return actual number of pids loaded.  No need to task_lock(p)
- * when reading out p->cpuset, as we don't really care if it changes
- * on the next cycle, and we are not going to try to dereference it.
- */
-static int pid_array_load(pid_t *pidarray, int npids, struct cpuset *cs)
-{
-	int n = 0;
-	struct task_struct *g, *p;
-
-	read_lock(&tasklist_lock);
-
-	do_each_thread(g, p) {
-		if (p->cpuset == cs) {
-			pidarray[n++] = p->pid;
-			if (unlikely(n == npids))
-				goto array_full;
-		}
-	} while_each_thread(g, p);
-
-array_full:
-	read_unlock(&tasklist_lock);
-	return n;
-}
-
-static int cmppid(const void *a, const void *b)
-{
-	return *(pid_t *)a - *(pid_t *)b;
-}
-
-/*
- * Convert array 'a' of 'npids' pid_t's to a string of newline separated
- * decimal pids in 'buf'.  Don't write more than 'sz' chars, but return
- * count 'cnt' of how many chars would be written if buf were large enough.
- */
-static int pid_array_to_buf(char *buf, int sz, pid_t *a, int npids)
-{
-	int cnt = 0;
-	int i;
-
-	for (i = 0; i < npids; i++)
-		cnt += snprintf(buf + cnt, max(sz - cnt, 0), "%d\n", a[i]);
-	return cnt;
-}
-
-/*
- * Handle an open on 'tasks' file.  Prepare a buffer listing the
- * process id's of tasks currently attached to the cpuset being opened.
- *
- * Does not require any specific cpuset mutexes, and does not take any.
- */
-static int cpuset_tasks_open(struct inode *unused, struct file *file)
-{
-	struct cpuset *cs = __d_cs(file->f_path.dentry->d_parent);
-	struct ctr_struct *ctr;
-	pid_t *pidarray;
-	int npids;
-	char c;
-
-	if (!(file->f_mode & FMODE_READ))
-		return 0;
-
-	ctr = kmalloc(sizeof(*ctr), GFP_KERNEL);
-	if (!ctr)
-		goto err0;
-
-	/*
-	 * If cpuset gets more users after we read count, we won't have
-	 * enough space - tough.  This race is indistinguishable to the
-	 * caller from the case that the additional cpuset users didn't
-	 * show up until sometime later on.
-	 */
-	npids = atomic_read(&cs->count);
-	pidarray = kmalloc(npids * sizeof(pid_t), GFP_KERNEL);
-	if (!pidarray)
-		goto err1;
-
-	npids = pid_array_load(pidarray, npids, cs);
-	sort(pidarray, npids, sizeof(pid_t), cmppid, NULL);
-
-	/* Call pid_array_to_buf() twice, first just to get bufsz */
-	ctr->bufsz = pid_array_to_buf(&c, sizeof(c), pidarray, npids) + 1;
-	ctr->buf = kmalloc(ctr->bufsz, GFP_KERNEL);
-	if (!ctr->buf)
-		goto err2;
-	ctr->bufsz = pid_array_to_buf(ctr->buf, ctr->bufsz, pidarray, npids);
-
-	kfree(pidarray);
-	file->private_data = ctr;
-	return 0;
-
-err2:
-	kfree(pidarray);
-err1:
-	kfree(ctr);
-err0:
-	return -ENOMEM;
-}
-
-static ssize_t cpuset_tasks_read(struct file *file, char __user *buf,
-						size_t nbytes, loff_t *ppos)
-{
-	struct ctr_struct *ctr = file->private_data;
-
-	return simple_read_from_buffer(buf, nbytes, ppos, ctr->buf, ctr->bufsz);
-}
-
-static int cpuset_tasks_release(struct inode *unused_inode, struct file *file)
-{
-	struct ctr_struct *ctr;
-
-	if (file->f_mode & FMODE_READ) {
-		ctr = file->private_data;
-		kfree(ctr->buf);
-		kfree(ctr);
-	}
-	return 0;
-}
-
 /*
  * for the common functions, 'private' gives the type of file
  */
 
-static struct cftype cft_tasks = {
-	.name = "tasks",
-	.open = cpuset_tasks_open,
-	.read = cpuset_tasks_read,
-	.release = cpuset_tasks_release,
-	.private = FILE_TASKLIST,
-};
-
 static struct cftype cft_cpus = {
 	.name = "cpus",
+	.read = cpuset_common_file_read,
+	.write = cpuset_common_file_write,
 	.private = FILE_CPULIST,