Diffstat (limited to 'kernel/pid.c')
| -rw-r--r-- | kernel/pid.c | 313 |
1 file changed, 193 insertions, 120 deletions
diff --git a/kernel/pid.c b/kernel/pid.c
index 477691576b3..9b9a2669814 100644
--- a/kernel/pid.c
+++ b/kernel/pid.c
@@ -1,8 +1,8 @@
 /*
  * Generic pidhash and scalable, time-bounded PID allocator
  *
- * (C) 2002-2003 William Irwin, IBM
- * (C) 2004 William Irwin, Oracle
+ * (C) 2002-2003 Nadia Yvette Chambers, IBM
+ * (C) 2004 Nadia Yvette Chambers, Oracle
  * (C) 2002-2004 Ingo Molnar, Red Hat
  *
  * pid-structures are backing objects for tasks sharing a given ID to chain
@@ -27,19 +27,22 @@
  */
 
 #include <linux/mm.h>
-#include <linux/module.h>
+#include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/init.h>
+#include <linux/rculist.h>
 #include <linux/bootmem.h>
 #include <linux/hash.h>
 #include <linux/pid_namespace.h>
 #include <linux/init_task.h>
 #include <linux/syscalls.h>
+#include <linux/proc_ns.h>
+#include <linux/proc_fs.h>
 
 #define pid_hashfn(nr, ns)      \
         hash_long((unsigned long)nr + (unsigned long)ns, pidhash_shift)
 static struct hlist_head *pid_hash;
-static int pidhash_shift;
+static unsigned int pidhash_shift = 4;
 struct pid init_struct_pid = INIT_STRUCT_PID;
 
 int pid_max = PID_MAX_DEFAULT;
@@ -49,9 +52,6 @@ int pid_max = PID_MAX_DEFAULT;
 int pid_max_min = RESERVED_PIDS + 1;
 int pid_max_max = PID_MAX_LIMIT;
 
-#define BITS_PER_PAGE           (PAGE_SIZE*8)
-#define BITS_PER_PAGE_MASK      (BITS_PER_PAGE-1)
-
 static inline int mk_pid(struct pid_namespace *pid_ns,
                 struct pidmap *map, int off)
 {
@@ -75,26 +75,14 @@ struct pid_namespace init_pid_ns = {
                 [ 0 ... PIDMAP_ENTRIES-1] = { ATOMIC_INIT(BITS_PER_PAGE), NULL }
         },
         .last_pid = 0,
+        .nr_hashed = PIDNS_HASH_ADDING,
         .level = 0,
         .child_reaper = &init_task,
+        .user_ns = &init_user_ns,
+        .proc_inum = PROC_PID_INIT_INO,
 };
 EXPORT_SYMBOL_GPL(init_pid_ns);
 
-int is_container_init(struct task_struct *tsk)
-{
-        int ret = 0;
-        struct pid *pid;
-
-        rcu_read_lock();
-        pid = task_pid(tsk);
-        if (pid != NULL && pid->numbers[pid->level].nr == 1)
-                ret = 1;
-        rcu_read_unlock();
-
-        return ret;
-}
-EXPORT_SYMBOL(is_container_init);
-
 /*
  * Note: disable interrupts while the pidmap_lock is held as an
  * interrupt might come in and do read_lock(&tasklist_lock).
@@ -111,15 +99,55 @@ EXPORT_SYMBOL(is_container_init);
 
 static __cacheline_aligned_in_smp DEFINE_SPINLOCK(pidmap_lock);
 
-static void free_pidmap(struct pid_namespace *pid_ns, int pid)
+static void free_pidmap(struct upid *upid)
 {
-        struct pidmap *map = pid_ns->pidmap + pid / BITS_PER_PAGE;
-        int offset = pid & BITS_PER_PAGE_MASK;
+        int nr = upid->nr;
+        struct pidmap *map = upid->ns->pidmap + nr / BITS_PER_PAGE;
+        int offset = nr & BITS_PER_PAGE_MASK;
 
         clear_bit(offset, map->page);
         atomic_inc(&map->nr_free);
 }
 
+/*
+ * If we started walking pids at 'base', is 'a' seen before 'b'?
+ */
+static int pid_before(int base, int a, int b)
+{
+        /*
+         * This is the same as saying
+         *
+         * (a - base + MAXUINT) % MAXUINT < (b - base + MAXUINT) % MAXUINT
+         * and that mapping orders 'a' and 'b' with respect to 'base'.
+         */
+        return (unsigned)(a - base) < (unsigned)(b - base);
+}
+
+/*
+ * We might be racing with someone else trying to set pid_ns->last_pid
+ * at the pid allocation time (there's also a sysctl for this, but racing
+ * with this one is OK, see comment in kernel/pid_namespace.c about it).
+ * We want the winner to have the "later" value, because if the
+ * "earlier" value prevails, then a pid may get reused immediately.
+ *
+ * Since pids rollover, it is not sufficient to just pick the bigger
+ * value.  We have to consider where we started counting from.
+ *
+ * 'base' is the value of pid_ns->last_pid that we observed when
+ * we started looking for a pid.
+ *
+ * 'pid' is the pid that we eventually found.
+ */
+static void set_last_pid(struct pid_namespace *pid_ns, int base, int pid)
+{
+        int prev;
+        int last_write = base;
+        do {
+                prev = last_write;
+                last_write = cmpxchg(&pid_ns->last_pid, prev, pid);
+        } while ((prev != last_write) && (pid_before(base, last_write, pid)));
+}
+
 static int alloc_pidmap(struct pid_namespace *pid_ns)
 {
         int i, offset, max_scan, pid, last = pid_ns->last_pid;
@@ -130,7 +158,12 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
                 pid = RESERVED_PIDS;
         offset = pid & BITS_PER_PAGE_MASK;
         map = &pid_ns->pidmap[pid/BITS_PER_PAGE];
-        max_scan = (pid_max + BITS_PER_PAGE - 1)/BITS_PER_PAGE - !offset;
+        /*
+         * If last_pid points into the middle of the map->page we
+         * want to scan this bitmap block twice, the second time
+         * we start with offset == 0 (or RESERVED_PIDS).
+         */
+        max_scan = DIV_ROUND_UP(pid_max, BITS_PER_PAGE) - !offset;
         for (i = 0; i <= max_scan; ++i) {
                 if (unlikely(!map->page)) {
                         void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);
@@ -139,32 +172,29 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
                          * installing it:
                          */
                         spin_lock_irq(&pidmap_lock);
-                        if (map->page)
-                                kfree(page);
-                        else
+                        if (!map->page) {
                                 map->page = page;
+                                page = NULL;
+                        }
                         spin_unlock_irq(&pidmap_lock);
+                        kfree(page);
                         if (unlikely(!map->page))
                                 break;
                 }
                 if (likely(atomic_read(&map->nr_free))) {
-                        do {
+                        for ( ; ; ) {
                                 if (!test_and_set_bit(offset, map->page)) {
                                         atomic_dec(&map->nr_free);
-                                        pid_ns->last_pid = pid;
+                                        set_last_pid(pid_ns, last, pid);
                                         return pid;
                                 }
                                 offset = find_next_offset(map, offset);
+                                if (offset >= BITS_PER_PAGE)
+                                        break;
                                 pid = mk_pid(pid_ns, map, offset);
-                                /*
-                                 * find_next_offset() found a bit, the pid from it
-                                 * is in-bounds, and if we fell back to the last
-                                 * bitmap block and the final block was the same
-                                 * as the starting point, pid is before last_pid.
-                                 */
-                        } while (offset < BITS_PER_PAGE && pid < pid_max &&
-                                        (i != max_scan || pid < last ||
-                                            !((last+1) & BITS_PER_PAGE_MASK)));
+                                if (pid >= pid_max)
+                                        break;
+                        }
                 }
                 if (map < &pid_ns->pidmap[(pid_max-1)/BITS_PER_PAGE]) {
                         ++map;
@@ -180,11 +210,14 @@ static int alloc_pidmap(struct pid_namespace *pid_ns)
         return -1;
 }
 
-int next_pidmap(struct pid_namespace *pid_ns, int last)
+int next_pidmap(struct pid_namespace *pid_ns, unsigned int last)
 {
         int offset;
         struct pidmap *map, *end;
 
+        if (last >= PID_MAX_LIMIT)
+                return -1;
+
         offset = (last + 1) & BITS_PER_PAGE_MASK;
         map = &pid_ns->pidmap[(last + 1)/BITS_PER_PAGE];
         end = &pid_ns->pidmap[PIDMAP_ENTRIES];
@@ -227,12 +260,33 @@ void free_pid(struct pid *pid)
         unsigned long flags;
 
         spin_lock_irqsave(&pidmap_lock, flags);
-        for (i = 0; i <= pid->level; i++)
-                hlist_del_rcu(&pid->numbers[i].pid_chain);
+        for (i = 0; i <= pid->level; i++) {
+                struct upid *upid = pid->numbers + i;
+                struct pid_namespace *ns = upid->ns;
+                hlist_del_rcu(&upid->pid_chain);
+                switch(--ns->nr_hashed) {
+                case 2:
+                case 1:
+                        /* When all that is left in the pid namespace
+                         * is the reaper wake up the reaper.  The reaper
+                         * may be sleeping in zap_pid_ns_processes().
+                         */
+                        wake_up_process(ns->child_reaper);
+                        break;
+                case PIDNS_HASH_ADDING:
+                        /* Handle a fork failure of the first process */
+                        WARN_ON(ns->child_reaper);
+                        ns->nr_hashed = 0;
+                        /* fall through */
+                case 0:
+                        schedule_work(&ns->proc_work);
+                        break;
+                }
+        }
         spin_unlock_irqrestore(&pidmap_lock, flags);
 
         for (i = 0; i <= pid->level; i++)
-                free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+                free_pidmap(pid->numbers + i);
 
         call_rcu(&pid->rcu, delayed_put_pid);
 }
@@ -250,6 +304,7 @@ struct pid *alloc_pid(struct pid_namespace *ns)
                 goto out;
 
         tmp = ns;
+        pid->level = ns->level;
         for (i = ns->level; i >= 0; i--) {
                 nr = alloc_pidmap(tmp);
                 if (nr < 0)
@@ -260,38 +315,53 @@ struct pid *alloc_pid(struct pid_namespace *ns)
                 tmp = tmp->parent;
         }
 
+        if (unlikely(is_child_reaper(pid))) {
+                if (pid_ns_prepare_proc(ns))
+                        goto out_free;
+        }
+
         get_pid_ns(ns);
-        pid->level = ns->level;
         atomic_set(&pid->count, 1);
         for (type = 0; type < PIDTYPE_MAX; ++type)
                 INIT_HLIST_HEAD(&pid->tasks[type]);
 
+        upid = pid->numbers + ns->level;
         spin_lock_irq(&pidmap_lock);
-        for (i = ns->level; i >= 0; i--) {
-                upid = &pid->numbers[i];
+        if (!(ns->nr_hashed & PIDNS_HASH_ADDING))
+                goto out_unlock;
+        for ( ; upid >= pid->numbers; --upid) {
                 hlist_add_head_rcu(&upid->pid_chain,
                                 &pid_hash[pid_hashfn(upid->nr, upid->ns)]);
+                upid->ns->nr_hashed++;
         }
         spin_unlock_irq(&pidmap_lock);
 
 out:
         return pid;
 
+out_unlock:
+        spin_unlock_irq(&pidmap_lock);
 out_free:
-        for (i++; i <= ns->level; i++)
-                free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);
+        while (++i <= ns->level)
+                free_pidmap(pid->numbers + i);
 
         kmem_cache_free(ns->pid_cachep, pid);
         pid = NULL;
         goto out;
 }
 
+void disable_pid_allocation(struct pid_namespace *ns)
+{
+        spin_lock_irq(&pidmap_lock);
+        ns->nr_hashed &= ~PIDNS_HASH_ADDING;
+        spin_unlock_irq(&pidmap_lock);
+}
+
 struct pid *find_pid_ns(int nr, struct pid_namespace *ns)
 {
-        struct hlist_node *elem;
         struct upid *pnr;
 
-        hlist_for_each_entry_rcu(pnr, elem,
+        hlist_for_each_entry_rcu(pnr,
                         &pid_hash[pid_hashfn(nr, ns)], pid_chain)
                 if (pnr->nr == nr && pnr->ns == ns)
                         return container_of(pnr, struct pid,
@@ -303,32 +373,21 @@ EXPORT_SYMBOL_GPL(find_pid_ns);
 
 struct pid *find_vpid(int nr)
 {
-        return find_pid_ns(nr, current->nsproxy->pid_ns);
+        return find_pid_ns(nr, task_active_pid_ns(current));
 }
 EXPORT_SYMBOL_GPL(find_vpid);
 
-struct pid *find_pid(int nr)
-{
-        return find_pid_ns(nr, &init_pid_ns);
-}
-EXPORT_SYMBOL_GPL(find_pid);
-
 /*
  * attach_pid() must be called with the tasklist_lock write-held.
  */
-int attach_pid(struct task_struct *task, enum pid_type type,
-                struct pid *pid)
+void attach_pid(struct task_struct *task, enum pid_type type)
 {
-        struct pid_link *link;
-
-        link = &task->pids[type];
-        link->pid = pid;
-        hlist_add_head_rcu(&link->node, &pid->tasks[type]);
-
-        return 0;
+        struct pid_link *link = &task->pids[type];
+        hlist_add_head_rcu(&link->node, &link->pid->tasks[type]);
 }
 
-void detach_pid(struct task_struct *task, enum pid_type type)
+static void __change_pid(struct task_struct *task, enum pid_type type,
+                        struct pid *new)
 {
         struct pid_link *link;
         struct pid *pid;
@@ -338,7 +397,7 @@ void detach_pid(struct task_struct *task, enum pid_type type)
         pid = link->pid;
 
         hlist_del_rcu(&link->node);
-        link->pid = NULL;
+        link->pid = new;
 
         for (tmp = PIDTYPE_MAX; --tmp >= 0; )
                 if (!hlist_empty(&pid->tasks[tmp]))
@@ -347,13 +406,24 @@ void detach_pid(struct task_struct *task, enum pid_type type)
         free_pid(pid);
 }
 
+void detach_pid(struct task_struct *task, enum pid_type type)
+{
+        __change_pid(task, type, NULL);
+}
+
+void change_pid(struct task_struct *task, enum pid_type type,
+                struct pid *pid)
+{
+        __change_pid(task, type, pid);
+        attach_pid(task, type);
+}
+
 /* transfer_pid is an optimization of attach_pid(new), detach_pid(old) */
 void transfer_pid(struct task_struct *old, struct task_struct *new,
                            enum pid_type type)
 {
         new->pids[type].pid = old->pids[type].pid;
         hlist_replace_rcu(&old->pids[type].node, &new->pids[type].node);
-        old->pids[type].pid = NULL;
 }
 
 struct task_struct *pid_task(struct pid *pid, enum pid_type type)
@@ -361,7 +431,8 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
         struct task_struct *result = NULL;
         if (pid) {
                 struct hlist_node *first;
-                first = rcu_dereference(pid->tasks[type].first);
+                first = rcu_dereference_check(hlist_first_rcu(&pid->tasks[type]),
+                                              lockdep_tasklist_lock_is_held());
                 if (first)
                         result = hlist_entry(first, struct task_struct, pids[(type)].node);
         }
@@ -370,43 +441,32 @@ struct task_struct *pid_task(struct pid *pid, enum pid_type type)
 EXPORT_SYMBOL(pid_task);
 
 /*
- * Must be called under rcu_read_lock() or with tasklist_lock read-held.
+ * Must be called under rcu_read_lock().
  */
-struct task_struct *find_task_by_pid_type_ns(int type, int nr,
-                struct pid_namespace *ns)
-{
-        return pid_task(find_pid_ns(nr, ns), type);
-}
-
-EXPORT_SYMBOL(find_task_by_pid_type_ns);
-
-struct task_struct *find_task_by_pid(pid_t nr)
+struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
 {
-        return find_task_by_pid_type_ns(PIDTYPE_PID, nr, &init_pid_ns);
+        rcu_lockdep_assert(rcu_read_lock_held(),
+                           "find_task_by_pid_ns() needs rcu_read_lock()"
+                           " protection");
+        return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
 }
-EXPORT_SYMBOL(find_task_by_pid);
 
 struct task_struct *find_task_by_vpid(pid_t vnr)
 {
-        return find_task_by_pid_type_ns(PIDTYPE_PID, vnr,
-                        current->nsproxy->pid_ns);
-}
-EXPORT_SYMBOL(find_task_by_vpid);
-
-struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
-{
-        return find_task_by_pid_type_ns(PIDTYPE_PID, nr, ns);
+        return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
 }
-EXPORT_SYMBOL(find_task_by_pid_ns);
 
 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
 {
         struct pid *pid;
         rcu_read_lock();
+        if (type != PIDTYPE_PID)
+                task = task->group_leader;
         pid = get_pid(task->pids[type].pid);
         rcu_read_unlock();
         return pid;
 }
+EXPORT_SYMBOL_GPL(get_task_pid);
 
 struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
 {
@@ -418,6 +478,7 @@ struct task_struct *get_pid_task(struct pid *pid, enum pid_type type)
         rcu_read_unlock();
         return result;
 }
+EXPORT_SYMBOL_GPL(get_pid_task);
 
 struct pid *find_get_pid(pid_t nr)
 {
@@ -429,6 +490,7 @@ struct pid *find_get_pid(pid_t nr)
 
         return pid;
 }
+EXPORT_SYMBOL_GPL(find_get_pid);
 
 pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
 {
@@ -442,18 +504,32 @@ pid_t pid_nr_ns(struct pid *pid, struct pid_namespace *ns)
         }
         return nr;
 }
+EXPORT_SYMBOL_GPL(pid_nr_ns);
 
 pid_t pid_vnr(struct pid *pid)
 {
-        return pid_nr_ns(pid, current->nsproxy->pid_ns);
+        return pid_nr_ns(pid, task_active_pid_ns(current));
 }
 EXPORT_SYMBOL_GPL(pid_vnr);
 
-pid_t task_pid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
+                        struct pid_namespace *ns)
 {
-        return pid_nr_ns(task_pid(tsk), ns);
+        pid_t nr = 0;
+
+        rcu_read_lock();
+        if (!ns)
+                ns = task_active_pid_ns(current);
+        if (likely(pid_alive(task))) {
+                if (type != PIDTYPE_PID)
+                        task = task->group_leader;
+                nr = pid_nr_ns(task->pids[type].pid, ns);
+        }
+        rcu_read_unlock();
+
+        return nr;
 }
-EXPORT_SYMBOL(task_pid_nr_ns);
+EXPORT_SYMBOL(__task_pid_nr_ns);
 
 pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 {
@@ -461,22 +537,16 @@ pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
 }
 EXPORT_SYMBOL(task_tgid_nr_ns);
 
-pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
-{
-        return pid_nr_ns(task_pgrp(tsk), ns);
-}
-EXPORT_SYMBOL(task_pgrp_nr_ns);
-
-pid_t task_session_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
+struct pid_namespace *task_active_pid_ns(struct task_struct *tsk)
 {
-        return pid_nr_ns(task_session(tsk), ns);
+        return ns_of_pid(task_pid(tsk));
 }
-EXPORT_SYMBOL(task_session_nr_ns);
+EXPORT_SYMBOL_GPL(task_active_pid_ns);
 
 /*
- * Used by proc to find the first pid that is greater then or equal to nr.
+ * Used by proc to find the first pid that is greater than or equal to nr.
  *
- * If there is a pid at nr this function is exactly the same as find_pid.
+ * If there is a pid at nr this function is exactly the same as find_pid_ns.
  */
 struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
 {
@@ -491,7 +561,6 @@ struct pid *find_ge_pid(int nr, struct pid_namespace *ns)
 
         return pid;
 }
-EXPORT_SYMBOL_GPL(find_get_pid);
 
 /*
  * The pid hash table is scaled according to the amount of memory in the
@@ -500,26 +569,30 @@ EXPORT_SYMBOL_GPL(find_get_pid);
  */
 void __init pidhash_init(void)
 {
-        int i, pidhash_size;
-        unsigned long megabytes = nr_kernel_pages >> (20 - PAGE_SHIFT);
-
-        pidhash_shift = max(4, fls(megabytes * 4));
-        pidhash_shift = min(12, pidhash_shift);
-        pidhash_size = 1 << pidhash_shift;
+        unsigned int i, pidhash_size;
 
-        printk("PID hash table entries: %d (order: %d, %Zd bytes)\n",
-                pidhash_size, pidhash_shift,
-                pidhash_size * sizeof(struct hlist_head));
+        pid_hash = alloc_large_system_hash("PID", sizeof(*pid_hash), 0, 18,
+                                           HASH_EARLY | HASH_SMALL,
+                                           &pidhash_shift, NULL,
+                                           0, 4096);
+        pidhash_size = 1U << pidhash_shift;
 
-        pid_hash = alloc_bootmem(pidhash_size * sizeof(*(pid_hash)));
-        if (!pid_hash)
-                panic("Could not alloc pidhash!\n");
         for (i = 0; i < pidhash_size; i++)
                 INIT_HLIST_HEAD(&pid_hash[i]);
 }
 
 void __init pidmap_init(void)
 {
+        /* Veryify no one has done anything silly */
+        BUILD_BUG_ON(PID_MAX_LIMIT >= PIDNS_HASH_ADDING);
+
+        /* bump default and minimum pid_max based on number of cpus */
+        pid_max = min(pid_max_max, max_t(int, pid_max,
+                                PIDS_PER_CPU_DEFAULT * num_possible_cpus()));
+        pid_max_min = max_t(int, pid_max_min,
+                                PIDS_PER_CPU_MIN * num_possible_cpus());
+        pr_info("pid_max: default: %u minimum: %u\n", pid_max, pid_max_min);
+
         init_pid_ns.pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
         /* Reserve PID 0. We never call free_pidmap(0) */
         set_bit(0, init_pid_ns.pidmap[0].page);
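
The set_last_pid()/pid_before() pair introduced in the diff above replaces the old plain store to pid_ns->last_pid with a lock-free, wraparound-aware update: the compare-and-swap is retried only while the value another allocator published is still "earlier" relative to the point where the search started. A minimal user-space sketch of the same ordering logic follows; it is an illustration under stated assumptions, not kernel code, and __sync_val_compare_and_swap() merely stands in for the kernel's cmpxchg().

#include <stdio.h>

/* Same wraparound-aware ordering as pid_before() in the diff above:
 * counting upward from 'base', is 'a' reached before 'b'? */
static int pid_before(int base, int a, int b)
{
        return (unsigned)(a - base) < (unsigned)(b - base);
}

/* User-space stand-in for set_last_pid(): retry the compare-and-swap only
 * while the value another allocator published is still "earlier" than ours. */
static void set_last_pid(int *last_pid, int base, int pid)
{
        int prev;
        int last_write = base;
        do {
                prev = last_write;
                last_write = __sync_val_compare_and_swap(last_pid, prev, pid);
        } while ((prev != last_write) && (pid_before(base, last_write, pid)));
}

int main(void)
{
        int last_pid = 32760;           /* close to the wrap point */

        /* Two allocators that both started from base 32760: one found 32766,
         * the other wrapped around and found 300.  Whichever order the updates
         * land in, the post-wrap value 300 is "later" and must win; a plain
         * max() would wrongly keep 32766 and risk immediate pid reuse. */
        set_last_pid(&last_pid, 32760, 32766);
        set_last_pid(&last_pid, 32760, 300);
        printf("last_pid = %d\n", last_pid);    /* prints 300 */
        return 0;
}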
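
The alloc_pidmap() hunk also reworks how a fresh bitmap page is published: the page is allocated speculatively outside pidmap_lock, installed under the lock only if no other CPU got there first, and the losing copy is freed after the lock is dropped, so kfree() is never called while the spinlock is held. Below is a rough pthread-based sketch of that publish-or-discard pattern; the names, the mutex, and ensure_page() are illustrative assumptions, not the kernel's API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t map_lock = PTHREAD_MUTEX_INITIALIZER;
static void *map_page;                  /* shared; starts out NULL */

/* Publish-or-discard: allocate speculatively outside the lock, install the
 * page under the lock only if we are first, then free the unused copy (if
 * any) after unlocking -- never while the lock is held. */
static void *ensure_page(size_t size)
{
        void *page = calloc(1, size);   /* may legitimately be NULL */

        pthread_mutex_lock(&map_lock);
        if (!map_page) {
                map_page = page;        /* we won the race: publish it */
                page = NULL;            /* nothing left over to free */
        }
        pthread_mutex_unlock(&map_lock);

        free(page);                     /* loser's copy, or NULL */
        return map_page;                /* NULL only if allocation failed */
}

int main(void)
{
        void *p = ensure_page(4096);
        printf("page %s\n", p ? "installed" : "allocation failed");
        return 0;
}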
