Diffstat (limited to 'kernel/user.c')
-rw-r--r--  kernel/user.c | 201
1 file changed, 116 insertions, 85 deletions
diff --git a/kernel/user.c b/kernel/user.c
index 89e562feb1b..4efa39350e4 100644
--- a/kernel/user.c
+++ b/kernel/user.c
@@ -13,58 +13,104 @@
 #include <linux/slab.h>
 #include <linux/bitops.h>
 #include <linux/key.h>
+#include <linux/interrupt.h>
+#include <linux/export.h>
+#include <linux/user_namespace.h>
+#include <linux/proc_ns.h>
+
+/*
+ * userns count is 1 for root user, 1 for init_uts_ns,
+ * and 1 for... ?
+ */
+struct user_namespace init_user_ns = {
+	.uid_map = {
+		.nr_extents = 1,
+		.extent[0] = {
+			.first = 0,
+			.lower_first = 0,
+			.count = 4294967295U,
+		},
+	},
+	.gid_map = {
+		.nr_extents = 1,
+		.extent[0] = {
+			.first = 0,
+			.lower_first = 0,
+			.count = 4294967295U,
+		},
+	},
+	.projid_map = {
+		.nr_extents = 1,
+		.extent[0] = {
+			.first = 0,
+			.lower_first = 0,
+			.count = 4294967295U,
+		},
+	},
+	.count = ATOMIC_INIT(3),
+	.owner = GLOBAL_ROOT_UID,
+	.group = GLOBAL_ROOT_GID,
+	.proc_inum = PROC_USER_INIT_INO,
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+	.persistent_keyring_register_sem =
+	__RWSEM_INITIALIZER(init_user_ns.persistent_keyring_register_sem),
+#endif
+};
+EXPORT_SYMBOL_GPL(init_user_ns);
 
 /*
  * UID task count cache, to get fast user lookup in "alloc_uid"
  * when changing user ID's (ie setuid() and friends).
  */
 
-#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 8)
-#define UIDHASH_SZ	(1 << UIDHASH_BITS)
+#define UIDHASH_BITS	(CONFIG_BASE_SMALL ? 3 : 7)
+#define UIDHASH_SZ	(1 << UIDHASH_BITS)
 #define UIDHASH_MASK		(UIDHASH_SZ - 1)
 #define __uidhashfn(uid)	(((uid >> UIDHASH_BITS) + uid) & UIDHASH_MASK)
-#define uidhashentry(uid)	(uidhash_table + __uidhashfn((uid)))
+#define uidhashentry(uid)	(uidhash_table + __uidhashfn((__kuid_val(uid))))
+
+static struct kmem_cache *uid_cachep;
+struct hlist_head uidhash_table[UIDHASH_SZ];
 
-static kmem_cache_t *uid_cachep;
-static struct list_head uidhash_table[UIDHASH_SZ];
+/*
+ * The uidhash_lock is mostly taken from process context, but it is
+ * occasionally also taken from softirq/tasklet context, when
+ * task-structs get RCU-freed. Hence all locking must be softirq-safe.
+ * But free_uid() is also called with local interrupts disabled, and running
+ * local_bh_enable() with local interrupts disabled is an error - we'll run
+ * softirq callbacks, and they can unconditionally enable interrupts, and
+ * the caller of free_uid() didn't expect that..
+ */
 static DEFINE_SPINLOCK(uidhash_lock);
 
+/* root_user.__count is 1, for init task cred */
 struct user_struct root_user = {
 	.__count	= ATOMIC_INIT(1),
 	.processes	= ATOMIC_INIT(1),
-	.files		= ATOMIC_INIT(0),
 	.sigpending	= ATOMIC_INIT(0),
-	.mq_bytes	= 0,
 	.locked_shm	= 0,
-#ifdef CONFIG_KEYS
-	.uid_keyring	= &root_user_keyring,
-	.session_keyring = &root_session_keyring,
-#endif
+	.uid		= GLOBAL_ROOT_UID,
 };
 
 /*
  * These routines must be called with the uidhash spinlock held!
  */
-static inline void uid_hash_insert(struct user_struct *up, struct list_head *hashent)
+static void uid_hash_insert(struct user_struct *up, struct hlist_head *hashent)
 {
-	list_add(&up->uidhash_list, hashent);
+	hlist_add_head(&up->uidhash_node, hashent);
 }
 
-static inline void uid_hash_remove(struct user_struct *up)
+static void uid_hash_remove(struct user_struct *up)
 {
-	list_del(&up->uidhash_list);
+	hlist_del_init(&up->uidhash_node);
 }
 
-static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *hashent)
+static struct user_struct *uid_hash_find(kuid_t uid, struct hlist_head *hashent)
 {
-	struct list_head *up;
-
-	list_for_each(up, hashent) {
-		struct user_struct *user;
+	struct user_struct *user;
 
-		user = list_entry(up, struct user_struct, uidhash_list);
-
-		if(user->uid == uid) {
+	hlist_for_each_entry(user, hashent, uidhash_node) {
+		if (uid_eq(user->uid, uid)) {
 			atomic_inc(&user->__count);
 			return user;
 		}
@@ -73,71 +119,73 @@ static inline struct user_struct *uid_hash_find(uid_t uid, struct list_head *has
 	return NULL;
 }
 
+/* IRQs are disabled and uidhash_lock is held upon function entry.
+ * IRQ state (as stored in flags) is restored and uidhash_lock released
+ * upon function exit.
+ */
+static void free_user(struct user_struct *up, unsigned long flags)
+	__releases(&uidhash_lock)
+{
+	uid_hash_remove(up);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
+	key_put(up->uid_keyring);
+	key_put(up->session_keyring);
+	kmem_cache_free(uid_cachep, up);
+}
+
 /*
  * Locate the user_struct for the passed UID. If found, take a ref on it. The
  * caller must undo that ref with free_uid().
  *
  * If the user_struct could not be found, return NULL.
  */
-struct user_struct *find_user(uid_t uid)
+struct user_struct *find_user(kuid_t uid)
 {
 	struct user_struct *ret;
+	unsigned long flags;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irqsave(&uidhash_lock, flags);
 	ret = uid_hash_find(uid, uidhashentry(uid));
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irqrestore(&uidhash_lock, flags);
 	return ret;
 }
 
 void free_uid(struct user_struct *up)
 {
-	if (up && atomic_dec_and_lock(&up->__count, &uidhash_lock)) {
-		uid_hash_remove(up);
-		key_put(up->uid_keyring);
-		key_put(up->session_keyring);
-		kmem_cache_free(uid_cachep, up);
-		spin_unlock(&uidhash_lock);
-	}
+	unsigned long flags;
+
+	if (!up)
+		return;
+
+	local_irq_save(flags);
+	if (atomic_dec_and_lock(&up->__count, &uidhash_lock))
+		free_user(up, flags);
+	else
+		local_irq_restore(flags);
 }
 
-struct user_struct * alloc_uid(uid_t uid)
+struct user_struct *alloc_uid(kuid_t uid)
 {
-	struct list_head *hashent = uidhashentry(uid);
-	struct user_struct *up;
+	struct hlist_head *hashent = uidhashentry(uid);
+	struct user_struct *up, *new;
 
-	spin_lock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
 	up = uid_hash_find(uid, hashent);
-	spin_unlock(&uidhash_lock);
+	spin_unlock_irq(&uidhash_lock);
 
 	if (!up) {
-		struct user_struct *new;
-
-		new = kmem_cache_alloc(uid_cachep, SLAB_KERNEL);
+		new = kmem_cache_zalloc(uid_cachep, GFP_KERNEL);
 		if (!new)
-			return NULL;
+			goto out_unlock;
+
 		new->uid = uid;
 		atomic_set(&new->__count, 1);
-		atomic_set(&new->processes, 0);
-		atomic_set(&new->files, 0);
-		atomic_set(&new->sigpending, 0);
-#ifdef CONFIG_INOTIFY
-		atomic_set(&new->inotify_watches, 0);
-		atomic_set(&new->inotify_devs, 0);
-#endif
-
-		new->mq_bytes = 0;
-		new->locked_shm = 0;
-
-		if (alloc_uid_keyring(new) < 0) {
-			kmem_cache_free(uid_cachep, new);
-			return NULL;
-		}
 
 		/*
 		 * Before adding this, check whether we raced
		 * on adding the same user already..
 		 */
-		spin_lock(&uidhash_lock);
+		spin_lock_irq(&uidhash_lock);
 		up = uid_hash_find(uid, hashent);
 		if (up) {
 			key_put(new->uid_keyring);
@@ -147,47 +195,30 @@ struct user_struct * alloc_uid(uid_t uid)
 			uid_hash_insert(new, hashent);
 			up = new;
 		}
-		spin_unlock(&uidhash_lock);
-
+		spin_unlock_irq(&uidhash_lock);
 	}
+
 	return up;
-}
 
-void switch_uid(struct user_struct *new_user)
-{
-	struct user_struct *old_user;
-
-	/* What if a process setreuid()'s and this brings the
-	 * new uid over his NPROC rlimit? We can check this now
-	 * cheaply with the new uid cache, so if it matters
-	 * we should be checking for it. -DaveM
-	 */
-	old_user = current->user;
-	atomic_inc(&new_user->processes);
-	atomic_dec(&old_user->processes);
-	switch_uid_keyring(new_user);
-	current->user = new_user;
-	free_uid(old_user);
-	suid_keys(current);
+out_unlock:
+	return NULL;
 }
 
-
 static int __init uid_cache_init(void)
 {
 	int n;
 
 	uid_cachep = kmem_cache_create("uid_cache", sizeof(struct user_struct),
-			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
+			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
 
 	for(n = 0; n < UIDHASH_SZ; ++n)
-		INIT_LIST_HEAD(uidhash_table + n);
+		INIT_HLIST_HEAD(uidhash_table + n);
 
 	/* Insert the root user immediately (init already runs as root) */
-	spin_lock(&uidhash_lock);
-	uid_hash_insert(&root_user, uidhashentry(0));
-	spin_unlock(&uidhash_lock);
+	spin_lock_irq(&uidhash_lock);
+	uid_hash_insert(&root_user, uidhashentry(GLOBAL_ROOT_UID));
+	spin_unlock_irq(&uidhash_lock);
 
 	return 0;
 }
-
-module_init(uid_cache_init);
+subsys_initcall(uid_cache_init);
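
The locking comment added above is the heart of this change: free_uid() can be called with local interrupts already disabled, and uidhash_lock is also taken from softirq context, so the drop path pairs local_irq_save() with atomic_dec_and_lock() and lets free_user() release both the lock and the saved IRQ state through spin_unlock_irqrestore(). The shape worth remembering is "decrement the refcount, and only take the hash lock for the final reference". Below is a minimal userspace analogue of that shape, a sketch assuming C11 atomics and pthreads; uobj and uobj_put are invented names, and the kernel's atomic_dec_and_lock() is stronger than this, since it performs the final 1 -> 0 decrement while already holding the lock, so a concurrent uid_hash_find() can never take a reference on an object committed to being freed.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-in for struct user_struct: a refcount plus hashed state. */
struct uobj {
	atomic_int count;
};

static pthread_mutex_t hash_lock = PTHREAD_MUTEX_INITIALIZER;

static void uobj_put(struct uobj *up)
{
	if (!up)
		return;
	/*
	 * Fast path: the count stays above zero and the lock is never taken.
	 * Slow path: the last dropper takes the hash lock, unhashes the
	 * object and frees it, mirroring free_uid()/free_user() in the diff.
	 */
	if (atomic_fetch_sub_explicit(&up->count, 1,
				      memory_order_acq_rel) == 1) {
		pthread_mutex_lock(&hash_lock);
		/* the equivalent of uid_hash_remove(up) would go here */
		pthread_mutex_unlock(&hash_lock);
		free(up);
	}
}

int main(void)
{
	struct uobj *up = malloc(sizeof(*up));

	if (!up)
		return 1;
	atomic_init(&up->count, 2);
	uobj_put(up);	/* 2 -> 1: fast path, lock untouched */
	uobj_put(up);	/* 1 -> 0: slow path, unhash and free */
	puts("done");
	return 0;
}

The IRQ dance in the committed free_uid() exists because, as the new comment explains, running local_bh_enable() with interrupts off is an error; disabling interrupts outright is the one locking mode that is safe for process-context, softirq and interrupts-disabled callers alike.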

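The other pervasive change is the type migration from plain uid_t to kuid_t: find_user() and alloc_uid() now take a kuid_t, equality checks go through uid_eq(), and uidhashentry() unwraps the value with __kuid_val() before hashing. kuid_t is a one-member struct rather than an integer typedef precisely so the compiler rejects accidental mixing of namespace-mapped kernel uids with raw userspace values. A standalone sketch mirroring the (simplified) definitions from include/linux/uidgid.h together with the hash fold from this file:

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>

/* Simplified mirror of the kernel's type-safe uid wrapper. */
typedef struct {
	uid_t val;
} kuid_t;

#define UIDHASH_BITS	7	/* the new non-CONFIG_BASE_SMALL value */
#define UIDHASH_SZ	(1 << UIDHASH_BITS)
#define UIDHASH_MASK	(UIDHASH_SZ - 1)

static inline uid_t __kuid_val(kuid_t uid)
{
	return uid.val;
}

static inline bool uid_eq(kuid_t left, kuid_t right)
{
	return __kuid_val(left) == __kuid_val(right);
}

/* Same fold as __uidhashfn() in the diff, fed the unwrapped value. */
static unsigned int uid_bucket(kuid_t uid)
{
	uid_t v = __kuid_val(uid);

	return ((v >> UIDHASH_BITS) + v) & UIDHASH_MASK;
}

int main(void)
{
	kuid_t root = { .val = 0 };
	kuid_t user = { .val = 1000 };

	/* "root == user" would not even compile: C structs have no ==,
	 * which is exactly the protection the wrapper buys. */
	printf("root -> bucket %u, 1000 -> bucket %u, eq: %d\n",
	       uid_bucket(root), uid_bucket(user), uid_eq(root, user));
	return 0;
}

This is also why uid_cache_init() now inserts root with uidhashentry(GLOBAL_ROOT_UID) instead of uidhashentry(0): a bare integer is no longer a valid argument once the macro expects a value that __kuid_val() can unwrap.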