Diffstat (limited to 'security/yama/yama_lsm.c')
| -rw-r--r-- | security/yama/yama_lsm.c | 210 |
1 files changed, 165 insertions, 45 deletions
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 573723843a0..13c88fbcf03 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -17,19 +17,50 @@
 #include <linux/ptrace.h>
 #include <linux/prctl.h>
 #include <linux/ratelimit.h>
+#include <linux/workqueue.h>
 
-static int ptrace_scope = 1;
+#define YAMA_SCOPE_DISABLED	0
+#define YAMA_SCOPE_RELATIONAL	1
+#define YAMA_SCOPE_CAPABILITY	2
+#define YAMA_SCOPE_NO_ATTACH	3
+
+static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
 
 /* describe a ptrace relationship for potential exception */
 struct ptrace_relation {
 	struct task_struct *tracer;
 	struct task_struct *tracee;
+	bool invalid;
 	struct list_head node;
+	struct rcu_head rcu;
 };
 
 static LIST_HEAD(ptracer_relations);
 static DEFINE_SPINLOCK(ptracer_relations_lock);
 
+static void yama_relation_cleanup(struct work_struct *work);
+static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
+
+/**
+ * yama_relation_cleanup - remove invalid entries from the relation list
+ *
+ */
+static void yama_relation_cleanup(struct work_struct *work)
+{
+	struct ptrace_relation *relation;
+
+	spin_lock(&ptracer_relations_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid) {
+			list_del_rcu(&relation->node);
+			kfree_rcu(relation, rcu);
+		}
+	}
+	rcu_read_unlock();
+	spin_unlock(&ptracer_relations_lock);
+}
+
 /**
  * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
  * @tracer: the task_struct of the process doing the ptrace
@@ -43,32 +74,34 @@ static DEFINE_SPINLOCK(ptracer_relations_lock);
 static int yama_ptracer_add(struct task_struct *tracer,
 			    struct task_struct *tracee)
 {
-	int rc = 0;
-	struct ptrace_relation *added;
-	struct ptrace_relation *entry, *relation = NULL;
+	struct ptrace_relation *relation, *added;
 
 	added = kmalloc(sizeof(*added), GFP_KERNEL);
 	if (!added)
 		return -ENOMEM;
 
-	spin_lock_bh(&ptracer_relations_lock);
-	list_for_each_entry(entry, &ptracer_relations, node)
-		if (entry->tracee == tracee) {
-			relation = entry;
-			break;
+	added->tracee = tracee;
+	added->tracer = tracer;
+	added->invalid = false;
+
+	spin_lock(&ptracer_relations_lock);
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
+		if (relation->tracee == tracee) {
+			list_replace_rcu(&relation->node, &added->node);
+			kfree_rcu(relation, rcu);
+			goto out;
 		}
-	if (!relation) {
-		relation = added;
-		relation->tracee = tracee;
-		list_add(&relation->node, &ptracer_relations);
 	}
-	relation->tracer = tracer;
 
-	spin_unlock_bh(&ptracer_relations_lock);
-	if (added != relation)
-		kfree(added);
+	list_add_rcu(&added->node, &ptracer_relations);
 
-	return rc;
+out:
+	rcu_read_unlock();
+	spin_unlock(&ptracer_relations_lock);
+	return 0;
 }
 
 /**
@@ -79,23 +112,30 @@ static int yama_ptracer_add(struct task_struct *tracer,
 static void yama_ptracer_del(struct task_struct *tracer,
 			     struct task_struct *tracee)
 {
-	struct ptrace_relation *relation, *safe;
+	struct ptrace_relation *relation;
+	bool marked = false;
 
-	spin_lock_bh(&ptracer_relations_lock);
-	list_for_each_entry_safe(relation, safe, &ptracer_relations, node)
+	rcu_read_lock();
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
 		if (relation->tracee == tracee ||
 		    (tracer && relation->tracer == tracer)) {
-			list_del(&relation->node);
-			kfree(relation);
+			relation->invalid = true;
+			marked = true;
 		}
-	spin_unlock_bh(&ptracer_relations_lock);
+	}
+	rcu_read_unlock();
+
+	if (marked)
+		schedule_work(&yama_relation_work);
 }
 
 /**
  * yama_task_free - check for task_pid to remove from exception list
  * @task: task being removed
  */
-static void yama_task_free(struct task_struct *task)
+void yama_task_free(struct task_struct *task)
 {
 	yama_ptracer_del(task, task);
 }
@@ -111,7 +151,7 @@ static void yama_task_free(struct task_struct *task)
  * Return 0 on success, -ve on error.  -ENOSYS is returned when Yama
  * does not handle the given option.
  */
-static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
 			   unsigned long arg4, unsigned long arg5)
 {
 	int rc;
@@ -138,7 +178,7 @@ static int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
 		if (arg2 == 0) {
 			yama_ptracer_del(NULL, myself);
 			rc = 0;
-		} else if (arg2 == PR_SET_PTRACER_ANY) {
+		} else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
 			rc = yama_ptracer_add(NULL, myself);
 		} else {
 			struct task_struct *tracer;
@@ -212,21 +252,22 @@ static int ptracer_exception_found(struct task_struct *tracer,
 	struct task_struct *parent = NULL;
 	bool found = false;
 
-	spin_lock_bh(&ptracer_relations_lock);
 	rcu_read_lock();
 	if (!thread_group_leader(tracee))
 		tracee = rcu_dereference(tracee->group_leader);
-	list_for_each_entry(relation, &ptracer_relations, node)
+	list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+		if (relation->invalid)
+			continue;
 		if (relation->tracee == tracee) {
 			parent = relation->tracer;
 			found = true;
 			break;
 		}
+	}
 
 	if (found && (parent == NULL || task_is_descendant(parent, tracer)))
 		rc = 1;
 	rcu_read_unlock();
-	spin_unlock_bh(&ptracer_relations_lock);
 
 	return rc;
 }
@@ -238,7 +279,7 @@ static int ptracer_exception_found(struct task_struct *tracer,
  *
 * Returns 0 if following the ptrace is allowed, -ve on error.
  */
-static int yama_ptrace_access_check(struct task_struct *child,
+int yama_ptrace_access_check(struct task_struct *child,
 				    unsigned int mode)
 {
 	int rc;
@@ -251,36 +292,111 @@ static int yama_ptrace_access_check(struct task_struct *child,
 		return rc;
 
 	/* require ptrace target be a child of ptracer on attach */
-	if (mode == PTRACE_MODE_ATTACH &&
-	    ptrace_scope &&
-	    !task_is_descendant(current, child) &&
-	    !ptracer_exception_found(current, child) &&
-	    !capable(CAP_SYS_PTRACE))
+	if (mode == PTRACE_MODE_ATTACH) {
+		switch (ptrace_scope) {
+		case YAMA_SCOPE_DISABLED:
+			/* No additional restrictions. */
+			break;
+		case YAMA_SCOPE_RELATIONAL:
+			rcu_read_lock();
+			if (!task_is_descendant(current, child) &&
+			    !ptracer_exception_found(current, child) &&
+			    !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+				rc = -EPERM;
+			rcu_read_unlock();
+			break;
+		case YAMA_SCOPE_CAPABILITY:
+			rcu_read_lock();
+			if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+				rc = -EPERM;
+			rcu_read_unlock();
+			break;
+		case YAMA_SCOPE_NO_ATTACH:
+		default:
+			rc = -EPERM;
+			break;
+		}
+	}
+
+	if (rc) {
+		printk_ratelimited(KERN_NOTICE
+			"ptrace of pid %d was attempted by: %s (pid %d)\n",
+			child->pid, current->comm, current->pid);
+	}
+
+	return rc;
+}
+
+/**
+ * yama_ptrace_traceme - validate PTRACE_TRACEME calls
+ * @parent: task that will become the ptracer of the current task
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+int yama_ptrace_traceme(struct task_struct *parent)
+{
+	int rc;
+
+	/* If standard caps disallows it, so does Yama.  We should
+	 * only tighten restrictions further.
+	 */
+	rc = cap_ptrace_traceme(parent);
+	if (rc)
+		return rc;
+
+	/* Only disallow PTRACE_TRACEME on more aggressive settings. */
+	switch (ptrace_scope) {
+	case YAMA_SCOPE_CAPABILITY:
+		if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
+			rc = -EPERM;
+		break;
+	case YAMA_SCOPE_NO_ATTACH:
 		rc = -EPERM;
+		break;
+	}
 
 	if (rc) {
-		char name[sizeof(current->comm)];
-		printk_ratelimited(KERN_NOTICE "ptrace of non-child"
-			" pid %d was attempted by: %s (pid %d)\n",
-			child->pid,
-			get_task_comm(name, current),
-			current->pid);
+		printk_ratelimited(KERN_NOTICE
+			"ptraceme of pid %d was attempted by: %s (pid %d)\n",
+			current->pid, parent->comm, parent->pid);
 	}
 
 	return rc;
 }
 
+#ifndef CONFIG_SECURITY_YAMA_STACKED
 static struct security_operations yama_ops = {
 	.name =			"yama",
 
 	.ptrace_access_check =	yama_ptrace_access_check,
+	.ptrace_traceme =	yama_ptrace_traceme,
 	.task_prctl =		yama_task_prctl,
 	.task_free =		yama_task_free,
 };
+#endif
 
 #ifdef CONFIG_SYSCTL
+static int yama_dointvec_minmax(struct ctl_table *table, int write,
+				void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int rc;
+
+	if (write && !capable(CAP_SYS_PTRACE))
+		return -EPERM;
+
+	rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+	if (rc)
+		return rc;
+
+	/* Lock the max value if it ever gets set. */
+	if (write && *(int *)table->data == *(int *)table->extra2)
+		table->extra1 = table->extra2;
+
+	return rc;
+}
+
 static int zero;
-static int one = 1;
+static int max_scope = YAMA_SCOPE_NO_ATTACH;
 
 struct ctl_path yama_sysctl_path[] = {
 	{ .procname = "kernel", },
@@ -294,9 +410,9 @@ static struct ctl_table yama_sysctl_table[] = {
 		.data		= &ptrace_scope,
 		.maxlen		= sizeof(int),
 		.mode		= 0644,
-		.proc_handler	= proc_dointvec_minmax,
+		.proc_handler	= yama_dointvec_minmax,
 		.extra1		= &zero,
-		.extra2		= &one,
+		.extra2		= &max_scope,
 	},
 	{ }
 };
@@ -304,13 +420,17 @@ static struct ctl_table yama_sysctl_table[] = {
 
 static __init int yama_init(void)
 {
+#ifndef CONFIG_SECURITY_YAMA_STACKED
 	if (!security_module_enable(&yama_ops))
 		return 0;
+#endif
 
 	printk(KERN_INFO "Yama: becoming mindful.\n");
 
+#ifndef CONFIG_SECURITY_YAMA_STACKED
 	if (register_security(&yama_ops))
 		panic("Yama: kernel registration failed.\n");
+#endif
 
 #ifdef CONFIG_SYSCTL
 	if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
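Usage note: with ptrace_scope at YAMA_SCOPE_RELATIONAL, a process that is not an ancestor of the target can only attach if the target has first granted it an exception through prctl(PR_SET_PTRACER, ...), which is the path handled above by yama_task_prctl() and yama_ptracer_add(). The following is a minimal userspace sketch of that call; it is not part of this commit, and the argument handling is illustrative only.

/*
 * Hypothetical tracee-side helper: grant a Yama ptrace exception so a
 * specific non-ancestor process (or any process) may PTRACE_ATTACH to us.
 * The fallback defines match the values documented in prctl(2).
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/prctl.h>

#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61		/* 'Yama' */
#endif
#ifndef PR_SET_PTRACER_ANY
#define PR_SET_PTRACER_ANY ((unsigned long)-1)
#endif

int main(int argc, char *argv[])
{
	unsigned long tracer;

	if (argc < 2) {
		fprintf(stderr, "usage: %s <tracer-pid|any>\n", argv[0]);
		return 1;
	}

	/* "any" asks for PR_SET_PTRACER_ANY, the same case yama_task_prctl()
	 * accepts above; a numeric argument grants a single pid an exception. */
	if (strcmp(argv[1], "any") == 0)
		tracer = PR_SET_PTRACER_ANY;
	else
		tracer = (unsigned long)atol(argv[1]);

	if (prctl(PR_SET_PTRACER, tracer, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_PTRACER)");
		return 1;
	}

	printf("pid %d: ptrace exception granted\n", (int)getpid());
	pause();	/* stay alive so the tracer has a chance to attach */
	return 0;
}

Under YAMA_SCOPE_CAPABILITY and YAMA_SCOPE_NO_ATTACH such an exception no longer helps. The active level is exposed through /proc/sys/kernel/yama/ptrace_scope, backed by yama_dointvec_minmax() above, which requires CAP_SYS_PTRACE to write and pins the sysctl minimum to the maximum once the maximum value has been set, so the restriction cannot be relaxed again without a reboot.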
