Diffstat (limited to 'security')
161 files changed, 28128 insertions, 10782 deletions
diff --git a/security/Kconfig b/security/Kconfig index e80da955e68..beb86b500ad 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -4,40 +4,7 @@ menu "Security options" -config KEYS - bool "Enable access key retention support" - help - This option provides support for retaining authentication tokens and - access keys in the kernel. - - It also includes provision of methods by which such keys might be - associated with a process so that network filesystems, encryption - support and the like can find them. - - Furthermore, a special type of key is available that acts as keyring: - a searchable sequence of keys. Each process is equipped with access - to five standard keyrings: UID-specific, GID-specific, session, - process and thread. - - If you are unsure as to whether this is required, answer N. - -config KEYS_DEBUG_PROC_KEYS - bool "Enable the /proc/keys file by which keys may be viewed" - depends on KEYS - help - This option turns on support for the /proc/keys file - through which - can be listed all the keys on the system that are viewable by the - reading process. - - The only keys included in the list are those that grant View - permission to the reading process whether or not it possesses them. - Note that LSM security checks are still performed, and may further - filter out keys that the current process is not authorised to view. - - Only key attributes are listed here; key payloads are not included in - the resulting table. - - If you are unsure as to whether this is required, answer N. +source security/keys/Kconfig config SECURITY_DMESG_RESTRICT bool "Restrict unprivileged access to the kernel syslog" @@ -136,6 +103,7 @@ config INTEL_TXT config LSM_MMAP_MIN_ADDR int "Low address space for LSM to protect from user allocation" depends on SECURITY && SECURITY_SELINUX + default 32768 if ARM || (ARM64 && COMPAT) default 65536 help This is the portion of low virtual memory which should be protected @@ -153,8 +121,9 @@ source security/selinux/Kconfig source security/smack/Kconfig source security/tomoyo/Kconfig source security/apparmor/Kconfig +source security/yama/Kconfig -source security/integrity/ima/Kconfig +source security/integrity/Kconfig choice prompt "Default security module" @@ -162,6 +131,7 @@ choice default DEFAULT_SECURITY_SMACK if SECURITY_SMACK default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR + default DEFAULT_SECURITY_YAMA if SECURITY_YAMA default DEFAULT_SECURITY_DAC help @@ -180,6 +150,9 @@ choice config DEFAULT_SECURITY_APPARMOR bool "AppArmor" if SECURITY_APPARMOR=y + config DEFAULT_SECURITY_YAMA + bool "Yama" if SECURITY_YAMA=y + config DEFAULT_SECURITY_DAC bool "Unix Discretionary Access Controls" @@ -191,6 +164,7 @@ config DEFAULT_SECURITY default "smack" if DEFAULT_SECURITY_SMACK default "tomoyo" if DEFAULT_SECURITY_TOMOYO default "apparmor" if DEFAULT_SECURITY_APPARMOR + default "yama" if DEFAULT_SECURITY_YAMA default "" if DEFAULT_SECURITY_DAC endmenu diff --git a/security/Makefile b/security/Makefile index 8bb0fe9e1ca..05f1c934d74 100644 --- a/security/Makefile +++ b/security/Makefile @@ -7,6 +7,7 @@ subdir-$(CONFIG_SECURITY_SELINUX) += selinux subdir-$(CONFIG_SECURITY_SMACK) += smack subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor +subdir-$(CONFIG_SECURITY_YAMA) += yama # always enable default capabilities obj-y += commoncap.o @@ -15,14 +16,14 @@ obj-$(CONFIG_MMU) += min_addr.o # Object file lists obj-$(CONFIG_SECURITY) += security.o capability.o 
obj-$(CONFIG_SECURITYFS) += inode.o -# Must precede capability.o in order to stack properly. -obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o -obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o +obj-$(CONFIG_SECURITY_SELINUX) += selinux/ +obj-$(CONFIG_SECURITY_SMACK) += smack/ obj-$(CONFIG_AUDIT) += lsm_audit.o -obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o -obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o +obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/ +obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/ +obj-$(CONFIG_SECURITY_YAMA) += yama/ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o # Object integrity file lists -subdir-$(CONFIG_IMA) += integrity/ima -obj-$(CONFIG_IMA) += integrity/ima/built-in.o +subdir-$(CONFIG_INTEGRITY) += integrity +obj-$(CONFIG_INTEGRITY) += integrity/ diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore index 4d995aeaebc..9cdec70d72b 100644 --- a/security/apparmor/.gitignore +++ b/security/apparmor/.gitignore @@ -1,6 +1,5 @@ # # Generated include files # -af_names.h capability_names.h rlim_names.h diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig index 9b9013b2e32..d49c53960b6 100644 --- a/security/apparmor/Kconfig +++ b/security/apparmor/Kconfig @@ -29,3 +29,15 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE boot. If you are unsure how to answer this question, answer 1. + +config SECURITY_APPARMOR_HASH + bool "SHA1 hash of loaded profiles" + depends on SECURITY_APPARMOR + depends on CRYPTO + select CRYPTO_SHA1 + default y + + help + This option selects whether sha1 hashing is done against loaded + profiles and exported for inspection to user space via the apparmor + filesystem. diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile index f204869399e..d693df87481 100644 --- a/security/apparmor/Makefile +++ b/security/apparmor/Makefile @@ -5,20 +5,66 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \ path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \ resource.o sid.o file.o +apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o -clean-files: capability_names.h af_names.h +clean-files := capability_names.h rlim_names.h + +# Build a lower case string table of capability names +# Transforms lines from +# #define CAP_DAC_OVERRIDE 1 +# to +# [1] = "dac_override", quiet_cmd_make-caps = GEN $@ -cmd_make-caps = echo "static const char *capability_names[] = {" > $@ ; sed -n -e "/CAP_FS_MASK/d" -e "s/^\#define[ \\t]\\+CAP_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@ +cmd_make-caps = echo "static const char *const capability_names[] = {" > $@ ;\ + sed $< >>$@ -r -n -e '/CAP_FS_MASK/d' \ + -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/[\2] = "\L\1",/p';\ + echo "};" >> $@ ;\ + echo -n '\#define AA_FS_CAPS_MASK "' >> $@ ;\ + sed $< -r -n -e '/CAP_FS_MASK/d' \ + -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/\L\1/p' | \ + tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ + +# Build a lower case string table of rlimit names. +# Transforms lines from +# #define RLIMIT_STACK 3 /* max stack size */ +# to +# [RLIMIT_STACK] = "stack", +# +# and build a second integer table (with the second sed cmd), that maps +# RLIMIT defines to the order defined in asm-generic/resource.h This is +# required by policy load to map policy ordering of RLIMITs to internal +# ordering for architectures that redefine an RLIMIT. 
+# Transforms lines from +# #define RLIMIT_STACK 3 /* max stack size */ +# to +# RLIMIT_STACK, +# +# and build the securityfs entries for the mapping. +# Transforms lines from +# #define RLIMIT_FSIZE 1 /* Maximum filesize */ +# #define RLIMIT_STACK 3 /* max stack size */ +# to +# #define AA_FS_RLIMIT_MASK "fsize stack" quiet_cmd_make-rlim = GEN $@ -cmd_make-rlim = echo "static const char *rlim_names[] = {" > $@ ; sed -n --e "/AF_MAX/d" -e "s/^\# \\?define[ \\t]\\+RLIMIT_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z >> $@ ; echo "};" >> $@ ; echo "static const int rlim_map[] = {" >> $@ ; sed -n -e "/AF_MAX/d" -e "s/^\# \\?define[ \\t]\\+\\(RLIMIT_[A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/\\1,/p" $< >> $@ ; echo "};" >> $@ +cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \ + > $@ ;\ + sed $< >> $@ -r -n \ + -e 's/^\# ?define[ \t]+(RLIMIT_([A-Z0-9_]+)).*/[\1] = "\L\2",/p';\ + echo "};" >> $@ ;\ + echo "static const int rlim_map[RLIM_NLIMITS] = {" >> $@ ;\ + sed -r -n "s/^\# ?define[ \t]+(RLIMIT_[A-Z0-9_]+).*/\1,/p" $< >> $@ ;\ + echo "};" >> $@ ; \ + echo -n '\#define AA_FS_RLIMIT_MASK "' >> $@ ;\ + sed -r -n 's/^\# ?define[ \t]+RLIMIT_([A-Z0-9_]+).*/\L\1/p' $< | \ + tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@ $(obj)/capability.o : $(obj)/capability_names.h $(obj)/resource.o : $(obj)/rlim_names.h -$(obj)/capability_names.h : $(srctree)/include/linux/capability.h +$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \ + $(src)/Makefile $(call cmd,make-caps) -$(obj)/af_names.h : $(srctree)/include/linux/socket.h - $(call cmd,make-af) -$(obj)/rlim_names.h : $(srctree)/include/asm-generic/resource.h +$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \ + $(src)/Makefile $(call cmd,make-rlim) diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c index 0848292982a..7db9954f1af 100644 --- a/security/apparmor/apparmorfs.c +++ b/security/apparmor/apparmorfs.c @@ -12,18 +12,62 @@ * License. 
*/ +#include <linux/ctype.h> #include <linux/security.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/uaccess.h> #include <linux/namei.h> +#include <linux/capability.h> +#include <linux/rcupdate.h> #include "include/apparmor.h" #include "include/apparmorfs.h" #include "include/audit.h" #include "include/context.h" +#include "include/crypto.h" #include "include/policy.h" +#include "include/resource.h" + +/** + * aa_mangle_name - mangle a profile name to std profile layout form + * @name: profile name to mangle (NOT NULL) + * @target: buffer to store mangled name, same length as @name (MAYBE NULL) + * + * Returns: length of mangled name + */ +static int mangle_name(char *name, char *target) +{ + char *t = target; + + while (*name == '/' || *name == '.') + name++; + + if (target) { + for (; *name; name++) { + if (*name == '/') + *(t)++ = '.'; + else if (isspace(*name)) + *(t)++ = '_'; + else if (isalnum(*name) || strchr("._-", *name)) + *(t)++ = *name; + } + + *t = 0; + } else { + int len = 0; + for (; *name; name++) { + if (isalnum(*name) || isspace(*name) || + strchr("/._-", *name)) + len++; + } + + return len; + } + + return t - target; +} /** * aa_simple_write_to_buffer - common routine for getting policy from user @@ -142,38 +186,733 @@ static const struct file_operations aa_fs_profile_remove = { .llseek = default_llseek, }; -/** Base file system setup **/ +static int aa_fs_seq_show(struct seq_file *seq, void *v) +{ + struct aa_fs_entry *fs_file = seq->private; + + if (!fs_file) + return 0; + + switch (fs_file->v_type) { + case AA_FS_TYPE_BOOLEAN: + seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no"); + break; + case AA_FS_TYPE_STRING: + seq_printf(seq, "%s\n", fs_file->v.string); + break; + case AA_FS_TYPE_U64: + seq_printf(seq, "%#08lx\n", fs_file->v.u64); + break; + default: + /* Ignore unpritable entry types. 
*/ + break; + } + + return 0; +} + +static int aa_fs_seq_open(struct inode *inode, struct file *file) +{ + return single_open(file, aa_fs_seq_show, inode->i_private); +} + +const struct file_operations aa_fs_seq_file_ops = { + .owner = THIS_MODULE, + .open = aa_fs_seq_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int aa_fs_seq_profile_open(struct inode *inode, struct file *file, + int (*show)(struct seq_file *, void *)) +{ + struct aa_replacedby *r = aa_get_replacedby(inode->i_private); + int error = single_open(file, show, r); + + if (error) { + file->private_data = NULL; + aa_put_replacedby(r); + } + + return error; +} + +static int aa_fs_seq_profile_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = (struct seq_file *) file->private_data; + if (seq) + aa_put_replacedby(seq->private); + return single_release(inode, file); +} + +static int aa_fs_seq_profname_show(struct seq_file *seq, void *v) +{ + struct aa_replacedby *r = seq->private; + struct aa_profile *profile = aa_get_profile_rcu(&r->profile); + seq_printf(seq, "%s\n", profile->base.name); + aa_put_profile(profile); + + return 0; +} + +static int aa_fs_seq_profname_open(struct inode *inode, struct file *file) +{ + return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profname_show); +} + +static const struct file_operations aa_fs_profname_fops = { + .owner = THIS_MODULE, + .open = aa_fs_seq_profname_open, + .read = seq_read, + .llseek = seq_lseek, + .release = aa_fs_seq_profile_release, +}; + +static int aa_fs_seq_profmode_show(struct seq_file *seq, void *v) +{ + struct aa_replacedby *r = seq->private; + struct aa_profile *profile = aa_get_profile_rcu(&r->profile); + seq_printf(seq, "%s\n", aa_profile_mode_names[profile->mode]); + aa_put_profile(profile); + + return 0; +} + +static int aa_fs_seq_profmode_open(struct inode *inode, struct file *file) +{ + return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profmode_show); +} + +static const struct file_operations aa_fs_profmode_fops = { + .owner = THIS_MODULE, + .open = aa_fs_seq_profmode_open, + .read = seq_read, + .llseek = seq_lseek, + .release = aa_fs_seq_profile_release, +}; + +static int aa_fs_seq_profattach_show(struct seq_file *seq, void *v) +{ + struct aa_replacedby *r = seq->private; + struct aa_profile *profile = aa_get_profile_rcu(&r->profile); + if (profile->attach) + seq_printf(seq, "%s\n", profile->attach); + else if (profile->xmatch) + seq_puts(seq, "<unknown>\n"); + else + seq_printf(seq, "%s\n", profile->base.name); + aa_put_profile(profile); + + return 0; +} + +static int aa_fs_seq_profattach_open(struct inode *inode, struct file *file) +{ + return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profattach_show); +} + +static const struct file_operations aa_fs_profattach_fops = { + .owner = THIS_MODULE, + .open = aa_fs_seq_profattach_open, + .read = seq_read, + .llseek = seq_lseek, + .release = aa_fs_seq_profile_release, +}; + +static int aa_fs_seq_hash_show(struct seq_file *seq, void *v) +{ + struct aa_replacedby *r = seq->private; + struct aa_profile *profile = aa_get_profile_rcu(&r->profile); + unsigned int i, size = aa_hash_size(); + + if (profile->hash) { + for (i = 0; i < size; i++) + seq_printf(seq, "%.2x", profile->hash[i]); + seq_puts(seq, "\n"); + } + + return 0; +} + +static int aa_fs_seq_hash_open(struct inode *inode, struct file *file) +{ + return single_open(file, aa_fs_seq_hash_show, inode->i_private); +} + +static const struct file_operations aa_fs_seq_hash_fops = { + .owner = 
THIS_MODULE, + .open = aa_fs_seq_hash_open, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +/** fns to setup dynamic per profile/namespace files **/ +void __aa_fs_profile_rmdir(struct aa_profile *profile) +{ + struct aa_profile *child; + int i; + + if (!profile) + return; + + list_for_each_entry(child, &profile->base.profiles, base.list) + __aa_fs_profile_rmdir(child); + + for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) { + struct aa_replacedby *r; + if (!profile->dents[i]) + continue; + + r = profile->dents[i]->d_inode->i_private; + securityfs_remove(profile->dents[i]); + aa_put_replacedby(r); + profile->dents[i] = NULL; + } +} + +void __aa_fs_profile_migrate_dents(struct aa_profile *old, + struct aa_profile *new) +{ + int i; + + for (i = 0; i < AAFS_PROF_SIZEOF; i++) { + new->dents[i] = old->dents[i]; + old->dents[i] = NULL; + } +} + +static struct dentry *create_profile_file(struct dentry *dir, const char *name, + struct aa_profile *profile, + const struct file_operations *fops) +{ + struct aa_replacedby *r = aa_get_replacedby(profile->replacedby); + struct dentry *dent; + + dent = securityfs_create_file(name, S_IFREG | 0444, dir, r, fops); + if (IS_ERR(dent)) + aa_put_replacedby(r); + + return dent; +} + +/* requires lock be held */ +int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent) +{ + struct aa_profile *child; + struct dentry *dent = NULL, *dir; + int error; + + if (!parent) { + struct aa_profile *p; + p = aa_deref_parent(profile); + dent = prof_dir(p); + /* adding to parent that previously didn't have children */ + dent = securityfs_create_dir("profiles", dent); + if (IS_ERR(dent)) + goto fail; + prof_child_dir(p) = parent = dent; + } + + if (!profile->dirname) { + int len, id_len; + len = mangle_name(profile->base.name, NULL); + id_len = snprintf(NULL, 0, ".%ld", profile->ns->uniq_id); + + profile->dirname = kmalloc(len + id_len + 1, GFP_KERNEL); + if (!profile->dirname) + goto fail; + + mangle_name(profile->base.name, profile->dirname); + sprintf(profile->dirname + len, ".%ld", profile->ns->uniq_id++); + } + + dent = securityfs_create_dir(profile->dirname, parent); + if (IS_ERR(dent)) + goto fail; + prof_dir(profile) = dir = dent; + + dent = create_profile_file(dir, "name", profile, &aa_fs_profname_fops); + if (IS_ERR(dent)) + goto fail; + profile->dents[AAFS_PROF_NAME] = dent; + + dent = create_profile_file(dir, "mode", profile, &aa_fs_profmode_fops); + if (IS_ERR(dent)) + goto fail; + profile->dents[AAFS_PROF_MODE] = dent; + + dent = create_profile_file(dir, "attach", profile, + &aa_fs_profattach_fops); + if (IS_ERR(dent)) + goto fail; + profile->dents[AAFS_PROF_ATTACH] = dent; + + if (profile->hash) { + dent = create_profile_file(dir, "sha1", profile, + &aa_fs_seq_hash_fops); + if (IS_ERR(dent)) + goto fail; + profile->dents[AAFS_PROF_HASH] = dent; + } + + list_for_each_entry(child, &profile->base.profiles, base.list) { + error = __aa_fs_profile_mkdir(child, prof_child_dir(profile)); + if (error) + goto fail2; + } + + return 0; + +fail: + error = PTR_ERR(dent); + +fail2: + __aa_fs_profile_rmdir(profile); + + return error; +} + +void __aa_fs_namespace_rmdir(struct aa_namespace *ns) +{ + struct aa_namespace *sub; + struct aa_profile *child; + int i; + + if (!ns) + return; + + list_for_each_entry(child, &ns->base.profiles, base.list) + __aa_fs_profile_rmdir(child); + + list_for_each_entry(sub, &ns->sub_ns, base.list) { + mutex_lock(&sub->lock); + __aa_fs_namespace_rmdir(sub); + mutex_unlock(&sub->lock); + } + + for (i = 
AAFS_NS_SIZEOF - 1; i >= 0; --i) { + securityfs_remove(ns->dents[i]); + ns->dents[i] = NULL; + } +} + +int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent, + const char *name) +{ + struct aa_namespace *sub; + struct aa_profile *child; + struct dentry *dent, *dir; + int error; + + if (!name) + name = ns->base.name; + + dent = securityfs_create_dir(name, parent); + if (IS_ERR(dent)) + goto fail; + ns_dir(ns) = dir = dent; + + dent = securityfs_create_dir("profiles", dir); + if (IS_ERR(dent)) + goto fail; + ns_subprofs_dir(ns) = dent; + + dent = securityfs_create_dir("namespaces", dir); + if (IS_ERR(dent)) + goto fail; + ns_subns_dir(ns) = dent; + + list_for_each_entry(child, &ns->base.profiles, base.list) { + error = __aa_fs_profile_mkdir(child, ns_subprofs_dir(ns)); + if (error) + goto fail2; + } + + list_for_each_entry(sub, &ns->sub_ns, base.list) { + mutex_lock(&sub->lock); + error = __aa_fs_namespace_mkdir(sub, ns_subns_dir(ns), NULL); + mutex_unlock(&sub->lock); + if (error) + goto fail2; + } + + return 0; -static struct dentry *aa_fs_dentry __initdata; +fail: + error = PTR_ERR(dent); -static void __init aafs_remove(const char *name) +fail2: + __aa_fs_namespace_rmdir(ns); + + return error; +} + + +#define list_entry_next(pos, member) \ + list_entry(pos->member.next, typeof(*pos), member) +#define list_entry_is_head(pos, head, member) (&pos->member == (head)) + +/** + * __next_namespace - find the next namespace to list + * @root: root namespace to stop search at (NOT NULL) + * @ns: current ns position (NOT NULL) + * + * Find the next namespace from @ns under @root and handle all locking needed + * while switching current namespace. + * + * Returns: next namespace or NULL if at last namespace under @root + * Requires: ns->parent->lock to be held + * NOTE: will not unlock root->lock + */ +static struct aa_namespace *__next_namespace(struct aa_namespace *root, + struct aa_namespace *ns) { - struct dentry *dentry; + struct aa_namespace *parent, *next; + + /* is next namespace a child */ + if (!list_empty(&ns->sub_ns)) { + next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list); + mutex_lock(&next->lock); + return next; + } + + /* check if the next ns is a sibling, parent, gp, .. 
*/ + parent = ns->parent; + while (ns != root) { + mutex_unlock(&ns->lock); + next = list_entry_next(ns, base.list); + if (!list_entry_is_head(next, &parent->sub_ns, base.list)) { + mutex_lock(&next->lock); + return next; + } + ns = parent; + parent = parent->parent; + } + + return NULL; +} - dentry = lookup_one_len(name, aa_fs_dentry, strlen(name)); - if (!IS_ERR(dentry)) { - securityfs_remove(dentry); - dput(dentry); +/** + * __first_profile - find the first profile in a namespace + * @root: namespace that is root of profiles being displayed (NOT NULL) + * @ns: namespace to start in (NOT NULL) + * + * Returns: unrefcounted profile or NULL if no profile + * Requires: profile->ns.lock to be held + */ +static struct aa_profile *__first_profile(struct aa_namespace *root, + struct aa_namespace *ns) +{ + for (; ns; ns = __next_namespace(root, ns)) { + if (!list_empty(&ns->base.profiles)) + return list_first_entry(&ns->base.profiles, + struct aa_profile, base.list); } + return NULL; } /** - * aafs_create - create an entry in the apparmor filesystem - * @name: name of the entry (NOT NULL) - * @mask: file permission mask of the file - * @fops: file operations for the file (NOT NULL) + * __next_profile - step to the next profile in a profile tree + * @profile: current profile in tree (NOT NULL) * - * Used aafs_remove to remove entries created with this fn. + * Perform a depth first traversal on the profile tree in a namespace + * + * Returns: next profile or NULL if done + * Requires: profile->ns.lock to be held */ -static int __init aafs_create(const char *name, int mask, - const struct file_operations *fops) +static struct aa_profile *__next_profile(struct aa_profile *p) { - struct dentry *dentry; + struct aa_profile *parent; + struct aa_namespace *ns = p->ns; + + /* is next profile a child */ + if (!list_empty(&p->base.profiles)) + return list_first_entry(&p->base.profiles, typeof(*p), + base.list); + + /* is next profile a sibling, parent sibling, gp, sibling, .. */ + parent = rcu_dereference_protected(p->parent, + mutex_is_locked(&p->ns->lock)); + while (parent) { + p = list_entry_next(p, base.list); + if (!list_entry_is_head(p, &parent->base.profiles, base.list)) + return p; + p = parent; + parent = rcu_dereference_protected(parent->parent, + mutex_is_locked(&parent->ns->lock)); + } - dentry = securityfs_create_file(name, S_IFREG | mask, aa_fs_dentry, - NULL, fops); + /* is next another profile in the namespace */ + p = list_entry_next(p, base.list); + if (!list_entry_is_head(p, &ns->base.profiles, base.list)) + return p; - return IS_ERR(dentry) ? 
PTR_ERR(dentry) : 0; + return NULL; +} + +/** + * next_profile - step to the next profile in where ever it may be + * @root: root namespace (NOT NULL) + * @profile: current profile (NOT NULL) + * + * Returns: next profile or NULL if there isn't one + */ +static struct aa_profile *next_profile(struct aa_namespace *root, + struct aa_profile *profile) +{ + struct aa_profile *next = __next_profile(profile); + if (next) + return next; + + /* finished all profiles in namespace move to next namespace */ + return __first_profile(root, __next_namespace(root, profile->ns)); +} + +/** + * p_start - start a depth first traversal of profile tree + * @f: seq_file to fill + * @pos: current position + * + * Returns: first profile under current namespace or NULL if none found + * + * acquires first ns->lock + */ +static void *p_start(struct seq_file *f, loff_t *pos) +{ + struct aa_profile *profile = NULL; + struct aa_namespace *root = aa_current_profile()->ns; + loff_t l = *pos; + f->private = aa_get_namespace(root); + + + /* find the first profile */ + mutex_lock(&root->lock); + profile = __first_profile(root, root); + + /* skip to position */ + for (; profile && l > 0; l--) + profile = next_profile(root, profile); + + return profile; +} + +/** + * p_next - read the next profile entry + * @f: seq_file to fill + * @p: profile previously returned + * @pos: current position + * + * Returns: next profile after @p or NULL if none + * + * may acquire/release locks in namespace tree as necessary + */ +static void *p_next(struct seq_file *f, void *p, loff_t *pos) +{ + struct aa_profile *profile = p; + struct aa_namespace *ns = f->private; + (*pos)++; + + return next_profile(ns, profile); +} + +/** + * p_stop - stop depth first traversal + * @f: seq_file we are filling + * @p: the last profile writen + * + * Release all locking done by p_start/p_next on namespace tree + */ +static void p_stop(struct seq_file *f, void *p) +{ + struct aa_profile *profile = p; + struct aa_namespace *root = f->private, *ns; + + if (profile) { + for (ns = profile->ns; ns && ns != root; ns = ns->parent) + mutex_unlock(&ns->lock); + } + mutex_unlock(&root->lock); + aa_put_namespace(root); +} + +/** + * seq_show_profile - show a profile entry + * @f: seq_file to file + * @p: current position (profile) (NOT NULL) + * + * Returns: error on failure + */ +static int seq_show_profile(struct seq_file *f, void *p) +{ + struct aa_profile *profile = (struct aa_profile *)p; + struct aa_namespace *root = f->private; + + if (profile->ns != root) + seq_printf(f, ":%s://", aa_ns_name(root, profile->ns)); + seq_printf(f, "%s (%s)\n", profile->base.hname, + aa_profile_mode_names[profile->mode]); + + return 0; +} + +static const struct seq_operations aa_fs_profiles_op = { + .start = p_start, + .next = p_next, + .stop = p_stop, + .show = seq_show_profile, +}; + +static int profiles_open(struct inode *inode, struct file *file) +{ + return seq_open(file, &aa_fs_profiles_op); +} + +static int profiles_release(struct inode *inode, struct file *file) +{ + return seq_release(inode, file); +} + +static const struct file_operations aa_fs_profiles_fops = { + .open = profiles_open, + .read = seq_read, + .llseek = seq_lseek, + .release = profiles_release, +}; + + +/** Base file system setup **/ +static struct aa_fs_entry aa_fs_entry_file[] = { + AA_FS_FILE_STRING("mask", "create read write exec append mmap_exec " \ + "link lock"), + { } +}; + +static struct aa_fs_entry aa_fs_entry_domain[] = { + AA_FS_FILE_BOOLEAN("change_hat", 1), + AA_FS_FILE_BOOLEAN("change_hatv", 
1), + AA_FS_FILE_BOOLEAN("change_onexec", 1), + AA_FS_FILE_BOOLEAN("change_profile", 1), + { } +}; + +static struct aa_fs_entry aa_fs_entry_policy[] = { + AA_FS_FILE_BOOLEAN("set_load", 1), + {} +}; + +static struct aa_fs_entry aa_fs_entry_features[] = { + AA_FS_DIR("policy", aa_fs_entry_policy), + AA_FS_DIR("domain", aa_fs_entry_domain), + AA_FS_DIR("file", aa_fs_entry_file), + AA_FS_FILE_U64("capability", VFS_CAP_FLAGS_MASK), + AA_FS_DIR("rlimit", aa_fs_entry_rlimit), + AA_FS_DIR("caps", aa_fs_entry_caps), + { } +}; + +static struct aa_fs_entry aa_fs_entry_apparmor[] = { + AA_FS_FILE_FOPS(".load", 0640, &aa_fs_profile_load), + AA_FS_FILE_FOPS(".replace", 0640, &aa_fs_profile_replace), + AA_FS_FILE_FOPS(".remove", 0640, &aa_fs_profile_remove), + AA_FS_FILE_FOPS("profiles", 0640, &aa_fs_profiles_fops), + AA_FS_DIR("features", aa_fs_entry_features), + { } +}; + +static struct aa_fs_entry aa_fs_entry = + AA_FS_DIR("apparmor", aa_fs_entry_apparmor); + +/** + * aafs_create_file - create a file entry in the apparmor securityfs + * @fs_file: aa_fs_entry to build an entry for (NOT NULL) + * @parent: the parent dentry in the securityfs + * + * Use aafs_remove_file to remove entries created with this fn. + */ +static int __init aafs_create_file(struct aa_fs_entry *fs_file, + struct dentry *parent) +{ + int error = 0; + + fs_file->dentry = securityfs_create_file(fs_file->name, + S_IFREG | fs_file->mode, + parent, fs_file, + fs_file->file_ops); + if (IS_ERR(fs_file->dentry)) { + error = PTR_ERR(fs_file->dentry); + fs_file->dentry = NULL; + } + return error; +} + +static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir); +/** + * aafs_create_dir - recursively create a directory entry in the securityfs + * @fs_dir: aa_fs_entry (and all child entries) to build (NOT NULL) + * @parent: the parent dentry in the securityfs + * + * Use aafs_remove_dir to remove entries created with this fn. 
+ */ +static int __init aafs_create_dir(struct aa_fs_entry *fs_dir, + struct dentry *parent) +{ + struct aa_fs_entry *fs_file; + struct dentry *dir; + int error; + + dir = securityfs_create_dir(fs_dir->name, parent); + if (IS_ERR(dir)) + return PTR_ERR(dir); + fs_dir->dentry = dir; + + for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) { + if (fs_file->v_type == AA_FS_TYPE_DIR) + error = aafs_create_dir(fs_file, fs_dir->dentry); + else + error = aafs_create_file(fs_file, fs_dir->dentry); + if (error) + goto failed; + } + + return 0; + +failed: + aafs_remove_dir(fs_dir); + + return error; +} + +/** + * aafs_remove_file - drop a single file entry in the apparmor securityfs + * @fs_file: aa_fs_entry to detach from the securityfs (NOT NULL) + */ +static void __init aafs_remove_file(struct aa_fs_entry *fs_file) +{ + if (!fs_file->dentry) + return; + + securityfs_remove(fs_file->dentry); + fs_file->dentry = NULL; +} + +/** + * aafs_remove_dir - recursively drop a directory entry from the securityfs + * @fs_dir: aa_fs_entry (and all child entries) to detach (NOT NULL) + */ +static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir) +{ + struct aa_fs_entry *fs_file; + + for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) { + if (fs_file->v_type == AA_FS_TYPE_DIR) + aafs_remove_dir(fs_file); + else + aafs_remove_file(fs_file); + } + + aafs_remove_file(fs_dir); } /** @@ -183,14 +922,7 @@ static int __init aafs_create(const char *name, int mask, */ void __init aa_destroy_aafs(void) { - if (aa_fs_dentry) { - aafs_remove(".remove"); - aafs_remove(".replace"); - aafs_remove(".load"); - - securityfs_remove(aa_fs_dentry); - aa_fs_dentry = NULL; - } + aafs_remove_dir(&aa_fs_entry); } /** @@ -200,32 +932,25 @@ void __init aa_destroy_aafs(void) * * Returns: error on failure */ -int __init aa_create_aafs(void) +static int __init aa_create_aafs(void) { int error; if (!apparmor_initialized) return 0; - if (aa_fs_dentry) { + if (aa_fs_entry.dentry) { AA_ERROR("%s: AppArmor securityfs already exists\n", __func__); return -EEXIST; } - aa_fs_dentry = securityfs_create_dir("apparmor", NULL); - if (IS_ERR(aa_fs_dentry)) { - error = PTR_ERR(aa_fs_dentry); - aa_fs_dentry = NULL; - goto error; - } - - error = aafs_create(".load", 0640, &aa_fs_profile_load); - if (error) - goto error; - error = aafs_create(".replace", 0640, &aa_fs_profile_replace); + /* Populate fs tree. 
*/ + error = aafs_create_dir(&aa_fs_entry, NULL); if (error) goto error; - error = aafs_create(".remove", 0640, &aa_fs_profile_remove); + + error = __aa_fs_namespace_mkdir(root_ns, aa_fs_entry.dentry, + "policy"); if (error) goto error; diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c index 96502b22b26..89c78658031 100644 --- a/security/apparmor/audit.c +++ b/security/apparmor/audit.c @@ -19,7 +19,7 @@ #include "include/audit.h" #include "include/policy.h" -const char *op_table[] = { +const char *const op_table[] = { "null", "sysctl", @@ -73,7 +73,7 @@ const char *op_table[] = { "profile_remove" }; -const char *audit_mode_names[] = { +const char *const audit_mode_names[] = { "normal", "quiet_denied", "quiet", @@ -81,14 +81,15 @@ const char *audit_mode_names[] = { "all" }; -static char *aa_audit_type[] = { +static const char *const aa_audit_type[] = { "AUDIT", "ALLOWED", "DENIED", "HINT", "STATUS", "ERROR", - "KILLED" + "KILLED", + "AUTO" }; /* @@ -110,32 +111,26 @@ static char *aa_audit_type[] = { static void audit_pre(struct audit_buffer *ab, void *ca) { struct common_audit_data *sa = ca; - struct task_struct *tsk = sa->tsk ? sa->tsk : current; if (aa_g_audit_header) { audit_log_format(ab, "apparmor="); - audit_log_string(ab, aa_audit_type[sa->aad.type]); + audit_log_string(ab, aa_audit_type[sa->aad->type]); } - if (sa->aad.op) { + if (sa->aad->op) { audit_log_format(ab, " operation="); - audit_log_string(ab, op_table[sa->aad.op]); + audit_log_string(ab, op_table[sa->aad->op]); } - if (sa->aad.info) { + if (sa->aad->info) { audit_log_format(ab, " info="); - audit_log_string(ab, sa->aad.info); - if (sa->aad.error) - audit_log_format(ab, " error=%d", sa->aad.error); + audit_log_string(ab, sa->aad->info); + if (sa->aad->error) + audit_log_format(ab, " error=%d", sa->aad->error); } - if (sa->aad.profile) { - struct aa_profile *profile = sa->aad.profile; - pid_t pid; - rcu_read_lock(); - pid = tsk->real_parent->pid; - rcu_read_unlock(); - audit_log_format(ab, " parent=%d", pid); + if (sa->aad->profile) { + struct aa_profile *profile = sa->aad->profile; if (profile->ns != root_ns) { audit_log_format(ab, " namespace="); audit_log_untrustedstring(ab, profile->ns->base.hname); @@ -144,9 +139,9 @@ static void audit_pre(struct audit_buffer *ab, void *ca) audit_log_untrustedstring(ab, profile->base.hname); } - if (sa->aad.name) { + if (sa->aad->name) { audit_log_format(ab, " name="); - audit_log_untrustedstring(ab, sa->aad.name); + audit_log_untrustedstring(ab, sa->aad->name); } } @@ -158,10 +153,8 @@ static void audit_pre(struct audit_buffer *ab, void *ca) void aa_audit_msg(int type, struct common_audit_data *sa, void (*cb) (struct audit_buffer *, void *)) { - sa->aad.type = type; - sa->lsm_pre_audit = audit_pre; - sa->lsm_post_audit = cb; - common_lsm_audit(sa); + sa->aad->type = type; + common_lsm_audit(sa, audit_pre, cb); } /** @@ -183,7 +176,7 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, BUG_ON(!profile); if (type == AUDIT_APPARMOR_AUTO) { - if (likely(!sa->aad.error)) { + if (likely(!sa->aad->error)) { if (AUDIT_MODE(profile) != AUDIT_ALL) return 0; type = AUDIT_APPARMOR_AUDIT; @@ -195,21 +188,22 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp, if (AUDIT_MODE(profile) == AUDIT_QUIET || (type == AUDIT_APPARMOR_DENIED && AUDIT_MODE(profile) == AUDIT_QUIET)) - return sa->aad.error; + return sa->aad->error; if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED) type = AUDIT_APPARMOR_KILL; if (!unconfined(profile)) - sa->aad.profile = profile; 
+ sa->aad->profile = profile; aa_audit_msg(type, sa, cb); - if (sa->aad.type == AUDIT_APPARMOR_KILL) - (void)send_sig_info(SIGKILL, NULL, sa->tsk ? sa->tsk : current); + if (sa->aad->type == AUDIT_APPARMOR_KILL) + (void)send_sig_info(SIGKILL, NULL, + sa->u.tsk ? sa->u.tsk : current); - if (sa->aad.type == AUDIT_APPARMOR_ALLOWED) - return complain_error(sa->aad.error); + if (sa->aad->type == AUDIT_APPARMOR_ALLOWED) + return complain_error(sa->aad->error); - return sa->aad.error; + return sa->aad->error; } diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c index 9982c48def4..1101c6f64bb 100644 --- a/security/apparmor/capability.c +++ b/security/apparmor/capability.c @@ -27,6 +27,11 @@ */ #include "capability_names.h" +struct aa_fs_entry aa_fs_entry_caps[] = { + AA_FS_FILE_STRING("mask", AA_FS_CAPS_MASK), + { } +}; + struct audit_cache { struct aa_profile *profile; kernel_cap_t caps; @@ -48,8 +53,7 @@ static void audit_cb(struct audit_buffer *ab, void *va) /** * audit_caps - audit a capability - * @profile: profile confining task (NOT NULL) - * @task: task capability test was performed against (NOT NULL) + * @profile: profile being tested for confinement (NOT NULL) * @cap: capability tested * @error: error code returned by test * @@ -58,17 +62,17 @@ static void audit_cb(struct audit_buffer *ab, void *va) * * Returns: 0 or sa->error on success, error code on failure */ -static int audit_caps(struct aa_profile *profile, struct task_struct *task, - int cap, int error) +static int audit_caps(struct aa_profile *profile, int cap, int error) { struct audit_cache *ent; int type = AUDIT_APPARMOR_AUTO; struct common_audit_data sa; - COMMON_AUDIT_DATA_INIT(&sa, CAP); - sa.tsk = task; + struct apparmor_audit_data aad = {0,}; + sa.type = LSM_AUDIT_DATA_CAP; + sa.aad = &aad; sa.u.cap = cap; - sa.aad.op = OP_CAPABLE; - sa.aad.error = error; + sa.aad->op = OP_CAPABLE; + sa.aad->error = error; if (likely(!error)) { /* test if auditing is being forced */ @@ -117,8 +121,7 @@ static int profile_capable(struct aa_profile *profile, int cap) /** * aa_capable - test permission to use capability - * @task: task doing capability test against (NOT NULL) - * @profile: profile confining @task (NOT NULL) + * @profile: profile being tested against (NOT NULL) * @cap: capability to be tested * @audit: whether an audit record should be generated * @@ -126,8 +129,7 @@ static int profile_capable(struct aa_profile *profile, int cap) * * Returns: 0 on success, or else an error code. 
*/ -int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap, - int audit) +int aa_capable(struct aa_profile *profile, int cap, int audit) { int error = profile_capable(profile, cap); @@ -137,5 +139,5 @@ int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap, return error; } - return audit_caps(profile, task, cap, error); + return audit_caps(profile, cap, error); } diff --git a/security/apparmor/context.c b/security/apparmor/context.c index 8a9b5027c81..3064c6ced87 100644 --- a/security/apparmor/context.c +++ b/security/apparmor/context.c @@ -69,6 +69,23 @@ void aa_dup_task_context(struct aa_task_cxt *new, const struct aa_task_cxt *old) } /** + * aa_get_task_profile - Get another task's profile + * @task: task to query (NOT NULL) + * + * Returns: counted reference to @task's profile + */ +struct aa_profile *aa_get_task_profile(struct task_struct *task) +{ + struct aa_profile *p; + + rcu_read_lock(); + p = aa_get_profile(__aa_task_profile(task)); + rcu_read_unlock(); + + return p; +} + +/** * aa_replace_current_profile - replace the current tasks profiles * @profile: new profile (NOT NULL) * @@ -76,7 +93,7 @@ void aa_dup_task_context(struct aa_task_cxt *new, const struct aa_task_cxt *old) */ int aa_replace_current_profile(struct aa_profile *profile) { - struct aa_task_cxt *cxt = current_cred()->security; + struct aa_task_cxt *cxt = current_cxt(); struct cred *new; BUG_ON(!profile); @@ -87,21 +104,17 @@ int aa_replace_current_profile(struct aa_profile *profile) if (!new) return -ENOMEM; - cxt = new->security; - if (unconfined(profile) || (cxt->profile->ns != profile->ns)) { + cxt = cred_cxt(new); + if (unconfined(profile) || (cxt->profile->ns != profile->ns)) /* if switching to unconfined or a different profile namespace * clear out context state */ - aa_put_profile(cxt->previous); - aa_put_profile(cxt->onexec); - cxt->previous = NULL; - cxt->onexec = NULL; - cxt->token = 0; - } + aa_clear_task_cxt_trans(cxt); + /* be careful switching cxt->profile, when racing replacement it - * is possible that cxt->profile->replacedby is the reference keeping - * @profile valid, so make sure to get its reference before dropping - * the reference on cxt->profile */ + * is possible that cxt->profile->replacedby->profile is the reference + * keeping @profile valid, so make sure to get its reference before + * dropping the reference on cxt->profile */ aa_get_profile(profile); aa_put_profile(cxt->profile); cxt->profile = profile; @@ -123,7 +136,7 @@ int aa_set_current_onexec(struct aa_profile *profile) if (!new) return -ENOMEM; - cxt = new->security; + cxt = cred_cxt(new); aa_get_profile(profile); aa_put_profile(cxt->onexec); cxt->onexec = profile; @@ -150,7 +163,7 @@ int aa_set_current_hat(struct aa_profile *profile, u64 token) return -ENOMEM; BUG_ON(!profile); - cxt = new->security; + cxt = cred_cxt(new); if (!cxt->previous) { /* transfer refcount */ cxt->previous = cxt->profile; @@ -162,7 +175,7 @@ int aa_set_current_hat(struct aa_profile *profile, u64 token) abort_creds(new); return -EACCES; } - cxt->profile = aa_get_profile(aa_newest_version(profile)); + cxt->profile = aa_get_newest_profile(profile); /* clear exec on switching context */ aa_put_profile(cxt->onexec); cxt->onexec = NULL; @@ -187,7 +200,7 @@ int aa_restore_previous_profile(u64 token) if (!new) return -ENOMEM; - cxt = new->security; + cxt = cred_cxt(new); if (cxt->token != token) { abort_creds(new); return -EACCES; @@ -199,17 +212,10 @@ int aa_restore_previous_profile(u64 token) } 
aa_put_profile(cxt->profile); - cxt->profile = aa_newest_version(cxt->previous); + cxt->profile = aa_get_newest_profile(cxt->previous); BUG_ON(!cxt->profile); - if (unlikely(cxt->profile != cxt->previous)) { - aa_get_profile(cxt->profile); - aa_put_profile(cxt->previous); - } /* clear exec && prev information when restoring to previous context */ - cxt->previous = NULL; - cxt->token = 0; - aa_put_profile(cxt->onexec); - cxt->onexec = NULL; + aa_clear_task_cxt_trans(cxt); commit_creds(new); return 0; diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c new file mode 100644 index 00000000000..532471d0b3a --- /dev/null +++ b/security/apparmor/crypto.c @@ -0,0 +1,95 @@ +/* + * AppArmor security module + * + * This file contains AppArmor policy loading interface function definitions. + * + * Copyright 2013 Canonical Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * Fns to provide a checksum of policy that has been loaded this can be + * compared to userspace policy compiles to check loaded policy is what + * it should be. + */ + +#include <crypto/hash.h> + +#include "include/apparmor.h" +#include "include/crypto.h" + +static unsigned int apparmor_hash_size; + +static struct crypto_shash *apparmor_tfm; + +unsigned int aa_hash_size(void) +{ + return apparmor_hash_size; +} + +int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, + size_t len) +{ + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(apparmor_tfm)]; + } desc; + int error = -ENOMEM; + u32 le32_version = cpu_to_le32(version); + + if (!apparmor_tfm) + return 0; + + profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL); + if (!profile->hash) + goto fail; + + desc.shash.tfm = apparmor_tfm; + desc.shash.flags = 0; + + error = crypto_shash_init(&desc.shash); + if (error) + goto fail; + error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4); + if (error) + goto fail; + error = crypto_shash_update(&desc.shash, (u8 *) start, len); + if (error) + goto fail; + error = crypto_shash_final(&desc.shash, profile->hash); + if (error) + goto fail; + + return 0; + +fail: + kfree(profile->hash); + profile->hash = NULL; + + return error; +} + +static int __init init_profile_hash(void) +{ + struct crypto_shash *tfm; + + if (!apparmor_initialized) + return 0; + + tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + int error = PTR_ERR(tfm); + AA_ERROR("failed to setup profile sha1 hashing: %d\n", error); + return error; + } + apparmor_tfm = tfm; + apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm); + + aa_info_message("AppArmor sha1 policy hashing enabled"); + + return 0; +} + +late_initcall(init_profile_hash); diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c index c825c6e0b63..452567d3a08 100644 --- a/security/apparmor/domain.c +++ b/security/apparmor/domain.c @@ -50,40 +50,34 @@ void aa_free_domain_entries(struct aa_domain *domain) /** * may_change_ptraced_domain - check if can change profile on ptraced task - * @task: task we want to change profile of (NOT NULL) * @to_profile: profile to change to (NOT NULL) * - * Check if the task is ptraced and if so if the tracing task is allowed + * Check if current is ptraced and if so if the tracing task is allowed * to trace the new domain * * Returns: %0 or error if change not allowed */ -static int 
may_change_ptraced_domain(struct task_struct *task, - struct aa_profile *to_profile) +static int may_change_ptraced_domain(struct aa_profile *to_profile) { struct task_struct *tracer; - const struct cred *cred = NULL; struct aa_profile *tracerp = NULL; int error = 0; rcu_read_lock(); - tracer = tracehook_tracer_task(task); - if (tracer) { + tracer = ptrace_parent(current); + if (tracer) /* released below */ - cred = get_task_cred(tracer); - tracerp = aa_cred_profile(cred); - } - rcu_read_unlock(); + tracerp = aa_get_task_profile(tracer); /* not ptraced */ if (!tracer || unconfined(tracerp)) goto out; - error = aa_may_ptrace(tracer, tracerp, to_profile, PTRACE_MODE_ATTACH); + error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH); out: - if (cred) - put_cred(cred); + rcu_read_unlock(); + aa_put_profile(tracerp); return error; } @@ -148,7 +142,7 @@ static struct aa_profile *__attach_match(const char *name, int len = 0; struct aa_profile *profile, *candidate = NULL; - list_for_each_entry(profile, head, base.list) { + list_for_each_entry_rcu(profile, head, base.list) { if (profile->flags & PFLAG_NULL) continue; if (profile->xmatch && profile->xmatch_len > len) { @@ -181,9 +175,9 @@ static struct aa_profile *find_attach(struct aa_namespace *ns, { struct aa_profile *profile; - read_lock(&ns->lock); + rcu_read_lock(); profile = aa_get_profile(__attach_match(name, list)); - read_unlock(&ns->lock); + rcu_read_unlock(); return profile; } @@ -349,8 +343,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) unsigned int state; struct file_perms perms = {}; struct path_cond cond = { - bprm->file->f_path.dentry->d_inode->i_uid, - bprm->file->f_path.dentry->d_inode->i_mode + file_inode(bprm->file)->i_uid, + file_inode(bprm->file)->i_mode }; const char *name = NULL, *target = NULL, *info = NULL; int error = cap_bprm_set_creds(bprm); @@ -360,10 +354,10 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) if (bprm->cred_prepared) return 0; - cxt = bprm->cred->security; + cxt = cred_cxt(bprm->cred); BUG_ON(!cxt); - profile = aa_get_profile(aa_newest_version(cxt->profile)); + profile = aa_get_newest_profile(cxt->profile); /* * get the namespace from the replacement profile as replacement * can change the namespace @@ -372,13 +366,12 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) state = profile->file.start; /* buffer freed below, name is pointer into buffer */ - error = aa_get_name(&bprm->file->f_path, profile->path_flags, &buffer, - &name); + error = aa_path_name(&bprm->file->f_path, profile->path_flags, &buffer, + &name, &info); if (error) { - if (profile->flags & - (PFLAG_IX_ON_NAME_ERROR | PFLAG_UNCONFINED)) + if (unconfined(profile) || + (profile->flags & PFLAG_IX_ON_NAME_ERROR)) error = 0; - info = "Exec failed name resolution"; name = bprm->filename; goto audit; } @@ -395,6 +388,11 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) new_profile = find_attach(ns, &ns->base.profiles, name); if (!new_profile) goto cleanup; + /* + * NOTE: Domain transitions from unconfined are allowed + * even when no_new_privs is set because this aways results + * in a further reduction of permissions. 
+ */ goto apply; } @@ -411,12 +409,13 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) * exec\0change_profile */ state = aa_dfa_null_transition(profile->file.dfa, state); - cp = change_profile_perms(profile, cxt->onexec->ns, name, + cp = change_profile_perms(profile, cxt->onexec->ns, + cxt->onexec->base.name, AA_MAY_ONEXEC, state); if (!(cp.allow & AA_MAY_ONEXEC)) goto audit; - new_profile = aa_get_profile(aa_newest_version(cxt->onexec)); + new_profile = aa_get_newest_profile(cxt->onexec); goto apply; } @@ -433,11 +432,13 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) new_profile = aa_get_profile(profile); goto x_clear; } else if (perms.xindex & AA_X_UNCONFINED) { - new_profile = aa_get_profile(ns->unconfined); + new_profile = aa_get_newest_profile(ns->unconfined); info = "ux fallback"; } else { error = -ENOENT; info = "profile not found"; + /* remove MAY_EXEC to audit as failure */ + perms.allow &= ~MAY_EXEC; } } } else if (COMPLAIN_MODE(profile)) { @@ -455,6 +456,16 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) /* fail exec */ error = -EACCES; + /* + * Policy has specified a domain transition, if no_new_privs then + * fail the exec. + */ + if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) { + aa_put_profile(new_profile); + error = -EPERM; + goto cleanup; + } + if (!new_profile) goto audit; @@ -464,7 +475,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm) } if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { - error = may_change_ptraced_domain(current, new_profile); + error = may_change_ptraced_domain(new_profile); if (error) { aa_put_profile(new_profile); goto audit; @@ -499,11 +510,7 @@ x_clear: cxt->profile = new_profile; /* clear out all temporary/transitional state from the context */ - aa_put_profile(cxt->previous); - aa_put_profile(cxt->onexec); - cxt->previous = NULL; - cxt->onexec = NULL; - cxt->token = 0; + aa_clear_task_cxt_trans(cxt); audit: error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC, @@ -542,7 +549,7 @@ int apparmor_bprm_secureexec(struct linux_binprm *bprm) void apparmor_bprm_committing_creds(struct linux_binprm *bprm) { struct aa_profile *profile = __aa_current_profile(); - struct aa_task_cxt *new_cxt = bprm->cred->security; + struct aa_task_cxt *new_cxt = cred_cxt(bprm->cred); /* bail out if unconfined or not changing profile */ if ((new_cxt->profile == profile) || @@ -609,9 +616,17 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) const char *target = NULL, *info = NULL; int error = 0; + /* + * Fail explicitly requested domain transitions if no_new_privs. + * There is no exception for unconfined as change_hat is not + * available. + */ + if (current->no_new_privs) + return -EPERM; + /* released below */ cred = get_current_cred(); - cxt = cred->security; + cxt = cred_cxt(cred); profile = aa_cred_profile(cred); previous_profile = cxt->previous; @@ -624,7 +639,10 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) if (count) { /* attempting to change into a new hat or switch to a sibling */ struct aa_profile *root; - root = PROFILE_IS_HAT(profile) ? 
profile->parent : profile; + if (PROFILE_IS_HAT(profile)) + root = aa_get_profile_rcu(&profile->parent); + else + root = aa_get_profile(profile); /* find first matching hat */ for (i = 0; i < count && !hat; i++) @@ -636,6 +654,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) error = -ECHILD; else error = -ENOENT; + aa_put_profile(root); goto out; } @@ -650,6 +669,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) /* freed below */ name = new_compound_name(root->base.hname, hats[0]); + aa_put_profile(root); target = name; /* released below */ hat = aa_new_null_profile(profile, 1); @@ -659,6 +679,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) goto audit; } } else { + aa_put_profile(root); target = hat->base.hname; if (!PROFILE_IS_HAT(hat)) { info = "target not hat"; @@ -667,7 +688,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest) } } - error = may_change_ptraced_domain(current, hat); + error = may_change_ptraced_domain(hat); if (error) { info = "ptraced"; error = -EPERM; @@ -698,7 +719,7 @@ audit: if (!permtest) error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_CHANGE_HAT, AA_MAY_CHANGEHAT, NULL, - target, 0, info, error); + target, GLOBAL_ROOT_UID, info, error); out: aa_put_profile(hat); @@ -727,7 +748,6 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec, bool permtest) { const struct cred *cred; - struct aa_task_cxt *cxt; struct aa_profile *profile, *target = NULL; struct aa_namespace *ns = NULL; struct file_perms perms = {}; @@ -747,9 +767,20 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec, } cred = get_current_cred(); - cxt = cred->security; profile = aa_cred_profile(cred); + /* + * Fail explicitly requested domain transitions if no_new_privs + * and not unconfined. + * Domain transitions from unconfined are allowed even when + * no_new_privs is set because this aways results in a reduction + * of permissions. 
+ */ + if (current->no_new_privs && !unconfined(profile)) { + put_cred(cred); + return -EPERM; + } + if (ns_name) { /* released below */ ns = aa_find_namespace(profile->ns, ns_name); @@ -796,7 +827,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec, } /* check if tracing task is allowed to trace target domain */ - error = may_change_ptraced_domain(current, target); + error = may_change_ptraced_domain(target); if (error) { info = "ptrace prevents transition"; goto audit; @@ -813,7 +844,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec, audit: if (!permtest) error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request, - name, hname, 0, info, error); + name, hname, GLOBAL_ROOT_UID, info, error); aa_put_namespace(ns); aa_put_profile(target); diff --git a/security/apparmor/file.c b/security/apparmor/file.c index 7312db74121..fdaa50cb187 100644 --- a/security/apparmor/file.c +++ b/security/apparmor/file.c @@ -65,24 +65,26 @@ static void audit_file_mask(struct audit_buffer *ab, u32 mask) static void file_audit_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; - uid_t fsuid = current_fsuid(); + kuid_t fsuid = current_fsuid(); - if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) { + if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) { audit_log_format(ab, " requested_mask="); - audit_file_mask(ab, sa->aad.fs.request); + audit_file_mask(ab, sa->aad->fs.request); } - if (sa->aad.fs.denied & AA_AUDIT_FILE_MASK) { + if (sa->aad->fs.denied & AA_AUDIT_FILE_MASK) { audit_log_format(ab, " denied_mask="); - audit_file_mask(ab, sa->aad.fs.denied); + audit_file_mask(ab, sa->aad->fs.denied); } - if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) { - audit_log_format(ab, " fsuid=%d", fsuid); - audit_log_format(ab, " ouid=%d", sa->aad.fs.ouid); + if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) { + audit_log_format(ab, " fsuid=%d", + from_kuid(&init_user_ns, fsuid)); + audit_log_format(ab, " ouid=%d", + from_kuid(&init_user_ns, sa->aad->fs.ouid)); } - if (sa->aad.fs.target) { + if (sa->aad->fs.target) { audit_log_format(ab, " target="); - audit_log_untrustedstring(ab, sa->aad.fs.target); + audit_log_untrustedstring(ab, sa->aad->fs.target); } } @@ -103,49 +105,51 @@ static void file_audit_cb(struct audit_buffer *ab, void *va) */ int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, gfp_t gfp, int op, u32 request, const char *name, - const char *target, uid_t ouid, const char *info, int error) + const char *target, kuid_t ouid, const char *info, int error) { int type = AUDIT_APPARMOR_AUTO; struct common_audit_data sa; - COMMON_AUDIT_DATA_INIT(&sa, NONE); - sa.aad.op = op, - sa.aad.fs.request = request; - sa.aad.name = name; - sa.aad.fs.target = target; - sa.aad.fs.ouid = ouid; - sa.aad.info = info; - sa.aad.error = error; - - if (likely(!sa.aad.error)) { + struct apparmor_audit_data aad = {0,}; + sa.type = LSM_AUDIT_DATA_NONE; + sa.aad = &aad; + aad.op = op, + aad.fs.request = request; + aad.name = name; + aad.fs.target = target; + aad.fs.ouid = ouid; + aad.info = info; + aad.error = error; + + if (likely(!sa.aad->error)) { u32 mask = perms->audit; if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL)) mask = 0xffff; /* mask off perms that are not being force audited */ - sa.aad.fs.request &= mask; + sa.aad->fs.request &= mask; - if (likely(!sa.aad.fs.request)) + if (likely(!sa.aad->fs.request)) return 0; type = AUDIT_APPARMOR_AUDIT; } else { /* only report permissions that were denied */ - sa.aad.fs.request = sa.aad.fs.request & 
~perms->allow; + sa.aad->fs.request = sa.aad->fs.request & ~perms->allow; - if (sa.aad.fs.request & perms->kill) + if (sa.aad->fs.request & perms->kill) type = AUDIT_APPARMOR_KILL; /* quiet known rejects, assumes quiet and kill do not overlap */ - if ((sa.aad.fs.request & perms->quiet) && + if ((sa.aad->fs.request & perms->quiet) && AUDIT_MODE(profile) != AUDIT_NOQUIET && AUDIT_MODE(profile) != AUDIT_ALL) - sa.aad.fs.request &= ~perms->quiet; + sa.aad->fs.request &= ~perms->quiet; - if (!sa.aad.fs.request) - return COMPLAIN_MODE(profile) ? 0 : sa.aad.error; + if (!sa.aad->fs.request) + return COMPLAIN_MODE(profile) ? 0 : sa.aad->error; } - sa.aad.fs.denied = sa.aad.fs.request & ~perms->allow; + sa.aad->fs.denied = sa.aad->fs.request & ~perms->allow; return aa_audit(type, profile, gfp, &sa, file_audit_cb); } @@ -173,8 +177,6 @@ static u32 map_old_perms(u32 old) if (old & 0x40) /* AA_EXEC_MMAP */ new |= AA_EXEC_MMAP; - new |= AA_MAY_META_READ; - return new; } @@ -201,7 +203,7 @@ static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state, */ perms.kill = 0; - if (current_fsuid() == cond->uid) { + if (uid_eq(current_fsuid(), cond->uid)) { perms.allow = map_old_perms(dfa_user_allow(dfa, state)); perms.audit = map_old_perms(dfa_user_audit(dfa, state)); perms.quiet = map_old_perms(dfa_user_quiet(dfa, state)); @@ -212,10 +214,13 @@ static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state, perms.quiet = map_old_perms(dfa_other_quiet(dfa, state)); perms.xindex = dfa_other_xindex(dfa, state); } + perms.allow |= AA_MAY_META_READ; /* change_profile wasn't determined by ownership in old mapping */ if (ACCEPT_TABLE(dfa)[state] & 0x80000000) perms.allow |= AA_MAY_CHANGE_PROFILE; + if (ACCEPT_TABLE(dfa)[state] & 0x40000000) + perms.allow |= AA_MAY_ONEXEC; return perms; } @@ -279,22 +284,16 @@ int aa_path_perm(int op, struct aa_profile *profile, struct path *path, int error; flags |= profile->path_flags | (S_ISDIR(cond->mode) ? 
PATH_IS_DIR : 0); - error = aa_get_name(path, flags, &buffer, &name); + error = aa_path_name(path, flags, &buffer, &name, &info); if (error) { if (error == -ENOENT && is_deleted(path->dentry)) { /* Access to open files that are deleted are * give a pass (implicit delegation) */ error = 0; + info = NULL; perms.allow = request; - } else if (error == -ENOENT) - info = "Failed name lookup - deleted entry"; - else if (error == -ESTALE) - info = "Failed name lookup - disconnected path"; - else if (error == -ENAMETOOLONG) - info = "Failed name lookup - name too long"; - else - info = "Failed name lookup"; + } } else { aa_str_perms(profile->file.dfa, profile->file.start, name, cond, &perms); @@ -365,12 +364,14 @@ int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry, lperms = nullperms; /* buffer freed below, lname is pointer in buffer */ - error = aa_get_name(&link, profile->path_flags, &buffer, &lname); + error = aa_path_name(&link, profile->path_flags, &buffer, &lname, + &info); if (error) goto audit; /* buffer2 freed below, tname is pointer in buffer2 */ - error = aa_get_name(&target, profile->path_flags, &buffer2, &tname); + error = aa_path_name(&target, profile->path_flags, &buffer2, &tname, + &info); if (error) goto audit; @@ -448,8 +449,8 @@ int aa_file_perm(int op, struct aa_profile *profile, struct file *file, u32 request) { struct path_cond cond = { - .uid = file->f_path.dentry->d_inode->i_uid, - .mode = file->f_path.dentry->d_inode->i_mode + .uid = file_inode(file)->i_uid, + .mode = file_inode(file)->i_mode }; return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED, diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h index 38ccaea0820..97130f88838 100644 --- a/security/apparmor/include/apparmor.h +++ b/security/apparmor/include/apparmor.h @@ -15,17 +15,31 @@ #ifndef __APPARMOR_H #define __APPARMOR_H +#include <linux/slab.h> #include <linux/fs.h> #include "match.h" +/* + * Class of mediation types in the AppArmor policy db + */ +#define AA_CLASS_ENTRY 0 +#define AA_CLASS_UNKNOWN 1 +#define AA_CLASS_FILE 2 +#define AA_CLASS_CAP 3 +#define AA_CLASS_NET 4 +#define AA_CLASS_RLIMITS 5 +#define AA_CLASS_DOMAIN 6 + +#define AA_CLASS_LAST AA_CLASS_DOMAIN + /* Control parameters settable through module/boot flags */ extern enum audit_mode aa_g_audit; -extern int aa_g_audit_header; -extern int aa_g_debug; -extern int aa_g_lock_policy; -extern int aa_g_logsyscall; -extern int aa_g_paranoid_load; +extern bool aa_g_audit_header; +extern bool aa_g_debug; +extern bool aa_g_lock_policy; +extern bool aa_g_logsyscall; +extern bool aa_g_paranoid_load; extern unsigned int aa_g_path_max; /* @@ -51,9 +65,23 @@ extern int apparmor_initialized __initdata; /* fn's in lib */ char *aa_split_fqname(char *args, char **ns_name); void aa_info_message(const char *str); -void *kvmalloc(size_t size); -void kvfree(void *buffer); +void *__aa_kvmalloc(size_t size, gfp_t flags); + +static inline void *kvmalloc(size_t size) +{ + return __aa_kvmalloc(size, 0); +} + +static inline void *kvzalloc(size_t size) +{ + return __aa_kvmalloc(size, __GFP_ZERO); +} +/* returns 0 if kref not incremented */ +static inline int kref_get_not0(struct kref *kref) +{ + return atomic_inc_not_zero(&kref->refcount); +} /** * aa_strneq - compare null terminated @str to a non null terminated substring @@ -81,7 +109,7 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa, unsigned int start) { /* the null transition only needs the string's null terminator byte */ - 
return aa_dfa_match_len(dfa, start, "", 1); + return aa_dfa_next(dfa, start, 0); } static inline bool mediated_filesystem(struct inode *inode) diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h index cb1e93a114d..414e56878dd 100644 --- a/security/apparmor/include/apparmorfs.h +++ b/security/apparmor/include/apparmorfs.h @@ -15,6 +15,90 @@ #ifndef __AA_APPARMORFS_H #define __AA_APPARMORFS_H +enum aa_fs_type { + AA_FS_TYPE_BOOLEAN, + AA_FS_TYPE_STRING, + AA_FS_TYPE_U64, + AA_FS_TYPE_FOPS, + AA_FS_TYPE_DIR, +}; + +struct aa_fs_entry; + +struct aa_fs_entry { + const char *name; + struct dentry *dentry; + umode_t mode; + enum aa_fs_type v_type; + union { + bool boolean; + char *string; + unsigned long u64; + struct aa_fs_entry *files; + } v; + const struct file_operations *file_ops; +}; + +extern const struct file_operations aa_fs_seq_file_ops; + +#define AA_FS_FILE_BOOLEAN(_name, _value) \ + { .name = (_name), .mode = 0444, \ + .v_type = AA_FS_TYPE_BOOLEAN, .v.boolean = (_value), \ + .file_ops = &aa_fs_seq_file_ops } +#define AA_FS_FILE_STRING(_name, _value) \ + { .name = (_name), .mode = 0444, \ + .v_type = AA_FS_TYPE_STRING, .v.string = (_value), \ + .file_ops = &aa_fs_seq_file_ops } +#define AA_FS_FILE_U64(_name, _value) \ + { .name = (_name), .mode = 0444, \ + .v_type = AA_FS_TYPE_U64, .v.u64 = (_value), \ + .file_ops = &aa_fs_seq_file_ops } +#define AA_FS_FILE_FOPS(_name, _mode, _fops) \ + { .name = (_name), .v_type = AA_FS_TYPE_FOPS, \ + .mode = (_mode), .file_ops = (_fops) } +#define AA_FS_DIR(_name, _value) \ + { .name = (_name), .v_type = AA_FS_TYPE_DIR, .v.files = (_value) } + extern void __init aa_destroy_aafs(void); +struct aa_profile; +struct aa_namespace; + +enum aafs_ns_type { + AAFS_NS_DIR, + AAFS_NS_PROFS, + AAFS_NS_NS, + AAFS_NS_COUNT, + AAFS_NS_MAX_COUNT, + AAFS_NS_SIZE, + AAFS_NS_MAX_SIZE, + AAFS_NS_OWNER, + AAFS_NS_SIZEOF, +}; + +enum aafs_prof_type { + AAFS_PROF_DIR, + AAFS_PROF_PROFS, + AAFS_PROF_NAME, + AAFS_PROF_MODE, + AAFS_PROF_ATTACH, + AAFS_PROF_HASH, + AAFS_PROF_SIZEOF, +}; + +#define ns_dir(X) ((X)->dents[AAFS_NS_DIR]) +#define ns_subns_dir(X) ((X)->dents[AAFS_NS_NS]) +#define ns_subprofs_dir(X) ((X)->dents[AAFS_NS_PROFS]) + +#define prof_dir(X) ((X)->dents[AAFS_PROF_DIR]) +#define prof_child_dir(X) ((X)->dents[AAFS_PROF_PROFS]) + +void __aa_fs_profile_rmdir(struct aa_profile *profile); +void __aa_fs_profile_migrate_dents(struct aa_profile *old, + struct aa_profile *new); +int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent); +void __aa_fs_namespace_rmdir(struct aa_namespace *ns); +int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent, + const char *name); + #endif /* __AA_APPARMORFS_H */ diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h index 1951786d32e..ba3dfd17f23 100644 --- a/security/apparmor/include/audit.h +++ b/security/apparmor/include/audit.h @@ -25,11 +25,8 @@ struct aa_profile; -extern const char *audit_mode_names[]; +extern const char *const audit_mode_names[]; #define AUDIT_MAX_INDEX 5 - -#define AUDIT_APPARMOR_AUTO 0 /* auto choose audit message type */ - enum audit_mode { AUDIT_NORMAL, /* follow normal auditing of accesses */ AUDIT_QUIET_DENIED, /* quiet all denied access messages */ @@ -45,10 +42,11 @@ enum audit_type { AUDIT_APPARMOR_HINT, AUDIT_APPARMOR_STATUS, AUDIT_APPARMOR_ERROR, - AUDIT_APPARMOR_KILL + AUDIT_APPARMOR_KILL, + AUDIT_APPARMOR_AUTO }; -extern const char *op_table[]; +extern const char *const op_table[]; enum 
aa_ops { OP_NULL, @@ -104,7 +102,33 @@ enum aa_ops { }; -/* define a short hand for apparmor_audit_data portion of common_audit_data */ +struct apparmor_audit_data { + int error; + int op; + int type; + void *profile; + const char *name; + const char *info; + union { + void *target; + struct { + long pos; + void *target; + } iface; + struct { + int rlim; + unsigned long max; + } rlim; + struct { + const char *target; + u32 request; + u32 denied; + kuid_t ouid; + } fs; + }; +}; + +/* define a short hand for apparmor_audit_data structure */ #define aad apparmor_audit_data void aa_audit_msg(int type, struct common_audit_data *sa, diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h index c24d2959ea0..fc3fa381d85 100644 --- a/security/apparmor/include/capability.h +++ b/security/apparmor/include/capability.h @@ -4,7 +4,7 @@ * This file contains AppArmor capability mediation definitions. * * Copyright (C) 1998-2008 Novell/SUSE - * Copyright 2009-2010 Canonical Ltd. + * Copyright 2009-2013 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -17,6 +17,8 @@ #include <linux/sched.h> +#include "apparmorfs.h" + struct aa_profile; /* aa_caps - confinement data for capabilities @@ -34,8 +36,9 @@ struct aa_caps { kernel_cap_t extended; }; -int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap, - int audit); +extern struct aa_fs_entry aa_fs_entry_caps[]; + +int aa_capable(struct aa_profile *profile, int cap, int audit); static inline void aa_free_cap_rules(struct aa_caps *caps) { diff --git a/security/apparmor/include/context.h b/security/apparmor/include/context.h index a9cbee4d9e4..6bf65798e5d 100644 --- a/security/apparmor/include/context.h +++ b/security/apparmor/include/context.h @@ -21,6 +21,9 @@ #include "policy.h" +#define cred_cxt(X) (X)->security +#define current_cxt() cred_cxt(current_cred()) + /* struct aa_file_cxt - the AppArmor context the file was opened in * @perms: the permission the file was opened with * @@ -80,23 +83,8 @@ int aa_replace_current_profile(struct aa_profile *profile); int aa_set_current_onexec(struct aa_profile *profile); int aa_set_current_hat(struct aa_profile *profile, u64 token); int aa_restore_previous_profile(u64 cookie); +struct aa_profile *aa_get_task_profile(struct task_struct *task); -/** - * __aa_task_is_confined - determine if @task has any confinement - * @task: task to check confinement of (NOT NULL) - * - * If @task != current needs to be called in RCU safe critical section - */ -static inline bool __aa_task_is_confined(struct task_struct *task) -{ - struct aa_task_cxt *cxt = __task_cred(task)->security; - - BUG_ON(!cxt || !cxt->profile); - if (unconfined(aa_newest_version(cxt->profile))) - return 0; - - return 1; -} /** * aa_cred_profile - obtain cred's profiles @@ -108,9 +96,33 @@ static inline bool __aa_task_is_confined(struct task_struct *task) */ static inline struct aa_profile *aa_cred_profile(const struct cred *cred) { - struct aa_task_cxt *cxt = cred->security; + struct aa_task_cxt *cxt = cred_cxt(cred); BUG_ON(!cxt || !cxt->profile); - return aa_newest_version(cxt->profile); + return cxt->profile; +} + +/** + * __aa_task_profile - retrieve another task's profile + * @task: task to query (NOT NULL) + * + * Returns: @task's profile without incrementing its ref count + * + * If @task != current needs to be called in RCU safe critical section + */ +static inline struct aa_profile 
*__aa_task_profile(struct task_struct *task) +{ + return aa_cred_profile(__task_cred(task)); +} + +/** + * __aa_task_is_confined - determine if @task has any confinement + * @task: task to check confinement of (NOT NULL) + * + * If @task != current needs to be called in RCU safe critical section + */ +static inline bool __aa_task_is_confined(struct task_struct *task) +{ + return !unconfined(__aa_task_profile(task)); } /** @@ -136,19 +148,31 @@ static inline struct aa_profile *__aa_current_profile(void) */ static inline struct aa_profile *aa_current_profile(void) { - const struct aa_task_cxt *cxt = current_cred()->security; + const struct aa_task_cxt *cxt = current_cxt(); struct aa_profile *profile; BUG_ON(!cxt || !cxt->profile); - profile = aa_newest_version(cxt->profile); - /* - * Whether or not replacement succeeds, use newest profile so - * there is no need to update it after replacement. - */ - if (unlikely((cxt->profile != profile))) + if (PROFILE_INVALID(cxt->profile)) { + profile = aa_get_newest_profile(cxt->profile); aa_replace_current_profile(profile); + aa_put_profile(profile); + cxt = current_cxt(); + } - return profile; + return cxt->profile; +} + +/** + * aa_clear_task_cxt_trans - clear transition tracking info from the cxt + * @cxt: task context to clear (NOT NULL) + */ +static inline void aa_clear_task_cxt_trans(struct aa_task_cxt *cxt) +{ + aa_put_profile(cxt->previous); + aa_put_profile(cxt->onexec); + cxt->previous = NULL; + cxt->onexec = NULL; + cxt->token = 0; } #endif /* __AA_CONTEXT_H */ diff --git a/security/apparmor/include/crypto.h b/security/apparmor/include/crypto.h new file mode 100644 index 00000000000..dc418e5024d --- /dev/null +++ b/security/apparmor/include/crypto.h @@ -0,0 +1,36 @@ +/* + * AppArmor security module + * + * This file contains AppArmor policy loading interface function definitions. + * + * Copyright 2013 Canonical Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ */ + +#ifndef __APPARMOR_CRYPTO_H +#define __APPARMOR_CRYPTO_H + +#include "policy.h" + +#ifdef CONFIG_SECURITY_APPARMOR_HASH +unsigned int aa_hash_size(void); +int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start, + size_t len); +#else +static inline int aa_calc_profile_hash(struct aa_profile *profile, u32 version, + void *start, size_t len) +{ + return 0; +} + +static inline unsigned int aa_hash_size(void) +{ + return 0; +} +#endif + +#endif /* __APPARMOR_CRYPTO_H */ diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h index be36feabb16..2c922b86bd4 100644 --- a/security/apparmor/include/file.h +++ b/security/apparmor/include/file.h @@ -15,12 +15,11 @@ #ifndef __AA_FILE_H #define __AA_FILE_H -#include <linux/path.h> - #include "domain.h" #include "match.h" struct aa_profile; +struct path; /* * We use MAY_EXEC, MAY_WRITE, MAY_READ, MAY_APPEND and the following flags @@ -72,7 +71,7 @@ struct aa_profile; /* need to make conditional which ones are being set */ struct path_cond { - uid_t uid; + kuid_t uid; umode_t mode; }; @@ -118,7 +117,7 @@ static inline u16 dfa_map_xindex(u16 mask) index |= AA_X_NAME; } else if (old_index == 3) { index |= AA_X_NAME | AA_X_CHILD; - } else { + } else if (old_index) { index |= AA_X_TABLE; index |= old_index - 4; } @@ -147,7 +146,7 @@ static inline u16 dfa_map_xindex(u16 mask) int aa_audit_file(struct aa_profile *profile, struct file_perms *perms, gfp_t gfp, int op, u32 request, const char *name, - const char *target, uid_t ouid, const char *info, int error); + const char *target, kuid_t ouid, const char *info, int error); /** * struct aa_file_rules - components used for file rule permissions @@ -187,11 +186,6 @@ static inline void aa_free_file_rules(struct aa_file_rules *rules) aa_free_domain_entries(&rules->trans); } -#define ACC_FMODE(x) (("\000\004\002\006"[(x)&O_ACCMODE]) | (((x) << 1) & 0x40)) - -/* from namei.c */ -#define MAP_OPEN_FLAGS(x) ((((x) + 1) & O_ACCMODE) ? (x) + 1 : (x)) - /** * aa_map_file_perms - map file flags to AppArmor permissions * @file: open file to map flags to AppArmor permissions @@ -200,8 +194,13 @@ static inline void aa_free_file_rules(struct aa_file_rules *rules) */ static inline u32 aa_map_file_to_perms(struct file *file) { - int flags = MAP_OPEN_FLAGS(file->f_flags); - u32 perms = ACC_FMODE(file->f_mode); + int flags = file->f_flags; + u32 perms = 0; + + if (file->f_mode & FMODE_WRITE) + perms |= MAY_WRITE; + if (file->f_mode & FMODE_READ) + perms |= MAY_READ; if ((flags & O_APPEND) && (perms & MAY_WRITE)) perms = (perms & ~MAY_WRITE) | MAY_APPEND; diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h index aeda0fbc8b2..288ca76e2fb 100644 --- a/security/apparmor/include/ipc.h +++ b/security/apparmor/include/ipc.h @@ -19,8 +19,8 @@ struct aa_profile; -int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer, - struct aa_profile *tracee, unsigned int mode); +int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee, + unsigned int mode); int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee, unsigned int mode); diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h index 734a6d35112..001c43aa040 100644 --- a/security/apparmor/include/match.h +++ b/security/apparmor/include/match.h @@ -4,7 +4,7 @@ * This file contains AppArmor policy dfa matching engine definitions. * * Copyright (C) 1998-2008 Novell/SUSE - * Copyright 2009-2010 Canonical Ltd. 
+ * Copyright 2009-2012 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -15,25 +15,31 @@ #ifndef __AA_MATCH_H #define __AA_MATCH_H -#include <linux/workqueue.h> +#include <linux/kref.h> #define DFA_NOMATCH 0 #define DFA_START 1 -#define DFA_VALID_PERM_MASK 0xffffffff -#define DFA_VALID_PERM2_MASK 0xffffffff /** * The format used for transition tables is based on the GNU flex table * file format (--tables-file option; see Table File Format in the flex * info pages and the flex sources for documentation). The magic number - * used in the header is 0x1B5E783D insted of 0xF13C57B1 though, because - * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used - * slightly differently (see the apparmor-parser package). + * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because + * new tables have been defined and others YY_ID_CHK (check) and YY_ID_DEF + * (default) tables are used slightly differently (see the apparmor-parser + * package). + * + * + * The data in the packed dfa is stored in network byte order, and the tables + * are arranged for flexibility. We convert the table data to host native + * byte order. + * + * The dfa begins with a table set header, and is followed by the actual + * tables. */ #define YYTH_MAGIC 0x1B5E783D -#define YYTH_DEF_RECURSE 0x1 /* DEF Table is recursive */ struct table_set_header { u32 th_magic; /* YYTH_MAGIC */ @@ -62,7 +68,7 @@ struct table_set_header { #define YYTD_DATA32 4 #define YYTD_DATA64 8 -/* Each ACCEPT2 table gets 6 dedicated flags, YYTD_DATAX define the +/* ACCEPT & ACCEPT2 tables gets 6 dedicated flags, YYTD_DATAX define the * first flags */ #define ACCEPT1_FLAGS(X) ((X) & 0x3f) @@ -115,6 +121,9 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start, const char *str, int len); unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start, const char *str); +unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state, + const char c); + void aa_dfa_free_kref(struct kref *kref); /** diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h index 27b327a7fae..286ac75dc88 100644 --- a/security/apparmor/include/path.h +++ b/security/apparmor/include/path.h @@ -26,6 +26,7 @@ enum path_flags { PATH_MEDIATE_DELETED = 0x10000, /* mediate deleted paths */ }; -int aa_get_name(struct path *path, int flags, char **buffer, const char **name); +int aa_path_name(struct path *path, int flags, char **buffer, + const char **name, const char **info); #endif /* __AA_PATH_H */ diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h index aeda5cf5690..c28b0f20ab5 100644 --- a/security/apparmor/include/policy.h +++ b/security/apparmor/include/policy.h @@ -29,19 +29,23 @@ #include "file.h" #include "resource.h" -extern const char *profile_mode_names[]; -#define APPARMOR_NAMES_MAX_INDEX 3 +extern const char *const aa_profile_mode_names[]; +#define APPARMOR_MODE_NAMES_MAX_INDEX 4 -#define COMPLAIN_MODE(_profile) \ - ((aa_g_profile_mode == APPARMOR_COMPLAIN) || \ - ((_profile)->mode == APPARMOR_COMPLAIN)) +#define PROFILE_MODE(_profile, _mode) \ + ((aa_g_profile_mode == (_mode)) || \ + ((_profile)->mode == (_mode))) -#define KILL_MODE(_profile) \ - ((aa_g_profile_mode == APPARMOR_KILL) || \ - ((_profile)->mode == APPARMOR_KILL)) +#define COMPLAIN_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_COMPLAIN) + +#define KILL_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_KILL) 
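The flex-style transition tables described in the match.h comment above drive every lookup: a per-state base offset selects a slot in the shared next/check arrays, check[] confirms the slot really belongs to the current state, the default table supplies the fallback transition, and an optional equivalence-class map first collapses the input byte. A minimal standalone sketch of that single-step walk, in the shape of the new aa_dfa_next(); the struct and table names here are simplified stand-ins, not the kernel's aa_dfa layout:

#include <stdint.h>

/* Comb-compressed DFA step in the style of aa_dfa_next()/aa_dfa_match().
 * Table names are illustrative; the kernel keeps them inside struct aa_dfa. */
struct toy_dfa {
	const uint32_t *base;   /* per-state offset into next[]/check[] */
	const uint16_t *next;   /* candidate next state for each slot */
	const uint16_t *check;  /* state that owns each slot */
	const uint16_t *def;    /* fallback state when the slot is foreign */
	const uint8_t  *equiv;  /* optional equivalence-class map, may be NULL */
};

/* only the low bits of base[] hold the offset; high bits carry flags */
#define BASE_IDX(x) ((x) & 0xffffff)

static unsigned int toy_dfa_step(const struct toy_dfa *dfa,
				 unsigned int state, unsigned char c)
{
	unsigned int pos;

	if (dfa->equiv)
		c = dfa->equiv[c];              /* collapse the input alphabet */
	pos = BASE_IDX(dfa->base[state]) + c;
	/* take the packed transition only if this slot belongs to @state */
	return dfa->check[pos] == state ? dfa->next[pos] : dfa->def[state];
}

Matching a whole string, as aa_dfa_match() does in the match.c hunks further down, is just this step applied byte by byte from the start state.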
#define PROFILE_IS_HAT(_profile) ((_profile)->flags & PFLAG_HAT) +#define PROFILE_INVALID(_profile) ((_profile)->flags & PFLAG_INVALID) + +#define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2) + /* * FIXME: currently need a clean way to replace and remove profiles as a * set. It should be done at the namespace level. @@ -52,17 +56,19 @@ enum profile_mode { APPARMOR_ENFORCE, /* enforce access rules */ APPARMOR_COMPLAIN, /* allow and log access violations */ APPARMOR_KILL, /* kill task on access violation */ + APPARMOR_UNCONFINED, /* profile set to unconfined */ }; enum profile_flags { PFLAG_HAT = 1, /* profile is a hat */ - PFLAG_UNCONFINED = 2, /* profile is an unconfined profile */ PFLAG_NULL = 4, /* profile is null learning profile */ PFLAG_IX_ON_NAME_ERROR = 8, /* fallback to ix on name lookup fail */ PFLAG_IMMUTABLE = 0x10, /* don't allow changes/replacement */ PFLAG_USER_DEFINED = 0x20, /* user based profile - lower privs */ PFLAG_NO_LIST_REF = 0x40, /* list doesn't keep profile ref */ PFLAG_OLD_NULL_TRANS = 0x100, /* use // as the null transition */ + PFLAG_INVALID = 0x200, /* profile replaced/removed */ + PFLAG_NS_COUNT = 0x400, /* carries NS ref count */ /* These flags must correspond with PATH_flags */ PFLAG_MEDIATE_DELETED = 0x10000, /* mediate instead delegate deleted */ @@ -73,14 +79,12 @@ struct aa_profile; /* struct aa_policy - common part of both namespaces and profiles * @name: name of the object * @hname - The hierarchical name - * @count: reference count of the obj * @list: list policy object is on * @profiles: head of the profiles list contained in the object */ struct aa_policy { char *name; char *hname; - struct kref count; struct list_head list; struct list_head profiles; }; @@ -105,6 +109,9 @@ struct aa_ns_acct { * @acct: accounting for the namespace * @unconfined: special unconfined profile for the namespace * @sub_ns: list of namespaces under the current namespace. + * @uniq_null: uniq value used for null learning profiles + * @uniq_id: a unique id count for the profiles in the namespace + * @dents: dentries for the namespaces file entries in apparmorfs * * An aa_namespace defines the set profiles that are searched to determine * which profile to attach to a task. Profiles can not be shared between @@ -123,37 +130,63 @@ struct aa_ns_acct { struct aa_namespace { struct aa_policy base; struct aa_namespace *parent; - rwlock_t lock; + struct mutex lock; struct aa_ns_acct acct; struct aa_profile *unconfined; struct list_head sub_ns; + atomic_t uniq_null; + long uniq_id; + + struct dentry *dents[AAFS_NS_SIZEOF]; }; +/* struct aa_policydb - match engine for a policy + * dfa: dfa pattern match + * start: set of start states for the different classes of data + */ +struct aa_policydb { + /* Generic policy DFA specific rule types will be subsections of it */ + struct aa_dfa *dfa; + unsigned int start[AA_CLASS_LAST + 1]; + +}; + +struct aa_replacedby { + struct kref count; + struct aa_profile __rcu *profile; +}; + + /* struct aa_profile - basic confinement data * @base - base components of the profile (name, refcount, lists, lock ...) 
+ * @count: reference count of the obj + * @rcu: rcu head used when removing from @list * @parent: parent of profile * @ns: namespace the profile is in * @replacedby: is set to the profile that replaced this profile * @rename: optional profile name that this profile renamed + * @attach: human readable attachment string * @xmatch: optional extended matching for unconfined executables names * @xmatch_len: xmatch prefix len, used to determine xmatch priority - * @sid: the unique security id number of this profile * @audit: the auditing mode of the profile * @mode: the enforcement mode of the profile * @flags: flags controlling profile behavior * @path_flags: flags controlling path generation behavior * @size: the memory consumed by this profiles rules + * @policy: general match rules governing policy * @file: The set of rules governing basic file access and domain transitions * @caps: capabilities for the profile * @rlimits: rlimits for the profile * + * @dents: dentries for the profiles file entries in apparmorfs + * @dirname: name of the profile dir in apparmorfs + * * The AppArmor profile contains the basic confinement data. Each profile * has a name, and exists in a namespace. The @name and @exec_match are * used to determine profile attachment against unconfined tasks. All other * attachments are determined by profile X transition rules. * - * The @replacedby field is write protected by the profile lock. Reads - * are assumed to be atomic, and are done without locking. + * The @replacedby struct is write protected by the profile lock. * * Profiles have a hierarchy where hats and children profiles keep * a reference to their parent. @@ -164,24 +197,31 @@ struct aa_namespace { */ struct aa_profile { struct aa_policy base; - struct aa_profile *parent; + struct kref count; + struct rcu_head rcu; + struct aa_profile __rcu *parent; struct aa_namespace *ns; - struct aa_profile *replacedby; + struct aa_replacedby *replacedby; const char *rename; + const char *attach; struct aa_dfa *xmatch; int xmatch_len; - u32 sid; enum audit_mode audit; - enum profile_mode mode; - u32 flags; + long mode; + long flags; u32 path_flags; int size; + struct aa_policydb policy; struct aa_file_rules file; struct aa_caps caps; struct aa_rlimit rlimits; + + unsigned char *hash; + char *dirname; + struct dentry *dents[AAFS_PROF_SIZEOF]; }; extern struct aa_namespace *root_ns; @@ -198,43 +238,11 @@ void aa_free_namespace_kref(struct kref *kref); struct aa_namespace *aa_find_namespace(struct aa_namespace *root, const char *name); -static inline struct aa_policy *aa_get_common(struct aa_policy *c) -{ - if (c) - kref_get(&c->count); - - return c; -} - -/** - * aa_get_namespace - increment references count on @ns - * @ns: namespace to increment reference count of (MAYBE NULL) - * - * Returns: pointer to @ns, if @ns is NULL returns NULL - * Requires: @ns must be held with valid refcount when called - */ -static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns) -{ - if (ns) - kref_get(&(ns->base.count)); - - return ns; -} - -/** - * aa_put_namespace - decrement refcount on @ns - * @ns: namespace to put reference of - * - * Decrement reference count of @ns and if no longer in use free it - */ -static inline void aa_put_namespace(struct aa_namespace *ns) -{ - if (ns) - kref_put(&ns->base.count, aa_free_namespace_kref); -} +void aa_free_replacedby_kref(struct kref *kref); struct aa_profile *aa_alloc_profile(const char *name); struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat); +void 
aa_free_profile(struct aa_profile *profile); void aa_free_profile_kref(struct kref *kref); struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name); struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *name); @@ -246,25 +254,13 @@ ssize_t aa_remove_profiles(char *name, size_t size); #define PROF_ADD 1 #define PROF_REPLACE 0 -#define unconfined(X) ((X)->flags & PFLAG_UNCONFINED) +#define unconfined(X) ((X)->mode == APPARMOR_UNCONFINED) -/** - * aa_newest_version - find the newest version of @profile - * @profile: the profile to check for newer versions of (NOT NULL) - * - * Returns: newest version of @profile, if @profile is the newest version - * return @profile. - * - * NOTE: the profile returned is not refcounted, The refcount on @profile - * must be held until the caller decides what to do with the returned newest - * version. - */ -static inline struct aa_profile *aa_newest_version(struct aa_profile *profile) -{ - while (profile->replacedby) - profile = profile->replacedby; - return profile; +static inline struct aa_profile *aa_deref_parent(struct aa_profile *p) +{ + return rcu_dereference_protected(p->parent, + mutex_is_locked(&p->ns->lock)); } /** @@ -277,19 +273,126 @@ static inline struct aa_profile *aa_newest_version(struct aa_profile *profile) static inline struct aa_profile *aa_get_profile(struct aa_profile *p) { if (p) - kref_get(&(p->base.count)); + kref_get(&(p->count)); return p; } /** + * aa_get_profile_not0 - increment refcount on profile @p found via lookup + * @p: profile (MAYBE NULL) + * + * Returns: pointer to @p if @p is NULL will return NULL + * Requires: @p must be held with valid refcount when called + */ +static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p) +{ + if (p && kref_get_not0(&p->count)) + return p; + + return NULL; +} + +/** + * aa_get_profile_rcu - increment a refcount profile that can be replaced + * @p: pointer to profile that can be replaced (NOT NULL) + * + * Returns: pointer to a refcounted profile. + * else NULL if no profile + */ +static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p) +{ + struct aa_profile *c; + + rcu_read_lock(); + do { + c = rcu_dereference(*p); + } while (c && !kref_get_not0(&c->count)); + rcu_read_unlock(); + + return c; +} + +/** + * aa_get_newest_profile - find the newest version of @profile + * @profile: the profile to check for newer versions of + * + * Returns: refcounted newest version of @profile taking into account + * replacement, renames and removals + * return @profile. 
+ */ +static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p) +{ + if (!p) + return NULL; + + if (PROFILE_INVALID(p)) + return aa_get_profile_rcu(&p->replacedby->profile); + + return aa_get_profile(p); +} + +/** * aa_put_profile - decrement refcount on profile @p * @p: profile (MAYBE NULL) */ static inline void aa_put_profile(struct aa_profile *p) { if (p) - kref_put(&p->base.count, aa_free_profile_kref); + kref_put(&p->count, aa_free_profile_kref); +} + +static inline struct aa_replacedby *aa_get_replacedby(struct aa_replacedby *p) +{ + if (p) + kref_get(&(p->count)); + + return p; +} + +static inline void aa_put_replacedby(struct aa_replacedby *p) +{ + if (p) + kref_put(&p->count, aa_free_replacedby_kref); +} + +/* requires profile list write lock held */ +static inline void __aa_update_replacedby(struct aa_profile *orig, + struct aa_profile *new) +{ + struct aa_profile *tmp; + tmp = rcu_dereference_protected(orig->replacedby->profile, + mutex_is_locked(&orig->ns->lock)); + rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new)); + orig->flags |= PFLAG_INVALID; + aa_put_profile(tmp); +} + +/** + * aa_get_namespace - increment references count on @ns + * @ns: namespace to increment reference count of (MAYBE NULL) + * + * Returns: pointer to @ns, if @ns is NULL returns NULL + * Requires: @ns must be held with valid refcount when called + */ +static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns) +{ + if (ns) + aa_get_profile(ns->unconfined); + + return ns; +} + +/** + * aa_put_namespace - decrement refcount on @ns + * @ns: namespace to put reference of + * + * Decrement reference count of @ns and if no longer in use free it + */ +static inline void aa_put_namespace(struct aa_namespace *ns) +{ + if (ns) + aa_put_profile(ns->unconfined); } static inline int AUDIT_MODE(struct aa_profile *profile) diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h index a2dcccac45a..c214fb88b1b 100644 --- a/security/apparmor/include/policy_unpack.h +++ b/security/apparmor/include/policy_unpack.h @@ -15,6 +15,25 @@ #ifndef __POLICY_INTERFACE_H #define __POLICY_INTERFACE_H -struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns); +#include <linux/list.h> + +struct aa_load_ent { + struct list_head list; + struct aa_profile *new; + struct aa_profile *old; + struct aa_profile *rename; +}; + +void aa_load_ent_free(struct aa_load_ent *ent); +struct aa_load_ent *aa_load_ent_alloc(void); + +#define PACKED_FLAG_HAT 1 + +#define PACKED_MODE_ENFORCE 0 +#define PACKED_MODE_COMPLAIN 1 +#define PACKED_MODE_KILL 2 +#define PACKED_MODE_UNCONFINED 3 + +int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns); #endif /* __POLICY_INTERFACE_H */ diff --git a/security/apparmor/include/procattr.h b/security/apparmor/include/procattr.h index 544aa6b766a..6bd5f33d953 100644 --- a/security/apparmor/include/procattr.h +++ b/security/apparmor/include/procattr.h @@ -21,6 +21,5 @@ int aa_getprocattr(struct aa_profile *profile, char **string); int aa_setprocattr_changehat(char *args, size_t size, int test); int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test); -int aa_setprocattr_permipc(char *fqname); #endif /* __AA_PROCATTR_H */ diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h index 02baec732bb..d3f4cf02795 100644 --- a/security/apparmor/include/resource.h +++ b/security/apparmor/include/resource.h @@ -18,6 +18,8 @@ #include <linux/resource.h> 
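kref_get_not0() (added in apparmor.h) and the aa_get_profile_rcu()/aa_get_newest_profile() helpers above share one idea: a lookup racing with profile replacement may only take a reference while the count is still non-zero, and must retry the dereference otherwise. A rough userspace analogue of the take-only-if-live step, using C11 atomics instead of kref/RCU; all names here are illustrative:

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only while the object is still live (count > 0),
 * mirroring atomic_inc_not_zero() as wrapped by kref_get_not0(). */
static bool ref_get_not_zero(atomic_int *count)
{
	int old = atomic_load(count);

	while (old != 0) {
		/* on failure, old is reloaded with the current value */
		if (atomic_compare_exchange_weak(count, &old, old + 1))
			return true;    /* reference taken */
	}
	return false;   /* already on its way to being freed; redo the lookup */
}

The aa_get_profile_rcu() loop in the hunk above then simply repeats rcu_dereference() until this either succeeds or the pointer reads as NULL.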
#include <linux/sched.h> +#include "apparmorfs.h" + struct aa_profile; /* struct aa_rlimit - rlimit settings for the profile @@ -32,6 +34,8 @@ struct aa_rlimit { struct rlimit limits[RLIM_NLIMITS]; }; +extern struct aa_fs_entry aa_fs_entry_rlimit[]; + int aa_map_resource(int resource); int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *, unsigned int resource, struct rlimit *new_rlim); diff --git a/security/apparmor/include/sid.h b/security/apparmor/include/sid.h index 020db35c301..513ca0e4896 100644 --- a/security/apparmor/include/sid.h +++ b/security/apparmor/include/sid.h @@ -16,7 +16,9 @@ #include <linux/types.h> -struct aa_profile; +/* sid value that will not be allocated */ +#define AA_SID_INVALID 0 +#define AA_SID_ALLOC AA_SID_INVALID u32 aa_alloc_sid(void); void aa_free_sid(u32 sid); diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c index 649fad88869..777ac1c4725 100644 --- a/security/apparmor/ipc.c +++ b/security/apparmor/ipc.c @@ -19,13 +19,14 @@ #include "include/capability.h" #include "include/context.h" #include "include/policy.h" +#include "include/ipc.h" /* call back to audit ptrace fields */ static void audit_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; audit_log_format(ab, " target="); - audit_log_untrustedstring(ab, sa->aad.target); + audit_log_untrustedstring(ab, sa->aad->target); } /** @@ -40,10 +41,12 @@ static int aa_audit_ptrace(struct aa_profile *profile, struct aa_profile *target, int error) { struct common_audit_data sa; - COMMON_AUDIT_DATA_INIT(&sa, NONE); - sa.aad.op = OP_PTRACE; - sa.aad.target = target; - sa.aad.error = error; + struct apparmor_audit_data aad = {0,}; + sa.type = LSM_AUDIT_DATA_NONE; + sa.aad = &aad; + aad.op = OP_PTRACE; + aad.target = target; + aad.error = error; return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_ATOMIC, &sa, audit_cb); @@ -51,15 +54,14 @@ static int aa_audit_ptrace(struct aa_profile *profile, /** * aa_may_ptrace - test if tracer task can trace the tracee - * @tracer_task: task who will do the tracing (NOT NULL) * @tracer: profile of the task doing the tracing (NOT NULL) * @tracee: task to be traced * @mode: whether PTRACE_MODE_READ || PTRACE_MODE_ATTACH * * Returns: %0 else error code if permission denied or error */ -int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer, - struct aa_profile *tracee, unsigned int mode) +int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee, + unsigned int mode) { /* TODO: currently only based on capability, not extended ptrace * rules, @@ -69,7 +71,7 @@ int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer, if (unconfined(tracer) || tracer == tracee) return 0; /* log this capability request */ - return aa_capable(tracer_task, tracer, CAP_SYS_PTRACE, 1); + return aa_capable(tracer, CAP_SYS_PTRACE, 1); } /** @@ -92,23 +94,18 @@ int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee, * - tracer profile has CAP_SYS_PTRACE */ - struct aa_profile *tracer_p; - /* cred released below */ - const struct cred *cred = get_task_cred(tracer); + struct aa_profile *tracer_p = aa_get_task_profile(tracer); int error = 0; - tracer_p = aa_cred_profile(cred); if (!unconfined(tracer_p)) { - /* lcred released below */ - const struct cred *lcred = get_task_cred(tracee); - struct aa_profile *tracee_p = aa_cred_profile(lcred); + struct aa_profile *tracee_p = aa_get_task_profile(tracee); - error = aa_may_ptrace(tracer, tracer_p, tracee_p, mode); + error = aa_may_ptrace(tracer_p, 
tracee_p, mode); error = aa_audit_ptrace(tracer_p, tracee_p, error); - put_cred(lcred); + aa_put_profile(tracee_p); } - put_cred(cred); + aa_put_profile(tracer_p); return error; } diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c index 506d2baf614..c1827e06845 100644 --- a/security/apparmor/lib.c +++ b/security/apparmor/lib.c @@ -12,11 +12,13 @@ * License. */ +#include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/vmalloc.h> #include "include/audit.h" +#include "include/apparmor.h" /** @@ -43,8 +45,10 @@ char *aa_split_fqname(char *fqname, char **ns_name) *ns_name = skip_spaces(&name[1]); if (split) { /* overwrite ':' with \0 */ - *split = 0; - name = skip_spaces(split + 1); + *split++ = 0; + if (strncmp(split, "//", 2) == 0) + split += 2; + name = skip_spaces(split); } else /* a ns name without a following profile is allowed */ name = NULL; @@ -63,23 +67,26 @@ void aa_info_message(const char *str) { if (audit_enabled) { struct common_audit_data sa; - COMMON_AUDIT_DATA_INIT(&sa, NONE); - sa.aad.info = str; + struct apparmor_audit_data aad = {0,}; + sa.type = LSM_AUDIT_DATA_NONE; + sa.aad = &aad; + aad.info = str; aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL); } printk(KERN_INFO "AppArmor: %s\n", str); } /** - * kvmalloc - do allocation preferring kmalloc but falling back to vmalloc - * @size: size of allocation + * __aa_kvmalloc - do allocation preferring kmalloc but falling back to vmalloc + * @size: how many bytes of memory are required + * @flags: the type of memory to allocate (see kmalloc). * * Return: allocated buffer or NULL if failed * * It is possible that policy being loaded from the user is larger than * what can be allocated by kmalloc, in those cases fall back to vmalloc. */ -void *kvmalloc(size_t size) +void *__aa_kvmalloc(size_t size, gfp_t flags) { void *buffer = NULL; @@ -88,46 +95,12 @@ void *kvmalloc(size_t size) /* do not attempt kmalloc if we need more than 16 pages at once */ if (size <= (16*PAGE_SIZE)) - buffer = kmalloc(size, GFP_NOIO | __GFP_NOWARN); + buffer = kmalloc(size, flags | GFP_NOIO | __GFP_NOWARN); if (!buffer) { - /* see kvfree for why size must be at least work_struct size - * when allocated via vmalloc - */ - if (size < sizeof(struct work_struct)) - size = sizeof(struct work_struct); - buffer = vmalloc(size); + if (flags & __GFP_ZERO) + buffer = vzalloc(size); + else + buffer = vmalloc(size); } return buffer; } - -/** - * do_vfree - workqueue routine for freeing vmalloced memory - * @work: data to be freed - * - * The work_struct is overlaid to the data being freed, as at the point - * the work is scheduled the data is no longer valid, be its freeing - * needs to be delayed until safe. 
- */ -static void do_vfree(struct work_struct *work) -{ - vfree(work); -} - -/** - * kvfree - free an allocation do by kvmalloc - * @buffer: buffer to free (MAYBE_NULL) - * - * Free a buffer allocated by kvmalloc - */ -void kvfree(void *buffer) -{ - if (is_vmalloc_addr(buffer)) { - /* Data is no longer valid so just use the allocated space - * as the work_struct - */ - struct work_struct *work = (struct work_struct *) buffer; - INIT_WORK(work, do_vfree); - schedule_work(work); - } else - kfree(buffer); -} diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c index b7106f192b7..99810009333 100644 --- a/security/apparmor/lsm.c +++ b/security/apparmor/lsm.c @@ -22,6 +22,7 @@ #include <linux/ctype.h> #include <linux/sysctl.h> #include <linux/audit.h> +#include <linux/user_namespace.h> #include <net/sock.h> #include "include/apparmor.h" @@ -47,8 +48,8 @@ int apparmor_initialized __initdata; */ static void apparmor_cred_free(struct cred *cred) { - aa_free_task_context(cred->security); - cred->security = NULL; + aa_free_task_context(cred_cxt(cred)); + cred_cxt(cred) = NULL; } /* @@ -61,7 +62,7 @@ static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp) if (!cxt) return -ENOMEM; - cred->security = cxt; + cred_cxt(cred) = cxt; return 0; } @@ -76,8 +77,8 @@ static int apparmor_cred_prepare(struct cred *new, const struct cred *old, if (!cxt) return -ENOMEM; - aa_dup_task_context(cxt, old->security); - new->security = cxt; + aa_dup_task_context(cxt, cred_cxt(old)); + cred_cxt(new) = cxt; return 0; } @@ -86,8 +87,8 @@ static int apparmor_cred_prepare(struct cred *new, const struct cred *old, */ static void apparmor_cred_transfer(struct cred *new, const struct cred *old) { - const struct aa_task_cxt *old_cxt = old->security; - struct aa_task_cxt *new_cxt = new->security; + const struct aa_task_cxt *old_cxt = cred_cxt(old); + struct aa_task_cxt *new_cxt = cred_cxt(new); aa_dup_task_context(new_cxt, old_cxt); } @@ -126,7 +127,7 @@ static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective, *inheritable = cred->cap_inheritable; *permitted = cred->cap_permitted; - if (!unconfined(profile)) { + if (!unconfined(profile) && !COMPLAIN_MODE(profile)) { *effective = cap_intersect(*effective, profile->caps.allow); *permitted = cap_intersect(*permitted, profile->caps.allow); } @@ -135,16 +136,16 @@ static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective, return 0; } -static int apparmor_capable(struct task_struct *task, const struct cred *cred, +static int apparmor_capable(const struct cred *cred, struct user_namespace *ns, int cap, int audit) { struct aa_profile *profile; /* cap_capable returns 0 on success, else -EPERM */ - int error = cap_capable(task, cred, cap, audit); + int error = cap_capable(cred, ns, cap, audit); if (!error) { profile = aa_cred_profile(cred); if (!unconfined(profile)) - error = aa_capable(task, profile, cap, audit); + error = aa_capable(profile, cap, audit); } return error; } @@ -261,7 +262,7 @@ static int apparmor_path_unlink(struct path *dir, struct dentry *dentry) } static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry, - int mode) + umode_t mode) { return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE, S_IFDIR); @@ -273,7 +274,7 @@ static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry) } static int apparmor_path_mknod(struct path *dir, struct dentry *dentry, - int mode, unsigned int dev) + umode_t mode, unsigned int dev) { return common_perm_create(OP_MKNOD, dir, dentry, 
AA_MAY_CREATE, mode); } @@ -343,16 +344,15 @@ static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry, return error; } -static int apparmor_path_chmod(struct dentry *dentry, struct vfsmount *mnt, - mode_t mode) +static int apparmor_path_chmod(struct path *path, umode_t mode) { - if (!mediated_filesystem(dentry->d_inode)) + if (!mediated_filesystem(path->dentry->d_inode)) return 0; - return common_perm_mnt_dentry(OP_CHMOD, mnt, dentry, AA_MAY_CHMOD); + return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD); } -static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid) +static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid) { struct path_cond cond = { path->dentry->d_inode->i_uid, path->dentry->d_inode->i_mode @@ -373,13 +373,13 @@ static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) AA_MAY_META_READ); } -static int apparmor_dentry_open(struct file *file, const struct cred *cred) +static int apparmor_file_open(struct file *file, const struct cred *cred) { struct aa_file_cxt *fcxt = file->f_security; struct aa_profile *profile; int error = 0; - if (!mediated_filesystem(file->f_path.dentry->d_inode)) + if (!mediated_filesystem(file_inode(file))) return 0; /* If in exec, permission is handled by bprm hooks. @@ -394,7 +394,7 @@ static int apparmor_dentry_open(struct file *file, const struct cred *cred) profile = aa_cred_profile(cred); if (!unconfined(profile)) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); struct path_cond cond = { inode->i_uid, inode->i_mode }; error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0, @@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask) BUG_ON(!fprofile); if (!file->f_path.mnt || - !mediated_filesystem(file->f_path.dentry->d_inode)) + !mediated_filesystem(file_inode(file))) return 0; profile = __aa_current_profile(); @@ -469,7 +469,6 @@ static int apparmor_file_lock(struct file *file, unsigned int cmd) static int common_mmap(int op, struct file *file, unsigned long prot, unsigned long flags) { - struct dentry *dentry; int mask = 0; if (!file || !file->f_security) @@ -486,21 +485,12 @@ static int common_mmap(int op, struct file *file, unsigned long prot, if (prot & PROT_EXEC) mask |= AA_EXEC_MMAP; - dentry = file->f_path.dentry; return common_file_perm(op, file, mask); } -static int apparmor_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only) +static int apparmor_mmap_file(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) { - int rc = 0; - - /* do DAC check */ - rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); - if (rc || addr_only) - return rc; - return common_mmap(OP_FMMAP, file, prot, flags); } @@ -515,24 +505,24 @@ static int apparmor_getprocattr(struct task_struct *task, char *name, char **value) { int error = -ENOENT; - struct aa_profile *profile; /* released below */ const struct cred *cred = get_task_cred(task); - struct aa_task_cxt *cxt = cred->security; - profile = aa_cred_profile(cred); + struct aa_task_cxt *cxt = cred_cxt(cred); + struct aa_profile *profile = NULL; if (strcmp(name, "current") == 0) - error = aa_getprocattr(aa_newest_version(cxt->profile), - value); + profile = aa_get_newest_profile(cxt->profile); else if (strcmp(name, "prev") == 0 && cxt->previous) - error = aa_getprocattr(aa_newest_version(cxt->previous), - 
value); + profile = aa_get_newest_profile(cxt->previous); else if (strcmp(name, "exec") == 0 && cxt->onexec) - error = aa_getprocattr(aa_newest_version(cxt->onexec), - value); + profile = aa_get_newest_profile(cxt->onexec); else error = -EINVAL; + if (profile) + error = aa_getprocattr(profile, value); + + aa_put_profile(profile); put_cred(cred); return error; @@ -541,6 +531,8 @@ static int apparmor_getprocattr(struct task_struct *task, char *name, static int apparmor_setprocattr(struct task_struct *task, char *name, void *value, size_t size) { + struct common_audit_data sa; + struct apparmor_audit_data aad = {0,}; char *command, *args = value; size_t arg_size; int error; @@ -584,33 +576,37 @@ static int apparmor_setprocattr(struct task_struct *task, char *name, } else if (strcmp(command, "permprofile") == 0) { error = aa_setprocattr_changeprofile(args, !AA_ONEXEC, AA_DO_TEST); - } else if (strcmp(command, "permipc") == 0) { - error = aa_setprocattr_permipc(args); - } else { - struct common_audit_data sa; - COMMON_AUDIT_DATA_INIT(&sa, NONE); - sa.aad.op = OP_SETPROCATTR; - sa.aad.info = name; - sa.aad.error = -EINVAL; - return aa_audit(AUDIT_APPARMOR_DENIED, NULL, GFP_KERNEL, - &sa, NULL); - } + } else + goto fail; } else if (strcmp(name, "exec") == 0) { - error = aa_setprocattr_changeprofile(args, AA_ONEXEC, - !AA_DO_TEST); - } else { + if (strcmp(command, "exec") == 0) + error = aa_setprocattr_changeprofile(args, AA_ONEXEC, + !AA_DO_TEST); + else + goto fail; + } else /* only support the "current" and "exec" process attributes */ return -EINVAL; - } + if (!error) error = size; return error; + +fail: + sa.type = LSM_AUDIT_DATA_NONE; + sa.aad = &aad; + aad.profile = aa_current_profile(); + aad.op = OP_SETPROCATTR; + aad.info = name; + aad.error = -EINVAL; + aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL); + return -EINVAL; } static int apparmor_task_setrlimit(struct task_struct *task, unsigned int resource, struct rlimit *new_rlim) { - struct aa_profile *profile = aa_current_profile(); + struct aa_profile *profile = __aa_current_profile(); int error = 0; if (!unconfined(profile)) @@ -637,13 +633,14 @@ static struct security_operations apparmor_ops = { .path_chmod = apparmor_path_chmod, .path_chown = apparmor_path_chown, .path_truncate = apparmor_path_truncate, - .dentry_open = apparmor_dentry_open, .inode_getattr = apparmor_inode_getattr, + .file_open = apparmor_file_open, .file_permission = apparmor_file_permission, .file_alloc_security = apparmor_file_alloc_security, .file_free_security = apparmor_file_free_security, - .file_mmap = apparmor_file_mmap, + .mmap_file = apparmor_mmap_file, + .mmap_addr = cap_mmap_addr, .file_mprotect = apparmor_file_mprotect, .file_lock = apparmor_file_lock, @@ -669,15 +666,16 @@ static struct security_operations apparmor_ops = { static int param_set_aabool(const char *val, const struct kernel_param *kp); static int param_get_aabool(char *buffer, const struct kernel_param *kp); -#define param_check_aabool(name, p) __param_check(name, p, int) +#define param_check_aabool param_check_bool static struct kernel_param_ops param_ops_aabool = { + .flags = KERNEL_PARAM_FL_NOARG, .set = param_set_aabool, .get = param_get_aabool }; static int param_set_aauint(const char *val, const struct kernel_param *kp); static int param_get_aauint(char *buffer, const struct kernel_param *kp); -#define param_check_aauint(name, p) __param_check(name, p, int) +#define param_check_aauint param_check_uint static struct kernel_param_ops param_ops_aauint = { .set = param_set_aauint, .get = 
param_get_aauint @@ -685,19 +683,18 @@ static struct kernel_param_ops param_ops_aauint = { static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp); static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp); -#define param_check_aalockpolicy(name, p) __param_check(name, p, int) +#define param_check_aalockpolicy param_check_bool static struct kernel_param_ops param_ops_aalockpolicy = { + .flags = KERNEL_PARAM_FL_NOARG, .set = param_set_aalockpolicy, .get = param_get_aalockpolicy }; static int param_set_audit(const char *val, struct kernel_param *kp); static int param_get_audit(char *buffer, struct kernel_param *kp); -#define param_check_audit(name, p) __param_check(name, p, int) static int param_set_mode(const char *val, struct kernel_param *kp); static int param_get_mode(char *buffer, struct kernel_param *kp); -#define param_check_mode(name, p) __param_check(name, p, int) /* Flag values, also controllable via /sys/module/apparmor/parameters * We define special types as we want to do additional mediation. @@ -709,7 +706,7 @@ module_param_call(mode, param_set_mode, param_get_mode, &aa_g_profile_mode, S_IRUSR | S_IWUSR); /* Debug mode */ -int aa_g_debug; +bool aa_g_debug; module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR); /* Audit mode */ @@ -720,7 +717,7 @@ module_param_call(audit, param_set_audit, param_get_audit, /* Determines if audit header is included in audited messages. This * provides more context if the audit daemon is not running */ -int aa_g_audit_header = 1; +bool aa_g_audit_header = 1; module_param_named(audit_header, aa_g_audit_header, aabool, S_IRUSR | S_IWUSR); @@ -728,12 +725,12 @@ module_param_named(audit_header, aa_g_audit_header, aabool, * TODO: add in at boot loading of policy, which is the only way to * load policy, if lock_policy is set */ -int aa_g_lock_policy; +bool aa_g_lock_policy; module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy, S_IRUSR | S_IWUSR); /* Syscall logging mode */ -int aa_g_logsyscall; +bool aa_g_logsyscall; module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR); /* Maximum pathname length before accesses will start getting rejected */ @@ -743,18 +740,18 @@ module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR); /* Determines how paranoid loading of policy is and how much verification * on the loaded policy is done. */ -int aa_g_paranoid_load = 1; +bool aa_g_paranoid_load = 1; module_param_named(paranoid_load, aa_g_paranoid_load, aabool, S_IRUSR | S_IWUSR); /* Boot time disable flag */ -static unsigned int apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE; -module_param_named(enabled, apparmor_enabled, aabool, S_IRUSR); +static bool apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE; +module_param_named(enabled, apparmor_enabled, bool, S_IRUGO); static int __init apparmor_enabled_setup(char *str) { unsigned long enabled; - int error = strict_strtoul(str, 0, &enabled); + int error = kstrtoul(str, 0, &enabled); if (!error) apparmor_enabled = enabled ? 
1 : 0; return 1; @@ -848,7 +845,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp) if (!apparmor_enabled) return -EINVAL; - return sprintf(buffer, "%s", profile_mode_names[aa_g_profile_mode]); + return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]); } static int param_set_mode(const char *val, struct kernel_param *kp) @@ -863,8 +860,8 @@ static int param_set_mode(const char *val, struct kernel_param *kp) if (!val) return -EINVAL; - for (i = 0; i < APPARMOR_NAMES_MAX_INDEX; i++) { - if (strcmp(val, profile_mode_names[i]) == 0) { + for (i = 0; i < APPARMOR_MODE_NAMES_MAX_INDEX; i++) { + if (strcmp(val, aa_profile_mode_names[i]) == 0) { aa_g_profile_mode = i; return 0; } @@ -892,7 +889,7 @@ static int __init set_init_cxt(void) return -ENOMEM; cxt->profile = aa_get_profile(root_ns->unconfined); - cred->security = cxt; + cred_cxt(cred) = cxt; return 0; } @@ -921,8 +918,11 @@ static int __init apparmor_init(void) error = register_security(&apparmor_ops); if (error) { + struct cred *cred = (struct cred *)current->real_cred; + aa_free_task_context(cred_cxt(cred)); + cred_cxt(cred) = NULL; AA_ERROR("Unable to register AppArmor\n"); - goto set_init_cxt_out; + goto register_security_out; } /* Report that AppArmor successfully initialized */ @@ -936,9 +936,6 @@ static int __init apparmor_init(void) return error; -set_init_cxt_out: - aa_free_task_context(current->real_cred->security); - register_security_out: aa_free_root_ns(); diff --git a/security/apparmor/match.c b/security/apparmor/match.c index 5cb4dc1f699..727eb4200d5 100644 --- a/security/apparmor/match.c +++ b/security/apparmor/match.c @@ -4,7 +4,7 @@ * This file contains AppArmor dfa based regular expression matching engine * * Copyright (C) 1998-2008 Novell/SUSE - * Copyright 2009-2010 Canonical Ltd. + * Copyright 2009-2012 Canonical Ltd. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as @@ -23,6 +23,8 @@ #include "include/apparmor.h" #include "include/match.h" +#define base_idx(X) ((X) & 0xffffff) + /** * unpack_table - unpack a dfa table (one of accept, default, base, next check) * @blob: data to unpack (NOT NULL) @@ -30,7 +32,7 @@ * * Returns: pointer to table else NULL on failure * - * NOTE: must be freed by kvfree (not kmalloc) + * NOTE: must be freed by kvfree (not kfree) */ static struct table_header *unpack_table(char *blob, size_t bsize) { @@ -57,7 +59,7 @@ static struct table_header *unpack_table(char *blob, size_t bsize) if (bsize < tsize) goto out; - table = kvmalloc(tsize); + table = kvzalloc(tsize); if (table) { *table = th; if (th.td_flags == YYTD_DATA8) @@ -137,8 +139,7 @@ static int verify_dfa(struct aa_dfa *dfa, int flags) for (i = 0; i < state_count; i++) { if (DEFAULT_TABLE(dfa)[i] >= state_count) goto out; - /* TODO: do check that DEF state recursion terminates */ - if (BASE_TABLE(dfa)[i] + 255 >= trans_count) { + if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) { printk(KERN_ERR "AppArmor DFA next/check upper " "bounds error\n"); goto out; @@ -194,8 +195,8 @@ void aa_dfa_free_kref(struct kref *kref) * @flags: flags controlling what type of accept tables are acceptable * * Unpack a dfa that has been serialized. 
To find information on the dfa - * format look in Documentation/apparmor.txt - * Assumes the dfa @blob stream has been aligned on a 8 byte boundry + * format look in Documentation/security/apparmor.txt + * Assumes the dfa @blob stream has been aligned on a 8 byte boundary * * Returns: an unpacked dfa ready for matching or ERR_PTR on failure */ @@ -314,7 +315,7 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start, u8 *equiv = EQUIV_TABLE(dfa); /* default is direct to next state */ for (; len; len--) { - pos = base[state] + equiv[(u8) *str++]; + pos = base_idx(base[state]) + equiv[(u8) *str++]; if (check[pos] == state) state = next[pos]; else @@ -323,7 +324,7 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start, } else { /* default is direct to next state */ for (; len; len--) { - pos = base[state] + (u8) *str++; + pos = base_idx(base[state]) + (u8) *str++; if (check[pos] == state) state = next[pos]; else @@ -335,12 +336,12 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start, } /** - * aa_dfa_next_state - traverse @dfa to find state @str stops at + * aa_dfa_match - traverse @dfa to find state @str stops at * @dfa: the dfa to match @str against (NOT NULL) * @start: the state of the dfa to start matching in * @str: the null terminated string of bytes to match against the dfa (NOT NULL) * - * aa_dfa_next_state will match @str against the dfa and return the state it + * aa_dfa_match will match @str against the dfa and return the state it * finished matching in. The final state can be used to look up the accepting * label, or as the start state of a continuing match. * @@ -349,5 +350,79 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start, unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start, const char *str) { - return aa_dfa_match_len(dfa, start, str, strlen(str)); + u16 *def = DEFAULT_TABLE(dfa); + u32 *base = BASE_TABLE(dfa); + u16 *next = NEXT_TABLE(dfa); + u16 *check = CHECK_TABLE(dfa); + unsigned int state = start, pos; + + if (state == 0) + return 0; + + /* current state is <state>, matching character *str */ + if (dfa->tables[YYTD_ID_EC]) { + /* Equivalence class table defined */ + u8 *equiv = EQUIV_TABLE(dfa); + /* default is direct to next state */ + while (*str) { + pos = base_idx(base[state]) + equiv[(u8) *str++]; + if (check[pos] == state) + state = next[pos]; + else + state = def[state]; + } + } else { + /* default is direct to next state */ + while (*str) { + pos = base_idx(base[state]) + (u8) *str++; + if (check[pos] == state) + state = next[pos]; + else + state = def[state]; + } + } + + return state; +} + +/** + * aa_dfa_next - step one character to the next state in the dfa + * @dfa: the dfa to tranverse (NOT NULL) + * @state: the state to start in + * @c: the input character to transition on + * + * aa_dfa_match will step through the dfa by one input character @c + * + * Returns: state reach after input @c + */ +unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state, + const char c) +{ + u16 *def = DEFAULT_TABLE(dfa); + u32 *base = BASE_TABLE(dfa); + u16 *next = NEXT_TABLE(dfa); + u16 *check = CHECK_TABLE(dfa); + unsigned int pos; + + /* current state is <state>, matching character *str */ + if (dfa->tables[YYTD_ID_EC]) { + /* Equivalence class table defined */ + u8 *equiv = EQUIV_TABLE(dfa); + /* default is direct to next state */ + + pos = base_idx(base[state]) + equiv[(u8) c]; + if (check[pos] == state) + state = next[pos]; + else + state = def[state]; + } else { + /* default 
is direct to next state */ + pos = base_idx(base[state]) + (u8) c; + if (check[pos] == state) + state = next[pos]; + else + state = def[state]; + } + + return state; } diff --git a/security/apparmor/path.c b/security/apparmor/path.c index 36cc0cc39e7..35b394a75d7 100644 --- a/security/apparmor/path.c +++ b/security/apparmor/path.c @@ -13,7 +13,6 @@ */ #include <linux/magic.h> -#include <linux/mnt_namespace.h> #include <linux/mount.h> #include <linux/namei.h> #include <linux/nsproxy.h> @@ -57,33 +56,57 @@ static int prepend(char **buffer, int buflen, const char *str, int namelen) static int d_namespace_path(struct path *path, char *buf, int buflen, char **name, int flags) { - struct path root, tmp; char *res; - int connected, error = 0; + int error = 0; + int connected = 1; + + if (path->mnt->mnt_flags & MNT_INTERNAL) { + /* it's not mounted anywhere */ + res = dentry_path(path->dentry, buf, buflen); + *name = res; + if (IS_ERR(res)) { + *name = buf; + return PTR_ERR(res); + } + if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC && + strncmp(*name, "/sys/", 5) == 0) { + /* TODO: convert over to using a per namespace + * control instead of hard coded /proc + */ + return prepend(name, *name - buf, "/proc", 5); + } + return 0; + } - /* Get the root we want to resolve too, released below */ + /* resolve paths relative to chroot?*/ if (flags & PATH_CHROOT_REL) { - /* resolve paths relative to chroot */ + struct path root; get_fs_root(current->fs, &root); + res = __d_path(path, &root, buf, buflen); + path_put(&root); } else { - /* resolve paths relative to namespace */ - root.mnt = current->nsproxy->mnt_ns->root; - root.dentry = root.mnt->mnt_root; - path_get(&root); + res = d_absolute_path(path, buf, buflen); + if (!our_mnt(path->mnt)) + connected = 0; } - tmp = root; - res = __d_path(path, &tmp, buf, buflen); - - *name = res; /* handle error conditions - and still allow a partial path to * be returned. */ - if (IS_ERR(res)) { - error = PTR_ERR(res); - *name = buf; - goto out; - } + if (!res || IS_ERR(res)) { + if (PTR_ERR(res) == -ENAMETOOLONG) + return -ENAMETOOLONG; + connected = 0; + res = dentry_path_raw(path->dentry, buf, buflen); + if (IS_ERR(res)) { + error = PTR_ERR(res); + *name = buf; + goto out; + }; + } else if (!our_mnt(path->mnt)) + connected = 0; + + *name = res; /* Handle two cases: * 1. A deleted dentry && profile is not allowing mediation of deleted @@ -97,10 +120,7 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, goto out; } - /* Determine if the path is connected to the expected root */ - connected = tmp.dentry == root.dentry && tmp.mnt == root.mnt; - - /* If the path is not connected, + /* If the path is not connected to the expected root, * check if it is a sysctl and handle specially else remove any * leading / that __d_path may have returned. * Unless @@ -112,29 +132,19 @@ static int d_namespace_path(struct path *path, char *buf, int buflen, * namespace root. */ if (!connected) { - /* is the disconnect path a sysctl? 
*/ - if (tmp.dentry->d_sb->s_magic == PROC_SUPER_MAGIC && - strncmp(*name, "/sys/", 5) == 0) { - /* TODO: convert over to using a per namespace - * control instead of hard coded /proc - */ - error = prepend(name, *name - buf, "/proc", 5); - } else if (!(flags & PATH_CONNECT_PATH) && + if (!(flags & PATH_CONNECT_PATH) && !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) && - (tmp.mnt == current->nsproxy->mnt_ns->root && - tmp.dentry == tmp.mnt->mnt_root))) { + our_mnt(path->mnt))) { /* disconnected path, don't return pathname starting * with '/' */ - error = -ESTALE; + error = -EACCES; if (*res == '/') *name = res + 1; } } out: - path_put(&root); - return error; } @@ -149,7 +159,7 @@ out: * Returns: %0 else error on failure */ static int get_name_to_buffer(struct path *path, int flags, char *buffer, - int size, char **name) + int size, char **name, const char **info) { int adjust = (flags & PATH_IS_DIR) ? 1 : 0; int error = d_namespace_path(path, buffer, size - adjust, name, flags); @@ -161,15 +171,27 @@ static int get_name_to_buffer(struct path *path, int flags, char *buffer, */ strcpy(&buffer[size - 2], "/"); + if (info && error) { + if (error == -ENOENT) + *info = "Failed name lookup - deleted entry"; + else if (error == -EACCES) + *info = "Failed name lookup - disconnected path"; + else if (error == -ENAMETOOLONG) + *info = "Failed name lookup - name too long"; + else + *info = "Failed name lookup"; + } + return error; } /** - * aa_get_name - compute the pathname of a file + * aa_path_name - compute the pathname of a file * @path: path the file (NOT NULL) * @flags: flags controlling path name generation * @buffer: buffer that aa_get_name() allocated (NOT NULL) * @name: Returns - the generated path name if !error (NOT NULL) + * @info: Returns - information on why the path lookup failed (MAYBE NULL) * * @name is a pointer to the beginning of the pathname (which usually differs * from the beginning of the buffer), or NULL. 
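
aa_path_name() (the renamed aa_get_name(), continued just below) keeps the strategy of retrying the lookup with a doubled buffer until the name fits or aa_g_path_max is exceeded. A stand-alone sketch of that grow-and-retry pattern; build_name() is a hypothetical stand-in for get_name_to_buffer() and the size limit is illustrative:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PATH_MAX_GUESS 4096   /* stand-in for aa_g_path_max */

/* hypothetical producer: fails with -ENAMETOOLONG when the buffer is too small */
static int build_name(const char *src, char *buf, size_t size)
{
        if (strlen(src) + 1 > size)
                return -ENAMETOOLONG;
        strcpy(buf, src);
        return 0;
}

static int get_path(const char *src, char **out)
{
        size_t size = 256;
        for (;;) {
                char *buf = malloc(size);
                if (!buf)
                        return -ENOMEM;
                if (build_name(src, buf, size) == 0) {
                        *out = buf;
                        return 0;
                }
                free(buf);
                size <<= 1;                     /* double the buffer and retry */
                if (size > PATH_MAX_GUESS)
                        return -ENAMETOOLONG;   /* give up, as the kernel loop does */
        }
}

int main(void)
{
        char *name;
        if (get_path("/a/rather/long/example/path", &name) == 0) {
                printf("%s\n", name);
                free(name);
        }
        return 0;
}
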
If there is an error @name @@ -182,7 +204,8 @@ static int get_name_to_buffer(struct path *path, int flags, char *buffer, * * Returns: %0 else error code if could retrieve name */ -int aa_get_name(struct path *path, int flags, char **buffer, const char **name) +int aa_path_name(struct path *path, int flags, char **buffer, const char **name, + const char **info) { char *buf, *str = NULL; int size = 256; @@ -196,7 +219,7 @@ int aa_get_name(struct path *path, int flags, char **buffer, const char **name) if (!buf) return -ENOMEM; - error = get_name_to_buffer(path, flags, buf, size, &str); + error = get_name_to_buffer(path, flags, buf, size, &str, info); if (error != -ENAMETOOLONG) break; @@ -204,6 +227,7 @@ int aa_get_name(struct path *path, int flags, char **buffer, const char **name) size <<= 1; if (size > aa_g_path_max) return -ENAMETOOLONG; + *info = NULL; } *buffer = buf; *name = str; diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c index 4f0eadee78b..705c2879d3a 100644 --- a/security/apparmor/policy.c +++ b/security/apparmor/policy.c @@ -87,16 +87,16 @@ #include "include/policy.h" #include "include/policy_unpack.h" #include "include/resource.h" -#include "include/sid.h" /* root profile namespace */ struct aa_namespace *root_ns; -const char *profile_mode_names[] = { +const char *const aa_profile_mode_names[] = { "enforce", "complain", "kill", + "unconfined", }; /** @@ -142,7 +142,6 @@ static bool policy_init(struct aa_policy *policy, const char *prefix, policy->name = (char *)hname_tail(policy->hname); INIT_LIST_HEAD(&policy->list); INIT_LIST_HEAD(&policy->profiles); - kref_init(&policy->count); return 1; } @@ -154,13 +153,13 @@ static bool policy_init(struct aa_policy *policy, const char *prefix, static void policy_destroy(struct aa_policy *policy) { /* still contains profiles -- invalid */ - if (!list_empty(&policy->profiles)) { + if (on_list_rcu(&policy->profiles)) { AA_ERROR("%s: internal error, " "policy '%s' still contains profiles\n", __func__, policy->name); BUG(); } - if (!list_empty(&policy->list)) { + if (on_list_rcu(&policy->list)) { AA_ERROR("%s: internal error, policy '%s' still on list\n", __func__, policy->name); BUG(); @@ -175,7 +174,7 @@ static void policy_destroy(struct aa_policy *policy) * @head: list to search (NOT NULL) * @name: name to search for (NOT NULL) * - * Requires: correct locks for the @head list be held + * Requires: rcu_read_lock be held * * Returns: unrefcounted policy that match @name or NULL if not found */ @@ -183,7 +182,7 @@ static struct aa_policy *__policy_find(struct list_head *head, const char *name) { struct aa_policy *policy; - list_for_each_entry(policy, head, list) { + list_for_each_entry_rcu(policy, head, list) { if (!strcmp(policy->name, name)) return policy; } @@ -196,7 +195,7 @@ static struct aa_policy *__policy_find(struct list_head *head, const char *name) * @str: string to search for (NOT NULL) * @len: length of match required * - * Requires: correct locks for the @head list be held + * Requires: rcu_read_lock be held * * Returns: unrefcounted policy that match @str or NULL if not found * @@ -208,7 +207,7 @@ static struct aa_policy *__policy_strn_find(struct list_head *head, { struct aa_policy *policy; - list_for_each_entry(policy, head, list) { + list_for_each_entry_rcu(policy, head, list) { if (aa_strneq(policy->name, str, len)) return policy; } @@ -285,23 +284,21 @@ static struct aa_namespace *alloc_namespace(const char *prefix, goto fail_ns; INIT_LIST_HEAD(&ns->sub_ns); - rwlock_init(&ns->lock); + 
mutex_init(&ns->lock); /* released by free_namespace */ ns->unconfined = aa_alloc_profile("unconfined"); if (!ns->unconfined) goto fail_unconfined; - ns->unconfined->sid = aa_alloc_sid(); - ns->unconfined->flags = PFLAG_UNCONFINED | PFLAG_IX_ON_NAME_ERROR | - PFLAG_IMMUTABLE; + ns->unconfined->flags = PFLAG_IX_ON_NAME_ERROR | + PFLAG_IMMUTABLE | PFLAG_NS_COUNT; + ns->unconfined->mode = APPARMOR_UNCONFINED; - /* - * released by free_namespace, however __remove_namespace breaks - * the cyclic references (ns->unconfined, and unconfined->ns) and - * replaces with refs to parent namespace unconfined - */ - ns->unconfined->ns = aa_get_namespace(ns); + /* ns and ns->unconfined share ns->unconfined refcount */ + ns->unconfined->ns = ns; + + atomic_set(&ns->uniq_null, 0); return ns; @@ -327,30 +324,19 @@ static void free_namespace(struct aa_namespace *ns) policy_destroy(&ns->base); aa_put_namespace(ns->parent); - if (ns->unconfined && ns->unconfined->ns == ns) - ns->unconfined->ns = NULL; - - aa_put_profile(ns->unconfined); + ns->unconfined->ns = NULL; + aa_free_profile(ns->unconfined); kzfree(ns); } /** - * aa_free_namespace_kref - free aa_namespace by kref (see aa_put_namespace) - * @kr: kref callback for freeing of a namespace (NOT NULL) - */ -void aa_free_namespace_kref(struct kref *kref) -{ - free_namespace(container_of(kref, struct aa_namespace, base.count)); -} - -/** * __aa_find_namespace - find a namespace on a list by @name * @head: list to search for namespace on (NOT NULL) * @name: name of namespace to look for (NOT NULL) * * Returns: unrefcounted namespace * - * Requires: ns lock be held + * Requires: rcu_read_lock be held */ static struct aa_namespace *__aa_find_namespace(struct list_head *head, const char *name) @@ -373,9 +359,9 @@ struct aa_namespace *aa_find_namespace(struct aa_namespace *root, { struct aa_namespace *ns = NULL; - read_lock(&root->lock); + rcu_read_lock(); ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name)); - read_unlock(&root->lock); + rcu_read_unlock(); return ns; } @@ -392,7 +378,7 @@ static struct aa_namespace *aa_prepare_namespace(const char *name) root = aa_current_profile()->ns; - write_lock(&root->lock); + mutex_lock(&root->lock); /* if name isn't specified the profile is loaded to the current ns */ if (!name) { @@ -405,31 +391,23 @@ static struct aa_namespace *aa_prepare_namespace(const char *name) /* released by caller */ ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name)); if (!ns) { - /* namespace not found */ - struct aa_namespace *new_ns; - write_unlock(&root->lock); - new_ns = alloc_namespace(root->base.hname, name); - if (!new_ns) - return NULL; - write_lock(&root->lock); - /* test for race when new_ns was allocated */ - ns = __aa_find_namespace(&root->sub_ns, name); - if (!ns) { - /* add parent ref */ - new_ns->parent = aa_get_namespace(root); - - list_add(&new_ns->base.list, &root->sub_ns); - /* add list ref */ - ns = aa_get_namespace(new_ns); - } else { - /* raced so free the new one */ - free_namespace(new_ns); - /* get reference on namespace */ - aa_get_namespace(ns); + ns = alloc_namespace(root->base.hname, name); + if (!ns) + goto out; + if (__aa_fs_namespace_mkdir(ns, ns_subns_dir(root), name)) { + AA_ERROR("Failed to create interface for ns %s\n", + ns->base.name); + free_namespace(ns); + ns = NULL; + goto out; } + ns->parent = aa_get_namespace(root); + list_add_rcu(&ns->base.list, &root->sub_ns); + /* add list ref */ + aa_get_namespace(ns); } out: - write_unlock(&root->lock); + mutex_unlock(&root->lock); /* return 
ref */ return ns; @@ -447,7 +425,7 @@ out: static void __list_add_profile(struct list_head *list, struct aa_profile *profile) { - list_add(&profile->base.list, list); + list_add_rcu(&profile->base.list, list); /* get list reference */ aa_get_profile(profile); } @@ -466,50 +444,8 @@ static void __list_add_profile(struct list_head *list, */ static void __list_remove_profile(struct aa_profile *profile) { - list_del_init(&profile->base.list); - if (!(profile->flags & PFLAG_NO_LIST_REF)) - /* release list reference */ - aa_put_profile(profile); -} - -/** - * __replace_profile - replace @old with @new on a list - * @old: profile to be replaced (NOT NULL) - * @new: profile to replace @old with (NOT NULL) - * - * Will duplicate and refcount elements that @new inherits from @old - * and will inherit @old children. - * - * refcount @new for list, put @old list refcount - * - * Requires: namespace list lock be held, or list not be shared - */ -static void __replace_profile(struct aa_profile *old, struct aa_profile *new) -{ - struct aa_policy *policy; - struct aa_profile *child, *tmp; - - if (old->parent) - policy = &old->parent->base; - else - policy = &old->ns->base; - - /* released when @new is freed */ - new->parent = aa_get_profile(old->parent); - new->ns = aa_get_namespace(old->ns); - new->sid = old->sid; - __list_add_profile(&policy->profiles, new); - /* inherit children */ - list_for_each_entry_safe(child, tmp, &old->base.profiles, base.list) { - aa_put_profile(child->parent); - child->parent = aa_get_profile(new); - /* list refcount transferred to @new*/ - list_move(&child->base.list, &new->base.profiles); - } - - /* released by free_profile */ - old->replacedby = aa_get_profile(new); - __list_remove_profile(old); + list_del_rcu(&profile->base.list); + aa_put_profile(profile); } static void __profile_list_release(struct list_head *head); @@ -525,7 +461,8 @@ static void __remove_profile(struct aa_profile *profile) /* release any children lists first */ __profile_list_release(&profile->base.profiles); /* released by free_profile */ - profile->replacedby = aa_get_profile(profile->ns->unconfined); + __aa_update_replacedby(profile, profile->ns->unconfined); + __aa_fs_profile_rmdir(profile); __list_remove_profile(profile); } @@ -553,14 +490,17 @@ static void destroy_namespace(struct aa_namespace *ns) if (!ns) return; - write_lock(&ns->lock); + mutex_lock(&ns->lock); /* release all profiles in this namespace */ __profile_list_release(&ns->base.profiles); /* release all sub namespaces */ __ns_list_release(&ns->sub_ns); - write_unlock(&ns->lock); + if (ns->parent) + __aa_update_replacedby(ns->unconfined, ns->parent->unconfined); + __aa_fs_namespace_rmdir(ns); + mutex_unlock(&ns->lock); } /** @@ -571,25 +511,9 @@ static void destroy_namespace(struct aa_namespace *ns) */ static void __remove_namespace(struct aa_namespace *ns) { - struct aa_profile *unconfined = ns->unconfined; - /* remove ns from namespace list */ - list_del_init(&ns->base.list); - - /* - * break the ns, unconfined profile cyclic reference and forward - * all new unconfined profiles requests to the parent namespace - * This will result in all confined tasks that have a profile - * being removed, inheriting the parent->unconfined profile. 
- */ - if (ns->parent) - ns->unconfined = aa_get_profile(ns->parent->unconfined); - + list_del_rcu(&ns->base.list); destroy_namespace(ns); - - /* release original ns->unconfined ref */ - aa_put_profile(unconfined); - /* release ns->base.list ref, from removal above */ aa_put_namespace(ns); } @@ -635,6 +559,84 @@ void __init aa_free_root_ns(void) aa_put_namespace(ns); } + +static void free_replacedby(struct aa_replacedby *r) +{ + if (r) { + /* r->profile will not be updated any more as r is dead */ + aa_put_profile(rcu_dereference_protected(r->profile, true)); + kzfree(r); + } +} + + +void aa_free_replacedby_kref(struct kref *kref) +{ + struct aa_replacedby *r = container_of(kref, struct aa_replacedby, + count); + free_replacedby(r); +} + +/** + * aa_free_profile - free a profile + * @profile: the profile to free (MAYBE NULL) + * + * Free a profile, its hats and null_profile. All references to the profile, + * its hats and null_profile must have been put. + * + * If the profile was referenced from a task context, free_profile() will + * be called from an rcu callback routine, so we must not sleep here. + */ +void aa_free_profile(struct aa_profile *profile) +{ + AA_DEBUG("%s(%p)\n", __func__, profile); + + if (!profile) + return; + + /* free children profiles */ + policy_destroy(&profile->base); + aa_put_profile(rcu_access_pointer(profile->parent)); + + aa_put_namespace(profile->ns); + kzfree(profile->rename); + + aa_free_file_rules(&profile->file); + aa_free_cap_rules(&profile->caps); + aa_free_rlimit_rules(&profile->rlimits); + + kzfree(profile->dirname); + aa_put_dfa(profile->xmatch); + aa_put_dfa(profile->policy.dfa); + aa_put_replacedby(profile->replacedby); + + kzfree(profile->hash); + kzfree(profile); +} + +/** + * aa_free_profile_rcu - free aa_profile by rcu (called by aa_free_profile_kref) + * @head: rcu_head callback for freeing of a profile (NOT NULL) + */ +static void aa_free_profile_rcu(struct rcu_head *head) +{ + struct aa_profile *p = container_of(head, struct aa_profile, rcu); + if (p->flags & PFLAG_NS_COUNT) + free_namespace(p->ns); + else + aa_free_profile(p); +} + +/** + * aa_free_profile_kref - free aa_profile by kref (called by aa_put_profile) + * @kr: kref callback for freeing of a profile (NOT NULL) + */ +void aa_free_profile_kref(struct kref *kref) +{ + struct aa_profile *p = container_of(kref, struct aa_profile, count); + call_rcu(&p->rcu, aa_free_profile_rcu); +} + /** * aa_alloc_profile - allocate, initialize and return a new profile * @hname: name of the profile (NOT NULL) @@ -650,13 +652,23 @@ struct aa_profile *aa_alloc_profile(const char *hname) if (!profile) return NULL; - if (!policy_init(&profile->base, NULL, hname)) { - kzfree(profile); - return NULL; - } + profile->replacedby = kzalloc(sizeof(struct aa_replacedby), GFP_KERNEL); + if (!profile->replacedby) + goto fail; + kref_init(&profile->replacedby->count); + + if (!policy_init(&profile->base, NULL, hname)) + goto fail; + kref_init(&profile->count); /* refcount released by caller */ return profile; + +fail: + kzfree(profile->replacedby); + kzfree(profile); + + return NULL; } /** @@ -665,7 +677,7 @@ struct aa_profile *aa_alloc_profile(const char *hname) * @hat: true if the null- learning profile is a hat * * Create a null- complain mode profile used in learning mode. The name of - * the profile is unique and follows the format of parent//null-sid. + * the profile is unique and follows the format of parent//null-<uniq>. 
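
With the sid allocator gone, learning profiles take their unique suffix from the per-namespace atomic counter ns->uniq_null seen in the hunk just below. A user-space sketch of that naming scheme using C11 atomics; only the "%s//null-%x" format comes from the patch, the counter placement and helper name are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static atomic_int uniq_null;   /* lives per namespace in the kernel code */

/* Build "<parent>//null-<uniq>"; caller frees the result. */
static char *null_profile_name(const char *parent)
{
        int uniq = atomic_fetch_add(&uniq_null, 1) + 1;  /* like atomic_inc_return() */
        size_t len = strlen(parent) + sizeof("//null-") + 8;
        char *name = malloc(len);
        if (!name)
                return NULL;
        snprintf(name, len, "%s//null-%x", parent, uniq);
        return name;
}

int main(void)
{
        char *a = null_profile_name("/usr/bin/foo");
        char *b = null_profile_name("/usr/bin/foo");
        printf("%s\n%s\n", a, b);   /* two distinct names */
        free(a);
        free(b);
        return 0;
}
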
* * null profiles are added to the profile list but the list does not * hold a count on them so that they are automatically released when @@ -677,96 +689,39 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat) { struct aa_profile *profile = NULL; char *name; - u32 sid = aa_alloc_sid(); + int uniq = atomic_inc_return(&parent->ns->uniq_null); /* freed below */ name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, GFP_KERNEL); if (!name) goto fail; - sprintf(name, "%s//null-%x", parent->base.hname, sid); + sprintf(name, "%s//null-%x", parent->base.hname, uniq); profile = aa_alloc_profile(name); kfree(name); if (!profile) goto fail; - profile->sid = sid; profile->mode = APPARMOR_COMPLAIN; profile->flags = PFLAG_NULL; if (hat) profile->flags |= PFLAG_HAT; /* released on free_profile */ - profile->parent = aa_get_profile(parent); + rcu_assign_pointer(profile->parent, aa_get_profile(parent)); profile->ns = aa_get_namespace(parent->ns); - write_lock(&profile->ns->lock); + mutex_lock(&profile->ns->lock); __list_add_profile(&parent->base.profiles, profile); - write_unlock(&profile->ns->lock); + mutex_unlock(&profile->ns->lock); /* refcount released by caller */ return profile; fail: - aa_free_sid(sid); return NULL; } -/** - * free_profile - free a profile - * @profile: the profile to free (MAYBE NULL) - * - * Free a profile, its hats and null_profile. All references to the profile, - * its hats and null_profile must have been put. - * - * If the profile was referenced from a task context, free_profile() will - * be called from an rcu callback routine, so we must not sleep here. - */ -static void free_profile(struct aa_profile *profile) -{ - AA_DEBUG("%s(%p)\n", __func__, profile); - - if (!profile) - return; - - if (!list_empty(&profile->base.list)) { - AA_ERROR("%s: internal error, " - "profile '%s' still on ns list\n", - __func__, profile->base.name); - BUG(); - } - - /* free children profiles */ - policy_destroy(&profile->base); - aa_put_profile(profile->parent); - - aa_put_namespace(profile->ns); - kzfree(profile->rename); - - aa_free_file_rules(&profile->file); - aa_free_cap_rules(&profile->caps); - aa_free_rlimit_rules(&profile->rlimits); - - aa_free_sid(profile->sid); - aa_put_dfa(profile->xmatch); - - aa_put_profile(profile->replacedby); - - kzfree(profile); -} - -/** - * aa_free_profile_kref - free aa_profile by kref (called by aa_put_profile) - * @kr: kref callback for freeing of a profile (NOT NULL) - */ -void aa_free_profile_kref(struct kref *kref) -{ - struct aa_profile *p = container_of(kref, struct aa_profile, - base.count); - - free_profile(p); -} - /* TODO: profile accounting - setup in remove */ /** @@ -774,7 +729,7 @@ void aa_free_profile_kref(struct kref *kref) * @head: list to search (NOT NULL) * @name: name of profile (NOT NULL) * - * Requires: ns lock protecting list be held + * Requires: rcu_read_lock be held * * Returns: unrefcounted profile ptr, or NULL if not found */ @@ -789,7 +744,7 @@ static struct aa_profile *__find_child(struct list_head *head, const char *name) * @name: name of profile (NOT NULL) * @len: length of @name substring to match * - * Requires: ns lock protecting list be held + * Requires: rcu_read_lock be held * * Returns: unrefcounted profile ptr, or NULL if not found */ @@ -810,9 +765,9 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name) { struct aa_profile *profile; - read_lock(&parent->ns->lock); + rcu_read_lock(); profile = aa_get_profile(__find_child(&parent->base.profiles, name)); - 
read_unlock(&parent->ns->lock); + rcu_read_unlock(); /* refcount released by caller */ return profile; @@ -827,7 +782,7 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name) * that matches hname does not need to exist, in general this * is used to load a new profile. * - * Requires: ns->lock be held + * Requires: rcu_read_lock be held * * Returns: unrefcounted policy or NULL if not found */ @@ -859,7 +814,7 @@ static struct aa_policy *__lookup_parent(struct aa_namespace *ns, * @base: base list to start looking up profile name from (NOT NULL) * @hname: hierarchical profile name (NOT NULL) * - * Requires: ns->lock be held + * Requires: rcu_read_lock be held * * Returns: unrefcounted profile pointer or NULL if not found * @@ -898,9 +853,15 @@ struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *hname) { struct aa_profile *profile; - read_lock(&ns->lock); - profile = aa_get_profile(__lookup_profile(&ns->base, hname)); - read_unlock(&ns->lock); + rcu_read_lock(); + do { + profile = __lookup_profile(&ns->base, hname); + } while (profile && !aa_get_profile_not0(profile)); + rcu_read_unlock(); + + /* the unconfined profile is not in the regular profile list */ + if (!profile && strcmp(hname, "unconfined") == 0) + profile = aa_get_newest_profile(ns->unconfined); /* refcount released by caller */ return profile; @@ -930,26 +891,6 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace, } /** - * __add_new_profile - simple wrapper around __list_add_profile - * @ns: namespace that profile is being added to (NOT NULL) - * @policy: the policy container to add the profile to (NOT NULL) - * @profile: profile to add (NOT NULL) - * - * add a profile to a list and do other required basic allocations - */ -static void __add_new_profile(struct aa_namespace *ns, struct aa_policy *policy, - struct aa_profile *profile) -{ - if (policy != &ns->base) - /* released on profile replacement or free_profile */ - profile->parent = aa_get_profile((struct aa_profile *) policy); - __list_add_profile(&policy->profiles, profile); - /* released on free_profile */ - profile->sid = aa_alloc_sid(); - profile->ns = aa_get_namespace(ns); -} - -/** * aa_audit_policy - Do auditing of policy changes * @op: policy operation being performed * @gfp: memory allocation flags @@ -963,11 +904,13 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info, int error) { struct common_audit_data sa; - COMMON_AUDIT_DATA_INIT(&sa, NONE); - sa.aad.op = op; - sa.aad.name = name; - sa.aad.info = info; - sa.aad.error = error; + struct apparmor_audit_data aad = {0,}; + sa.type = LSM_AUDIT_DATA_NONE; + sa.aad = &aad; + aad.op = op; + aad.name = name; + aad.info = info; + aad.error = error; return aa_audit(AUDIT_APPARMOR_STATUS, __aa_current_profile(), gfp, &sa, NULL); @@ -995,6 +938,121 @@ bool aa_may_manage_policy(int op) return 1; } +static struct aa_profile *__list_lookup_parent(struct list_head *lh, + struct aa_profile *profile) +{ + const char *base = hname_tail(profile->base.hname); + long len = base - profile->base.hname; + struct aa_load_ent *ent; + + /* parent won't have trailing // so remove from len */ + if (len <= 2) + return NULL; + len -= 2; + + list_for_each_entry(ent, lh, list) { + if (ent->new == profile) + continue; + if (strncmp(ent->new->base.hname, profile->base.hname, len) == + 0 && ent->new->base.hname[len] == 0) + return ent->new; + } + + return NULL; +} + +/** + * __replace_profile - replace @old with @new on a list + * @old: profile to be 
replaced (NOT NULL) + * @new: profile to replace @old with (NOT NULL) + * @share_replacedby: transfer @old->replacedby to @new + * + * Will duplicate and refcount elements that @new inherits from @old + * and will inherit @old children. + * + * refcount @new for list, put @old list refcount + * + * Requires: namespace list lock be held, or list not be shared + */ +static void __replace_profile(struct aa_profile *old, struct aa_profile *new, + bool share_replacedby) +{ + struct aa_profile *child, *tmp; + + if (!list_empty(&old->base.profiles)) { + LIST_HEAD(lh); + list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu); + + list_for_each_entry_safe(child, tmp, &lh, base.list) { + struct aa_profile *p; + + list_del_init(&child->base.list); + p = __find_child(&new->base.profiles, child->base.name); + if (p) { + /* @p replaces @child */ + __replace_profile(child, p, share_replacedby); + continue; + } + + /* inherit @child and its children */ + /* TODO: update hname of inherited children */ + /* list refcount transferred to @new */ + p = aa_deref_parent(child); + rcu_assign_pointer(child->parent, aa_get_profile(new)); + list_add_rcu(&child->base.list, &new->base.profiles); + aa_put_profile(p); + } + } + + if (!rcu_access_pointer(new->parent)) { + struct aa_profile *parent = aa_deref_parent(old); + rcu_assign_pointer(new->parent, aa_get_profile(parent)); + } + __aa_update_replacedby(old, new); + if (share_replacedby) { + aa_put_replacedby(new->replacedby); + new->replacedby = aa_get_replacedby(old->replacedby); + } else if (!rcu_access_pointer(new->replacedby->profile)) + /* aafs interface uses replacedby */ + rcu_assign_pointer(new->replacedby->profile, + aa_get_profile(new)); + __aa_fs_profile_migrate_dents(old, new); + + if (list_empty(&new->base.list)) { + /* new is not on a list already */ + list_replace_rcu(&old->base.list, &new->base.list); + aa_get_profile(new); + aa_put_profile(old); + } else + __list_remove_profile(old); +} + +/** + * __lookup_replace - lookup replacement information for a profile + * @ns - namespace the lookup occurs in + * @hname - name of profile to lookup + * @noreplace - true if not replacing an existing profile + * @p - Returns: profile to be replaced + * @info - Returns: info string on why lookup failed + * + * Returns: profile to replace (no ref) on success else ptr error + */ +static int __lookup_replace(struct aa_namespace *ns, const char *hname, + bool noreplace, struct aa_profile **p, + const char **info) +{ + *p = aa_get_profile(__lookup_profile(&ns->base, hname)); + if (*p) { + int error = replacement_allowed(*p, noreplace, info); + if (error) { + *info = "profile can not be replaced"; + return error; + } + } + + return 0; +} + /** * aa_replace_profiles - replace profile(s) on the profile list * @udata: serialized data stream (NOT NULL) @@ -1009,21 +1067,17 @@ bool aa_may_manage_policy(int op) */ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) { - struct aa_policy *policy; - struct aa_profile *old_profile = NULL, *new_profile = NULL; - struct aa_profile *rename_profile = NULL; - struct aa_namespace *ns = NULL; const char *ns_name, *name = NULL, *info = NULL; + struct aa_namespace *ns = NULL; + struct aa_load_ent *ent, *tmp; int op = OP_PROF_REPL; ssize_t error; + LIST_HEAD(lh); /* released below */ - new_profile = aa_unpack(udata, size, &ns_name); - if (IS_ERR(new_profile)) { - error = PTR_ERR(new_profile); - new_profile = NULL; - goto fail; - } + error = aa_unpack(udata, size, &lh, &ns_name); + if (error) + goto out; /* 
released below */ ns = aa_prepare_namespace(ns_name); @@ -1034,77 +1088,140 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace) goto fail; } - name = new_profile->base.hname; + mutex_lock(&ns->lock); + /* setup parent and ns info */ + list_for_each_entry(ent, &lh, list) { + struct aa_policy *policy; + + name = ent->new->base.hname; + error = __lookup_replace(ns, ent->new->base.hname, noreplace, + &ent->old, &info); + if (error) + goto fail_lock; + + if (ent->new->rename) { + error = __lookup_replace(ns, ent->new->rename, + noreplace, &ent->rename, + &info); + if (error) + goto fail_lock; + } - write_lock(&ns->lock); - /* no ref on policy only use inside lock */ - policy = __lookup_parent(ns, new_profile->base.hname); + /* released when @new is freed */ + ent->new->ns = aa_get_namespace(ns); - if (!policy) { - info = "parent does not exist"; - error = -ENOENT; - goto audit; + if (ent->old || ent->rename) + continue; + + /* no ref on policy only use inside lock */ + policy = __lookup_parent(ns, ent->new->base.hname); + if (!policy) { + struct aa_profile *p; + p = __list_lookup_parent(&lh, ent->new); + if (!p) { + error = -ENOENT; + info = "parent does not exist"; + name = ent->new->base.hname; + goto fail_lock; + } + rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); + } else if (policy != &ns->base) { + /* released on profile replacement or free_profile */ + struct aa_profile *p = (struct aa_profile *) policy; + rcu_assign_pointer(ent->new->parent, aa_get_profile(p)); + } } - old_profile = __find_child(&policy->profiles, new_profile->base.name); - /* released below */ - aa_get_profile(old_profile); + /* create new fs entries for introspection if needed */ + list_for_each_entry(ent, &lh, list) { + if (ent->old) { + /* inherit old interface files */ - if (new_profile->rename) { - rename_profile = __lookup_profile(&ns->base, - new_profile->rename); - /* released below */ - aa_get_profile(rename_profile); + /* if (ent->rename) + TODO: support rename */ + /* } else if (ent->rename) { + TODO: support rename */ + } else { + struct dentry *parent; + if (rcu_access_pointer(ent->new->parent)) { + struct aa_profile *p; + p = aa_deref_parent(ent->new); + parent = prof_child_dir(p); + } else + parent = ns_subprofs_dir(ent->new->ns); + error = __aa_fs_profile_mkdir(ent->new, parent); + } - if (!rename_profile) { - info = "profile to rename does not exist"; - name = new_profile->rename; - error = -ENOENT; - goto audit; + if (error) { + info = "failed to create "; + goto fail_lock; } } - error = replacement_allowed(old_profile, noreplace, &info); - if (error) - goto audit; - - error = replacement_allowed(rename_profile, noreplace, &info); - if (error) - goto audit; - -audit: - if (!old_profile && !rename_profile) - op = OP_PROF_LOAD; - - error = audit_policy(op, GFP_ATOMIC, name, info, error); - - if (!error) { - if (rename_profile) - __replace_profile(rename_profile, new_profile); - if (old_profile) { - /* when there are both rename and old profiles - * inherit old profiles sid - */ - if (rename_profile) - aa_free_sid(new_profile->sid); - __replace_profile(old_profile, new_profile); + /* Done with checks that may fail - do actual replacement */ + list_for_each_entry_safe(ent, tmp, &lh, list) { + list_del_init(&ent->list); + op = (!ent->old && !ent->rename) ? 
OP_PROF_LOAD : OP_PROF_REPL; + + audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error); + + if (ent->old) { + __replace_profile(ent->old, ent->new, 1); + if (ent->rename) { + /* aafs interface uses replacedby */ + struct aa_replacedby *r = ent->new->replacedby; + rcu_assign_pointer(r->profile, + aa_get_profile(ent->new)); + __replace_profile(ent->rename, ent->new, 0); + } + } else if (ent->rename) { + /* aafs interface uses replacedby */ + rcu_assign_pointer(ent->new->replacedby->profile, + aa_get_profile(ent->new)); + __replace_profile(ent->rename, ent->new, 0); + } else if (ent->new->parent) { + struct aa_profile *parent, *newest; + parent = aa_deref_parent(ent->new); + newest = aa_get_newest_profile(parent); + + /* parent replaced in this atomic set? */ + if (newest != parent) { + aa_get_profile(newest); + aa_put_profile(parent); + rcu_assign_pointer(ent->new->parent, newest); + } else + aa_put_profile(newest); + /* aafs interface uses replacedby */ + rcu_assign_pointer(ent->new->replacedby->profile, + aa_get_profile(ent->new)); + __list_add_profile(&parent->base.profiles, ent->new); + } else { + /* aafs interface uses replacedby */ + rcu_assign_pointer(ent->new->replacedby->profile, + aa_get_profile(ent->new)); + __list_add_profile(&ns->base.profiles, ent->new); } - if (!(old_profile || rename_profile)) - __add_new_profile(ns, policy, new_profile); + aa_load_ent_free(ent); } - write_unlock(&ns->lock); + mutex_unlock(&ns->lock); out: aa_put_namespace(ns); - aa_put_profile(rename_profile); - aa_put_profile(old_profile); - aa_put_profile(new_profile); + if (error) return error; return size; +fail_lock: + mutex_unlock(&ns->lock); fail: error = audit_policy(op, GFP_KERNEL, name, info, error); + + list_for_each_entry_safe(ent, tmp, &lh, list) { + list_del_init(&ent->list); + aa_load_ent_free(ent); + } + goto out; } @@ -1138,14 +1255,12 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) if (fqname[0] == ':') { char *ns_name; name = aa_split_fqname(fqname, &ns_name); - if (ns_name) { - /* released below */ - ns = aa_find_namespace(root, ns_name); - if (!ns) { - info = "namespace does not exist"; - error = -ENOENT; - goto fail; - } + /* released below */ + ns = aa_find_namespace(root, ns_name); + if (!ns) { + info = "namespace does not exist"; + error = -ENOENT; + goto fail; } } else /* released below */ @@ -1153,12 +1268,12 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) if (!name) { /* remove namespace - can only happen if fqname[0] == ':' */ - write_lock(&ns->parent->lock); + mutex_lock(&ns->parent->lock); __remove_namespace(ns); - write_unlock(&ns->parent->lock); + mutex_unlock(&ns->parent->lock); } else { /* remove profile */ - write_lock(&ns->lock); + mutex_lock(&ns->lock); profile = aa_get_profile(__lookup_profile(&ns->base, name)); if (!profile) { error = -ENOENT; @@ -1167,7 +1282,7 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) } name = profile->base.hname; __remove_profile(profile); - write_unlock(&ns->lock); + mutex_unlock(&ns->lock); } /* don't fail removal if audit fails */ @@ -1177,7 +1292,7 @@ ssize_t aa_remove_profiles(char *fqname, size_t size) return size; fail_ns_lock: - write_unlock(&ns->lock); + mutex_unlock(&ns->lock); aa_put_namespace(ns); fail: diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index eb3700e9fd3..a689f10930b 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c @@ -12,8 +12,8 @@ * published by the Free Software Foundation, version 2 of the * License. 
* - * AppArmor uses a serialized binary format for loading policy. - * To find policy format documentation look in Documentation/apparmor.txt + * AppArmor uses a serialized binary format for loading policy. To find + * policy format documentation look in Documentation/security/apparmor.txt * All policy is validated before it is used. */ @@ -24,10 +24,10 @@ #include "include/apparmor.h" #include "include/audit.h" #include "include/context.h" +#include "include/crypto.h" #include "include/match.h" #include "include/policy.h" #include "include/policy_unpack.h" -#include "include/sid.h" /* * The AppArmor interface treats data as a type byte followed by the @@ -70,13 +70,13 @@ struct aa_ext { static void audit_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; - if (sa->aad.iface.target) { - struct aa_profile *name = sa->aad.iface.target; + if (sa->aad->iface.target) { + struct aa_profile *name = sa->aad->iface.target; audit_log_format(ab, " name="); audit_log_untrustedstring(ab, name->base.hname); } - if (sa->aad.iface.pos) - audit_log_format(ab, " offset=%ld", sa->aad.iface.pos); + if (sa->aad->iface.pos) + audit_log_format(ab, " offset=%ld", sa->aad->iface.pos); } /** @@ -84,7 +84,7 @@ static void audit_cb(struct audit_buffer *ab, void *va) * @new: profile if it has been allocated (MAYBE NULL) * @name: name of the profile being manipulated (MAYBE NULL) * @info: any extra info about the failure (MAYBE NULL) - * @e: buffer position info (NOT NULL) + * @e: buffer position info * @error: error code * * Returns: %0 or error @@ -94,12 +94,15 @@ static int audit_iface(struct aa_profile *new, const char *name, { struct aa_profile *profile = __aa_current_profile(); struct common_audit_data sa; - COMMON_AUDIT_DATA_INIT(&sa, NONE); - sa.aad.iface.pos = e->pos - e->start; - sa.aad.iface.target = new; - sa.aad.name = name; - sa.aad.info = info; - sa.aad.error = error; + struct apparmor_audit_data aad = {0,}; + sa.type = LSM_AUDIT_DATA_NONE; + sa.aad = &aad; + if (e) + aad.iface.pos = e->pos - e->start; + aad.iface.target = new; + aad.name = name; + aad.info = info; + aad.error = error; return aa_audit(AUDIT_APPARMOR_STATUS, profile, GFP_KERNEL, &sa, audit_cb); @@ -287,6 +290,9 @@ static int unpack_strdup(struct aa_ext *e, char **string, const char *name) return res; } +#define DFA_VALID_PERM_MASK 0xffffffff +#define DFA_VALID_PERM2_MASK 0xffffffff + /** * verify_accept - verify the accept tables of a dfa * @dfa: dfa to verify accept tables of (NOT NULL) @@ -328,8 +334,10 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e) /* * The dfa is aligned with in the blob to 8 bytes * from the beginning of the stream. 
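
The unpack_dfa() hunk that continues just below now subtracts the not-necessarily-aligned bytes already consumed from the stream before computing the padding, so the blob is aligned to 8 bytes relative to the start of the stream rather than to the buffer. A small sketch of that padding computation with illustrative values; ALIGN_UP is the usual round-up macro:

#include <stddef.h>
#include <stdio.h>

#define ALIGN_UP(x, a) (((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Pad bytes needed to put the blob on an 8-byte boundary, measured from the
 * stream start and corrected for the unaligned prefix already consumed. */
static size_t dfa_pad(size_t blob_off, size_t consumed)
{
        size_t sz = blob_off - (consumed & 7);
        return ALIGN_UP(sz, 8) - sz;
}

int main(void)
{
        printf("%zu\n", dfa_pad(21, 5));  /* 21 - (5 & 7) = 16, already aligned: pad 0 */
        printf("%zu\n", dfa_pad(23, 5));  /* 23 - 5 = 18, rounds up to 24: pad 6 */
        return 0;
}
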
+ * alignment adjust needed by dfa unpack */ - size_t sz = blob - (char *)e->start; + size_t sz = blob - (char *) e->start - + ((e->pos - e->start) & 7); size_t pad = ALIGN(sz, 8) - sz; int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) | TO_ACCEPT2_FLAG(YYTD_DATA32); @@ -359,7 +367,7 @@ fail: * @e: serialized data extent information (NOT NULL) * @profile: profile to add the accept table to (NOT NULL) * - * Returns: 1 if table succesfully unpacked + * Returns: 1 if table successfully unpacked */ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile) { @@ -381,11 +389,11 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile) profile->file.trans.size = size; for (i = 0; i < size; i++) { char *str; - int c, j, size = unpack_strdup(e, &str, NULL); + int c, j, size2 = unpack_strdup(e, &str, NULL); /* unpack_strdup verifies that the last character is * null termination byte. */ - if (!size) + if (!size2) goto fail; profile->file.trans.table[i] = str; /* verify that name doesn't start with space */ @@ -393,7 +401,7 @@ static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile) goto fail; /* count internal # of internal \0 */ - for (c = j = 0; j < size - 2; j++) { + for (c = j = 0; j < size2 - 2; j++) { if (!str[j]) c++; } @@ -440,11 +448,11 @@ static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile) if (size > RLIM_NLIMITS) goto fail; for (i = 0; i < size; i++) { - u64 tmp = 0; + u64 tmp2 = 0; int a = aa_map_resource(i); - if (!unpack_u64(e, &tmp, NULL)) + if (!unpack_u64(e, &tmp2, NULL)) goto fail; - profile->rlimits.limits[a].rlim_max = tmp; + profile->rlimits.limits[a].rlim_max = tmp2; } if (!unpack_nameX(e, AA_ARRAYEND, NULL)) goto fail; @@ -468,7 +476,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e) { struct aa_profile *profile = NULL; const char *name = NULL; - int error = -EPROTO; + int i, error = -EPROTO; kernel_cap_t tmpcap; u32 tmp; @@ -485,6 +493,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e) /* profile renaming is optional */ (void) unpack_str(e, &profile->rename, "rename"); + /* attachment string is optional */ + (void) unpack_str(e, &profile->attach, "attach"); + /* xmatch is optional and may be NULL */ profile->xmatch = unpack_dfa(e); if (IS_ERR(profile->xmatch)) { @@ -504,12 +515,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e) goto fail; if (!unpack_u32(e, &tmp, NULL)) goto fail; - if (tmp) + if (tmp & PACKED_FLAG_HAT) profile->flags |= PFLAG_HAT; if (!unpack_u32(e, &tmp, NULL)) goto fail; - if (tmp) + if (tmp == PACKED_MODE_COMPLAIN) profile->mode = APPARMOR_COMPLAIN; + else if (tmp == PACKED_MODE_KILL) + profile->mode = APPARMOR_KILL; + else if (tmp == PACKED_MODE_UNCONFINED) + profile->mode = APPARMOR_UNCONFINED; if (!unpack_u32(e, &tmp, NULL)) goto fail; if (tmp) @@ -554,11 +569,35 @@ static struct aa_profile *unpack_profile(struct aa_ext *e) goto fail; if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL)) goto fail; + if (!unpack_nameX(e, AA_STRUCTEND, NULL)) + goto fail; } if (!unpack_rlimits(e, profile)) goto fail; + if (unpack_nameX(e, AA_STRUCT, "policydb")) { + /* generic policy dfa - optional and may be NULL */ + profile->policy.dfa = unpack_dfa(e); + if (IS_ERR(profile->policy.dfa)) { + error = PTR_ERR(profile->policy.dfa); + profile->policy.dfa = NULL; + goto fail; + } + if (!unpack_u32(e, &profile->policy.start[0], "start")) + /* default start state */ + profile->policy.start[0] = DFA_START; + /* setup class index */ + for (i = AA_CLASS_FILE; i 
<= AA_CLASS_LAST; i++) { + profile->policy.start[i] = + aa_dfa_next(profile->policy.dfa, + profile->policy.start[0], + i); + } + if (!unpack_nameX(e, AA_STRUCTEND, NULL)) + goto fail; + } + /* get file rules */ profile->file.dfa = unpack_dfa(e); if (IS_ERR(profile->file.dfa)) { @@ -585,7 +624,7 @@ fail: else if (!name) name = "unknown"; audit_iface(profile, name, "failed to unpack profile", e, error); - aa_put_profile(profile); + aa_free_profile(profile); return ERR_PTR(error); } @@ -593,29 +632,41 @@ fail: /** * verify_head - unpack serialized stream header * @e: serialized data read head (NOT NULL) + * @required: whether the header is required or optional * @ns: Returns - namespace if one is specified else NULL (NOT NULL) * * Returns: error or 0 if header is good */ -static int verify_header(struct aa_ext *e, const char **ns) +static int verify_header(struct aa_ext *e, int required, const char **ns) { int error = -EPROTONOSUPPORT; + const char *name = NULL; + *ns = NULL; + /* get the interface version */ if (!unpack_u32(e, &e->version, "version")) { - audit_iface(NULL, NULL, "invalid profile format", e, error); - return error; - } + if (required) { + audit_iface(NULL, NULL, "invalid profile format", e, + error); + return error; + } - /* check that the interface version is currently supported */ - if (e->version != 5) { - audit_iface(NULL, NULL, "unsupported interface version", e, - error); - return error; + /* check that the interface version is currently supported */ + if (e->version != 5) { + audit_iface(NULL, NULL, "unsupported interface version", + e, error); + return error; + } } + /* read the namespace if present */ - if (!unpack_str(e, ns, "namespace")) - *ns = NULL; + if (unpack_str(e, &name, "namespace")) { + if (*ns && strcmp(*ns, name)) + audit_iface(NULL, NULL, "invalid ns change", e, error); + else if (!*ns) + *ns = name; + } return 0; } @@ -664,18 +715,40 @@ static int verify_profile(struct aa_profile *profile) return 0; } +void aa_load_ent_free(struct aa_load_ent *ent) +{ + if (ent) { + aa_put_profile(ent->rename); + aa_put_profile(ent->old); + aa_put_profile(ent->new); + kzfree(ent); + } +} + +struct aa_load_ent *aa_load_ent_alloc(void) +{ + struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL); + if (ent) + INIT_LIST_HEAD(&ent->list); + return ent; +} + /** - * aa_unpack - unpack packed binary profile data loaded from user space + * aa_unpack - unpack packed binary profile(s) data loaded from user space * @udata: user data copied to kmem (NOT NULL) * @size: the size of the user data + * @lh: list to place unpacked profiles in a aa_repl_ws * @ns: Returns namespace profile is in if specified else NULL (NOT NULL) * - * Unpack user data and return refcounted allocated profile or ERR_PTR + * Unpack user data and return refcounted allocated profile(s) stored in + * @lh in order of discovery, with the list chain stored in base.list + * or error * - * Returns: profile else error pointer if fails to unpack + * Returns: profile(s) on @lh else error pointer if fails to unpack */ -struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns) +int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns) { + struct aa_load_ent *tmp, *ent; struct aa_profile *profile = NULL; int error; struct aa_ext e = { @@ -684,20 +757,49 @@ struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns) .pos = udata, }; - error = verify_header(&e, ns); - if (error) - return ERR_PTR(error); + *ns = NULL; + while (e.pos < e.end) { + void *start; + error = 
verify_header(&e, e.pos == e.start, ns); + if (error) + goto fail; + + start = e.pos; + profile = unpack_profile(&e); + if (IS_ERR(profile)) { + error = PTR_ERR(profile); + goto fail; + } + + error = verify_profile(profile); + if (error) + goto fail_profile; - profile = unpack_profile(&e); - if (IS_ERR(profile)) - return profile; + error = aa_calc_profile_hash(profile, e.version, start, + e.pos - start); + if (error) + goto fail_profile; - error = verify_profile(profile); - if (error) { - aa_put_profile(profile); - profile = ERR_PTR(error); + ent = aa_load_ent_alloc(); + if (!ent) { + error = -ENOMEM; + goto fail_profile; + } + + ent->new = profile; + list_add_tail(&ent->list, lh); } - /* return refcount */ - return profile; + return 0; + +fail_profile: + aa_put_profile(profile); + +fail: + list_for_each_entry_safe(ent, tmp, lh, list) { + list_del_init(&ent->list); + aa_load_ent_free(ent); + } + + return error; } diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c index 04a2cf8d1b6..b125acc9aa2 100644 --- a/security/apparmor/procattr.c +++ b/security/apparmor/procattr.c @@ -16,6 +16,7 @@ #include "include/context.h" #include "include/policy.h" #include "include/domain.h" +#include "include/procattr.h" /** @@ -36,7 +37,7 @@ int aa_getprocattr(struct aa_profile *profile, char **string) { char *str; int len = 0, mode_len = 0, ns_len = 0, name_len; - const char *mode_str = profile_mode_names[profile->mode]; + const char *mode_str = aa_profile_mode_names[profile->mode]; const char *ns_name = NULL; struct aa_namespace *ns = profile->ns; struct aa_namespace *current_ns = __aa_current_profile()->ns; @@ -162,9 +163,3 @@ int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test) name = aa_split_fqname(fqname, &ns_name); return aa_change_profile(ns_name, name, onexec, test); } - -int aa_setprocattr_permipc(char *fqname) -{ - /* TODO: add ipc permission querying */ - return -ENOTSUPP; -} diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c index a4136c10b1c..748bf0ca6c9 100644 --- a/security/apparmor/resource.c +++ b/security/apparmor/resource.c @@ -15,6 +15,7 @@ #include <linux/audit.h> #include "include/audit.h" +#include "include/context.h" #include "include/resource.h" #include "include/policy.h" @@ -23,13 +24,18 @@ */ #include "rlim_names.h" +struct aa_fs_entry aa_fs_entry_rlimit[] = { + AA_FS_FILE_STRING("mask", AA_FS_RLIMIT_MASK), + { } +}; + /* audit callback for resource specific fields */ static void audit_cb(struct audit_buffer *ab, void *va) { struct common_audit_data *sa = va; audit_log_format(ab, " rlimit=%s value=%lu", - rlim_names[sa->aad.rlim.rlim], sa->aad.rlim.max); + rlim_names[sa->aad->rlim.rlim], sa->aad->rlim.max); } /** @@ -45,12 +51,14 @@ static int audit_resource(struct aa_profile *profile, unsigned int resource, unsigned long value, int error) { struct common_audit_data sa; + struct apparmor_audit_data aad = {0,}; - COMMON_AUDIT_DATA_INIT(&sa, NONE); - sa.aad.op = OP_SETRLIMIT, - sa.aad.rlim.rlim = resource; - sa.aad.rlim.max = value; - sa.aad.error = error; + sa.type = LSM_AUDIT_DATA_NONE; + sa.aad = &aad; + aad.op = OP_SETRLIMIT, + aad.rlim.rlim = resource; + aad.rlim.max = value; + aad.error = error; return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_KERNEL, &sa, audit_cb); } @@ -83,17 +91,25 @@ int aa_map_resource(int resource) int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task, unsigned int resource, struct rlimit *new_rlim) { + struct aa_profile *task_profile; int error = 0; + rcu_read_lock(); + 
task_profile = aa_get_profile(aa_cred_profile(__task_cred(task))); + rcu_read_unlock(); + /* TODO: extend resource control to handle other (non current) - * processes. AppArmor rules currently have the implicit assumption - * that the task is setting the resource of the current process + * profiles. AppArmor rules currently have the implicit assumption + * that the task is setting the resource of a task confined with + * the same profile. */ - if ((task != current->group_leader) || + if (profile != task_profile || (profile->rlimits.mask & (1 << resource) && new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max)) error = -EACCES; + aa_put_profile(task_profile); + return audit_resource(profile, resource, new_rlim->rlim_max, error); } diff --git a/security/capability.c b/security/capability.c index 30ae00fbecd..e76373de312 100644 --- a/security/capability.c +++ b/security/capability.c @@ -12,7 +12,7 @@ #include <linux/security.h> -static int cap_sysctl(ctl_table *table, int op) +static int cap_syslog(int type) { return 0; } @@ -54,6 +54,11 @@ static int cap_sb_copy_data(char *orig, char *copy) return 0; } +static int cap_sb_remount(struct super_block *sb, void *data) +{ + return 0; +} + static int cap_sb_kern_mount(struct super_block *sb, int flags, void *data) { return 0; @@ -69,8 +74,8 @@ static int cap_sb_statfs(struct dentry *dentry) return 0; } -static int cap_sb_mount(char *dev_name, struct path *path, char *type, - unsigned long flags, void *data) +static int cap_sb_mount(const char *dev_name, struct path *path, + const char *type, unsigned long flags, void *data) { return 0; } @@ -86,16 +91,20 @@ static int cap_sb_pivotroot(struct path *old_path, struct path *new_path) } static int cap_sb_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts) + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags) + { if (unlikely(opts->num_mnt_opts)) return -EOPNOTSUPP; return 0; } -static void cap_sb_clone_mnt_opts(const struct super_block *oldsb, +static int cap_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb) { + return 0; } static int cap_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) @@ -103,6 +112,13 @@ static int cap_sb_parse_opts_str(char *options, struct security_mnt_opts *opts) return 0; } +static int cap_dentry_init_security(struct dentry *dentry, int mode, + struct qstr *name, void **ctx, + u32 *ctxlen) +{ + return -EOPNOTSUPP; +} + static int cap_inode_alloc_security(struct inode *inode) { return 0; @@ -113,13 +129,14 @@ static void cap_inode_free_security(struct inode *inode) } static int cap_inode_init_security(struct inode *inode, struct inode *dir, - char **name, void **value, size_t *len) + const struct qstr *qstr, const char **name, + void **value, size_t *len) { return -EOPNOTSUPP; } static int cap_inode_create(struct inode *inode, struct dentry *dentry, - int mask) + umode_t mask) { return 0; } @@ -142,7 +159,7 @@ static int cap_inode_symlink(struct inode *inode, struct dentry *dentry, } static int cap_inode_mkdir(struct inode *inode, struct dentry *dentry, - int mask) + umode_t mask) { return 0; } @@ -153,7 +170,7 @@ static int cap_inode_rmdir(struct inode *inode, struct dentry *dentry) } static int cap_inode_mknod(struct inode *inode, struct dentry *dentry, - int mode, dev_t dev) + umode_t mode, dev_t dev) { return 0; } @@ -229,13 +246,13 @@ static void cap_inode_getsecid(const struct inode *inode, u32 *secid) } #ifdef CONFIG_SECURITY_PATH -static int 
cap_path_mknod(struct path *dir, struct dentry *dentry, int mode, +static int cap_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { return 0; } -static int cap_path_mkdir(struct path *dir, struct dentry *dentry, int mode) +static int cap_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode) { return 0; } @@ -273,13 +290,12 @@ static int cap_path_truncate(struct path *path) return 0; } -static int cap_path_chmod(struct dentry *dentry, struct vfsmount *mnt, - mode_t mode) +static int cap_path_chmod(struct path *path, umode_t mode) { return 0; } -static int cap_path_chown(struct path *path, uid_t uid, gid_t gid) +static int cap_path_chown(struct path *path, kuid_t uid, kgid_t gid) { return 0; } @@ -343,7 +359,7 @@ static int cap_file_receive(struct file *file) return 0; } -static int cap_dentry_open(struct file *file, const struct cred *cred) +static int cap_file_open(struct file *file, const struct cred *cred) { return 0; } @@ -353,6 +369,10 @@ static int cap_task_create(unsigned long clone_flags) return 0; } +static void cap_task_free(struct task_struct *task) +{ +} + static int cap_cred_alloc_blank(struct cred *cred, gfp_t gfp) { return 0; @@ -386,6 +406,11 @@ static int cap_kernel_module_request(char *kmod_name) return 0; } +static int cap_kernel_module_from_file(struct file *file) +{ + return 0; +} + static int cap_task_setpgid(struct task_struct *p, pid_t pgid) { return 0; @@ -543,7 +568,7 @@ static int cap_sem_semop(struct sem_array *sma, struct sembuf *sops, } #ifdef CONFIG_SECURITY_NETWORK -static int cap_unix_stream_connect(struct socket *sock, struct socket *other, +static int cap_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return 0; @@ -695,24 +720,45 @@ static void cap_req_classify_flow(const struct request_sock *req, { } +static int cap_tun_dev_alloc_security(void **security) +{ + return 0; +} + +static void cap_tun_dev_free_security(void *security) +{ +} + static int cap_tun_dev_create(void) { return 0; } -static void cap_tun_dev_post_create(struct sock *sk) +static int cap_tun_dev_attach_queue(void *security) { + return 0; } -static int cap_tun_dev_attach(struct sock *sk) +static int cap_tun_dev_attach(struct sock *sk, void *security) { return 0; } + +static int cap_tun_dev_open(void *security) +{ + return 0; +} + +static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ +} + #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *sec_ctx) + struct xfrm_user_sec_ctx *sec_ctx, + gfp_t gfp) { return 0; } @@ -732,9 +778,15 @@ static int cap_xfrm_policy_delete_security(struct xfrm_sec_ctx *ctx) return 0; } -static int cap_xfrm_state_alloc_security(struct xfrm_state *x, - struct xfrm_user_sec_ctx *sec_ctx, - u32 secid) +static int cap_xfrm_state_alloc(struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx) +{ + return 0; +} + +static int cap_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, + u32 secid) { return 0; } @@ -755,7 +807,7 @@ static int cap_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 sk_sid, u8 dir) static int cap_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, - struct flowi *fl) + const struct flowi *fl) { return 1; } @@ -781,6 +833,11 @@ static int cap_setprocattr(struct task_struct *p, char *name, void *value, return -EINVAL; } +static int cap_ismaclabel(const char *name) +{ + return 0; +} + static 
int cap_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) { return -EOPNOTSUPP; @@ -808,7 +865,7 @@ static int cap_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen) static int cap_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen) { - return 0; + return -EOPNOTSUPP; } #ifdef CONFIG_KEYS static int cap_key_alloc(struct key *key, const struct cred *cred, @@ -822,7 +879,7 @@ static void cap_key_free(struct key *key) } static int cap_key_permission(key_ref_t key_ref, const struct cred *cred, - key_perm_t perm) + unsigned perm) { return 0; } @@ -875,7 +932,6 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, capable); set_to_cap_if_null(ops, quotactl); set_to_cap_if_null(ops, quota_on); - set_to_cap_if_null(ops, sysctl); set_to_cap_if_null(ops, syslog); set_to_cap_if_null(ops, settime); set_to_cap_if_null(ops, vm_enough_memory); @@ -887,6 +943,7 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, sb_alloc_security); set_to_cap_if_null(ops, sb_free_security); set_to_cap_if_null(ops, sb_copy_data); + set_to_cap_if_null(ops, sb_remount); set_to_cap_if_null(ops, sb_kern_mount); set_to_cap_if_null(ops, sb_show_options); set_to_cap_if_null(ops, sb_statfs); @@ -896,6 +953,7 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, sb_set_mnt_opts); set_to_cap_if_null(ops, sb_clone_mnt_opts); set_to_cap_if_null(ops, sb_parse_opts_str); + set_to_cap_if_null(ops, dentry_init_security); set_to_cap_if_null(ops, inode_alloc_security); set_to_cap_if_null(ops, inode_free_security); set_to_cap_if_null(ops, inode_init_security); @@ -940,15 +998,17 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, file_alloc_security); set_to_cap_if_null(ops, file_free_security); set_to_cap_if_null(ops, file_ioctl); - set_to_cap_if_null(ops, file_mmap); + set_to_cap_if_null(ops, mmap_addr); + set_to_cap_if_null(ops, mmap_file); set_to_cap_if_null(ops, file_mprotect); set_to_cap_if_null(ops, file_lock); set_to_cap_if_null(ops, file_fcntl); set_to_cap_if_null(ops, file_set_fowner); set_to_cap_if_null(ops, file_send_sigiotask); set_to_cap_if_null(ops, file_receive); - set_to_cap_if_null(ops, dentry_open); + set_to_cap_if_null(ops, file_open); set_to_cap_if_null(ops, task_create); + set_to_cap_if_null(ops, task_free); set_to_cap_if_null(ops, cred_alloc_blank); set_to_cap_if_null(ops, cred_free); set_to_cap_if_null(ops, cred_prepare); @@ -956,6 +1016,7 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, kernel_act_as); set_to_cap_if_null(ops, kernel_create_files_as); set_to_cap_if_null(ops, kernel_module_request); + set_to_cap_if_null(ops, kernel_module_from_file); set_to_cap_if_null(ops, task_fix_setuid); set_to_cap_if_null(ops, task_setpgid); set_to_cap_if_null(ops, task_getpgid); @@ -993,10 +1054,10 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, sem_semctl); set_to_cap_if_null(ops, sem_semop); set_to_cap_if_null(ops, netlink_send); - set_to_cap_if_null(ops, netlink_recv); set_to_cap_if_null(ops, d_instantiate); set_to_cap_if_null(ops, getprocattr); set_to_cap_if_null(ops, setprocattr); + set_to_cap_if_null(ops, ismaclabel); set_to_cap_if_null(ops, secid_to_secctx); set_to_cap_if_null(ops, secctx_to_secid); set_to_cap_if_null(ops, release_secctx); @@ -1034,16 +1095,21 @@ void __init security_fixup_ops(struct security_operations *ops) set_to_cap_if_null(ops, 
secmark_refcount_inc); set_to_cap_if_null(ops, secmark_refcount_dec); set_to_cap_if_null(ops, req_classify_flow); + set_to_cap_if_null(ops, tun_dev_alloc_security); + set_to_cap_if_null(ops, tun_dev_free_security); set_to_cap_if_null(ops, tun_dev_create); - set_to_cap_if_null(ops, tun_dev_post_create); + set_to_cap_if_null(ops, tun_dev_open); + set_to_cap_if_null(ops, tun_dev_attach_queue); set_to_cap_if_null(ops, tun_dev_attach); + set_to_cap_if_null(ops, skb_owned_by); #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM set_to_cap_if_null(ops, xfrm_policy_alloc_security); set_to_cap_if_null(ops, xfrm_policy_clone_security); set_to_cap_if_null(ops, xfrm_policy_free_security); set_to_cap_if_null(ops, xfrm_policy_delete_security); - set_to_cap_if_null(ops, xfrm_state_alloc_security); + set_to_cap_if_null(ops, xfrm_state_alloc); + set_to_cap_if_null(ops, xfrm_state_alloc_acquire); set_to_cap_if_null(ops, xfrm_state_free_security); set_to_cap_if_null(ops, xfrm_state_delete_security); set_to_cap_if_null(ops, xfrm_policy_lookup); diff --git a/security/commoncap.c b/security/commoncap.c index 04b80f9912b..b9d613e0ef1 100644 --- a/security/commoncap.c +++ b/security/commoncap.c @@ -27,7 +27,9 @@ #include <linux/sched.h> #include <linux/prctl.h> #include <linux/securebits.h> -#include <linux/syslog.h> +#include <linux/user_namespace.h> +#include <linux/binfmts.h> +#include <linux/personality.h> /* * If a non-root user executes a setuid-root binary in @@ -53,22 +55,13 @@ static void warn_setuid_and_fcaps_mixed(const char *fname) int cap_netlink_send(struct sock *sk, struct sk_buff *skb) { - NETLINK_CB(skb).eff_cap = current_cap(); return 0; } -int cap_netlink_recv(struct sk_buff *skb, int cap) -{ - if (!cap_raised(NETLINK_CB(skb).eff_cap, cap)) - return -EPERM; - return 0; -} -EXPORT_SYMBOL(cap_netlink_recv); - /** * cap_capable - Determine whether a task has a particular effective capability - * @tsk: The task to query * @cred: The credentials to use + * @ns: The user namespace in which we need the capability * @cap: The capability to check for * @audit: Whether to write an audit message or not * @@ -80,10 +73,39 @@ EXPORT_SYMBOL(cap_netlink_recv); * cap_has_capability() returns 0 when a task has a capability, but the * kernel's capable() and has_capability() returns 1 for this case. */ -int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, - int audit) +int cap_capable(const struct cred *cred, struct user_namespace *targ_ns, + int cap, int audit) { - return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM; + struct user_namespace *ns = targ_ns; + + /* See if cred has the capability in the target user namespace + * by examining the target user namespace and all of the target + * user namespace's parents. + */ + for (;;) { + /* Do we have the necessary capabilities? */ + if (ns == cred->user_ns) + return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM; + + /* Have we tried all of the parent namespaces? */ + if (ns == &init_user_ns) + return -EPERM; + + /* + * The owner of the user namespace in the parent of the + * user namespace has all caps. + */ + if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid)) + return 0; + + /* + * If you have a capability in a parent user ns, then you have + * it over all children user namespaces as well. 
+ */ + ns = ns->parent; + } + + /* We never get here */ } /** @@ -94,7 +116,7 @@ int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap, * Determine whether the current process may set the system clock and timezone * information, returning 0 if permission granted, -ve if denied. */ -int cap_settime(struct timespec *ts, struct timezone *tz) +int cap_settime(const struct timespec *ts, const struct timezone *tz) { if (!capable(CAP_SYS_TIME)) return -EPERM; @@ -107,18 +129,30 @@ int cap_settime(struct timespec *ts, struct timezone *tz) * @child: The process to be accessed * @mode: The mode of attachment. * + * If we are in the same or an ancestor user_ns and have all the target + * task's capabilities, then ptrace access is allowed. + * If we have the ptrace capability to the target user_ns, then ptrace + * access is allowed. + * Else denied. + * * Determine whether a process may access another, returning 0 if permission * granted, -ve if denied. */ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode) { int ret = 0; + const struct cred *cred, *child_cred; rcu_read_lock(); - if (!cap_issubset(__task_cred(child)->cap_permitted, - current_cred()->cap_permitted) && - !capable(CAP_SYS_PTRACE)) - ret = -EPERM; + cred = current_cred(); + child_cred = __task_cred(child); + if (cred->user_ns == child_cred->user_ns && + cap_issubset(child_cred->cap_permitted, cred->cap_permitted)) + goto out; + if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE)) + goto out; + ret = -EPERM; +out: rcu_read_unlock(); return ret; } @@ -127,18 +161,30 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode) * cap_ptrace_traceme - Determine whether another process may trace the current * @parent: The task proposed to be the tracer * + * If parent is in the same or an ancestor user_ns and has all current's + * capabilities, then ptrace access is allowed. + * If parent has the ptrace capability to current's user_ns, then ptrace + * access is allowed. + * Else denied. + * * Determine whether the nominated task is permitted to trace the current * process, returning 0 if permission is granted, -ve if denied. 
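To make the reworked cap_capable() walk concrete: the check starts at the target user namespace and climbs toward init_user_ns, granting the capability either from the caller's own effective set or because the caller created (owns) a descendant namespace. Below is a minimal, self-contained userspace model of that loop; the struct layouts, helper names and uid/capability values are invented for illustration and are not the kernel's types.

/* Minimal userspace model of the namespace-aware capability walk.
 * All types, names and constants here are illustrative stand-ins. */
#include <stdio.h>

#define CAP_TO_MASK(c)  (1u << (c))
#define CAP_SYS_ADMIN   21
#define EPERM           1

struct user_ns {
    struct user_ns *parent;   /* NULL for the init namespace     */
    unsigned int    owner;    /* uid that created this namespace */
};

struct cred {
    unsigned int    euid;
    unsigned int    cap_effective;  /* bitmask, one bit per capability */
    struct user_ns *user_ns;        /* namespace the cred belongs to   */
};

static int model_cap_capable(const struct cred *cred,
                             struct user_ns *init_ns,
                             struct user_ns *targ_ns, int cap)
{
    struct user_ns *ns = targ_ns;

    for (;;) {
        /* Caller's own namespace: check the effective set directly. */
        if (ns == cred->user_ns)
            return (cred->cap_effective & CAP_TO_MASK(cap)) ? 0 : -EPERM;

        /* Ran out of parents without finding a match. */
        if (ns == init_ns)
            return -EPERM;

        /* The creator of a child namespace has every capability in it. */
        if (ns->parent == cred->user_ns && ns->owner == cred->euid)
            return 0;

        ns = ns->parent;
    }
}

int main(void)
{
    struct user_ns init_ns = { .parent = NULL,     .owner = 0 };
    struct user_ns child   = { .parent = &init_ns, .owner = 1000 };

    struct cred admin = { .euid = 0,    .cap_effective = CAP_TO_MASK(CAP_SYS_ADMIN),
                          .user_ns = &init_ns };
    struct cred owner = { .euid = 1000, .cap_effective = 0, .user_ns = &init_ns };

    printf("admin in init_ns: %d\n", model_cap_capable(&admin, &init_ns, &init_ns, CAP_SYS_ADMIN)); /* 0  */
    printf("owner over child: %d\n", model_cap_capable(&owner, &init_ns, &child,   CAP_SYS_ADMIN)); /* 0  */
    printf("owner in init_ns: %d\n", model_cap_capable(&owner, &init_ns, &init_ns, CAP_SYS_ADMIN)); /* -1 */
    return 0;
}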
*/ int cap_ptrace_traceme(struct task_struct *parent) { int ret = 0; + const struct cred *cred, *child_cred; rcu_read_lock(); - if (!cap_issubset(current_cred()->cap_permitted, - __task_cred(parent)->cap_permitted) && - !has_capability(parent, CAP_SYS_PTRACE)) - ret = -EPERM; + cred = __task_cred(parent); + child_cred = current_cred(); + if (cred->user_ns == child_cred->user_ns && + cap_issubset(child_cred->cap_permitted, cred->cap_permitted)) + goto out; + if (has_ns_capability(parent, child_cred->user_ns, CAP_SYS_PTRACE)) + goto out; + ret = -EPERM; +out: rcu_read_unlock(); return ret; } @@ -178,8 +224,8 @@ static inline int cap_inh_is_capped(void) /* they are so limited unless the current task has the CAP_SETPCAP * capability */ - if (cap_capable(current, current_cred(), CAP_SETPCAP, - SECURITY_CAP_AUDIT) == 0) + if (cap_capable(current_cred(), current_cred()->user_ns, + CAP_SETPCAP, SECURITY_CAP_AUDIT) == 0) return 0; return 1; } @@ -287,7 +333,8 @@ int cap_inode_killpriv(struct dentry *dentry) */ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps, struct linux_binprm *bprm, - bool *effective) + bool *effective, + bool *has_cap) { struct cred *new = bprm->cred; unsigned i; @@ -296,6 +343,9 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps, if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE) *effective = true; + if (caps->magic_etc & VFS_CAP_REVISION_MASK) + *has_cap = true; + CAP_FOR_EACH_U32(i) { __u32 permitted = caps->permitted.cap[i]; __u32 inheritable = caps->inheritable.cap[i]; @@ -379,7 +429,7 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data * its xattrs and, if present, apply them to the proposed credentials being * constructed by execve(). */ -static int get_file_caps(struct linux_binprm *bprm, bool *effective) +static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap) { struct dentry *dentry; int rc = 0; @@ -390,7 +440,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective) if (!file_caps_enabled) return 0; - if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID) + if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) return 0; dentry = dget(bprm->file->f_dentry); @@ -405,7 +455,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective) goto out; } - rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective); + rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap); if (rc == -EINVAL) printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n", __func__, rc, bprm->filename); @@ -430,21 +480,24 @@ int cap_bprm_set_creds(struct linux_binprm *bprm) { const struct cred *old = current_cred(); struct cred *new = bprm->cred; - bool effective; + bool effective, has_cap = false; int ret; + kuid_t root_uid; effective = false; - ret = get_file_caps(bprm, &effective); + ret = get_file_caps(bprm, &effective, &has_cap); if (ret < 0) return ret; + root_uid = make_kuid(new->user_ns, 0); + if (!issecure(SECURE_NOROOT)) { /* * If the legacy file capability is set, then don't set privs * for a setuid root binary run by a non-root user. Do set it * for a root user just to cause least surprise to an admin. */ - if (effective && new->uid != 0 && new->euid == 0) { + if (has_cap && !uid_eq(new->uid, root_uid) && uid_eq(new->euid, root_uid)) { warn_setuid_and_fcaps_mixed(bprm->filename); goto skip; } @@ -455,25 +508,33 @@ int cap_bprm_set_creds(struct linux_binprm *bprm) * * If only the real uid is 0, we do not set the effective bit. 
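The ptrace hooks above reduce to a two-step rule: allow when tracer and tracee share a user namespace and the tracee's permitted set is a subset of the tracer's, otherwise require CAP_SYS_PTRACE relative to the tracee's namespace. A small model of that rule follows, again with made-up types rather than struct cred.

/* Illustrative userspace model of the reworked ptrace access rule.
 * Types and names are stand-ins, not the kernel's. */
#include <stdbool.h>
#include <stdio.h>

#define EPERM 1

struct model_cred {
    unsigned int cap_permitted;   /* bitmask                         */
    int          user_ns;         /* namespace id, for the model only */
};

static bool cap_issubset(unsigned int a, unsigned int b)
{
    return (a & ~b) == 0;         /* every bit of a is also set in b */
}

static int model_ptrace_access_check(const struct model_cred *tracer,
                                     const struct model_cred *tracee,
                                     bool tracer_has_sys_ptrace)
{
    if (tracer->user_ns == tracee->user_ns &&
        cap_issubset(tracee->cap_permitted, tracer->cap_permitted))
        return 0;
    if (tracer_has_sys_ptrace)    /* ns_capable(tracee ns, CAP_SYS_PTRACE) */
        return 0;
    return -EPERM;
}

int main(void)
{
    struct model_cred tracer = { .cap_permitted = 0x3, .user_ns = 1 };
    struct model_cred tracee = { .cap_permitted = 0x1, .user_ns = 1 };
    struct model_cred other  = { .cap_permitted = 0x7, .user_ns = 2 };

    printf("%d\n", model_ptrace_access_check(&tracer, &tracee, false)); /* 0  */
    printf("%d\n", model_ptrace_access_check(&tracer, &other,  false)); /* -1 */
    printf("%d\n", model_ptrace_access_check(&tracer, &other,  true));  /* 0  */
    return 0;
}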
*/ - if (new->euid == 0 || new->uid == 0) { + if (uid_eq(new->euid, root_uid) || uid_eq(new->uid, root_uid)) { /* pP' = (cap_bset & ~0) | (pI & ~0) */ new->cap_permitted = cap_combine(old->cap_bset, old->cap_inheritable); } - if (new->euid == 0) + if (uid_eq(new->euid, root_uid)) effective = true; } skip: + /* if we have fs caps, clear dangerous personality flags */ + if (!cap_issubset(new->cap_permitted, old->cap_permitted)) + bprm->per_clear |= PER_CLEAR_ON_SETID; + + /* Don't let someone trace a set[ug]id/setpcap binary with the revised - * credentials unless they have the appropriate permit + * credentials unless they have the appropriate permit. + * + * In addition, if NO_NEW_PRIVS, then ensure we get no new privs. */ - if ((new->euid != old->uid || - new->egid != old->gid || + if ((!uid_eq(new->euid, old->uid) || + !gid_eq(new->egid, old->gid) || !cap_issubset(new->cap_permitted, old->cap_permitted)) && bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) { /* downgrade; they get no more than they had, and maybe less */ - if (!capable(CAP_SETUID)) { + if (!capable(CAP_SETUID) || + (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) { new->euid = new->uid; new->egid = new->gid; } @@ -484,15 +545,10 @@ skip: new->suid = new->fsuid = new->euid; new->sgid = new->fsgid = new->egid; - /* For init, we want to retain the capabilities set in the initial - * task. Thus we skip the usual capability rules - */ - if (!is_global_init(current)) { - if (effective) - new->cap_effective = new->cap_permitted; - else - cap_clear(new->cap_effective); - } + if (effective) + new->cap_effective = new->cap_permitted; + else + cap_clear(new->cap_effective); bprm->cap_effective = effective; /* @@ -509,7 +565,7 @@ skip: */ if (!cap_isclear(new->cap_effective)) { if (!cap_issubset(CAP_FULL_SET, new->cap_effective) || - new->euid != 0 || new->uid != 0 || + !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) || issecure(SECURE_NOROOT)) { ret = audit_log_bprm_fcaps(bprm, new, old); if (ret < 0) @@ -534,16 +590,17 @@ skip: int cap_bprm_secureexec(struct linux_binprm *bprm) { const struct cred *cred = current_cred(); + kuid_t root_uid = make_kuid(cred->user_ns, 0); - if (cred->uid != 0) { + if (!uid_eq(cred->uid, root_uid)) { if (bprm->cap_effective) return 1; if (!cap_isclear(cred->cap_permitted)) return 1; } - return (cred->euid != cred->uid || - cred->egid != cred->gid); + return (!uid_eq(cred->euid, cred->uid) || + !gid_eq(cred->egid, cred->gid)); } /** @@ -633,15 +690,21 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name) */ static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old) { - if ((old->uid == 0 || old->euid == 0 || old->suid == 0) && - (new->uid != 0 && new->euid != 0 && new->suid != 0) && + kuid_t root_uid = make_kuid(old->user_ns, 0); + + if ((uid_eq(old->uid, root_uid) || + uid_eq(old->euid, root_uid) || + uid_eq(old->suid, root_uid)) && + (!uid_eq(new->uid, root_uid) && + !uid_eq(new->euid, root_uid) && + !uid_eq(new->suid, root_uid)) && !issecure(SECURE_KEEP_CAPS)) { cap_clear(new->cap_permitted); cap_clear(new->cap_effective); } - if (old->euid == 0 && new->euid != 0) + if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid)) cap_clear(new->cap_effective); - if (old->euid != 0 && new->euid == 0) + if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid)) new->cap_effective = new->cap_permitted; } @@ -674,11 +737,12 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags) * if not, we might be a bit too harsh here. 
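cap_emulate_setxuid() above encodes three transitions worth spelling out: losing every root uid clears both the permitted and effective sets unless SECURE_KEEP_CAPS is set, dropping effective root clears only the effective set, and regaining effective root refills it from the permitted set. A compact userspace sketch of those rules; names and bitmask values are illustrative, and "root" means uid 0 in the cred's own user namespace.

/* Userspace sketch of the setuid capability fixup shown above. */
#include <stdbool.h>
#include <stdio.h>

struct model_cred {
    unsigned int uid, euid, suid;
    unsigned int cap_permitted;
    unsigned int cap_effective;
};

static void model_emulate_setxuid(struct model_cred *new,
                                  const struct model_cred *old,
                                  bool keep_caps)   /* SECURE_KEEP_CAPS */
{
    bool old_had_root = old->uid == 0 || old->euid == 0 || old->suid == 0;
    bool new_has_root = new->uid == 0 || new->euid == 0 || new->suid == 0;

    if (old_had_root && !new_has_root && !keep_caps) {
        new->cap_permitted = 0;
        new->cap_effective = 0;
    }
    if (old->euid == 0 && new->euid != 0)
        new->cap_effective = 0;                   /* dropped effective root  */
    if (old->euid != 0 && new->euid == 0)
        new->cap_effective = new->cap_permitted;  /* regained effective root */
}

int main(void)
{
    struct model_cred old = { 0, 0, 0, 0xff, 0xff };
    struct model_cred new = { 1000, 1000, 1000, 0xff, 0xff };

    model_emulate_setxuid(&new, &old, false);
    printf("pP=%#x pE=%#x\n", new.cap_permitted, new.cap_effective); /* 0 0 */
    return 0;
}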
*/ if (!issecure(SECURE_NO_SETUID_FIXUP)) { - if (old->fsuid == 0 && new->fsuid != 0) + kuid_t root_uid = make_kuid(old->user_ns, 0); + if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid)) new->cap_effective = cap_drop_fs_set(new->cap_effective); - if (old->fsuid != 0 && new->fsuid == 0) + if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid)) new->cap_effective = cap_raise_fs_set(new->cap_effective, new->cap_permitted); @@ -704,16 +768,16 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags) */ static int cap_safe_nice(struct task_struct *p) { - int is_subset; + int is_subset, ret = 0; rcu_read_lock(); is_subset = cap_issubset(__task_cred(p)->cap_permitted, current_cred()->cap_permitted); + if (!is_subset && !ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE)) + ret = -EPERM; rcu_read_unlock(); - if (!is_subset && !capable(CAP_SYS_NICE)) - return -EPERM; - return 0; + return ret; } /** @@ -760,7 +824,7 @@ int cap_task_setnice(struct task_struct *p, int nice) */ static long cap_prctl_drop(struct cred *new, unsigned long cap) { - if (!capable(CAP_SETPCAP)) + if (!ns_capable(current_user_ns(), CAP_SETPCAP)) return -EPERM; if (!cap_valid(cap)) return -EINVAL; @@ -830,7 +894,8 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, & (new->securebits ^ arg2)) /*[1]*/ || ((new->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/ || (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/ - || (cap_capable(current, current_cred(), CAP_SETPCAP, + || (cap_capable(current_cred(), + current_cred()->user_ns, CAP_SETPCAP, SECURITY_CAP_AUDIT) != 0) /*[4]*/ /* * [1] no changing of bits that are locked @@ -884,26 +949,6 @@ error: } /** - * cap_syslog - Determine whether syslog function is permitted - * @type: Function requested - * @from_file: Whether this request came from an open file (i.e. /proc) - * - * Determine whether the current process is permitted to use a particular - * syslog function, returning 0 if permission is granted, -ve if not. - */ -int cap_syslog(int type, bool from_file) -{ - if (type != SYSLOG_ACTION_OPEN && from_file) - return 0; - if (dmesg_restrict && !capable(CAP_SYS_ADMIN)) - return -EPERM; - if ((type != SYSLOG_ACTION_READ_ALL && - type != SYSLOG_ACTION_SIZE_BUFFER) && !capable(CAP_SYS_ADMIN)) - return -EPERM; - return 0; -} - -/** * cap_vm_enough_memory - Determine whether a new virtual mapping is permitted * @mm: The VM space in which the new mapping is to be made * @pages: The size of the mapping @@ -915,34 +960,27 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages) { int cap_sys_admin = 0; - if (cap_capable(current, current_cred(), CAP_SYS_ADMIN, + if (cap_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN, SECURITY_CAP_NOAUDIT) == 0) cap_sys_admin = 1; return __vm_enough_memory(mm, pages, cap_sys_admin); } /* - * cap_file_mmap - check if able to map given addr - * @file: unused - * @reqprot: unused - * @prot: unused - * @flags: unused + * cap_mmap_addr - check if able to map given addr * @addr: address attempting to be mapped - * @addr_only: unused * * If the process is attempting to map memory below dac_mmap_min_addr they need * CAP_SYS_RAWIO. The other parameters to this function are unused by the * capability security module. Returns 0 if this mapping should be allowed * -EPERM if not. 
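The simplified cap_mmap_addr() boils down to one comparison against the dac_mmap_min_addr sysctl plus a CAP_SYS_RAWIO escape hatch. A trivial model, using a hard-coded minimum in place of the real /proc/sys/vm/mmap_min_addr value:

/* Tiny model of the low-address mapping check: requests below the
 * configured minimum need CAP_SYS_RAWIO.  The constant is a stand-in. */
#include <stdbool.h>
#include <stdio.h>

#define EPERM 1
static const unsigned long dac_mmap_min_addr = 65536;

static int model_mmap_addr(unsigned long addr, bool has_sys_rawio)
{
    if (addr < dac_mmap_min_addr && !has_sys_rawio)
        return -EPERM;
    return 0;
}

int main(void)
{
    printf("%d\n", model_mmap_addr(0x1000,  false)); /* -1: below the floor */
    printf("%d\n", model_mmap_addr(0x1000,  true));  /*  0: CAP_SYS_RAWIO   */
    printf("%d\n", model_mmap_addr(0x20000, false)); /*  0: above the floor */
    return 0;
}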
*/ -int cap_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only) +int cap_mmap_addr(unsigned long addr) { int ret = 0; if (addr < dac_mmap_min_addr) { - ret = cap_capable(current, current_cred(), CAP_SYS_RAWIO, + ret = cap_capable(current_cred(), &init_user_ns, CAP_SYS_RAWIO, SECURITY_CAP_AUDIT); /* set PF_SUPERPRIV if it turns out we allow the low mmap */ if (ret == 0) @@ -950,3 +988,9 @@ int cap_file_mmap(struct file *file, unsigned long reqprot, } return ret; } + +int cap_mmap_file(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) +{ + return 0; +} diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 8d9c48f1377..d9d69e6930e 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c @@ -25,13 +25,19 @@ static DEFINE_MUTEX(devcgroup_mutex); +enum devcg_behavior { + DEVCG_DEFAULT_NONE, + DEVCG_DEFAULT_ALLOW, + DEVCG_DEFAULT_DENY, +}; + /* - * whitelist locking rules: + * exception list locking rules: * hold devcgroup_mutex for update/read. * hold rcu_read_lock() for read. */ -struct dev_whitelist_item { +struct dev_exception_item { u32 major, minor; short type; short access; @@ -41,45 +47,31 @@ struct dev_whitelist_item { struct dev_cgroup { struct cgroup_subsys_state css; - struct list_head whitelist; + struct list_head exceptions; + enum devcg_behavior behavior; }; static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s) { - return container_of(s, struct dev_cgroup, css); -} - -static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup) -{ - return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id)); + return s ? container_of(s, struct dev_cgroup, css) : NULL; } static inline struct dev_cgroup *task_devcgroup(struct task_struct *task) { - return css_to_devcgroup(task_subsys_state(task, devices_subsys_id)); -} - -struct cgroup_subsys devices_subsys; - -static int devcgroup_can_attach(struct cgroup_subsys *ss, - struct cgroup *new_cgroup, struct task_struct *task, - bool threadgroup) -{ - if (current != task && !capable(CAP_SYS_ADMIN)) - return -EPERM; - - return 0; + return css_to_devcgroup(task_css(task, devices_cgrp_id)); } /* * called under devcgroup_mutex */ -static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig) +static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig) { - struct dev_whitelist_item *wh, *tmp, *new; + struct dev_exception_item *ex, *tmp, *new; - list_for_each_entry(wh, orig, list) { - new = kmemdup(wh, sizeof(*wh), GFP_KERNEL); + lockdep_assert_held(&devcgroup_mutex); + + list_for_each_entry(ex, orig, list) { + new = kmemdup(ex, sizeof(*ex), GFP_KERNEL); if (!new) goto free_and_exit; list_add_tail(&new->list, dest); @@ -88,132 +80,157 @@ static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig) return 0; free_and_exit: - list_for_each_entry_safe(wh, tmp, dest, list) { - list_del(&wh->list); - kfree(wh); + list_for_each_entry_safe(ex, tmp, dest, list) { + list_del(&ex->list); + kfree(ex); } return -ENOMEM; } -/* Stupid prototype - don't bother combining existing entries */ /* * called under devcgroup_mutex */ -static int dev_whitelist_add(struct dev_cgroup *dev_cgroup, - struct dev_whitelist_item *wh) +static int dev_exception_add(struct dev_cgroup *dev_cgroup, + struct dev_exception_item *ex) { - struct dev_whitelist_item *whcopy, *walk; + struct dev_exception_item *excopy, *walk; + + 
lockdep_assert_held(&devcgroup_mutex); - whcopy = kmemdup(wh, sizeof(*wh), GFP_KERNEL); - if (!whcopy) + excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL); + if (!excopy) return -ENOMEM; - list_for_each_entry(walk, &dev_cgroup->whitelist, list) { - if (walk->type != wh->type) + list_for_each_entry(walk, &dev_cgroup->exceptions, list) { + if (walk->type != ex->type) continue; - if (walk->major != wh->major) + if (walk->major != ex->major) continue; - if (walk->minor != wh->minor) + if (walk->minor != ex->minor) continue; - walk->access |= wh->access; - kfree(whcopy); - whcopy = NULL; + walk->access |= ex->access; + kfree(excopy); + excopy = NULL; } - if (whcopy != NULL) - list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist); + if (excopy != NULL) + list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions); return 0; } -static void whitelist_item_free(struct rcu_head *rcu) -{ - struct dev_whitelist_item *item; - - item = container_of(rcu, struct dev_whitelist_item, rcu); - kfree(item); -} - /* * called under devcgroup_mutex */ -static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup, - struct dev_whitelist_item *wh) +static void dev_exception_rm(struct dev_cgroup *dev_cgroup, + struct dev_exception_item *ex) { - struct dev_whitelist_item *walk, *tmp; + struct dev_exception_item *walk, *tmp; - list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) { - if (walk->type == DEV_ALL) - goto remove; - if (walk->type != wh->type) + lockdep_assert_held(&devcgroup_mutex); + + list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) { + if (walk->type != ex->type) continue; - if (walk->major != ~0 && walk->major != wh->major) + if (walk->major != ex->major) continue; - if (walk->minor != ~0 && walk->minor != wh->minor) + if (walk->minor != ex->minor) continue; -remove: - walk->access &= ~wh->access; + walk->access &= ~ex->access; if (!walk->access) { list_del_rcu(&walk->list); - call_rcu(&walk->rcu, whitelist_item_free); + kfree_rcu(walk, rcu); } } } +static void __dev_exception_clean(struct dev_cgroup *dev_cgroup) +{ + struct dev_exception_item *ex, *tmp; + + list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) { + list_del_rcu(&ex->list); + kfree_rcu(ex, rcu); + } +} + +/** + * dev_exception_clean - frees all entries of the exception list + * @dev_cgroup: dev_cgroup with the exception list to be cleaned + * + * called under devcgroup_mutex + */ +static void dev_exception_clean(struct dev_cgroup *dev_cgroup) +{ + lockdep_assert_held(&devcgroup_mutex); + + __dev_exception_clean(dev_cgroup); +} + +static inline bool is_devcg_online(const struct dev_cgroup *devcg) +{ + return (devcg->behavior != DEVCG_DEFAULT_NONE); +} + +/** + * devcgroup_online - initializes devcgroup's behavior and exceptions based on + * parent's + * @css: css getting online + * returns 0 in case of success, error code otherwise + */ +static int devcgroup_online(struct cgroup_subsys_state *css) +{ + struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); + struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent); + int ret = 0; + + mutex_lock(&devcgroup_mutex); + + if (parent_dev_cgroup == NULL) + dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW; + else { + ret = dev_exceptions_copy(&dev_cgroup->exceptions, + &parent_dev_cgroup->exceptions); + if (!ret) + dev_cgroup->behavior = parent_dev_cgroup->behavior; + } + mutex_unlock(&devcgroup_mutex); + + return ret; +} + +static void devcgroup_offline(struct cgroup_subsys_state *css) +{ + struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); + + 
mutex_lock(&devcgroup_mutex); + dev_cgroup->behavior = DEVCG_DEFAULT_NONE; + mutex_unlock(&devcgroup_mutex); +} + /* * called from kernel/cgroup.c with cgroup_lock() held. */ -static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss, - struct cgroup *cgroup) +static struct cgroup_subsys_state * +devcgroup_css_alloc(struct cgroup_subsys_state *parent_css) { - struct dev_cgroup *dev_cgroup, *parent_dev_cgroup; - struct cgroup *parent_cgroup; - int ret; + struct dev_cgroup *dev_cgroup; dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL); if (!dev_cgroup) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&dev_cgroup->whitelist); - parent_cgroup = cgroup->parent; - - if (parent_cgroup == NULL) { - struct dev_whitelist_item *wh; - wh = kmalloc(sizeof(*wh), GFP_KERNEL); - if (!wh) { - kfree(dev_cgroup); - return ERR_PTR(-ENOMEM); - } - wh->minor = wh->major = ~0; - wh->type = DEV_ALL; - wh->access = ACC_MASK; - list_add(&wh->list, &dev_cgroup->whitelist); - } else { - parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup); - mutex_lock(&devcgroup_mutex); - ret = dev_whitelist_copy(&dev_cgroup->whitelist, - &parent_dev_cgroup->whitelist); - mutex_unlock(&devcgroup_mutex); - if (ret) { - kfree(dev_cgroup); - return ERR_PTR(ret); - } - } + INIT_LIST_HEAD(&dev_cgroup->exceptions); + dev_cgroup->behavior = DEVCG_DEFAULT_NONE; return &dev_cgroup->css; } -static void devcgroup_destroy(struct cgroup_subsys *ss, - struct cgroup *cgroup) +static void devcgroup_css_free(struct cgroup_subsys_state *css) { - struct dev_cgroup *dev_cgroup; - struct dev_whitelist_item *wh, *tmp; + struct dev_cgroup *dev_cgroup = css_to_devcgroup(css); - dev_cgroup = cgroup_to_devcgroup(cgroup); - list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) { - list_del(&wh->list); - kfree(wh); - } + __dev_exception_clean(dev_cgroup); kfree(dev_cgroup); } @@ -255,112 +272,384 @@ static void set_majmin(char *str, unsigned m) sprintf(str, "%u", m); } -static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft, - struct seq_file *m) +static int devcgroup_seq_show(struct seq_file *m, void *v) { - struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup); - struct dev_whitelist_item *wh; + struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m)); + struct dev_exception_item *ex; char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN]; rcu_read_lock(); - list_for_each_entry_rcu(wh, &devcgroup->whitelist, list) { - set_access(acc, wh->access); - set_majmin(maj, wh->major); - set_majmin(min, wh->minor); - seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type), + /* + * To preserve the compatibility: + * - Only show the "all devices" when the default policy is to allow + * - List the exceptions in case the default policy is to deny + * This way, the file remains as a "whitelist of devices" + */ + if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { + set_access(acc, ACC_MASK); + set_majmin(maj, ~0); + set_majmin(min, ~0); + seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL), maj, min, acc); + } else { + list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) { + set_access(acc, ex->access); + set_majmin(maj, ex->major); + set_majmin(min, ex->minor); + seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type), + maj, min, acc); + } } rcu_read_unlock(); return 0; } -/* - * may_access_whitelist: - * does the access granted to dev_cgroup c contain the access - * requested in whitelist item refwh. - * return 1 if yes, 0 if no. 
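devcgroup_seq_show() above preserves the old "whitelist" look of the devices.list file: a default-allow group is shown as the single entry "a *:* rwm", and only default-deny groups list their exceptions. The sketch below reproduces that rendering in userspace; the exception struct and helper functions are simplified stand-ins, not the kernel's.

/* Userspace sketch of how "devices.list" is rendered after this change. */
#include <stdio.h>
#include <string.h>

#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4

struct exception { char type; unsigned int major, minor; int access; };

static void set_majmin(char *buf, unsigned int m)
{
    if (m == ~0u)
        strcpy(buf, "*");
    else
        sprintf(buf, "%u", m);
}

static void set_access(char *buf, int access)
{
    buf[0] = '\0';
    if (access & ACC_READ)  strcat(buf, "r");
    if (access & ACC_WRITE) strcat(buf, "w");
    if (access & ACC_MKNOD) strcat(buf, "m");
}

static void show_list(int default_allow, const struct exception *ex, int n)
{
    char maj[16], min[16], acc[4];
    int i;

    if (default_allow) {
        printf("a *:* rwm\n");            /* single whole-device entry */
        return;
    }
    for (i = 0; i < n; i++) {
        set_majmin(maj, ex[i].major);
        set_majmin(min, ex[i].minor);
        set_access(acc, ex[i].access);
        printf("%c %s:%s %s\n", ex[i].type, maj, min, acc);
    }
}

int main(void)
{
    struct exception ex[] = {
        { 'c', 1, 3, ACC_READ | ACC_WRITE },   /* e.g. /dev/null       */
        { 'b', ~0u, ~0u, ACC_MKNOD },          /* mknod any block dev  */
    };

    show_list(0, ex, 2);   /* default deny: list the exceptions   */
    show_list(1, ex, 2);   /* default allow: single "a *:* rwm"   */
    return 0;
}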
- * call with devcgroup_mutex held +/** + * match_exception - iterates the exception list trying to find a complete match + * @exceptions: list of exceptions + * @type: device type (DEV_BLOCK or DEV_CHAR) + * @major: device file major number, ~0 to match all + * @minor: device file minor number, ~0 to match all + * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD) + * + * It is considered a complete match if an exception is found that will + * contain the entire range of provided parameters. + * + * Return: true in case it matches an exception completely */ -static int may_access_whitelist(struct dev_cgroup *c, - struct dev_whitelist_item *refwh) +static bool match_exception(struct list_head *exceptions, short type, + u32 major, u32 minor, short access) { - struct dev_whitelist_item *whitem; + struct dev_exception_item *ex; - list_for_each_entry(whitem, &c->whitelist, list) { - if (whitem->type & DEV_ALL) - return 1; - if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK)) + list_for_each_entry_rcu(ex, exceptions, list) { + if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) continue; - if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR)) + if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR)) continue; - if (whitem->major != ~0 && whitem->major != refwh->major) + if (ex->major != ~0 && ex->major != major) continue; - if (whitem->minor != ~0 && whitem->minor != refwh->minor) + if (ex->minor != ~0 && ex->minor != minor) continue; - if (refwh->access & (~whitem->access)) + /* provided access cannot have more than the exception rule */ + if (access & (~ex->access)) continue; - return 1; + return true; } - return 0; + return false; +} + +/** + * match_exception_partial - iterates the exception list trying to find a partial match + * @exceptions: list of exceptions + * @type: device type (DEV_BLOCK or DEV_CHAR) + * @major: device file major number, ~0 to match all + * @minor: device file minor number, ~0 to match all + * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD) + * + * It is considered a partial match if an exception's range is found to + * contain *any* of the devices specified by provided parameters. This is + * used to make sure no extra access is being granted that is forbidden by + * any of the exception list. 
+ * + * Return: true in case the provided range mat matches an exception completely + */ +static bool match_exception_partial(struct list_head *exceptions, short type, + u32 major, u32 minor, short access) +{ + struct dev_exception_item *ex; + + list_for_each_entry_rcu(ex, exceptions, list) { + if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK)) + continue; + if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR)) + continue; + /* + * We must be sure that both the exception and the provided + * range aren't masking all devices + */ + if (ex->major != ~0 && major != ~0 && ex->major != major) + continue; + if (ex->minor != ~0 && minor != ~0 && ex->minor != minor) + continue; + /* + * In order to make sure the provided range isn't matching + * an exception, all its access bits shouldn't match the + * exception's access bits + */ + if (!(access & ex->access)) + continue; + return true; + } + return false; +} + +/** + * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions + * @dev_cgroup: dev cgroup to be tested against + * @refex: new exception + * @behavior: behavior of the exception's dev_cgroup + * + * This is used to make sure a child cgroup won't have more privileges + * than its parent + */ +static bool verify_new_ex(struct dev_cgroup *dev_cgroup, + struct dev_exception_item *refex, + enum devcg_behavior behavior) +{ + bool match = false; + + rcu_lockdep_assert(rcu_read_lock_held() || + lockdep_is_held(&devcgroup_mutex), + "device_cgroup:verify_new_ex called without proper synchronization"); + + if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) { + if (behavior == DEVCG_DEFAULT_ALLOW) { + /* + * new exception in the child doesn't matter, only + * adding extra restrictions + */ + return true; + } else { + /* + * new exception in the child will add more devices + * that can be acessed, so it can't match any of + * parent's exceptions, even slightly + */ + match = match_exception_partial(&dev_cgroup->exceptions, + refex->type, + refex->major, + refex->minor, + refex->access); + + if (match) + return false; + return true; + } + } else { + /* + * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore + * the new exception will add access to more devices and must + * be contained completely in an parent's exception to be + * allowed + */ + match = match_exception(&dev_cgroup->exceptions, refex->type, + refex->major, refex->minor, + refex->access); + + if (match) + /* parent has an exception that matches the proposed */ + return true; + else + return false; + } + return false; } /* * parent_has_perm: - * when adding a new allow rule to a device whitelist, the rule + * when adding a new allow rule to a device exception list, the rule * must be allowed in the parent device */ static int parent_has_perm(struct dev_cgroup *childcg, - struct dev_whitelist_item *wh) + struct dev_exception_item *ex) { - struct cgroup *pcg = childcg->css.cgroup->parent; - struct dev_cgroup *parent; + struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent); - if (!pcg) + if (!parent) return 1; - parent = cgroup_to_devcgroup(pcg); - return may_access_whitelist(parent, wh); + return verify_new_ex(parent, ex, childcg->behavior); +} + +/** + * parent_allows_removal - verify if it's ok to remove an exception + * @childcg: child cgroup from where the exception will be removed + * @ex: exception being removed + * + * When removing an exception in cgroups with default ALLOW policy, it must + * be checked if removing it will give the child cgroup more access than the + * parent. 
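The two matchers above are the heart of the new exception model: match_exception() succeeds only when one exception covers the entire request (how access is granted under default deny), while match_exception_partial() succeeds on any overlap (how access is refused under default allow and how child rules are validated). A runnable illustration with simplified types and constants standing in for the kernel's:

#include <stdbool.h>
#include <stdio.h>

#define DEV_BLOCK 1
#define DEV_CHAR  2
#define ACC_MKNOD 1
#define ACC_READ  2
#define ACC_WRITE 4

struct exception { short type; unsigned int major, minor; short access; };

/* Complete match: one exception must contain the whole request. */
static bool match_exception(const struct exception *ex, int n, short type,
                            unsigned int major, unsigned int minor, short access)
{
    int i;

    for (i = 0; i < n; i++) {
        if ((type & DEV_BLOCK) && !(ex[i].type & DEV_BLOCK))
            continue;
        if ((type & DEV_CHAR) && !(ex[i].type & DEV_CHAR))
            continue;
        if (ex[i].major != ~0u && ex[i].major != major)
            continue;
        if (ex[i].minor != ~0u && ex[i].minor != minor)
            continue;
        if (access & ~ex[i].access)   /* request asks for more than allowed */
            continue;
        return true;
    }
    return false;
}

/* Partial match: any overlap in devices and access bits is enough. */
static bool match_exception_partial(const struct exception *ex, int n, short type,
                                    unsigned int major, unsigned int minor, short access)
{
    int i;

    for (i = 0; i < n; i++) {
        if ((type & DEV_BLOCK) && !(ex[i].type & DEV_BLOCK))
            continue;
        if ((type & DEV_CHAR) && !(ex[i].type & DEV_CHAR))
            continue;
        if (ex[i].major != ~0u && major != ~0u && ex[i].major != major)
            continue;
        if (ex[i].minor != ~0u && minor != ~0u && ex[i].minor != minor)
            continue;
        if (!(access & ex[i].access)) /* no overlap in access bits at all */
            continue;
        return true;
    }
    return false;
}

int main(void)
{
    struct exception ex[] = { { DEV_CHAR, 1, 3, ACC_READ | ACC_WRITE } };

    /* "c 1:3 r" is fully covered; "c 1:* rw" only overlaps the exception. */
    printf("full c 1:3 r : %d\n", match_exception(ex, 1, DEV_CHAR, 1, 3, ACC_READ));
    printf("full c 1:* rw: %d\n", match_exception(ex, 1, DEV_CHAR, 1, ~0u, ACC_READ | ACC_WRITE));
    printf("part c 1:* rw: %d\n", match_exception_partial(ex, 1, DEV_CHAR, 1, ~0u, ACC_READ | ACC_WRITE));
    return 0;
}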
+ * + * Return: true if it's ok to remove exception, false otherwise + */ +static bool parent_allows_removal(struct dev_cgroup *childcg, + struct dev_exception_item *ex) +{ + struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent); + + if (!parent) + return true; + + /* It's always allowed to remove access to devices */ + if (childcg->behavior == DEVCG_DEFAULT_DENY) + return true; + + /* + * Make sure you're not removing part or a whole exception existing in + * the parent cgroup + */ + return !match_exception_partial(&parent->exceptions, ex->type, + ex->major, ex->minor, ex->access); +} + +/** + * may_allow_all - checks if it's possible to change the behavior to + * allow based on parent's rules. + * @parent: device cgroup's parent + * returns: != 0 in case it's allowed, 0 otherwise + */ +static inline int may_allow_all(struct dev_cgroup *parent) +{ + if (!parent) + return 1; + return parent->behavior == DEVCG_DEFAULT_ALLOW; +} + +/** + * revalidate_active_exceptions - walks through the active exception list and + * revalidates the exceptions based on parent's + * behavior and exceptions. The exceptions that + * are no longer valid will be removed. + * Called with devcgroup_mutex held. + * @devcg: cgroup which exceptions will be checked + * + * This is one of the three key functions for hierarchy implementation. + * This function is responsible for re-evaluating all the cgroup's active + * exceptions due to a parent's exception change. + * Refer to Documentation/cgroups/devices.txt for more details. + */ +static void revalidate_active_exceptions(struct dev_cgroup *devcg) +{ + struct dev_exception_item *ex; + struct list_head *this, *tmp; + + list_for_each_safe(this, tmp, &devcg->exceptions) { + ex = container_of(this, struct dev_exception_item, list); + if (!parent_has_perm(devcg, ex)) + dev_exception_rm(devcg, ex); + } +} + +/** + * propagate_exception - propagates a new exception to the children + * @devcg_root: device cgroup that added a new exception + * @ex: new exception to be propagated + * + * returns: 0 in case of success, != 0 in case of error + */ +static int propagate_exception(struct dev_cgroup *devcg_root, + struct dev_exception_item *ex) +{ + struct cgroup_subsys_state *pos; + int rc = 0; + + rcu_read_lock(); + + css_for_each_descendant_pre(pos, &devcg_root->css) { + struct dev_cgroup *devcg = css_to_devcgroup(pos); + + /* + * Because devcgroup_mutex is held, no devcg will become + * online or offline during the tree walk (see on/offline + * methods), and online ones are safe to access outside RCU + * read lock without bumping refcnt. + */ + if (pos == &devcg_root->css || !is_devcg_online(devcg)) + continue; + + rcu_read_unlock(); + + /* + * in case both root's behavior and devcg is allow, a new + * restriction means adding to the exception list + */ + if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW && + devcg->behavior == DEVCG_DEFAULT_ALLOW) { + rc = dev_exception_add(devcg, ex); + if (rc) + break; + } else { + /* + * in the other possible cases: + * root's behavior: allow, devcg's: deny + * root's behavior: deny, devcg's: deny + * the exception will be removed + */ + dev_exception_rm(devcg, ex); + } + revalidate_active_exceptions(devcg); + + rcu_read_lock(); + } + + rcu_read_unlock(); + return rc; } /* - * Modify the whitelist using allow/deny rules. + * Modify the exception list using allow/deny rules. * CAP_SYS_ADMIN is needed for this. 
It's at least separate from CAP_MKNOD * so we can give a container CAP_MKNOD to let it create devices but not - * modify the whitelist. + * modify the exception list. * It seems likely we'll want to add a CAP_CONTAINER capability to allow * us to also grant CAP_SYS_ADMIN to containers without giving away the - * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN + * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN * * Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting * new access is only allowed if you're in the top-level cgroup, or your * parent cgroup has the access you're asking for. */ static int devcgroup_update_access(struct dev_cgroup *devcgroup, - int filetype, const char *buffer) + int filetype, char *buffer) { const char *b; - char *endp; - int count; - struct dev_whitelist_item wh; + char temp[12]; /* 11 + 1 characters needed for a u32 */ + int count, rc = 0; + struct dev_exception_item ex; + struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent); if (!capable(CAP_SYS_ADMIN)) return -EPERM; - memset(&wh, 0, sizeof(wh)); + memset(&ex, 0, sizeof(ex)); b = buffer; switch (*b) { case 'a': - wh.type = DEV_ALL; - wh.access = ACC_MASK; - wh.major = ~0; - wh.minor = ~0; - goto handle; + switch (filetype) { + case DEVCG_ALLOW: + if (css_has_online_children(&devcgroup->css)) + return -EINVAL; + + if (!may_allow_all(parent)) + return -EPERM; + dev_exception_clean(devcgroup); + devcgroup->behavior = DEVCG_DEFAULT_ALLOW; + if (!parent) + break; + + rc = dev_exceptions_copy(&devcgroup->exceptions, + &parent->exceptions); + if (rc) + return rc; + break; + case DEVCG_DENY: + if (css_has_online_children(&devcgroup->css)) + return -EINVAL; + + dev_exception_clean(devcgroup); + devcgroup->behavior = DEVCG_DEFAULT_DENY; + break; + default: + return -EINVAL; + } + return 0; case 'b': - wh.type = DEV_BLOCK; + ex.type = DEV_BLOCK; break; case 'c': - wh.type = DEV_CHAR; + ex.type = DEV_CHAR; break; default: return -EINVAL; @@ -370,11 +659,19 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, return -EINVAL; b++; if (*b == '*') { - wh.major = ~0; + ex.major = ~0; b++; } else if (isdigit(*b)) { - wh.major = simple_strtoul(b, &endp, 10); - b = endp; + memset(temp, 0, sizeof(temp)); + for (count = 0; count < sizeof(temp) - 1; count++) { + temp[count] = *b; + b++; + if (!isdigit(*b)) + break; + } + rc = kstrtou32(temp, 10, &ex.major); + if (rc) + return -EINVAL; } else { return -EINVAL; } @@ -384,11 +681,19 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, /* read minor */ if (*b == '*') { - wh.minor = ~0; + ex.minor = ~0; b++; } else if (isdigit(*b)) { - wh.minor = simple_strtoul(b, &endp, 10); - b = endp; + memset(temp, 0, sizeof(temp)); + for (count = 0; count < sizeof(temp) - 1; count++) { + temp[count] = *b; + b++; + if (!isdigit(*b)) + break; + } + rc = kstrtou32(temp, 10, &ex.minor); + if (rc) + return -EINVAL; } else { return -EINVAL; } @@ -397,13 +702,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, for (b++, count = 0; count < 3; count++, b++) { switch (*b) { case 'r': - wh.access |= ACC_READ; + ex.access |= ACC_READ; break; case 'w': - wh.access |= ACC_WRITE; + ex.access |= ACC_WRITE; break; case 'm': - wh.access |= ACC_MKNOD; + ex.access |= ACC_MKNOD; break; case '\n': case '\0': @@ -414,140 +719,150 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup, } } -handle: switch (filetype) { case DEVCG_ALLOW: - if (!parent_has_perm(devcgroup, &wh)) + /* + * 
If the default policy is to allow by default, try to remove + * an matching exception instead. And be silent about it: we + * don't want to break compatibility + */ + if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) { + /* Check if the parent allows removing it first */ + if (!parent_allows_removal(devcgroup, &ex)) + return -EPERM; + dev_exception_rm(devcgroup, &ex); + break; + } + + if (!parent_has_perm(devcgroup, &ex)) return -EPERM; - return dev_whitelist_add(devcgroup, &wh); + rc = dev_exception_add(devcgroup, &ex); + break; case DEVCG_DENY: - dev_whitelist_rm(devcgroup, &wh); + /* + * If the default policy is to deny by default, try to remove + * an matching exception instead. And be silent about it: we + * don't want to break compatibility + */ + if (devcgroup->behavior == DEVCG_DEFAULT_DENY) + dev_exception_rm(devcgroup, &ex); + else + rc = dev_exception_add(devcgroup, &ex); + + if (rc) + break; + /* we only propagate new restrictions */ + rc = propagate_exception(devcgroup, &ex); break; default: - return -EINVAL; + rc = -EINVAL; } - return 0; + return rc; } -static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft, - const char *buffer) +static ssize_t devcgroup_access_write(struct kernfs_open_file *of, + char *buf, size_t nbytes, loff_t off) { int retval; mutex_lock(&devcgroup_mutex); - retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp), - cft->private, buffer); + retval = devcgroup_update_access(css_to_devcgroup(of_css(of)), + of_cft(of)->private, strstrip(buf)); mutex_unlock(&devcgroup_mutex); - return retval; + return retval ?: nbytes; } static struct cftype dev_cgroup_files[] = { { .name = "allow", - .write_string = devcgroup_access_write, + .write = devcgroup_access_write, .private = DEVCG_ALLOW, }, { .name = "deny", - .write_string = devcgroup_access_write, + .write = devcgroup_access_write, .private = DEVCG_DENY, }, { .name = "list", - .read_seq_string = devcgroup_seq_read, + .seq_show = devcgroup_seq_show, .private = DEVCG_LIST, }, + { } /* terminate */ }; -static int devcgroup_populate(struct cgroup_subsys *ss, - struct cgroup *cgroup) -{ - return cgroup_add_files(cgroup, ss, dev_cgroup_files, - ARRAY_SIZE(dev_cgroup_files)); -} - -struct cgroup_subsys devices_subsys = { - .name = "devices", - .can_attach = devcgroup_can_attach, - .create = devcgroup_create, - .destroy = devcgroup_destroy, - .populate = devcgroup_populate, - .subsys_id = devices_subsys_id, +struct cgroup_subsys devices_cgrp_subsys = { + .css_alloc = devcgroup_css_alloc, + .css_free = devcgroup_css_free, + .css_online = devcgroup_online, + .css_offline = devcgroup_offline, + .base_cftypes = dev_cgroup_files, }; -int devcgroup_inode_permission(struct inode *inode, int mask) +/** + * __devcgroup_check_permission - checks if an inode operation is permitted + * @dev_cgroup: the dev cgroup to be tested against + * @type: device type + * @major: device major number + * @minor: device minor number + * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD + * + * returns 0 on success, -EPERM case the operation is not permitted + */ +static int __devcgroup_check_permission(short type, u32 major, u32 minor, + short access) { struct dev_cgroup *dev_cgroup; - struct dev_whitelist_item *wh; - - dev_t device = inode->i_rdev; - if (!device) - return 0; - if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode)) - return 0; + bool rc; rcu_read_lock(); - dev_cgroup = task_devcgroup(current); + if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) + /* Can't match any of the exceptions, even partially 
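For reference, the strings accepted by the devices.allow and devices.deny files are either a bare "a" (switch the default behavior) or "<b|c> <major|*>:<minor|*> [rwm]". The following hypothetical userspace parser mirrors the grammar handled by devcgroup_update_access() above; it is only a sketch of the accepted syntax, not a reuse of kernel code.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rule {
    char type;                 /* 'a', 'b' or 'c'    */
    unsigned int major, minor; /* ~0u means "*"      */
    char access[4];            /* subset of "rwm"    */
};

static int parse_rule(const char *s, struct rule *r)
{
    char majbuf[16], minbuf[16];

    memset(r, 0, sizeof(*r));
    if (!strcmp(s, "a")) {            /* behavior switch, no device spec */
        r->type = 'a';
        r->major = r->minor = ~0u;
        strcpy(r->access, "rwm");
        return 0;
    }
    if (sscanf(s, "%c %15[^:]:%15s %3[rwm]",
               &r->type, majbuf, minbuf, r->access) != 4)
        return -1;
    if (r->type != 'b' && r->type != 'c')
        return -1;
    r->major = strcmp(majbuf, "*") ? (unsigned int)strtoul(majbuf, NULL, 10) : ~0u;
    r->minor = strcmp(minbuf, "*") ? (unsigned int)strtoul(minbuf, NULL, 10) : ~0u;
    return 0;
}

int main(void)
{
    struct rule r;

    if (parse_rule("c 1:3 rw", &r) == 0)
        printf("type=%c major=%u minor=%u access=%s\n",
               r.type, r.major, r.minor, r.access);
    return 0;
}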
*/ + rc = !match_exception_partial(&dev_cgroup->exceptions, + type, major, minor, access); + else + /* Need to match completely one exception to be allowed */ + rc = match_exception(&dev_cgroup->exceptions, type, major, + minor, access); + rcu_read_unlock(); - list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) { - if (wh->type & DEV_ALL) - goto found; - if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode)) - continue; - if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode)) - continue; - if (wh->major != ~0 && wh->major != imajor(inode)) - continue; - if (wh->minor != ~0 && wh->minor != iminor(inode)) - continue; - - if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE)) - continue; - if ((mask & MAY_READ) && !(wh->access & ACC_READ)) - continue; -found: - rcu_read_unlock(); - return 0; - } + if (!rc) + return -EPERM; - rcu_read_unlock(); + return 0; +} - return -EPERM; +int __devcgroup_inode_permission(struct inode *inode, int mask) +{ + short type, access = 0; + + if (S_ISBLK(inode->i_mode)) + type = DEV_BLOCK; + if (S_ISCHR(inode->i_mode)) + type = DEV_CHAR; + if (mask & MAY_WRITE) + access |= ACC_WRITE; + if (mask & MAY_READ) + access |= ACC_READ; + + return __devcgroup_check_permission(type, imajor(inode), iminor(inode), + access); } int devcgroup_inode_mknod(int mode, dev_t dev) { - struct dev_cgroup *dev_cgroup; - struct dev_whitelist_item *wh; + short type; if (!S_ISBLK(mode) && !S_ISCHR(mode)) return 0; - rcu_read_lock(); - - dev_cgroup = task_devcgroup(current); - - list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) { - if (wh->type & DEV_ALL) - goto found; - if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode)) - continue; - if ((wh->type & DEV_CHAR) && !S_ISCHR(mode)) - continue; - if (wh->major != ~0 && wh->major != MAJOR(dev)) - continue; - if (wh->minor != ~0 && wh->minor != MINOR(dev)) - continue; - - if (!(wh->access & ACC_MKNOD)) - continue; -found: - rcu_read_unlock(); - return 0; - } + if (S_ISBLK(mode)) + type = DEV_BLOCK; + else + type = DEV_CHAR; - rcu_read_unlock(); + return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev), + ACC_MKNOD); - return -EPERM; } diff --git a/security/inode.c b/security/inode.c index c4df2fbebe6..43ce6e19015 100644 --- a/security/inode.c +++ b/security/inode.c @@ -25,100 +25,6 @@ static struct vfsmount *mount; static int mount_count; -/* - * TODO: - * I think I can get rid of these default_file_ops, but not quite sure... 
- */ -static ssize_t default_read_file(struct file *file, char __user *buf, - size_t count, loff_t *ppos) -{ - return 0; -} - -static ssize_t default_write_file(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) -{ - return count; -} - -static int default_open(struct inode *inode, struct file *file) -{ - if (inode->i_private) - file->private_data = inode->i_private; - - return 0; -} - -static const struct file_operations default_file_ops = { - .read = default_read_file, - .write = default_write_file, - .open = default_open, - .llseek = noop_llseek, -}; - -static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev) -{ - struct inode *inode = new_inode(sb); - - if (inode) { - inode->i_ino = get_next_ino(); - inode->i_mode = mode; - inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - switch (mode & S_IFMT) { - default: - init_special_inode(inode, mode, dev); - break; - case S_IFREG: - inode->i_fop = &default_file_ops; - break; - case S_IFDIR: - inode->i_op = &simple_dir_inode_operations; - inode->i_fop = &simple_dir_operations; - - /* directory inodes start off with i_nlink == 2 (for "." entry) */ - inc_nlink(inode); - break; - } - } - return inode; -} - -/* SMP-safe */ -static int mknod(struct inode *dir, struct dentry *dentry, - int mode, dev_t dev) -{ - struct inode *inode; - int error = -ENOMEM; - - if (dentry->d_inode) - return -EEXIST; - - inode = get_inode(dir->i_sb, mode, dev); - if (inode) { - d_instantiate(dentry, inode); - dget(dentry); - error = 0; - } - return error; -} - -static int mkdir(struct inode *dir, struct dentry *dentry, int mode) -{ - int res; - - mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR; - res = mknod(dir, dentry, mode, 0); - if (!res) - inc_nlink(dir); - return res; -} - -static int create(struct inode *dir, struct dentry *dentry, int mode) -{ - mode = (mode & S_IALLUGO) | S_IFREG; - return mknod(dir, dentry, mode, 0); -} - static inline int positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); @@ -145,38 +51,6 @@ static struct file_system_type fs_type = { .kill_sb = kill_litter_super, }; -static int create_by_name(const char *name, mode_t mode, - struct dentry *parent, - struct dentry **dentry) -{ - int error = 0; - - *dentry = NULL; - - /* If the parent is not specified, we create it in the root. - * We need the root dentry to do this, which is in the super - * block. A pointer to that is in the struct vfsmount that we - * have around. - */ - if (!parent) - parent = mount->mnt_sb->s_root; - - mutex_lock(&parent->d_inode->i_mutex); - *dentry = lookup_one_len(name, parent, strlen(name)); - if (!IS_ERR(*dentry)) { - if ((mode & S_IFMT) == S_IFDIR) - error = mkdir(parent->d_inode, *dentry, mode); - else - error = create(parent->d_inode, *dentry, mode); - if (error) - dput(*dentry); - } else - error = PTR_ERR(*dentry); - mutex_unlock(&parent->d_inode->i_mutex); - - return error; -} - /** * securityfs_create_file - create a file in the securityfs filesystem * @@ -205,35 +79,70 @@ static int create_by_name(const char *name, mode_t mode, * If securityfs is not enabled in the kernel, the value %-ENODEV is * returned. 
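Since the prototype changes from mode_t to umode_t here, a minimal out-of-tree module sketch of the securityfs_create_file() interface may be useful. The module below is entirely hypothetical and not part of this patch; it creates /sys/kernel/security/demo and returns a fixed string on read. securityfs_remove() belongs to the same API but is not visible in this hunk.

/* Hypothetical module exercising securityfs_create_file(). */
#include <linux/module.h>
#include <linux/security.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/uaccess.h>

static struct dentry *demo_dentry;
static const char demo_msg[] = "hello from securityfs\n";

static ssize_t demo_read(struct file *file, char __user *buf,
                         size_t count, loff_t *ppos)
{
    return simple_read_from_buffer(buf, count, ppos, demo_msg,
                                   sizeof(demo_msg) - 1);
}

static const struct file_operations demo_fops = {
    .read = demo_read,
};

static int __init demo_init(void)
{
    demo_dentry = securityfs_create_file("demo", 0444, NULL, NULL,
                                         &demo_fops);
    return IS_ERR(demo_dentry) ? PTR_ERR(demo_dentry) : 0;
}

static void __exit demo_exit(void)
{
    securityfs_remove(demo_dentry);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");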
*/ -struct dentry *securityfs_create_file(const char *name, mode_t mode, +struct dentry *securityfs_create_file(const char *name, umode_t mode, struct dentry *parent, void *data, const struct file_operations *fops) { - struct dentry *dentry = NULL; + struct dentry *dentry; + int is_dir = S_ISDIR(mode); + struct inode *dir, *inode; int error; + if (!is_dir) { + BUG_ON(!fops); + mode = (mode & S_IALLUGO) | S_IFREG; + } + pr_debug("securityfs: creating file '%s'\n",name); error = simple_pin_fs(&fs_type, &mount, &mount_count); - if (error) { - dentry = ERR_PTR(error); - goto exit; + if (error) + return ERR_PTR(error); + + if (!parent) + parent = mount->mnt_root; + + dir = parent->d_inode; + + mutex_lock(&dir->i_mutex); + dentry = lookup_one_len(name, parent, strlen(name)); + if (IS_ERR(dentry)) + goto out; + + if (dentry->d_inode) { + error = -EEXIST; + goto out1; } - error = create_by_name(name, mode, parent, &dentry); - if (error) { - dentry = ERR_PTR(error); - simple_release_fs(&mount, &mount_count); - goto exit; + inode = new_inode(dir->i_sb); + if (!inode) { + error = -ENOMEM; + goto out1; } - if (dentry->d_inode) { - if (fops) - dentry->d_inode->i_fop = fops; - if (data) - dentry->d_inode->i_private = data; + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; + inode->i_private = data; + if (is_dir) { + inode->i_op = &simple_dir_inode_operations; + inode->i_fop = &simple_dir_operations; + inc_nlink(inode); + inc_nlink(dir); + } else { + inode->i_fop = fops; } -exit: + d_instantiate(dentry, inode); + dget(dentry); + mutex_unlock(&dir->i_mutex); + return dentry; + +out1: + dput(dentry); + dentry = ERR_PTR(error); +out: + mutex_unlock(&dir->i_mutex); + simple_release_fs(&mount, &mount_count); return dentry; } EXPORT_SYMBOL_GPL(securityfs_create_file); diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig new file mode 100644 index 00000000000..245c6d92065 --- /dev/null +++ b/security/integrity/Kconfig @@ -0,0 +1,48 @@ +# +config INTEGRITY + def_bool y + depends on IMA || EVM + +config INTEGRITY_SIGNATURE + boolean "Digital signature verification using multiple keyrings" + depends on INTEGRITY && KEYS + default n + select SIGNATURE + help + This option enables digital signature verification support + using multiple keyrings. It defines separate keyrings for each + of the different use cases - evm, ima, and modules. + Different keyrings improves search performance, but also allow + to "lock" certain keyring to prevent adding new keys. + This is useful for evm and module keyrings, when keys are + usually only added from initramfs. + +config INTEGRITY_AUDIT + bool "Enables integrity auditing support " + depends on INTEGRITY && AUDIT + default y + help + In addition to enabling integrity auditing support, this + option adds a kernel parameter 'integrity_audit', which + controls the level of integrity auditing messages. + 0 - basic integrity auditing messages (default) + 1 - additional integrity auditing messages + + Additional informational integrity auditing messages would + be enabled by specifying 'integrity_audit=1' on the kernel + command line. + +config INTEGRITY_ASYMMETRIC_KEYS + boolean "Enable asymmetric keys support" + depends on INTEGRITY_SIGNATURE + default n + select ASYMMETRIC_KEY_TYPE + select ASYMMETRIC_PUBLIC_KEY_SUBTYPE + select PUBLIC_KEY_ALGO_RSA + select X509_CERTIFICATE_PARSER + help + This option enables digital signature verification using + asymmetric keys. 
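Assuming the rest of the kernel configuration already provides CONFIG_KEYS and CONFIG_AUDIT (required by the signature and audit options respectively), a .config fragment exercising the options introduced by this Kconfig file might look like the following; CONFIG_INTEGRITY itself is def_bool and comes in automatically once IMA or EVM is selected.

CONFIG_IMA=y
CONFIG_EVM=y
CONFIG_INTEGRITY_SIGNATURE=y
CONFIG_INTEGRITY_ASYMMETRIC_KEYS=y
CONFIG_INTEGRITY_AUDIT=y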
+ +source security/integrity/ima/Kconfig +source security/integrity/evm/Kconfig diff --git a/security/integrity/Makefile b/security/integrity/Makefile new file mode 100644 index 00000000000..0793f4811cb --- /dev/null +++ b/security/integrity/Makefile @@ -0,0 +1,15 @@ +# +# Makefile for caching inode integrity data (iint) +# + +obj-$(CONFIG_INTEGRITY) += integrity.o +obj-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o +obj-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o +obj-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o + +integrity-y := iint.o + +subdir-$(CONFIG_IMA) += ima +obj-$(CONFIG_IMA) += ima/ +subdir-$(CONFIG_EVM) += evm +obj-$(CONFIG_EVM) += evm/ diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c new file mode 100644 index 00000000000..b4af4ebc5be --- /dev/null +++ b/security/integrity/digsig.c @@ -0,0 +1,58 @@ +/* + * Copyright (C) 2011 Intel Corporation + * + * Author: + * Dmitry Kasatkin <dmitry.kasatkin@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/err.h> +#include <linux/rbtree.h> +#include <linux/key-type.h> +#include <linux/digsig.h> + +#include "integrity.h" + +static struct key *keyring[INTEGRITY_KEYRING_MAX]; + +static const char *keyring_name[INTEGRITY_KEYRING_MAX] = { + "_evm", + "_module", + "_ima", +}; + +int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, + const char *digest, int digestlen) +{ + if (id >= INTEGRITY_KEYRING_MAX) + return -EINVAL; + + if (!keyring[id]) { + keyring[id] = + request_key(&key_type_keyring, keyring_name[id], NULL); + if (IS_ERR(keyring[id])) { + int err = PTR_ERR(keyring[id]); + pr_err("no %s keyring: %d\n", keyring_name[id], err); + keyring[id] = NULL; + return err; + } + } + + switch (sig[1]) { + case 1: + /* v1 API expect signature without xattr type */ + return digsig_verify(keyring[id], sig + 1, siglen - 1, + digest, digestlen); + case 2: + return asymmetric_verify(keyring[id], sig, siglen, + digest, digestlen); + } + + return -EOPNOTSUPP; +} diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c new file mode 100644 index 00000000000..9eae4809006 --- /dev/null +++ b/security/integrity/digsig_asymmetric.c @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2013 Intel Corporation + * + * Author: + * Dmitry Kasatkin <dmitry.kasatkin@intel.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/err.h> +#include <linux/key-type.h> +#include <crypto/public_key.h> +#include <keys/asymmetric-type.h> + +#include "integrity.h" + +/* + * Request an asymmetric key. 
+ */ +static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid) +{ + struct key *key; + char name[12]; + + sprintf(name, "id:%x", keyid); + + pr_debug("key search: \"%s\"\n", name); + + if (keyring) { + /* search in specific keyring */ + key_ref_t kref; + kref = keyring_search(make_key_ref(keyring, 1), + &key_type_asymmetric, name); + if (IS_ERR(kref)) + key = ERR_CAST(kref); + else + key = key_ref_to_ptr(kref); + } else { + key = request_key(&key_type_asymmetric, name, NULL); + } + + if (IS_ERR(key)) { + pr_warn("Request for unknown key '%s' err %ld\n", + name, PTR_ERR(key)); + switch (PTR_ERR(key)) { + /* Hide some search errors */ + case -EACCES: + case -ENOTDIR: + case -EAGAIN: + return ERR_PTR(-ENOKEY); + default: + return key; + } + } + + pr_debug("%s() = 0 [%x]\n", __func__, key_serial(key)); + + return key; +} + +int asymmetric_verify(struct key *keyring, const char *sig, + int siglen, const char *data, int datalen) +{ + struct public_key_signature pks; + struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig; + struct key *key; + int ret = -ENOMEM; + + if (siglen <= sizeof(*hdr)) + return -EBADMSG; + + siglen -= sizeof(*hdr); + + if (siglen != __be16_to_cpu(hdr->sig_size)) + return -EBADMSG; + + if (hdr->hash_algo >= PKEY_HASH__LAST) + return -ENOPKG; + + key = request_asymmetric_key(keyring, __be32_to_cpu(hdr->keyid)); + if (IS_ERR(key)) + return PTR_ERR(key); + + memset(&pks, 0, sizeof(pks)); + + pks.pkey_hash_algo = hdr->hash_algo; + pks.digest = (u8 *)data; + pks.digest_size = datalen; + pks.nr_mpi = 1; + pks.rsa.s = mpi_read_raw_data(hdr->sig, siglen); + + if (pks.rsa.s) + ret = verify_signature(key, &pks); + + mpi_free(pks.rsa.s); + key_put(key); + pr_debug("%s() = %d\n", __func__, ret); + return ret; +} diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig new file mode 100644 index 00000000000..d606f3d12d6 --- /dev/null +++ b/security/integrity/evm/Kconfig @@ -0,0 +1,52 @@ +config EVM + boolean "EVM support" + depends on SECURITY + select KEYS + select ENCRYPTED_KEYS + select CRYPTO_HMAC + select CRYPTO_SHA1 + default n + help + EVM protects a file's security extended attributes against + integrity attacks. + + If you are unsure how to answer this question, answer N. + +if EVM + +menu "EVM options" + +config EVM_ATTR_FSUUID + bool "FSUUID (version 2)" + default y + depends on EVM + help + Include filesystem UUID for HMAC calculation. + + Default value is 'selected', which is former version 2. + if 'not selected', it is former version 1 + + WARNING: changing the HMAC calculation method or adding + additional info to the calculation, requires existing EVM + labeled file systems to be relabeled. + +config EVM_EXTRA_SMACK_XATTRS + bool "Additional SMACK xattrs" + depends on EVM && SECURITY_SMACK + default n + help + Include additional SMACK xattrs for HMAC calculation. + + In addition to the original security xattrs (eg. security.selinux, + security.SMACK64, security.capability, and security.ima) included + in the HMAC calculation, enabling this option includes newly defined + Smack xattrs: security.SMACK64EXEC, security.SMACK64TRANSMUTE and + security.SMACK64MMAP. + + WARNING: changing the HMAC calculation method or adding + additional info to the calculation, requires existing EVM + labeled file systems to be relabeled. 
+ +endmenu + +endif diff --git a/security/integrity/evm/Makefile b/security/integrity/evm/Makefile new file mode 100644 index 00000000000..7393c415a06 --- /dev/null +++ b/security/integrity/evm/Makefile @@ -0,0 +1,7 @@ +# +# Makefile for building the Extended Verification Module(EVM) +# +obj-$(CONFIG_EVM) += evm.o + +evm-y := evm_main.o evm_crypto.o evm_secfs.o +evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h new file mode 100644 index 00000000000..88bfe77efa1 --- /dev/null +++ b/security/integrity/evm/evm.h @@ -0,0 +1,53 @@ +/* + * Copyright (C) 2005-2010 IBM Corporation + * + * Authors: + * Mimi Zohar <zohar@us.ibm.com> + * Kylene Hall <kjhall@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + * File: evm.h + * + */ + +#ifndef __INTEGRITY_EVM_H +#define __INTEGRITY_EVM_H + +#include <linux/xattr.h> +#include <linux/security.h> + +#include "../integrity.h" + +extern int evm_initialized; +extern char *evm_hmac; +extern char *evm_hash; + +#define EVM_ATTR_FSUUID 0x0001 + +extern int evm_hmac_attrs; + +extern struct crypto_shash *hmac_tfm; +extern struct crypto_shash *hash_tfm; + +/* List of EVM protected security xattrs */ +extern char *evm_config_xattrnames[]; + +int evm_init_key(void); +int evm_update_evmxattr(struct dentry *dentry, + const char *req_xattr_name, + const char *req_xattr_value, + size_t req_xattr_value_len); +int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, + const char *req_xattr_value, + size_t req_xattr_value_len, char *digest); +int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name, + const char *req_xattr_value, + size_t req_xattr_value_len, char *digest); +int evm_init_hmac(struct inode *inode, const struct xattr *xattr, + char *hmac_val); +int evm_init_secfs(void); + +#endif diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c new file mode 100644 index 00000000000..5e9687f02e1 --- /dev/null +++ b/security/integrity/evm/evm_crypto.c @@ -0,0 +1,262 @@ +/* + * Copyright (C) 2005-2010 IBM Corporation + * + * Authors: + * Mimi Zohar <zohar@us.ibm.com> + * Kylene Hall <kjhall@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
+ * + * File: evm_crypto.c + * Using root's kernel master key (kmk), calculate the HMAC + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/xattr.h> +#include <keys/encrypted-type.h> +#include <crypto/hash.h> +#include "evm.h" + +#define EVMKEY "evm-key" +#define MAX_KEY_SIZE 128 +static unsigned char evmkey[MAX_KEY_SIZE]; +static int evmkey_len = MAX_KEY_SIZE; + +struct crypto_shash *hmac_tfm; +struct crypto_shash *hash_tfm; + +static DEFINE_MUTEX(mutex); + +static struct shash_desc *init_desc(char type) +{ + long rc; + char *algo; + struct crypto_shash **tfm; + struct shash_desc *desc; + + if (type == EVM_XATTR_HMAC) { + tfm = &hmac_tfm; + algo = evm_hmac; + } else { + tfm = &hash_tfm; + algo = evm_hash; + } + + if (*tfm == NULL) { + mutex_lock(&mutex); + if (*tfm) + goto out; + *tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(*tfm)) { + rc = PTR_ERR(*tfm); + pr_err("Can not allocate %s (reason: %ld)\n", algo, rc); + *tfm = NULL; + mutex_unlock(&mutex); + return ERR_PTR(rc); + } + if (type == EVM_XATTR_HMAC) { + rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len); + if (rc) { + crypto_free_shash(*tfm); + *tfm = NULL; + mutex_unlock(&mutex); + return ERR_PTR(rc); + } + } +out: + mutex_unlock(&mutex); + } + + desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm), + GFP_KERNEL); + if (!desc) + return ERR_PTR(-ENOMEM); + + desc->tfm = *tfm; + desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; + + rc = crypto_shash_init(desc); + if (rc) { + kfree(desc); + return ERR_PTR(rc); + } + return desc; +} + +/* Protect against 'cutting & pasting' security.evm xattr, include inode + * specific info. + * + * (Additional directory/file metadata needs to be added for more complete + * protection.) + */ +static void hmac_add_misc(struct shash_desc *desc, struct inode *inode, + char *digest) +{ + struct h_misc { + unsigned long ino; + __u32 generation; + uid_t uid; + gid_t gid; + umode_t mode; + } hmac_misc; + + memset(&hmac_misc, 0, sizeof(hmac_misc)); + hmac_misc.ino = inode->i_ino; + hmac_misc.generation = inode->i_generation; + hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid); + hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid); + hmac_misc.mode = inode->i_mode; + crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc)); + if (evm_hmac_attrs & EVM_ATTR_FSUUID) + crypto_shash_update(desc, inode->i_sb->s_uuid, + sizeof(inode->i_sb->s_uuid)); + crypto_shash_final(desc, digest); +} + +/* + * Calculate the HMAC value across the set of protected security xattrs. + * + * Instead of retrieving the requested xattr, for performance, calculate + * the hmac using the requested xattr value. Don't alloc/free memory for + * each xattr, but attempt to re-use the previously allocated memory. 
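The xattrs folded into this calculation are the names in evm_config_xattrnames, defined in evm_main.c below. As a rough userspace illustration (the name list here is assumed rather than read from the kernel), plain getxattr(2) shows which of the protected names a given file actually carries:

#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/xattr.h>

/* Illustrative only: assumed to mirror evm_config_xattrnames. */
static const char *names[] = {
	"security.selinux", "security.SMACK64", "security.ima",
	"security.capability", "security.evm", NULL
};

int main(int argc, char **argv)
{
	char buf[1024];
	int i;

	if (argc != 2)
		return 1;
	for (i = 0; names[i]; i++) {
		ssize_t len = getxattr(argv[1], names[i], buf, sizeof(buf));
		if (len < 0)
			printf("%-20s absent (%s)\n", names[i], strerror(errno));
		else
			printf("%-20s %zd bytes\n", names[i], len);
	}
	return 0;
}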
+ */ +static int evm_calc_hmac_or_hash(struct dentry *dentry, + const char *req_xattr_name, + const char *req_xattr_value, + size_t req_xattr_value_len, + char type, char *digest) +{ + struct inode *inode = dentry->d_inode; + struct shash_desc *desc; + char **xattrname; + size_t xattr_size = 0; + char *xattr_value = NULL; + int error; + int size; + + if (!inode->i_op->getxattr) + return -EOPNOTSUPP; + desc = init_desc(type); + if (IS_ERR(desc)) + return PTR_ERR(desc); + + error = -ENODATA; + for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) { + if ((req_xattr_name && req_xattr_value) + && !strcmp(*xattrname, req_xattr_name)) { + error = 0; + crypto_shash_update(desc, (const u8 *)req_xattr_value, + req_xattr_value_len); + continue; + } + size = vfs_getxattr_alloc(dentry, *xattrname, + &xattr_value, xattr_size, GFP_NOFS); + if (size == -ENOMEM) { + error = -ENOMEM; + goto out; + } + if (size < 0) + continue; + + error = 0; + xattr_size = size; + crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size); + } + hmac_add_misc(desc, inode, digest); + +out: + kfree(xattr_value); + kfree(desc); + return error; +} + +int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name, + const char *req_xattr_value, size_t req_xattr_value_len, + char *digest) +{ + return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value, + req_xattr_value_len, EVM_XATTR_HMAC, digest); +} + +int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name, + const char *req_xattr_value, size_t req_xattr_value_len, + char *digest) +{ + return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value, + req_xattr_value_len, IMA_XATTR_DIGEST, digest); +} + +/* + * Calculate the hmac and update security.evm xattr + * + * Expects to be called with i_mutex locked. 
+ */ +int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name, + const char *xattr_value, size_t xattr_value_len) +{ + struct inode *inode = dentry->d_inode; + struct evm_ima_xattr_data xattr_data; + int rc = 0; + + rc = evm_calc_hmac(dentry, xattr_name, xattr_value, + xattr_value_len, xattr_data.digest); + if (rc == 0) { + xattr_data.type = EVM_XATTR_HMAC; + rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM, + &xattr_data, + sizeof(xattr_data), 0); + } else if (rc == -ENODATA && inode->i_op->removexattr) { + rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM); + } + return rc; +} + +int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr, + char *hmac_val) +{ + struct shash_desc *desc; + + desc = init_desc(EVM_XATTR_HMAC); + if (IS_ERR(desc)) { + pr_info("init_desc failed\n"); + return PTR_ERR(desc); + } + + crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len); + hmac_add_misc(desc, inode, hmac_val); + kfree(desc); + return 0; +} + +/* + * Get the key from the TPM for the SHA1-HMAC + */ +int evm_init_key(void) +{ + struct key *evm_key; + struct encrypted_key_payload *ekp; + int rc = 0; + + evm_key = request_key(&key_type_encrypted, EVMKEY, NULL); + if (IS_ERR(evm_key)) + return -ENOENT; + + down_read(&evm_key->sem); + ekp = evm_key->payload.data; + if (ekp->decrypted_datalen > MAX_KEY_SIZE) { + rc = -EINVAL; + goto out; + } + memcpy(evmkey, ekp->decrypted_data, ekp->decrypted_datalen); +out: + /* burn the original key contents */ + memset(ekp->decrypted_data, 0, ekp->decrypted_datalen); + up_read(&evm_key->sem); + key_put(evm_key); + return rc; +} diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c new file mode 100644 index 00000000000..3bcb80df4d0 --- /dev/null +++ b/security/integrity/evm/evm_main.c @@ -0,0 +1,485 @@ +/* + * Copyright (C) 2005-2010 IBM Corporation + * + * Author: + * Mimi Zohar <zohar@us.ibm.com> + * Kylene Hall <kjhall@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
+ * + * File: evm_main.c + * implements evm_inode_setxattr, evm_inode_post_setxattr, + * evm_inode_removexattr, and evm_verifyxattr + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/crypto.h> +#include <linux/audit.h> +#include <linux/xattr.h> +#include <linux/integrity.h> +#include <linux/evm.h> +#include <crypto/hash.h> +#include "evm.h" + +int evm_initialized; + +static char *integrity_status_msg[] = { + "pass", "fail", "no_label", "no_xattrs", "unknown" +}; +char *evm_hmac = "hmac(sha1)"; +char *evm_hash = "sha1"; +int evm_hmac_attrs; + +char *evm_config_xattrnames[] = { +#ifdef CONFIG_SECURITY_SELINUX + XATTR_NAME_SELINUX, +#endif +#ifdef CONFIG_SECURITY_SMACK + XATTR_NAME_SMACK, +#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS + XATTR_NAME_SMACKEXEC, + XATTR_NAME_SMACKTRANSMUTE, + XATTR_NAME_SMACKMMAP, +#endif +#endif +#ifdef CONFIG_IMA_APPRAISE + XATTR_NAME_IMA, +#endif + XATTR_NAME_CAPS, + NULL +}; + +static int evm_fixmode; +static int __init evm_set_fixmode(char *str) +{ + if (strncmp(str, "fix", 3) == 0) + evm_fixmode = 1; + return 0; +} +__setup("evm=", evm_set_fixmode); + +static void __init evm_init_config(void) +{ +#ifdef CONFIG_EVM_ATTR_FSUUID + evm_hmac_attrs |= EVM_ATTR_FSUUID; +#endif + pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs); +} + +static int evm_find_protected_xattrs(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + char **xattr; + int error; + int count = 0; + + if (!inode->i_op->getxattr) + return -EOPNOTSUPP; + + for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) { + error = inode->i_op->getxattr(dentry, *xattr, NULL, 0); + if (error < 0) { + if (error == -ENODATA) + continue; + return error; + } + count++; + } + + return count; +} + +/* + * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr + * + * Compute the HMAC on the dentry's protected set of extended attributes + * and compare it against the stored security.evm xattr. + * + * For performance: + * - use the previoulsy retrieved xattr value and length to calculate the + * HMAC.) + * - cache the verification result in the iint, when available. 
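A stored security.evm value starts with a one-byte type, which the switch further down uses to pick between the HMAC and digital-signature paths. A minimal userspace peek at that byte; the numeric constants are assumptions here, since the enum lives in integrity.h elsewhere in this patch:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	unsigned char val[512];
	ssize_t len;

	if (argc != 2)
		return 1;
	len = getxattr(argv[1], "security.evm", val, sizeof(val));
	if (len < 0) {
		perror("getxattr security.evm");
		return 1;
	}
	/* Assumed values: 2 = EVM_XATTR_HMAC, 3 = EVM_IMA_XATTR_DIGSIG. */
	printf("type byte 0x%02x, %zd bytes total\n", val[0], len);
	return 0;
}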
+ * + * Returns integrity status + */ +static enum integrity_status evm_verify_hmac(struct dentry *dentry, + const char *xattr_name, + char *xattr_value, + size_t xattr_value_len, + struct integrity_iint_cache *iint) +{ + struct evm_ima_xattr_data *xattr_data = NULL; + struct evm_ima_xattr_data calc; + enum integrity_status evm_status = INTEGRITY_PASS; + int rc, xattr_len; + + if (iint && iint->evm_status == INTEGRITY_PASS) + return iint->evm_status; + + /* if status is not PASS, try to check again - against -ENOMEM */ + + /* first need to know the sig type */ + rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0, + GFP_NOFS); + if (rc <= 0) { + if (rc == 0) + evm_status = INTEGRITY_FAIL; /* empty */ + else if (rc == -ENODATA) { + rc = evm_find_protected_xattrs(dentry); + if (rc > 0) + evm_status = INTEGRITY_NOLABEL; + else if (rc == 0) + evm_status = INTEGRITY_NOXATTRS; /* new file */ + } + goto out; + } + + xattr_len = rc; + + /* check value type */ + switch (xattr_data->type) { + case EVM_XATTR_HMAC: + rc = evm_calc_hmac(dentry, xattr_name, xattr_value, + xattr_value_len, calc.digest); + if (rc) + break; + rc = memcmp(xattr_data->digest, calc.digest, + sizeof(calc.digest)); + if (rc) + rc = -EINVAL; + break; + case EVM_IMA_XATTR_DIGSIG: + rc = evm_calc_hash(dentry, xattr_name, xattr_value, + xattr_value_len, calc.digest); + if (rc) + break; + rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM, + (const char *)xattr_data, xattr_len, + calc.digest, sizeof(calc.digest)); + if (!rc) { + /* we probably want to replace rsa with hmac here */ + evm_update_evmxattr(dentry, xattr_name, xattr_value, + xattr_value_len); + } + break; + default: + rc = -EINVAL; + break; + } + + if (rc) + evm_status = (rc == -ENODATA) ? + INTEGRITY_NOXATTRS : INTEGRITY_FAIL; +out: + if (iint) + iint->evm_status = evm_status; + kfree(xattr_data); + return evm_status; +} + +static int evm_protected_xattr(const char *req_xattr_name) +{ + char **xattrname; + int namelen; + int found = 0; + + namelen = strlen(req_xattr_name); + for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) { + if ((strlen(*xattrname) == namelen) + && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) { + found = 1; + break; + } + if (strncmp(req_xattr_name, + *xattrname + XATTR_SECURITY_PREFIX_LEN, + strlen(req_xattr_name)) == 0) { + found = 1; + break; + } + } + return found; +} + +/** + * evm_verifyxattr - verify the integrity of the requested xattr + * @dentry: object of the verify xattr + * @xattr_name: requested xattr + * @xattr_value: requested xattr value + * @xattr_value_len: requested xattr value length + * + * Calculate the HMAC for the given dentry and verify it against the stored + * security.evm xattr. For performance, use the xattr value and length + * previously retrieved to calculate the HMAC. + * + * Returns the xattr integrity status. + * + * This function requires the caller to lock the inode's i_mutex before it + * is executed. 
+ */ +enum integrity_status evm_verifyxattr(struct dentry *dentry, + const char *xattr_name, + void *xattr_value, size_t xattr_value_len, + struct integrity_iint_cache *iint) +{ + if (!evm_initialized || !evm_protected_xattr(xattr_name)) + return INTEGRITY_UNKNOWN; + + if (!iint) { + iint = integrity_iint_find(dentry->d_inode); + if (!iint) + return INTEGRITY_UNKNOWN; + } + return evm_verify_hmac(dentry, xattr_name, xattr_value, + xattr_value_len, iint); +} +EXPORT_SYMBOL_GPL(evm_verifyxattr); + +/* + * evm_verify_current_integrity - verify the dentry's metadata integrity + * @dentry: pointer to the affected dentry + * + * Verify and return the dentry's metadata integrity. The exceptions are + * before EVM is initialized or in 'fix' mode. + */ +static enum integrity_status evm_verify_current_integrity(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + + if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode) + return 0; + return evm_verify_hmac(dentry, NULL, NULL, 0, NULL); +} + +/* + * evm_protect_xattr - protect the EVM extended attribute + * + * Prevent security.evm from being modified or removed without the + * necessary permissions or when the existing value is invalid. + * + * The posix xattr acls are 'system' prefixed, which normally would not + * affect security.evm. An interesting side affect of writing posix xattr + * acls is their modifying of the i_mode, which is included in security.evm. + * For posix xattr acls only, permit security.evm, even if it currently + * doesn't exist, to be updated. + */ +static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len) +{ + enum integrity_status evm_status; + + if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + } else if (!evm_protected_xattr(xattr_name)) { + if (!posix_xattr_acl(xattr_name)) + return 0; + evm_status = evm_verify_current_integrity(dentry); + if ((evm_status == INTEGRITY_PASS) || + (evm_status == INTEGRITY_NOXATTRS)) + return 0; + goto out; + } + evm_status = evm_verify_current_integrity(dentry); +out: + if (evm_status != INTEGRITY_PASS) + integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode, + dentry->d_name.name, "appraise_metadata", + integrity_status_msg[evm_status], + -EPERM, 0); + return evm_status == INTEGRITY_PASS ? 0 : -EPERM; +} + +/** + * evm_inode_setxattr - protect the EVM extended attribute + * @dentry: pointer to the affected dentry + * @xattr_name: pointer to the affected extended attribute name + * @xattr_value: pointer to the new extended attribute value + * @xattr_value_len: pointer to the new extended attribute value length + * + * Before allowing the 'security.evm' protected xattr to be updated, + * verify the existing value is valid. As only the kernel should have + * access to the EVM encrypted key needed to calculate the HMAC, prevent + * userspace from writing HMAC value. Writing 'security.evm' requires + * requires CAP_SYS_ADMIN privileges. 
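What this guards against is easy to demonstrate from userspace: replaying one file's security.evm onto another file fails, with -EPERM for unprivileged callers and, for an HMAC-type value, even with CAP_SYS_ADMIN. A small illustrative test:

#include <stdio.h>
#include <sys/types.h>
#include <sys/xattr.h>

/*
 * Try to copy one file's security.evm onto another ("cut & paste"
 * attack).  With EVM enabled the setxattr() is expected to fail.
 */
int main(int argc, char **argv)
{
	unsigned char val[512];
	ssize_t len;

	if (argc != 3)
		return 1;
	len = getxattr(argv[1], "security.evm", val, sizeof(val));
	if (len <= 0) {
		perror("getxattr security.evm");
		return 1;
	}
	if (setxattr(argv[2], "security.evm", val, len, 0) < 0)
		perror("setxattr security.evm (expected to fail)");
	return 0;
}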
+ */ +int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len) +{ + const struct evm_ima_xattr_data *xattr_data = xattr_value; + + if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0) + && (xattr_data->type == EVM_XATTR_HMAC)) + return -EPERM; + return evm_protect_xattr(dentry, xattr_name, xattr_value, + xattr_value_len); +} + +/** + * evm_inode_removexattr - protect the EVM extended attribute + * @dentry: pointer to the affected dentry + * @xattr_name: pointer to the affected extended attribute name + * + * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that + * the current value is valid. + */ +int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name) +{ + return evm_protect_xattr(dentry, xattr_name, NULL, 0); +} + +/** + * evm_inode_post_setxattr - update 'security.evm' to reflect the changes + * @dentry: pointer to the affected dentry + * @xattr_name: pointer to the affected extended attribute name + * @xattr_value: pointer to the new extended attribute value + * @xattr_value_len: pointer to the new extended attribute value length + * + * Update the HMAC stored in 'security.evm' to reflect the change. + * + * No need to take the i_mutex lock here, as this function is called from + * __vfs_setxattr_noperm(). The caller of which has taken the inode's + * i_mutex lock. + */ +void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len) +{ + if (!evm_initialized || (!evm_protected_xattr(xattr_name) + && !posix_xattr_acl(xattr_name))) + return; + + evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len); + return; +} + +/** + * evm_inode_post_removexattr - update 'security.evm' after removing the xattr + * @dentry: pointer to the affected dentry + * @xattr_name: pointer to the affected extended attribute name + * + * Update the HMAC stored in 'security.evm' to reflect removal of the xattr. + */ +void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name) +{ + struct inode *inode = dentry->d_inode; + + if (!evm_initialized || !evm_protected_xattr(xattr_name)) + return; + + mutex_lock(&inode->i_mutex); + evm_update_evmxattr(dentry, xattr_name, NULL, 0); + mutex_unlock(&inode->i_mutex); + return; +} + +/** + * evm_inode_setattr - prevent updating an invalid EVM extended attribute + * @dentry: pointer to the affected dentry + */ +int evm_inode_setattr(struct dentry *dentry, struct iattr *attr) +{ + unsigned int ia_valid = attr->ia_valid; + enum integrity_status evm_status; + + if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))) + return 0; + evm_status = evm_verify_current_integrity(dentry); + if ((evm_status == INTEGRITY_PASS) || + (evm_status == INTEGRITY_NOXATTRS)) + return 0; + integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode, + dentry->d_name.name, "appraise_metadata", + integrity_status_msg[evm_status], -EPERM, 0); + return -EPERM; +} + +/** + * evm_inode_post_setattr - update 'security.evm' after modifying metadata + * @dentry: pointer to the affected dentry + * @ia_valid: for the UID and GID status + * + * For now, update the HMAC stored in 'security.evm' to reflect UID/GID + * changes. + * + * This function is called from notify_change(), which expects the caller + * to lock the inode's i_mutex. 
+ */ +void evm_inode_post_setattr(struct dentry *dentry, int ia_valid) +{ + if (!evm_initialized) + return; + + if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)) + evm_update_evmxattr(dentry, NULL, NULL, 0); + return; +} + +/* + * evm_inode_init_security - initializes security.evm + */ +int evm_inode_init_security(struct inode *inode, + const struct xattr *lsm_xattr, + struct xattr *evm_xattr) +{ + struct evm_ima_xattr_data *xattr_data; + int rc; + + if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name)) + return 0; + + xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS); + if (!xattr_data) + return -ENOMEM; + + xattr_data->type = EVM_XATTR_HMAC; + rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest); + if (rc < 0) + goto out; + + evm_xattr->value = xattr_data; + evm_xattr->value_len = sizeof(*xattr_data); + evm_xattr->name = XATTR_EVM_SUFFIX; + return 0; +out: + kfree(xattr_data); + return rc; +} +EXPORT_SYMBOL_GPL(evm_inode_init_security); + +static int __init init_evm(void) +{ + int error; + + evm_init_config(); + + error = evm_init_secfs(); + if (error < 0) { + pr_info("Error registering secfs\n"); + goto err; + } + + return 0; +err: + return error; +} + +/* + * evm_display_config - list the EVM protected security extended attributes + */ +static int __init evm_display_config(void) +{ + char **xattrname; + + for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) + pr_info("%s\n", *xattrname); + return 0; +} + +pure_initcall(evm_display_config); +late_initcall(init_evm); + +MODULE_DESCRIPTION("Extended Verification Module"); +MODULE_LICENSE("GPL"); diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c new file mode 100644 index 00000000000..46408b9e62e --- /dev/null +++ b/security/integrity/evm/evm_posix_acl.c @@ -0,0 +1,27 @@ +/* + * Copyright (C) 2011 IBM Corporation + * + * Author: + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#include <linux/module.h> +#include <linux/xattr.h> +#include <linux/evm.h> + +int posix_xattr_acl(const char *xattr) +{ + int xattr_len = strlen(xattr); + + if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len) + && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0)) + return 1; + if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len) + && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0)) + return 1; + return 0; +} diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c new file mode 100644 index 00000000000..cf12a04717d --- /dev/null +++ b/security/integrity/evm/evm_secfs.c @@ -0,0 +1,104 @@ +/* + * Copyright (C) 2010 IBM Corporation + * + * Authors: + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
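The file added here, evm_secfs.c (continued below), exposes a single securityfs entry through which userspace signals that the HMAC key is loaded. A hedged sketch of the usual bring-up sequence, assuming securityfs is mounted at /sys/kernel/security and using an illustrative encrypted-key payload (the "kmk" master key name and key length are not taken from this patch):

/* link with -lkeyutils */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <keyutils.h>

int main(void)
{
	static const char payload[] = "new trusted:kmk 64";
	key_serial_t key;
	int fd;

	/* Load the "evm-key" encrypted key the kernel will request_key(). */
	key = add_key("encrypted", "evm-key", payload, strlen(payload),
		      KEY_SPEC_USER_KEYRING);
	if (key < 0) {
		perror("add_key");
		return 1;
	}

	/* Tell the kernel the key is in place (evm_write_key() below). */
	fd = open("/sys/kernel/security/evm", O_WRONLY);
	if (fd < 0 || write(fd, "1", 1) != 1) {
		perror("enable evm");
		return 1;
	}
	close(fd);
	return 0;
}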
+ * + * File: evm_secfs.c + * - Used to signal when key is on keyring + * - Get the key and enable EVM + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/uaccess.h> +#include <linux/module.h> +#include "evm.h" + +static struct dentry *evm_init_tpm; + +/** + * evm_read_key - read() for <securityfs>/evm + * + * @filp: file pointer, not actually used + * @buf: where to put the result + * @count: maximum to send along + * @ppos: where to start + * + * Returns number of bytes read or error code, as appropriate + */ +static ssize_t evm_read_key(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + char temp[80]; + ssize_t rc; + + if (*ppos != 0) + return 0; + + sprintf(temp, "%d", evm_initialized); + rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); + + return rc; +} + +/** + * evm_write_key - write() for <securityfs>/evm + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start + * + * Used to signal that key is on the kernel key ring. + * - get the integrity hmac key from the kernel key ring + * - create list of hmac protected extended attributes + * Returns number of bytes written or error code, as appropriate + */ +static ssize_t evm_write_key(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + char temp[80]; + int i, error; + + if (!capable(CAP_SYS_ADMIN) || evm_initialized) + return -EPERM; + + if (count >= sizeof(temp) || count == 0) + return -EINVAL; + + if (copy_from_user(temp, buf, count) != 0) + return -EFAULT; + + temp[count] = '\0'; + + if ((sscanf(temp, "%d", &i) != 1) || (i != 1)) + return -EINVAL; + + error = evm_init_key(); + if (!error) { + evm_initialized = 1; + pr_info("initialized\n"); + } else + pr_err("initialization failed\n"); + return count; +} + +static const struct file_operations evm_key_ops = { + .read = evm_read_key, + .write = evm_write_key, +}; + +int __init evm_init_secfs(void) +{ + int error = 0; + + evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP, + NULL, NULL, &evm_key_ops); + if (!evm_init_tpm || IS_ERR(evm_init_tpm)) + error = -EFAULT; + return error; +} diff --git a/security/integrity/iint.c b/security/integrity/iint.c new file mode 100644 index 00000000000..a521edf4cbd --- /dev/null +++ b/security/integrity/iint.c @@ -0,0 +1,172 @@ +/* + * Copyright (C) 2008 IBM Corporation + * + * Authors: + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: integrity_iint.c + * - implements the integrity hooks: integrity_inode_alloc, + * integrity_inode_free + * - cache integrity information associated with an inode + * using a rbtree tree. 
+ */ +#include <linux/slab.h> +#include <linux/module.h> +#include <linux/spinlock.h> +#include <linux/rbtree.h> +#include "integrity.h" + +static struct rb_root integrity_iint_tree = RB_ROOT; +static DEFINE_RWLOCK(integrity_iint_lock); +static struct kmem_cache *iint_cache __read_mostly; + +int iint_initialized; + +/* + * __integrity_iint_find - return the iint associated with an inode + */ +static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode) +{ + struct integrity_iint_cache *iint; + struct rb_node *n = integrity_iint_tree.rb_node; + + while (n) { + iint = rb_entry(n, struct integrity_iint_cache, rb_node); + + if (inode < iint->inode) + n = n->rb_left; + else if (inode > iint->inode) + n = n->rb_right; + else + break; + } + if (!n) + return NULL; + + return iint; +} + +/* + * integrity_iint_find - return the iint associated with an inode + */ +struct integrity_iint_cache *integrity_iint_find(struct inode *inode) +{ + struct integrity_iint_cache *iint; + + if (!IS_IMA(inode)) + return NULL; + + read_lock(&integrity_iint_lock); + iint = __integrity_iint_find(inode); + read_unlock(&integrity_iint_lock); + + return iint; +} + +static void iint_free(struct integrity_iint_cache *iint) +{ + kfree(iint->ima_hash); + iint->ima_hash = NULL; + iint->version = 0; + iint->flags = 0UL; + iint->ima_file_status = INTEGRITY_UNKNOWN; + iint->ima_mmap_status = INTEGRITY_UNKNOWN; + iint->ima_bprm_status = INTEGRITY_UNKNOWN; + iint->ima_module_status = INTEGRITY_UNKNOWN; + iint->evm_status = INTEGRITY_UNKNOWN; + kmem_cache_free(iint_cache, iint); +} + +/** + * integrity_inode_get - find or allocate an iint associated with an inode + * @inode: pointer to the inode + * @return: allocated iint + * + * Caller must lock i_mutex + */ +struct integrity_iint_cache *integrity_inode_get(struct inode *inode) +{ + struct rb_node **p; + struct rb_node *node, *parent = NULL; + struct integrity_iint_cache *iint, *test_iint; + + iint = integrity_iint_find(inode); + if (iint) + return iint; + + iint = kmem_cache_alloc(iint_cache, GFP_NOFS); + if (!iint) + return NULL; + + write_lock(&integrity_iint_lock); + + p = &integrity_iint_tree.rb_node; + while (*p) { + parent = *p; + test_iint = rb_entry(parent, struct integrity_iint_cache, + rb_node); + if (inode < test_iint->inode) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + + iint->inode = inode; + node = &iint->rb_node; + inode->i_flags |= S_IMA; + rb_link_node(node, parent, p); + rb_insert_color(node, &integrity_iint_tree); + + write_unlock(&integrity_iint_lock); + return iint; +} + +/** + * integrity_inode_free - called on security_inode_free + * @inode: pointer to the inode + * + * Free the integrity information(iint) associated with an inode. 
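A condensed sketch of how callers are expected to use the two lookup flavours above (hypothetical helper name; assumes the usual kernel context with integrity.h included):

/*
 * Illustrative kernel-side sketch: integrity_iint_find() never
 * allocates and may return NULL, while integrity_inode_get()
 * allocates on demand and, as documented above, needs i_mutex.
 */
static struct integrity_iint_cache *iint_get_sketch(struct inode *inode)
{
	struct integrity_iint_cache *iint;

	iint = integrity_iint_find(inode);	/* NULL if never cached */
	if (iint)
		return iint;

	mutex_lock(&inode->i_mutex);
	iint = integrity_inode_get(inode);	/* NULL on allocation failure */
	mutex_unlock(&inode->i_mutex);
	return iint;
}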
+ */ +void integrity_inode_free(struct inode *inode) +{ + struct integrity_iint_cache *iint; + + if (!IS_IMA(inode)) + return; + + write_lock(&integrity_iint_lock); + iint = __integrity_iint_find(inode); + rb_erase(&iint->rb_node, &integrity_iint_tree); + write_unlock(&integrity_iint_lock); + + iint_free(iint); +} + +static void init_once(void *foo) +{ + struct integrity_iint_cache *iint = foo; + + memset(iint, 0, sizeof(*iint)); + iint->version = 0; + iint->flags = 0UL; + iint->ima_file_status = INTEGRITY_UNKNOWN; + iint->ima_mmap_status = INTEGRITY_UNKNOWN; + iint->ima_bprm_status = INTEGRITY_UNKNOWN; + iint->ima_module_status = INTEGRITY_UNKNOWN; + iint->evm_status = INTEGRITY_UNKNOWN; +} + +static int __init integrity_iintcache_init(void) +{ + iint_cache = + kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache), + 0, SLAB_PANIC, init_once); + iint_initialized = 1; + return 0; +} +security_initcall(integrity_iintcache_init); diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig index b6ecfd4d8d7..81a27971d88 100644 --- a/security/integrity/ima/Kconfig +++ b/security/integrity/ima/Kconfig @@ -3,13 +3,16 @@ config IMA bool "Integrity Measurement Architecture(IMA)" depends on SECURITY + select INTEGRITY select SECURITYFS select CRYPTO select CRYPTO_HMAC select CRYPTO_MD5 select CRYPTO_SHA1 - select TCG_TPM if !S390 - select TCG_TIS if TCG_TPM + select CRYPTO_HASH_INFO + select TCG_TPM if HAS_IOMEM && !UML + select TCG_TIS if TCG_TPM && X86 + select TCG_IBMVTPM if TCG_TPM && PPC64 help The Trusted Computing Group(TCG) runtime Integrity Measurement Architecture(IMA) maintains a list of hash @@ -36,20 +39,87 @@ config IMA_MEASURE_PCR_IDX that IMA uses to maintain the integrity aggregate of the measurement list. If unsure, use the default 10. -config IMA_AUDIT - bool - depends on IMA - default y - help - This option adds a kernel parameter 'ima_audit', which - allows informational auditing messages to be enabled - at boot. If this option is selected, informational integrity - auditing messages can be enabled with 'ima_audit=1' on - the kernel command line. - config IMA_LSM_RULES bool depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK) default y help Disabling this option will disregard LSM based policy rules. + +choice + prompt "Default template" + default IMA_NG_TEMPLATE + depends on IMA + help + Select the default IMA measurement template. + + The original 'ima' measurement list template contains a + hash, defined as 20 bytes, and a null terminated pathname, + limited to 255 characters. The 'ima-ng' measurement list + template permits both larger hash digests and longer + pathnames. + + config IMA_TEMPLATE + bool "ima" + config IMA_NG_TEMPLATE + bool "ima-ng (default)" + config IMA_SIG_TEMPLATE + bool "ima-sig" +endchoice + +config IMA_DEFAULT_TEMPLATE + string + depends on IMA + default "ima" if IMA_TEMPLATE + default "ima-ng" if IMA_NG_TEMPLATE + default "ima-sig" if IMA_SIG_TEMPLATE + +choice + prompt "Default integrity hash algorithm" + default IMA_DEFAULT_HASH_SHA1 + depends on IMA + help + Select the default hash algorithm used for the measurement + list, integrity appraisal and audit log. The compiled default + hash algorithm can be overwritten using the kernel command + line 'ima_hash=' option. 
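For example, a boot command line overriding the compiled-in defaults might carry the following (illustrative; 'ima_hash=' is the option referred to above and 'ima_appraise=' is parsed in ima_appraise.c further down):

    ima_hash=sha256 ima_appraise=fix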
+ + config IMA_DEFAULT_HASH_SHA1 + bool "SHA1 (default)" + depends on CRYPTO_SHA1 + + config IMA_DEFAULT_HASH_SHA256 + bool "SHA256" + depends on CRYPTO_SHA256 && !IMA_TEMPLATE + + config IMA_DEFAULT_HASH_SHA512 + bool "SHA512" + depends on CRYPTO_SHA512 && !IMA_TEMPLATE + + config IMA_DEFAULT_HASH_WP512 + bool "WP512" + depends on CRYPTO_WP512 && !IMA_TEMPLATE +endchoice + +config IMA_DEFAULT_HASH + string + depends on IMA + default "sha1" if IMA_DEFAULT_HASH_SHA1 + default "sha256" if IMA_DEFAULT_HASH_SHA256 + default "sha512" if IMA_DEFAULT_HASH_SHA512 + default "wp512" if IMA_DEFAULT_HASH_WP512 + +config IMA_APPRAISE + bool "Appraise integrity measurements" + depends on IMA + default n + help + This option enables local measurement integrity appraisal. + It requires the system to be labeled with a security extended + attribute containing the file hash measurement. To protect + the security extended attributes from offline attack, enable + and configure EVM. + + For more information on integrity appraisal refer to: + <http://linux-ima.sourceforge.net> + If unsure, say N. diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile index 787c4cb916c..d79263d2fdb 100644 --- a/security/integrity/ima/Makefile +++ b/security/integrity/ima/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_IMA) += ima.o ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \ - ima_policy.o ima_iint.o ima_audit.o + ima_policy.o ima_template.o ima_template_lib.o +ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h index ac79032bdf2..f79fa8be203 100644 --- a/security/integrity/ima/ima.h +++ b/security/integrity/ima/ima.h @@ -24,33 +24,61 @@ #include <linux/tpm.h> #include <linux/audit.h> -enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII }; +#include "../integrity.h" + +enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN, + IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII }; enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 }; /* digest size for IMA, fits SHA1 or MD5 */ -#define IMA_DIGEST_SIZE 20 +#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE #define IMA_EVENT_NAME_LEN_MAX 255 #define IMA_HASH_BITS 9 #define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS) +#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16 +#define IMA_TEMPLATE_NUM_FIELDS_MAX 15 + +#define IMA_TEMPLATE_IMA_NAME "ima" +#define IMA_TEMPLATE_IMA_FMT "d|n" + /* set during initialization */ -extern int iint_initialized; extern int ima_initialized; extern int ima_used_chip; -extern char *ima_hash; +extern int ima_hash_algo; +extern int ima_appraise; + +/* IMA template field data definition */ +struct ima_field_data { + u8 *data; + u32 len; +}; -/* IMA inode template definition */ -struct ima_template_data { - u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */ - char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */ +/* IMA template field definition */ +struct ima_template_field { + const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN]; + int (*field_init) (struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_field_data *field_data); + void (*field_show) (struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +}; + +/* IMA template descriptor definition */ +struct ima_template_desc { + char *name; + char *fmt; + int num_fields; + struct ima_template_field **fields; }; struct ima_template_entry { - u8 digest[IMA_DIGEST_SIZE]; /* 
sha1 or md5 measurement hash */ - const char *template_name; - int template_len; - struct ima_template_data template; + u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */ + struct ima_template_desc *template_desc; /* template descriptor */ + u32 template_data_len; + struct ima_field_data template_data[0]; /* template related data */ }; struct ima_queue_entry { @@ -60,11 +88,6 @@ struct ima_queue_entry { }; extern struct list_head ima_measurements; /* list of all measurements */ -/* declarations */ -void integrity_audit_msg(int audit_msgno, struct inode *inode, - const unsigned char *fname, const char *op, - const char *cause, int result, int info); - /* Internal IMA function definitions */ int ima_init(void); void ima_cleanup(void); @@ -72,12 +95,22 @@ int ima_fs_init(void); void ima_fs_cleanup(void); int ima_inode_alloc(struct inode *inode); int ima_add_template_entry(struct ima_template_entry *entry, int violation, - const char *op, struct inode *inode); -int ima_calc_hash(struct file *file, char *digest); -int ima_calc_template_hash(int template_len, void *template, char *digest); -int ima_calc_boot_aggregate(char *digest); -void ima_add_violation(struct inode *inode, const unsigned char *filename, + const char *op, struct inode *inode, + const unsigned char *filename); +int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash); +int ima_calc_field_array_hash(struct ima_field_data *field_data, + struct ima_template_desc *desc, int num_fields, + struct ima_digest_data *hash); +int __init ima_calc_boot_aggregate(struct ima_digest_data *hash); +void ima_add_violation(struct file *file, const unsigned char *filename, const char *op, const char *cause); +int ima_init_crypto(void); +void ima_putc(struct seq_file *m, void *data, int datalen); +void ima_print_digest(struct seq_file *m, u8 *digest, int size); +struct ima_template_desc *ima_template_desc_current(void); +int ima_init_template(void); + +int ima_init_template(void); /* * used to protect h_table and sha_table @@ -96,45 +129,105 @@ static inline unsigned long ima_hash_key(u8 *digest) return hash_long(*digest, IMA_HASH_BITS); } -/* iint cache flags */ -#define IMA_MEASURED 0x01 - -/* integrity data associated with an inode */ -struct ima_iint_cache { - struct rb_node rb_node; /* rooted in ima_iint_tree */ - struct inode *inode; /* back pointer to inode in question */ - u64 version; /* track inode changes */ - unsigned char flags; - u8 digest[IMA_DIGEST_SIZE]; - struct mutex mutex; /* protects: version, flags, digest */ -}; - /* LIM API function definitions */ -int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode, - int mask, int function); -int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file); -void ima_store_measurement(struct ima_iint_cache *iint, struct file *file, +int ima_get_action(struct inode *inode, int mask, int function); +int ima_must_measure(struct inode *inode, int mask, int function); +int ima_collect_measurement(struct integrity_iint_cache *iint, + struct file *file, + struct evm_ima_xattr_data **xattr_value, + int *xattr_len); +void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len); +void ima_audit_measurement(struct integrity_iint_cache *iint, const unsigned char *filename); +int ima_alloc_init_template(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int 
xattr_len, struct ima_template_entry **entry); int ima_store_template(struct ima_template_entry *entry, int violation, - struct inode *inode); -void ima_template_show(struct seq_file *m, void *e, - enum ima_show_type show); + struct inode *inode, const unsigned char *filename); +void ima_free_template_entry(struct ima_template_entry *entry); +const char *ima_d_path(struct path *path, char **pathbuf); /* rbtree tree calls to lookup, insert, delete * integrity data associated with an inode. */ -struct ima_iint_cache *ima_iint_insert(struct inode *inode); -struct ima_iint_cache *ima_iint_find(struct inode *inode); +struct integrity_iint_cache *integrity_iint_insert(struct inode *inode); +struct integrity_iint_cache *integrity_iint_find(struct inode *inode); /* IMA policy related functions */ -enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK }; +enum ima_hooks { FILE_CHECK = 1, MMAP_CHECK, BPRM_CHECK, MODULE_CHECK, POST_SETATTR }; -int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask); +int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, + int flags); void ima_init_policy(void); void ima_update_policy(void); ssize_t ima_parse_add_rule(char *); void ima_delete_rules(void); +/* Appraise integrity measurements */ +#define IMA_APPRAISE_ENFORCE 0x01 +#define IMA_APPRAISE_FIX 0x02 +#define IMA_APPRAISE_MODULES 0x04 + +#ifdef CONFIG_IMA_APPRAISE +int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len); +int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func); +void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file); +enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, + int func); +void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_digest_data *hash); +int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value); + +#else +static inline int ima_appraise_measurement(int func, + struct integrity_iint_cache *iint, + struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len) +{ + return INTEGRITY_UNKNOWN; +} + +static inline int ima_must_appraise(struct inode *inode, int mask, + enum ima_hooks func) +{ + return 0; +} + +static inline void ima_update_xattr(struct integrity_iint_cache *iint, + struct file *file) +{ +} + +static inline enum integrity_status ima_get_cache_status(struct integrity_iint_cache + *iint, int func) +{ + return INTEGRITY_UNKNOWN; +} + +static inline void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, + int xattr_len, + struct ima_digest_data *hash) +{ +} + +static inline int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value) +{ + return 0; +} + +#endif + /* LSM based policy rules require audit */ #ifdef CONFIG_IMA_LSM_RULES diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c index d3963de6003..d9cd5ce14d2 100644 --- a/security/integrity/ima/ima_api.c +++ b/security/integrity/ima/ima_api.c @@ -9,14 +9,68 @@ * License. * * File: ima_api.c - * Implements must_measure, collect_measurement, store_measurement, - * and store_template. + * Implements must_appraise_or_measure, collect_measurement, + * appraise_measurement, store_measurement and store_template. 
*/ #include <linux/module.h> #include <linux/slab.h> - +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/xattr.h> +#include <linux/evm.h> +#include <crypto/hash_info.h> #include "ima.h" -static const char *IMA_TEMPLATE_NAME = "ima"; + +/* + * ima_free_template_entry - free an existing template entry + */ +void ima_free_template_entry(struct ima_template_entry *entry) +{ + int i; + + for (i = 0; i < entry->template_desc->num_fields; i++) + kfree(entry->template_data[i].data); + + kfree(entry); +} + +/* + * ima_alloc_init_template - create and initialize a new template entry + */ +int ima_alloc_init_template(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_template_entry **entry) +{ + struct ima_template_desc *template_desc = ima_template_desc_current(); + int i, result = 0; + + *entry = kzalloc(sizeof(**entry) + template_desc->num_fields * + sizeof(struct ima_field_data), GFP_NOFS); + if (!*entry) + return -ENOMEM; + + (*entry)->template_desc = template_desc; + for (i = 0; i < template_desc->num_fields; i++) { + struct ima_template_field *field = template_desc->fields[i]; + u32 len; + + result = field->field_init(iint, file, filename, + xattr_value, xattr_len, + &((*entry)->template_data[i])); + if (result != 0) + goto out; + + len = (*entry)->template_data[i].len; + (*entry)->template_data_len += sizeof(len); + (*entry)->template_data_len += len; + } + return 0; +out: + ima_free_template_entry(*entry); + *entry = NULL; + return result; +} /* * ima_store_template - store ima template measurements @@ -35,28 +89,35 @@ static const char *IMA_TEMPLATE_NAME = "ima"; * Returns 0 on success, error code otherwise */ int ima_store_template(struct ima_template_entry *entry, - int violation, struct inode *inode) + int violation, struct inode *inode, + const unsigned char *filename) { - const char *op = "add_template_measure"; - const char *audit_cause = "hashing_error"; + static const char op[] = "add_template_measure"; + static const char audit_cause[] = "hashing_error"; + char *template_name = entry->template_desc->name; int result; - - memset(entry->digest, 0, sizeof(entry->digest)); - entry->template_name = IMA_TEMPLATE_NAME; - entry->template_len = sizeof(entry->template); + struct { + struct ima_digest_data hdr; + char digest[TPM_DIGEST_SIZE]; + } hash; if (!violation) { - result = ima_calc_template_hash(entry->template_len, - &entry->template, - entry->digest); + int num_fields = entry->template_desc->num_fields; + + /* this function uses default algo */ + hash.hdr.algo = HASH_ALGO_SHA1; + result = ima_calc_field_array_hash(&entry->template_data[0], + entry->template_desc, + num_fields, &hash.hdr); if (result < 0) { integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, - entry->template_name, op, + template_name, op, audit_cause, result, 0); return result; } + memcpy(entry->digest, hash.hdr.digest, hash.hdr.length); } - result = ima_add_template_entry(entry, violation, op, inode); + result = ima_add_template_entry(entry, violation, op, inode, filename); return result; } @@ -67,60 +128,60 @@ int ima_store_template(struct ima_template_entry *entry, * By extending the PCR with 0xFF's instead of with zeroes, the PCR * value is invalidated. 
*/ -void ima_add_violation(struct inode *inode, const unsigned char *filename, +void ima_add_violation(struct file *file, const unsigned char *filename, const char *op, const char *cause) { struct ima_template_entry *entry; + struct inode *inode = file_inode(file); int violation = 1; int result; /* can overflow, only indicator */ atomic_long_inc(&ima_htable.violations); - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { + result = ima_alloc_init_template(NULL, file, filename, + NULL, 0, &entry); + if (result < 0) { result = -ENOMEM; goto err_out; } - memset(&entry->template, 0, sizeof(entry->template)); - strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX); - result = ima_store_template(entry, violation, inode); + result = ima_store_template(entry, violation, inode, filename); if (result < 0) - kfree(entry); + ima_free_template_entry(entry); err_out: integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, op, cause, result, 0); } /** - * ima_must_measure - measure decision based on policy. + * ima_get_action - appraise & measure decision based on policy. * @inode: pointer to inode to measure * @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE) - * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP) + * @function: calling function (FILE_CHECK, BPRM_CHECK, MMAP_CHECK, MODULE_CHECK) * * The policy is defined in terms of keypairs: - * subj=, obj=, type=, func=, mask=, fsmagic= + * subj=, obj=, type=, func=, mask=, fsmagic= * subj,obj, and type: are LSM specific. - * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP - * mask: contains the permission mask + * func: FILE_CHECK | BPRM_CHECK | MMAP_CHECK | MODULE_CHECK + * mask: contains the permission mask * fsmagic: hex value * - * Must be called with iint->mutex held. + * Returns IMA_MEASURE, IMA_APPRAISE mask. * - * Return 0 to measure. Return 1 if already measured. - * For matching a DONT_MEASURE policy, no policy, or other - * error, return an error code. -*/ -int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode, - int mask, int function) + */ +int ima_get_action(struct inode *inode, int mask, int function) { - int must_measure; + int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE; + + if (!ima_appraise) + flags &= ~IMA_APPRAISE; - if (iint && iint->flags & IMA_MEASURED) - return 1; + return ima_match_policy(inode, function, mask, flags); +} - must_measure = ima_match_policy(inode, function, mask); - return must_measure ? 
0 : -EACCES; +int ima_must_measure(struct inode *inode, int mask, int function) +{ + return ima_match_policy(inode, function, mask, IMA_MEASURE); } /* @@ -133,18 +194,57 @@ int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode, * * Return 0 on success, error code otherwise */ -int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file) +int ima_collect_measurement(struct integrity_iint_cache *iint, + struct file *file, + struct evm_ima_xattr_data **xattr_value, + int *xattr_len) { - int result = -EEXIST; + const char *audit_cause = "failed"; + struct inode *inode = file_inode(file); + const char *filename = file->f_dentry->d_name.name; + int result = 0; + struct { + struct ima_digest_data hdr; + char digest[IMA_MAX_DIGEST_SIZE]; + } hash; + + if (xattr_value) + *xattr_len = ima_read_xattr(file->f_dentry, xattr_value); - if (!(iint->flags & IMA_MEASURED)) { - u64 i_version = file->f_dentry->d_inode->i_version; + if (!(iint->flags & IMA_COLLECTED)) { + u64 i_version = file_inode(file)->i_version; - memset(iint->digest, 0, IMA_DIGEST_SIZE); - result = ima_calc_hash(file, iint->digest); - if (!result) - iint->version = i_version; + if (file->f_flags & O_DIRECT) { + audit_cause = "failed(directio)"; + result = -EACCES; + goto out; + } + + /* use default hash algorithm */ + hash.hdr.algo = ima_hash_algo; + + if (xattr_value) + ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr); + + result = ima_calc_file_hash(file, &hash.hdr); + if (!result) { + int length = sizeof(hash.hdr) + hash.hdr.length; + void *tmpbuf = krealloc(iint->ima_hash, length, + GFP_NOFS); + if (tmpbuf) { + iint->ima_hash = tmpbuf; + memcpy(iint->ima_hash, &hash, length); + iint->version = i_version; + iint->flags |= IMA_COLLECTED; + } else + result = -ENOMEM; + } } +out: + if (result) + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, + filename, "collect_data", audit_cause, + result, 0); return result; } @@ -156,36 +256,89 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file) * * We only get here if the inode has not already been measured, * but the measurement could already exist: - * - multiple copies of the same file on either the same or + * - multiple copies of the same file on either the same or * different filesystems. * - the inode was previously flushed as well as the iint info, * containing the hashing info. * * Must be called with iint->mutex held. 
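These helpers are driven from IMA's main measurement path in ima_main.c, elsewhere in this patch. A simplified sketch of the per-file flow, with the hook-specific plumbing and most error handling omitted (assumes the usual ima.h context; the function name is illustrative):

/* Hypothetical sketch of the per-file flow; 'function' is one of the
 * ima_hooks values (FILE_CHECK, BPRM_CHECK, ...). */
static int measurement_flow_sketch(struct file *file, int mask, int function,
				   const unsigned char *filename)
{
	struct inode *inode = file_inode(file);
	struct integrity_iint_cache *iint;
	struct evm_ima_xattr_data *xattr_value = NULL;
	int xattr_len = 0, rc, action;

	action = ima_get_action(inode, mask, function);
	if (!action)
		return 0;			/* no matching policy rule */

	iint = integrity_inode_get(inode);	/* i_mutex held by caller */
	if (!iint)
		return -ENOMEM;

	mutex_lock(&iint->mutex);
	rc = ima_collect_measurement(iint, file, &xattr_value, &xattr_len);
	if (!rc) {
		if (action & IMA_MEASURE)
			ima_store_measurement(iint, file, filename,
					      xattr_value, xattr_len);
		if (action & IMA_APPRAISE)
			rc = ima_appraise_measurement(function, iint, file,
						      filename, xattr_value,
						      xattr_len);
		if (action & IMA_AUDIT)
			ima_audit_measurement(iint, filename);
	}
	mutex_unlock(&iint->mutex);
	kfree(xattr_value);
	return rc;
}

The IMA_MEASURE, IMA_APPRAISE and IMA_AUDIT bits come from ima_get_action() above, so a single policy walk decides all three actions for a file.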
*/ -void ima_store_measurement(struct ima_iint_cache *iint, struct file *file, - const unsigned char *filename) +void ima_store_measurement(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len) { - const char *op = "add_template_measure"; - const char *audit_cause = "ENOMEM"; + static const char op[] = "add_template_measure"; + static const char audit_cause[] = "ENOMEM"; int result = -ENOMEM; - struct inode *inode = file->f_dentry->d_inode; + struct inode *inode = file_inode(file); struct ima_template_entry *entry; int violation = 0; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) { + if (iint->flags & IMA_MEASURED) + return; + + result = ima_alloc_init_template(iint, file, filename, + xattr_value, xattr_len, &entry); + if (result < 0) { integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, op, audit_cause, result, 0); return; } - memset(&entry->template, 0, sizeof(entry->template)); - memcpy(entry->template.digest, iint->digest, IMA_DIGEST_SIZE); - strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX); - result = ima_store_template(entry, violation, inode); - if (!result) + result = ima_store_template(entry, violation, inode, filename); + if (!result || result == -EEXIST) iint->flags |= IMA_MEASURED; - else - kfree(entry); + if (result < 0) + ima_free_template_entry(entry); +} + +void ima_audit_measurement(struct integrity_iint_cache *iint, + const unsigned char *filename) +{ + struct audit_buffer *ab; + char hash[(iint->ima_hash->length * 2) + 1]; + const char *algo_name = hash_algo_name[iint->ima_hash->algo]; + char algo_hash[sizeof(hash) + strlen(algo_name) + 2]; + int i; + + if (iint->flags & IMA_AUDITED) + return; + + for (i = 0; i < iint->ima_hash->length; i++) + hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]); + hash[i * 2] = '\0'; + + ab = audit_log_start(current->audit_context, GFP_KERNEL, + AUDIT_INTEGRITY_RULE); + if (!ab) + return; + + audit_log_format(ab, "file="); + audit_log_untrustedstring(ab, filename); + audit_log_format(ab, " hash="); + snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash); + audit_log_untrustedstring(ab, algo_hash); + + audit_log_task_info(ab, current); + audit_log_end(ab); + + iint->flags |= IMA_AUDITED; +} + +const char *ima_d_path(struct path *path, char **pathbuf) +{ + char *pathname = NULL; + + /* We will allow 11 spaces for ' (deleted)' to be appended */ + *pathbuf = kmalloc(PATH_MAX + 11, GFP_KERNEL); + if (*pathbuf) { + pathname = d_path(path, *pathbuf, PATH_MAX + 11); + if (IS_ERR(pathname)) { + kfree(*pathbuf); + *pathbuf = NULL; + pathname = NULL; + } + } + return pathname ?: (const char *)path->dentry->d_name.name; } diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c new file mode 100644 index 00000000000..d3113d4aaa3 --- /dev/null +++ b/security/integrity/ima/ima_appraise.c @@ -0,0 +1,387 @@ +/* + * Copyright (C) 2011 IBM Corporation + * + * Author: + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
+ */ +#include <linux/module.h> +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/xattr.h> +#include <linux/magic.h> +#include <linux/ima.h> +#include <linux/evm.h> +#include <crypto/hash_info.h> + +#include "ima.h" + +static int __init default_appraise_setup(char *str) +{ + if (strncmp(str, "off", 3) == 0) + ima_appraise = 0; + else if (strncmp(str, "fix", 3) == 0) + ima_appraise = IMA_APPRAISE_FIX; + return 1; +} + +__setup("ima_appraise=", default_appraise_setup); + +/* + * ima_must_appraise - set appraise flag + * + * Return 1 to appraise + */ +int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func) +{ + if (!ima_appraise) + return 0; + + return ima_match_policy(inode, func, mask, IMA_APPRAISE); +} + +static int ima_fix_xattr(struct dentry *dentry, + struct integrity_iint_cache *iint) +{ + int rc, offset; + u8 algo = iint->ima_hash->algo; + + if (algo <= HASH_ALGO_SHA1) { + offset = 1; + iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST; + } else { + offset = 0; + iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG; + iint->ima_hash->xattr.ng.algo = algo; + } + rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA, + &iint->ima_hash->xattr.data[offset], + (sizeof(iint->ima_hash->xattr) - offset) + + iint->ima_hash->length, 0); + return rc; +} + +/* Return specific func appraised cached result */ +enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint, + int func) +{ + switch (func) { + case MMAP_CHECK: + return iint->ima_mmap_status; + case BPRM_CHECK: + return iint->ima_bprm_status; + case MODULE_CHECK: + return iint->ima_module_status; + case FILE_CHECK: + default: + return iint->ima_file_status; + } +} + +static void ima_set_cache_status(struct integrity_iint_cache *iint, + int func, enum integrity_status status) +{ + switch (func) { + case MMAP_CHECK: + iint->ima_mmap_status = status; + break; + case BPRM_CHECK: + iint->ima_bprm_status = status; + break; + case MODULE_CHECK: + iint->ima_module_status = status; + break; + case FILE_CHECK: + default: + iint->ima_file_status = status; + break; + } +} + +static void ima_cache_flags(struct integrity_iint_cache *iint, int func) +{ + switch (func) { + case MMAP_CHECK: + iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED); + break; + case BPRM_CHECK: + iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED); + break; + case MODULE_CHECK: + iint->flags |= (IMA_MODULE_APPRAISED | IMA_APPRAISED); + break; + case FILE_CHECK: + default: + iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED); + break; + } +} + +void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_digest_data *hash) +{ + struct signature_v2_hdr *sig; + + if (!xattr_value || xattr_len < 2) + return; + + switch (xattr_value->type) { + case EVM_IMA_XATTR_DIGSIG: + sig = (typeof(sig))xattr_value; + if (sig->version != 2 || xattr_len <= sizeof(*sig)) + return; + hash->algo = sig->hash_algo; + break; + case IMA_XATTR_DIGEST_NG: + hash->algo = xattr_value->digest[0]; + break; + case IMA_XATTR_DIGEST: + /* this is for backward compatibility */ + if (xattr_len == 21) { + unsigned int zero = 0; + if (!memcmp(&xattr_value->digest[16], &zero, 4)) + hash->algo = HASH_ALGO_MD5; + else + hash->algo = HASH_ALGO_SHA1; + } else if (xattr_len == 17) + hash->algo = HASH_ALGO_MD5; + break; + } +} + +int ima_read_xattr(struct dentry *dentry, + struct evm_ima_xattr_data **xattr_value) +{ + struct inode *inode = dentry->d_inode; + + if (!inode->i_op->getxattr) + return 0; + + return vfs_getxattr_alloc(dentry, 
XATTR_NAME_IMA, (char **)xattr_value, + 0, GFP_NOFS); +} + +/* + * ima_appraise_measurement - appraise file measurement + * + * Call evm_verifyxattr() to verify the integrity of 'security.ima'. + * Assuming success, compare the xattr hash with the collected measurement. + * + * Return 0 on success, error code otherwise + */ +int ima_appraise_measurement(int func, struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len) +{ + static const char op[] = "appraise_data"; + char *cause = "unknown"; + struct dentry *dentry = file->f_dentry; + struct inode *inode = dentry->d_inode; + enum integrity_status status = INTEGRITY_UNKNOWN; + int rc = xattr_len, hash_start = 0; + + if (!ima_appraise) + return 0; + if (!inode->i_op->getxattr) + return INTEGRITY_UNKNOWN; + + if (rc <= 0) { + if (rc && rc != -ENODATA) + goto out; + + cause = "missing-hash"; + status = + (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL; + goto out; + } + + status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint); + if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) { + if ((status == INTEGRITY_NOLABEL) + || (status == INTEGRITY_NOXATTRS)) + cause = "missing-HMAC"; + else if (status == INTEGRITY_FAIL) + cause = "invalid-HMAC"; + goto out; + } + switch (xattr_value->type) { + case IMA_XATTR_DIGEST_NG: + /* first byte contains algorithm id */ + hash_start = 1; + case IMA_XATTR_DIGEST: + if (iint->flags & IMA_DIGSIG_REQUIRED) { + cause = "IMA signature required"; + status = INTEGRITY_FAIL; + break; + } + if (xattr_len - sizeof(xattr_value->type) - hash_start >= + iint->ima_hash->length) + /* xattr length may be longer. md5 hash in previous + version occupied 20 bytes in xattr, instead of 16 + */ + rc = memcmp(&xattr_value->digest[hash_start], + iint->ima_hash->digest, + iint->ima_hash->length); + else + rc = -EINVAL; + if (rc) { + cause = "invalid-hash"; + status = INTEGRITY_FAIL; + break; + } + status = INTEGRITY_PASS; + break; + case EVM_IMA_XATTR_DIGSIG: + iint->flags |= IMA_DIGSIG; + rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA, + (const char *)xattr_value, rc, + iint->ima_hash->digest, + iint->ima_hash->length); + if (rc == -EOPNOTSUPP) { + status = INTEGRITY_UNKNOWN; + } else if (rc) { + cause = "invalid-signature"; + status = INTEGRITY_FAIL; + } else { + status = INTEGRITY_PASS; + } + break; + default: + status = INTEGRITY_UNKNOWN; + cause = "unknown-ima-data"; + break; + } + +out: + if (status != INTEGRITY_PASS) { + if ((ima_appraise & IMA_APPRAISE_FIX) && + (!xattr_value || + xattr_value->type != EVM_IMA_XATTR_DIGSIG)) { + if (!ima_fix_xattr(dentry, iint)) + status = INTEGRITY_PASS; + } + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename, + op, cause, rc, 0); + } else { + ima_cache_flags(iint, func); + } + ima_set_cache_status(iint, func, status); + return status; +} + +/* + * ima_update_xattr - update 'security.ima' hash value + */ +void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file) +{ + struct dentry *dentry = file->f_dentry; + int rc = 0; + + /* do not collect and update hash for digital signatures */ + if (iint->flags & IMA_DIGSIG) + return; + + rc = ima_collect_measurement(iint, file, NULL, NULL); + if (rc < 0) + return; + + ima_fix_xattr(dentry, iint); +} + +/** + * ima_inode_post_setattr - reflect file metadata changes + * @dentry: pointer to the affected dentry + * + * Changes to a dentry's metadata might result in needing to appraise. 
+ * + * This function is called from notify_change(), which expects the caller + * to lock the inode's i_mutex. + */ +void ima_inode_post_setattr(struct dentry *dentry) +{ + struct inode *inode = dentry->d_inode; + struct integrity_iint_cache *iint; + int must_appraise, rc; + + if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode) + || !inode->i_op->removexattr) + return; + + must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR); + iint = integrity_iint_find(inode); + if (iint) { + iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED | + IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK | + IMA_ACTION_FLAGS); + if (must_appraise) + iint->flags |= IMA_APPRAISE; + } + if (!must_appraise) + rc = inode->i_op->removexattr(dentry, XATTR_NAME_IMA); + return; +} + +/* + * ima_protect_xattr - protect 'security.ima' + * + * Ensure that not just anyone can modify or remove 'security.ima'. + */ +static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len) +{ + if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) { + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + return 1; + } + return 0; +} + +static void ima_reset_appraise_flags(struct inode *inode, int digsig) +{ + struct integrity_iint_cache *iint; + + if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode)) + return; + + iint = integrity_iint_find(inode); + if (!iint) + return; + + iint->flags &= ~IMA_DONE_MASK; + if (digsig) + iint->flags |= IMA_DIGSIG; + return; +} + +int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name, + const void *xattr_value, size_t xattr_value_len) +{ + const struct evm_ima_xattr_data *xvalue = xattr_value; + int result; + + result = ima_protect_xattr(dentry, xattr_name, xattr_value, + xattr_value_len); + if (result == 1) { + ima_reset_appraise_flags(dentry->d_inode, + (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0); + result = 0; + } + return result; +} + +int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name) +{ + int result; + + result = ima_protect_xattr(dentry, xattr_name, NULL, 0); + if (result == 1) { + ima_reset_appraise_flags(dentry->d_inode, 0); + result = 0; + } + return result; +} diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c index 9b3ade7468b..ccd0ac8fa9a 100644 --- a/security/integrity/ima/ima_crypto.c +++ b/security/integrity/ima/ima_crypto.c @@ -10,60 +10,131 @@ * the Free Software Foundation, version 2 of the License. * * File: ima_crypto.c - * Calculates md5/sha1 file hash, template hash, boot-aggreate hash + * Calculates md5/sha1 file hash, template hash, boot-aggreate hash */ +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/kernel.h> #include <linux/file.h> #include <linux/crypto.h> #include <linux/scatterlist.h> #include <linux/err.h> #include <linux/slab.h> +#include <crypto/hash.h> +#include <crypto/hash_info.h> #include "ima.h" -static int init_desc(struct hash_desc *desc) +static struct crypto_shash *ima_shash_tfm; + +/** + * ima_kernel_read - read file content + * + * This is a function for reading file content instead of kernel_read(). + * It does not perform locking checks to ensure it cannot be blocked. + * It does not perform security checks because it is irrelevant for IMA. 
+ * + */ +static int ima_kernel_read(struct file *file, loff_t offset, + char *addr, unsigned long count) { - int rc; + mm_segment_t old_fs; + char __user *buf = addr; + ssize_t ret; + + if (!(file->f_mode & FMODE_READ)) + return -EBADF; + if (!file->f_op->read && !file->f_op->aio_read) + return -EINVAL; + + old_fs = get_fs(); + set_fs(get_ds()); + if (file->f_op->read) + ret = file->f_op->read(file, buf, count, &offset); + else + ret = do_sync_read(file, buf, count, &offset); + set_fs(old_fs); + return ret; +} - desc->tfm = crypto_alloc_hash(ima_hash, 0, CRYPTO_ALG_ASYNC); - if (IS_ERR(desc->tfm)) { - pr_info("IMA: failed to load %s transform: %ld\n", - ima_hash, PTR_ERR(desc->tfm)); - rc = PTR_ERR(desc->tfm); +int ima_init_crypto(void) +{ + long rc; + + ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0); + if (IS_ERR(ima_shash_tfm)) { + rc = PTR_ERR(ima_shash_tfm); + pr_err("Can not allocate %s (reason: %ld)\n", + hash_algo_name[ima_hash_algo], rc); return rc; } - desc->flags = 0; - rc = crypto_hash_init(desc); - if (rc) - crypto_free_hash(desc->tfm); - return rc; + return 0; +} + +static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo) +{ + struct crypto_shash *tfm = ima_shash_tfm; + int rc; + + if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) { + tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0); + if (IS_ERR(tfm)) { + rc = PTR_ERR(tfm); + pr_err("Can not allocate %s (reason: %d)\n", + hash_algo_name[algo], rc); + } + } + return tfm; +} + +static void ima_free_tfm(struct crypto_shash *tfm) +{ + if (tfm != ima_shash_tfm) + crypto_free_shash(tfm); } /* * Calculate the MD5/SHA1 file digest */ -int ima_calc_hash(struct file *file, char *digest) +static int ima_calc_file_hash_tfm(struct file *file, + struct ima_digest_data *hash, + struct crypto_shash *tfm) { - struct hash_desc desc; - struct scatterlist sg[1]; loff_t i_size, offset = 0; char *rbuf; - int rc; + int rc, read = 0; + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(tfm)]; + } desc; - rc = init_desc(&desc); + desc.shash.tfm = tfm; + desc.shash.flags = 0; + + hash->length = crypto_shash_digestsize(tfm); + + rc = crypto_shash_init(&desc.shash); if (rc != 0) return rc; - rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); - if (!rbuf) { - rc = -ENOMEM; + i_size = i_size_read(file_inode(file)); + + if (i_size == 0) goto out; + + rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL); + if (!rbuf) + return -ENOMEM; + + if (!(file->f_mode & FMODE_READ)) { + file->f_mode |= FMODE_READ; + read = 1; } - i_size = i_size_read(file->f_dentry->d_inode); + while (offset < i_size) { int rbuf_len; - rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE); + rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE); if (rbuf_len < 0) { rc = rbuf_len; break; @@ -71,38 +142,103 @@ int ima_calc_hash(struct file *file, char *digest) if (rbuf_len == 0) break; offset += rbuf_len; - sg_init_one(sg, rbuf, rbuf_len); - rc = crypto_hash_update(&desc, sg, rbuf_len); + rc = crypto_shash_update(&desc.shash, rbuf, rbuf_len); if (rc) break; } + if (read) + file->f_mode &= ~FMODE_READ; kfree(rbuf); - if (!rc) - rc = crypto_hash_final(&desc, digest); out: - crypto_free_hash(desc.tfm); + if (!rc) + rc = crypto_shash_final(&desc.shash, hash->digest); + return rc; +} + +int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = ima_calc_file_hash_tfm(file, hash, tfm); + + ima_free_tfm(tfm); + return 
rc; } /* - * Calculate the hash of a given template + * Calculate the hash of template data */ -int ima_calc_template_hash(int template_len, void *template, char *digest) +static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data, + struct ima_template_desc *td, + int num_fields, + struct ima_digest_data *hash, + struct crypto_shash *tfm) { - struct hash_desc desc; - struct scatterlist sg[1]; - int rc; + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(tfm)]; + } desc; + int rc, i; + + desc.shash.tfm = tfm; + desc.shash.flags = 0; - rc = init_desc(&desc); + hash->length = crypto_shash_digestsize(tfm); + + rc = crypto_shash_init(&desc.shash); if (rc != 0) return rc; - sg_init_one(sg, template, template_len); - rc = crypto_hash_update(&desc, sg, template_len); + for (i = 0; i < num_fields; i++) { + u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 }; + u8 *data_to_hash = field_data[i].data; + u32 datalen = field_data[i].len; + + if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) { + rc = crypto_shash_update(&desc.shash, + (const u8 *) &field_data[i].len, + sizeof(field_data[i].len)); + if (rc) + break; + } else if (strcmp(td->fields[i]->field_id, "n") == 0) { + memcpy(buffer, data_to_hash, datalen); + data_to_hash = buffer; + datalen = IMA_EVENT_NAME_LEN_MAX + 1; + } + rc = crypto_shash_update(&desc.shash, data_to_hash, datalen); + if (rc) + break; + } + if (!rc) - rc = crypto_hash_final(&desc, digest); - crypto_free_hash(desc.tfm); + rc = crypto_shash_final(&desc.shash, hash->digest); + + return rc; +} + +int ima_calc_field_array_hash(struct ima_field_data *field_data, + struct ima_template_desc *desc, int num_fields, + struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields, + hash, tfm); + + ima_free_tfm(tfm); + return rc; } @@ -112,20 +248,26 @@ static void __init ima_pcrread(int idx, u8 *pcr) return; if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0) - pr_err("IMA: Error Communicating to TPM chip\n"); + pr_err("Error Communicating to TPM chip\n"); } /* * Calculate the boot aggregate hash */ -int __init ima_calc_boot_aggregate(char *digest) +static int __init ima_calc_boot_aggregate_tfm(char *digest, + struct crypto_shash *tfm) { - struct hash_desc desc; - struct scatterlist sg; - u8 pcr_i[IMA_DIGEST_SIZE]; + u8 pcr_i[TPM_DIGEST_SIZE]; int rc, i; + struct { + struct shash_desc shash; + char ctx[crypto_shash_descsize(tfm)]; + } desc; + + desc.shash.tfm = tfm; + desc.shash.flags = 0; - rc = init_desc(&desc); + rc = crypto_shash_init(&desc.shash); if (rc != 0) return rc; @@ -133,11 +275,26 @@ int __init ima_calc_boot_aggregate(char *digest) for (i = TPM_PCR0; i < TPM_PCR8; i++) { ima_pcrread(i, pcr_i); /* now accumulate with current aggregate */ - sg_init_one(&sg, pcr_i, IMA_DIGEST_SIZE); - rc = crypto_hash_update(&desc, &sg, IMA_DIGEST_SIZE); + rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE); } if (!rc) - crypto_hash_final(&desc, digest); - crypto_free_hash(desc.tfm); + crypto_shash_final(&desc.shash, digest); + return rc; +} + +int __init ima_calc_boot_aggregate(struct ima_digest_data *hash) +{ + struct crypto_shash *tfm; + int rc; + + tfm = ima_alloc_tfm(hash->algo); + if (IS_ERR(tfm)) + return PTR_ERR(tfm); + + hash->length = crypto_shash_digestsize(tfm); + rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm); + + ima_free_tfm(tfm); + return rc; } diff --git 
a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c index ef21b96a0b4..da92fcc08d1 100644 --- a/security/integrity/ima/ima_fs.c +++ b/security/integrity/ima/ima_fs.c @@ -88,8 +88,7 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos) * against concurrent list-extension */ rcu_read_lock(); - qe = list_entry_rcu(qe->later.next, - struct ima_queue_entry, later); + qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later); rcu_read_unlock(); (*pos)++; @@ -100,7 +99,7 @@ static void ima_measurements_stop(struct seq_file *m, void *v) { } -static void ima_putc(struct seq_file *m, void *data, int datalen) +void ima_putc(struct seq_file *m, void *data, int datalen) { while (datalen--) seq_putc(m, *(char *)data++); @@ -111,6 +110,7 @@ static void ima_putc(struct seq_file *m, void *data, int datalen) * char[20]=template digest * 32bit-le=template name size * char[n]=template name + * [eventdata length] * eventdata[n]=template specific data */ static int ima_measurements_show(struct seq_file *m, void *v) @@ -120,6 +120,8 @@ static int ima_measurements_show(struct seq_file *m, void *v) struct ima_template_entry *e; int namelen; u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX; + bool is_ima_template = false; + int i; /* get entry */ e = qe->entry; @@ -131,21 +133,37 @@ static int ima_measurements_show(struct seq_file *m, void *v) * PCR used is always the same (config option) in * little-endian format */ - ima_putc(m, &pcr, sizeof pcr); + ima_putc(m, &pcr, sizeof(pcr)); /* 2nd: template digest */ - ima_putc(m, e->digest, IMA_DIGEST_SIZE); + ima_putc(m, e->digest, TPM_DIGEST_SIZE); /* 3rd: template name size */ - namelen = strlen(e->template_name); - ima_putc(m, &namelen, sizeof namelen); + namelen = strlen(e->template_desc->name); + ima_putc(m, &namelen, sizeof(namelen)); /* 4th: template name */ - ima_putc(m, (void *)e->template_name, namelen); - - /* 5th: template specific data */ - ima_template_show(m, (struct ima_template_data *)&e->template, - IMA_SHOW_BINARY); + ima_putc(m, e->template_desc->name, namelen); + + /* 5th: template length (except for 'ima' template) */ + if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) + is_ima_template = true; + + if (!is_ima_template) + ima_putc(m, &e->template_data_len, + sizeof(e->template_data_len)); + + /* 6th: template specific data */ + for (i = 0; i < e->template_desc->num_fields; i++) { + enum ima_show_type show = IMA_SHOW_BINARY; + struct ima_template_field *field = e->template_desc->fields[i]; + + if (is_ima_template && strcmp(field->field_id, "d") == 0) + show = IMA_SHOW_BINARY_NO_FIELD_LEN; + if (is_ima_template && strcmp(field->field_id, "n") == 0) + show = IMA_SHOW_BINARY_OLD_STRING_FMT; + field->field_show(m, show, &e->template_data[i]); + } return 0; } @@ -168,41 +186,21 @@ static const struct file_operations ima_measurements_ops = { .release = seq_release, }; -static void ima_print_digest(struct seq_file *m, u8 *digest) +void ima_print_digest(struct seq_file *m, u8 *digest, int size) { int i; - for (i = 0; i < IMA_DIGEST_SIZE; i++) + for (i = 0; i < size; i++) seq_printf(m, "%02x", *(digest + i)); } -void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show) -{ - struct ima_template_data *entry = e; - int namelen; - - switch (show) { - case IMA_SHOW_ASCII: - ima_print_digest(m, entry->digest); - seq_printf(m, " %s\n", entry->file_name); - break; - case IMA_SHOW_BINARY: - ima_putc(m, entry->digest, IMA_DIGEST_SIZE); - - namelen = strlen(entry->file_name); - ima_putc(m, &namelen, 
sizeof namelen); - ima_putc(m, entry->file_name, namelen); - default: - break; - } -} - /* print in ascii */ static int ima_ascii_measurements_show(struct seq_file *m, void *v) { /* the list never shrinks, so we don't need a lock here */ struct ima_queue_entry *qe = v; struct ima_template_entry *e; + int i; /* get entry */ e = qe->entry; @@ -213,14 +211,21 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v) seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX); /* 2nd: SHA1 template hash */ - ima_print_digest(m, e->digest); + ima_print_digest(m, e->digest, TPM_DIGEST_SIZE); /* 3th: template name */ - seq_printf(m, " %s ", e->template_name); + seq_printf(m, " %s", e->template_desc->name); /* 4th: template specific data */ - ima_template_show(m, (struct ima_template_data *)&e->template, - IMA_SHOW_ASCII); + for (i = 0; i < e->template_desc->num_fields; i++) { + seq_puts(m, " "); + if (e->template_data[i].len == 0) + continue; + + e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII, + &e->template_data[i]); + } + seq_puts(m, "\n"); return 0; } @@ -287,7 +292,7 @@ static atomic_t policy_opencount = ATOMIC_INIT(1); /* * ima_open_policy: sequentialize access to the policy file */ -int ima_open_policy(struct inode * inode, struct file * filp) +static int ima_open_policy(struct inode *inode, struct file *filp) { /* No point in being allowed to open it if you aren't going to write */ if (!(filp->f_flags & O_WRONLY)) @@ -367,20 +372,11 @@ int __init ima_fs_init(void) return 0; out: - securityfs_remove(runtime_measurements_count); - securityfs_remove(ascii_runtime_measurements); - securityfs_remove(binary_runtime_measurements); - securityfs_remove(ima_dir); - securityfs_remove(ima_policy); - return -1; -} - -void __exit ima_fs_cleanup(void) -{ securityfs_remove(violations); securityfs_remove(runtime_measurements_count); securityfs_remove(ascii_runtime_measurements); securityfs_remove(binary_runtime_measurements); securityfs_remove(ima_dir); securityfs_remove(ima_policy); + return -1; } diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c deleted file mode 100644 index c442e47b678..00000000000 --- a/security/integrity/ima/ima_iint.c +++ /dev/null @@ -1,174 +0,0 @@ -/* - * Copyright (C) 2008 IBM Corporation - * - * Authors: - * Mimi Zohar <zohar@us.ibm.com> - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License as - * published by the Free Software Foundation, version 2 of the - * License. - * - * File: ima_iint.c - * - implements the IMA hooks: ima_inode_alloc, ima_inode_free - * - cache integrity information associated with an inode - * using a rbtree tree. 
- */ -#include <linux/slab.h> -#include <linux/module.h> -#include <linux/spinlock.h> -#include <linux/rbtree.h> -#include "ima.h" - -static struct rb_root ima_iint_tree = RB_ROOT; -static DEFINE_SPINLOCK(ima_iint_lock); -static struct kmem_cache *iint_cache __read_mostly; - -int iint_initialized = 0; - -/* - * __ima_iint_find - return the iint associated with an inode - */ -static struct ima_iint_cache *__ima_iint_find(struct inode *inode) -{ - struct ima_iint_cache *iint; - struct rb_node *n = ima_iint_tree.rb_node; - - assert_spin_locked(&ima_iint_lock); - - while (n) { - iint = rb_entry(n, struct ima_iint_cache, rb_node); - - if (inode < iint->inode) - n = n->rb_left; - else if (inode > iint->inode) - n = n->rb_right; - else - break; - } - if (!n) - return NULL; - - return iint; -} - -/* - * ima_iint_find - return the iint associated with an inode - */ -struct ima_iint_cache *ima_iint_find(struct inode *inode) -{ - struct ima_iint_cache *iint; - - if (!IS_IMA(inode)) - return NULL; - - spin_lock(&ima_iint_lock); - iint = __ima_iint_find(inode); - spin_unlock(&ima_iint_lock); - - return iint; -} - -static void iint_free(struct ima_iint_cache *iint) -{ - iint->version = 0; - iint->flags = 0UL; - kmem_cache_free(iint_cache, iint); -} - -/** - * ima_inode_alloc - allocate an iint associated with an inode - * @inode: pointer to the inode - */ -int ima_inode_alloc(struct inode *inode) -{ - struct rb_node **p; - struct rb_node *new_node, *parent = NULL; - struct ima_iint_cache *new_iint, *test_iint; - int rc; - - new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS); - if (!new_iint) - return -ENOMEM; - - new_iint->inode = inode; - new_node = &new_iint->rb_node; - - mutex_lock(&inode->i_mutex); /* i_flags */ - spin_lock(&ima_iint_lock); - - p = &ima_iint_tree.rb_node; - while (*p) { - parent = *p; - test_iint = rb_entry(parent, struct ima_iint_cache, rb_node); - - rc = -EEXIST; - if (inode < test_iint->inode) - p = &(*p)->rb_left; - else if (inode > test_iint->inode) - p = &(*p)->rb_right; - else - goto out_err; - } - - inode->i_flags |= S_IMA; - rb_link_node(new_node, parent, p); - rb_insert_color(new_node, &ima_iint_tree); - - spin_unlock(&ima_iint_lock); - mutex_unlock(&inode->i_mutex); /* i_flags */ - - return 0; -out_err: - spin_unlock(&ima_iint_lock); - mutex_unlock(&inode->i_mutex); /* i_flags */ - iint_free(new_iint); - - return rc; -} - -/** - * ima_inode_free - called on security_inode_free - * @inode: pointer to the inode - * - * Free the integrity information(iint) associated with an inode. 
- */ -void ima_inode_free(struct inode *inode) -{ - struct ima_iint_cache *iint; - - if (inode->i_readcount) - printk(KERN_INFO "%s: readcount: %u\n", __func__, inode->i_readcount); - - inode->i_readcount = 0; - - if (!IS_IMA(inode)) - return; - - spin_lock(&ima_iint_lock); - iint = __ima_iint_find(inode); - rb_erase(&iint->rb_node, &ima_iint_tree); - spin_unlock(&ima_iint_lock); - - iint_free(iint); -} - -static void init_once(void *foo) -{ - struct ima_iint_cache *iint = foo; - - memset(iint, 0, sizeof *iint); - iint->version = 0; - iint->flags = 0UL; - mutex_init(&iint->mutex); -} - -static int __init ima_iintcache_init(void) -{ - iint_cache = - kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0, - SLAB_PANIC, init_once); - iint_initialized = 1; - return 0; -} -security_initcall(ima_iintcache_init); diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c index 17f1f060306..e8f9d70a465 100644 --- a/security/integrity/ima/ima_init.c +++ b/security/integrity/ima/ima_init.c @@ -14,10 +14,14 @@ * File: ima_init.c * initialization and cleanup functions */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <linux/err.h> +#include <crypto/hash_info.h> #include "ima.h" /* name for boot aggregate entry */ @@ -41,31 +45,40 @@ int ima_used_chip; */ static void __init ima_add_boot_aggregate(void) { - struct ima_template_entry *entry; - const char *op = "add_boot_aggregate"; + static const char op[] = "add_boot_aggregate"; const char *audit_cause = "ENOMEM"; + struct ima_template_entry *entry; + struct integrity_iint_cache tmp_iint, *iint = &tmp_iint; int result = -ENOMEM; - int violation = 1; + int violation = 0; + struct { + struct ima_digest_data hdr; + char digest[TPM_DIGEST_SIZE]; + } hash; - entry = kmalloc(sizeof(*entry), GFP_KERNEL); - if (!entry) - goto err_out; + memset(iint, 0, sizeof(*iint)); + memset(&hash, 0, sizeof(hash)); + iint->ima_hash = &hash.hdr; + iint->ima_hash->algo = HASH_ALGO_SHA1; + iint->ima_hash->length = SHA1_DIGEST_SIZE; - memset(&entry->template, 0, sizeof(entry->template)); - strncpy(entry->template.file_name, boot_aggregate_name, - IMA_EVENT_NAME_LEN_MAX); if (ima_used_chip) { - violation = 0; - result = ima_calc_boot_aggregate(entry->template.digest); + result = ima_calc_boot_aggregate(&hash.hdr); if (result < 0) { audit_cause = "hashing_error"; - kfree(entry); goto err_out; } } - result = ima_store_template(entry, violation, NULL); + + result = ima_alloc_init_template(iint, NULL, boot_aggregate_name, + NULL, 0, &entry); if (result < 0) - kfree(entry); + return; + + result = ima_store_template(entry, violation, NULL, + boot_aggregate_name); + if (result < 0) + ima_free_template_entry(entry); return; err_out: integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op, @@ -74,7 +87,7 @@ err_out: int __init ima_init(void) { - u8 pcr_i[IMA_DIGEST_SIZE]; + u8 pcr_i[TPM_DIGEST_SIZE]; int rc; ima_used_chip = 0; @@ -83,15 +96,17 @@ int __init ima_init(void) ima_used_chip = 1; if (!ima_used_chip) - pr_info("IMA: No TPM chip found, activating TPM-bypass!\n"); + pr_info("No TPM chip found, activating TPM-bypass!\n"); + + rc = ima_init_crypto(); + if (rc) + return rc; + rc = ima_init_template(); + if (rc != 0) + return rc; ima_add_boot_aggregate(); /* boot aggregate must be first entry */ ima_init_policy(); return ima_fs_init(); } - -void __exit ima_cleanup(void) -{ - ima_fs_cleanup(); -} diff --git a/security/integrity/ima/ima_main.c 
b/security/integrity/ima/ima_main.c index 203de979d30..09baa335ebc 100644 --- a/security/integrity/ima/ima_main.c +++ b/security/integrity/ima/ima_main.c @@ -22,240 +22,224 @@ #include <linux/mount.h> #include <linux/mman.h> #include <linux/slab.h> +#include <linux/xattr.h> +#include <linux/ima.h> +#include <crypto/hash_info.h> #include "ima.h" int ima_initialized; -char *ima_hash = "sha1"; +#ifdef CONFIG_IMA_APPRAISE +int ima_appraise = IMA_APPRAISE_ENFORCE; +#else +int ima_appraise; +#endif + +int ima_hash_algo = HASH_ALGO_SHA1; +static int hash_setup_done; + static int __init hash_setup(char *str) { - if (strncmp(str, "md5", 3) == 0) - ima_hash = "md5"; - return 1; -} -__setup("ima_hash=", hash_setup); + struct ima_template_desc *template_desc = ima_template_desc_current(); + int i; -struct ima_imbalance { - struct hlist_node node; - unsigned long fsmagic; -}; + if (hash_setup_done) + return 1; -/* - * ima_limit_imbalance - emit one imbalance message per filesystem type - * - * Maintain list of filesystem types that do not measure files properly. - * Return false if unknown, true if known. - */ -static bool ima_limit_imbalance(struct file *file) -{ - static DEFINE_SPINLOCK(ima_imbalance_lock); - static HLIST_HEAD(ima_imbalance_list); - - struct super_block *sb = file->f_dentry->d_sb; - struct ima_imbalance *entry; - struct hlist_node *node; - bool found = false; - - rcu_read_lock(); - hlist_for_each_entry_rcu(entry, node, &ima_imbalance_list, node) { - if (entry->fsmagic == sb->s_magic) { - found = true; + if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) { + if (strncmp(str, "sha1", 4) == 0) + ima_hash_algo = HASH_ALGO_SHA1; + else if (strncmp(str, "md5", 3) == 0) + ima_hash_algo = HASH_ALGO_MD5; + goto out; + } + + for (i = 0; i < HASH_ALGO__LAST; i++) { + if (strcmp(str, hash_algo_name[i]) == 0) { + ima_hash_algo = i; break; } } - rcu_read_unlock(); - if (found) - goto out; - - entry = kmalloc(sizeof(*entry), GFP_NOFS); - if (!entry) - goto out; - entry->fsmagic = sb->s_magic; - spin_lock(&ima_imbalance_lock); - /* - * we could have raced and something else might have added this fs - * to the list, but we don't really care - */ - hlist_add_head_rcu(&entry->node, &ima_imbalance_list); - spin_unlock(&ima_imbalance_lock); - printk(KERN_INFO "IMA: unmeasured files on fsmagic: %lX\n", - entry->fsmagic); out: - return found; + hash_setup_done = 1; + return 1; } +__setup("ima_hash=", hash_setup); /* - * ima_counts_get - increment file counts + * ima_rdwr_violation_check * - * Maintain read/write counters for all files, but only - * invalidate the PCR for measured files: - * - Opening a file for write when already open for read, + * Only invalidate the PCR for measured files: + * - Opening a file for write when already open for read, * results in a time of measure, time of use (ToMToU) error. * - Opening a file for read when already open for write, - * could result in a file measurement error. + * could result in a file measurement error. 
* */ -void ima_counts_get(struct file *file) +static void ima_rdwr_violation_check(struct file *file) { - struct dentry *dentry = file->f_path.dentry; - struct inode *inode = dentry->d_inode; + struct inode *inode = file_inode(file); fmode_t mode = file->f_mode; - int rc; bool send_tomtou = false, send_writers = false; + char *pathbuf = NULL; + const char *pathname; - if (!S_ISREG(inode->i_mode)) + if (!S_ISREG(inode->i_mode) || !ima_initialized) return; - spin_lock(&inode->i_lock); - - if (!ima_initialized) - goto out; + mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */ if (mode & FMODE_WRITE) { - if (inode->i_readcount && IS_IMA(inode)) - send_tomtou = true; - goto out; + if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) { + struct integrity_iint_cache *iint; + iint = integrity_iint_find(inode); + /* IMA_MEASURE is set from reader side */ + if (iint && (iint->flags & IMA_MEASURE)) + send_tomtou = true; + } + } else { + if ((atomic_read(&inode->i_writecount) > 0) && + ima_must_measure(inode, MAY_READ, FILE_CHECK)) + send_writers = true; } - rc = ima_must_measure(NULL, inode, MAY_READ, FILE_CHECK); - if (rc < 0) - goto out; + mutex_unlock(&inode->i_mutex); - if (atomic_read(&inode->i_writecount) > 0) - send_writers = true; -out: - /* remember the vfs deals with i_writecount */ - if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) - inode->i_readcount++; + if (!send_tomtou && !send_writers) + return; - spin_unlock(&inode->i_lock); + pathname = ima_d_path(&file->f_path, &pathbuf); if (send_tomtou) - ima_add_violation(inode, dentry->d_name.name, "invalid_pcr", - "ToMToU"); + ima_add_violation(file, pathname, "invalid_pcr", "ToMToU"); if (send_writers) - ima_add_violation(inode, dentry->d_name.name, "invalid_pcr", - "open_writers"); + ima_add_violation(file, pathname, + "invalid_pcr", "open_writers"); + kfree(pathbuf); } -/* - * Decrement ima counts - */ -static void ima_dec_counts(struct inode *inode, struct file *file) +static void ima_check_last_writer(struct integrity_iint_cache *iint, + struct inode *inode, struct file *file) { - mode_t mode = file->f_mode; - - assert_spin_locked(&inode->i_lock); - - if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) { - if (unlikely(inode->i_readcount == 0)) { - if (!ima_limit_imbalance(file)) { - printk(KERN_INFO "%s: open/free imbalance (r:%u)\n", - __func__, inode->i_readcount); - dump_stack(); - } - return; - } - inode->i_readcount--; - } -} - -static void ima_check_last_writer(struct ima_iint_cache *iint, - struct inode *inode, - struct file *file) -{ - mode_t mode = file->f_mode; - - BUG_ON(!mutex_is_locked(&iint->mutex)); - assert_spin_locked(&inode->i_lock); - - if (mode & FMODE_WRITE && - atomic_read(&inode->i_writecount) == 1 && - iint->version != inode->i_version) - iint->flags &= ~IMA_MEASURED; -} - -static void ima_file_free_iint(struct ima_iint_cache *iint, struct inode *inode, - struct file *file) -{ - mutex_lock(&iint->mutex); - spin_lock(&inode->i_lock); - - ima_dec_counts(inode, file); - ima_check_last_writer(iint, inode, file); - - spin_unlock(&inode->i_lock); - mutex_unlock(&iint->mutex); -} - -static void ima_file_free_noiint(struct inode *inode, struct file *file) -{ - spin_lock(&inode->i_lock); + fmode_t mode = file->f_mode; - ima_dec_counts(inode, file); + if (!(mode & FMODE_WRITE)) + return; - spin_unlock(&inode->i_lock); + mutex_lock(&inode->i_mutex); + if (atomic_read(&inode->i_writecount) == 1 && + iint->version != inode->i_version) { + iint->flags &= ~IMA_DONE_MASK; + if (iint->flags & 
IMA_APPRAISE) + ima_update_xattr(iint, file); + } + mutex_unlock(&inode->i_mutex); } /** * ima_file_free - called on __fput() * @file: pointer to file structure being freed * - * Flag files that changed, based on i_version; - * and decrement the i_readcount. + * Flag files that changed, based on i_version */ void ima_file_free(struct file *file) { - struct inode *inode = file->f_dentry->d_inode; - struct ima_iint_cache *iint; + struct inode *inode = file_inode(file); + struct integrity_iint_cache *iint; if (!iint_initialized || !S_ISREG(inode->i_mode)) return; - iint = ima_iint_find(inode); - - if (iint) - ima_file_free_iint(iint, inode, file); - else - ima_file_free_noiint(inode, file); + iint = integrity_iint_find(inode); + if (!iint) + return; + ima_check_last_writer(iint, inode, file); } -static int process_measurement(struct file *file, const unsigned char *filename, +static int process_measurement(struct file *file, const char *filename, int mask, int function) { - struct inode *inode = file->f_dentry->d_inode; - struct ima_iint_cache *iint; - int rc = 0; + struct inode *inode = file_inode(file); + struct integrity_iint_cache *iint; + struct ima_template_desc *template_desc = ima_template_desc_current(); + char *pathbuf = NULL; + const char *pathname = NULL; + int rc = -ENOMEM, action, must_appraise, _func; + struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL; + int xattr_len = 0; if (!ima_initialized || !S_ISREG(inode->i_mode)) return 0; - rc = ima_must_measure(NULL, inode, mask, function); - if (rc != 0) - return rc; -retry: - iint = ima_iint_find(inode); - if (!iint) { - rc = ima_inode_alloc(inode); - if (!rc || rc == -EEXIST) - goto retry; - return rc; - } + /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action + * bitmask based on the appraise/audit/measurement policy. + * Included is the appraise submask. + */ + action = ima_get_action(inode, mask, function); + if (!action) + return 0; + + must_appraise = action & IMA_APPRAISE; + + /* Is the appraise rule hook specific? */ + _func = (action & IMA_FILE_APPRAISE) ? FILE_CHECK : function; - mutex_lock(&iint->mutex); + mutex_lock(&inode->i_mutex); - rc = ima_must_measure(iint, inode, mask, function); - if (rc != 0) + iint = integrity_inode_get(inode); + if (!iint) goto out; - rc = ima_collect_measurement(iint, file); - if (!rc) - ima_store_measurement(iint, file, filename); + /* Determine if already appraised/measured based on bitmask + * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED, + * IMA_AUDIT, IMA_AUDITED) + */ + iint->flags |= action; + action &= IMA_DO_MASK; + action &= ~((iint->flags & IMA_DONE_MASK) >> 1); + + /* Nothing to do, just return existing appraised status */ + if (!action) { + if (must_appraise) + rc = ima_get_cache_status(iint, _func); + goto out_digsig; + } + + if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) { + if (action & IMA_APPRAISE_SUBMASK) + xattr_ptr = &xattr_value; + } else + xattr_ptr = &xattr_value; + + rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len); + if (rc != 0) { + if (file->f_flags & O_DIRECT) + rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 
0 : -EACCES; + goto out_digsig; + } + + pathname = filename ?: ima_d_path(&file->f_path, &pathbuf); + + if (action & IMA_MEASURE) + ima_store_measurement(iint, file, pathname, + xattr_value, xattr_len); + if (action & IMA_APPRAISE_SUBMASK) + rc = ima_appraise_measurement(_func, iint, file, pathname, + xattr_value, xattr_len); + if (action & IMA_AUDIT) + ima_audit_measurement(iint, pathname); + kfree(pathbuf); +out_digsig: + if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG)) + rc = -EACCES; out: - mutex_unlock(&iint->mutex); - return rc; + mutex_unlock(&inode->i_mutex); + kfree(xattr_value); + if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE)) + return -EACCES; + return 0; } /** @@ -266,18 +250,13 @@ out: * Measure files being mmapped executable based on the ima_must_measure() * policy decision. * - * Return 0 on success, an error code on failure. - * (Based on the results of appraise_measurement().) + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ int ima_file_mmap(struct file *file, unsigned long prot) { - int rc; - - if (!file) - return 0; - if (prot & PROT_EXEC) - rc = process_measurement(file, file->f_dentry->d_name.name, - MAY_EXEC, FILE_MMAP); + if (file && (prot & PROT_EXEC)) + return process_measurement(file, NULL, MAY_EXEC, MMAP_CHECK); return 0; } @@ -291,16 +270,15 @@ int ima_file_mmap(struct file *file, unsigned long prot) * So we can be certain that what we verify and measure here is actually * what is being executed. * - * Return 0 on success, an error code on failure. - * (Based on the results of appraise_measurement().) + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ int ima_bprm_check(struct linux_binprm *bprm) { - int rc; - - rc = process_measurement(bprm->file, bprm->filename, - MAY_EXEC, BPRM_CHECK); - return 0; + return process_measurement(bprm->file, + (strcmp(bprm->filename, bprm->interp) == 0) ? + bprm->filename : bprm->interp, + MAY_EXEC, BPRM_CHECK); } /** @@ -310,34 +288,51 @@ int ima_bprm_check(struct linux_binprm *bprm) * * Measure files based on the ima_must_measure() policy decision. * - * Always return 0 and audit dentry_open failures. - * (Return code will be based upon measurement appraisal.) + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. */ int ima_file_check(struct file *file, int mask) { - int rc; - - rc = process_measurement(file, file->f_dentry->d_name.name, - mask & (MAY_READ | MAY_WRITE | MAY_EXEC), - FILE_CHECK); - return 0; + ima_rdwr_violation_check(file); + return process_measurement(file, NULL, + mask & (MAY_READ | MAY_WRITE | MAY_EXEC), + FILE_CHECK); } EXPORT_SYMBOL_GPL(ima_file_check); +/** + * ima_module_check - based on policy, collect/store/appraise measurement. + * @file: pointer to the file to be measured/appraised + * + * Measure/appraise kernel modules based on policy. + * + * On success return 0. On integrity appraisal error, assuming the file + * is in policy and IMA-appraisal is in enforcing mode, return -EACCES. 
+ */ +int ima_module_check(struct file *file) +{ + if (!file) { +#ifndef CONFIG_MODULE_SIG_FORCE + if ((ima_appraise & IMA_APPRAISE_MODULES) && + (ima_appraise & IMA_APPRAISE_ENFORCE)) + return -EACCES; /* INTEGRITY_UNKNOWN */ +#endif + return 0; /* We rely on module signature checking */ + } + return process_measurement(file, NULL, MAY_EXEC, MODULE_CHECK); +} + static int __init init_ima(void) { int error; + hash_setup(CONFIG_IMA_DEFAULT_HASH); error = ima_init(); - ima_initialized = 1; + if (!error) + ima_initialized = 1; return error; } -static void __exit cleanup_ima(void) -{ - ima_cleanup(); -} - late_initcall(init_ima); /* Start IMA after the TPM is available */ MODULE_DESCRIPTION("Integrity Measurement Architecture"); diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c index aef8c0a923a..40a7488f672 100644 --- a/security/integrity/ima/ima_policy.c +++ b/security/integrity/ima/ima_policy.c @@ -7,7 +7,7 @@ * the Free Software Foundation, version 2 of the License. * * ima_policy.c - * - initialize default measure policy rules + * - initialize default measure policy rules * */ #include <linux/module.h> @@ -16,39 +16,50 @@ #include <linux/magic.h> #include <linux/parser.h> #include <linux/slab.h> +#include <linux/genhd.h> #include "ima.h" /* flags definitions */ -#define IMA_FUNC 0x0001 -#define IMA_MASK 0x0002 +#define IMA_FUNC 0x0001 +#define IMA_MASK 0x0002 #define IMA_FSMAGIC 0x0004 #define IMA_UID 0x0008 +#define IMA_FOWNER 0x0010 +#define IMA_FSUUID 0x0020 -enum ima_action { UNKNOWN = -1, DONT_MEASURE = 0, MEASURE }; +#define UNKNOWN 0 +#define MEASURE 0x0001 /* same as IMA_MEASURE */ +#define DONT_MEASURE 0x0002 +#define APPRAISE 0x0004 /* same as IMA_APPRAISE */ +#define DONT_APPRAISE 0x0008 +#define AUDIT 0x0040 #define MAX_LSM_RULES 6 enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE, LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE }; -struct ima_measure_rule_entry { +struct ima_rule_entry { struct list_head list; - enum ima_action action; + int action; unsigned int flags; enum ima_hooks func; int mask; unsigned long fsmagic; - uid_t uid; + u8 fsuuid[16]; + kuid_t uid; + kuid_t fowner; struct { void *rule; /* LSM file metadata specific */ + void *args_p; /* audit value */ int type; /* audit type */ } lsm[MAX_LSM_RULES]; }; /* * Without LSM specific knowledge, the default policy can only be - * written in terms of .action, .func, .mask, .fsmagic, and .uid + * written in terms of .action, .func, .mask, .fsmagic, .uid, and .fowner */ /* @@ -57,34 +68,88 @@ struct ima_measure_rule_entry { * normal users can easily run the machine out of memory simply building * and running executables. 
*/ -static struct ima_measure_rule_entry default_rules[] = { - {.action = DONT_MEASURE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC}, - {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC}, - {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC}, - {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC}, - {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC}, - {.action = DONT_MEASURE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC}, - {.action = MEASURE,.func = FILE_MMAP,.mask = MAY_EXEC, +static struct ima_rule_entry default_rules[] = { + {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC}, + {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC, .flags = IMA_FUNC | IMA_MASK}, - {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC, + {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC, .flags = IMA_FUNC | IMA_MASK}, - {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0, + {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, .uid = GLOBAL_ROOT_UID, .flags = IMA_FUNC | IMA_MASK | IMA_UID}, + {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC}, }; -static LIST_HEAD(measure_default_rules); -static LIST_HEAD(measure_policy_rules); -static struct list_head *ima_measure; +static struct ima_rule_entry default_appraise_rules[] = { + {.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC}, + {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC}, + {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER}, +}; + +static LIST_HEAD(ima_default_rules); +static LIST_HEAD(ima_policy_rules); +static struct list_head *ima_rules; -static DEFINE_MUTEX(ima_measure_mutex); +static DEFINE_MUTEX(ima_rules_mutex); static bool ima_use_tcb __initdata; -static int __init default_policy_setup(char *str) +static int __init default_measure_policy_setup(char *str) { ima_use_tcb = 1; return 1; } -__setup("ima_tcb", default_policy_setup); +__setup("ima_tcb", default_measure_policy_setup); + +static bool ima_use_appraise_tcb __initdata; +static int __init default_appraise_policy_setup(char *str) +{ + ima_use_appraise_tcb = 1; + return 1; +} +__setup("ima_appraise_tcb", default_appraise_policy_setup); + +/* + * Although the IMA policy does not change, the LSM policy can be + * reloaded, leaving the IMA LSM 
based rules referring to the old, + * stale LSM policy. + * + * Update the IMA LSM based rules to reflect the reloaded LSM policy. + * We assume the rules still exist; and BUG_ON() if they don't. + */ +static void ima_lsm_update_rules(void) +{ + struct ima_rule_entry *entry, *tmp; + int result; + int i; + + mutex_lock(&ima_rules_mutex); + list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) { + for (i = 0; i < MAX_LSM_RULES; i++) { + if (!entry->lsm[i].rule) + continue; + result = security_filter_rule_init(entry->lsm[i].type, + Audit_equal, + entry->lsm[i].args_p, + &entry->lsm[i].rule); + BUG_ON(!entry->lsm[i].rule); + } + } + mutex_unlock(&ima_rules_mutex); +} /** * ima_match_rules - determine whether an inode matches the measure rule. @@ -95,28 +160,37 @@ __setup("ima_tcb", default_policy_setup); * * Returns true on rule match, false on failure. */ -static bool ima_match_rules(struct ima_measure_rule_entry *rule, +static bool ima_match_rules(struct ima_rule_entry *rule, struct inode *inode, enum ima_hooks func, int mask) { struct task_struct *tsk = current; + const struct cred *cred = current_cred(); int i; - if ((rule->flags & IMA_FUNC) && rule->func != func) + if ((rule->flags & IMA_FUNC) && + (rule->func != func && func != POST_SETATTR)) return false; - if ((rule->flags & IMA_MASK) && rule->mask != mask) + if ((rule->flags & IMA_MASK) && + (rule->mask != mask && func != POST_SETATTR)) return false; if ((rule->flags & IMA_FSMAGIC) && rule->fsmagic != inode->i_sb->s_magic) return false; - if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid) + if ((rule->flags & IMA_FSUUID) && + memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid))) + return false; + if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid)) + return false; + if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid)) return false; for (i = 0; i < MAX_LSM_RULES; i++) { int rc = 0; u32 osid, sid; + int retried = 0; if (!rule->lsm[i].rule) continue; - +retry: switch (i) { case LSM_OBJ_USER: case LSM_OBJ_ROLE: @@ -140,12 +214,39 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule, default: break; } + if ((rc < 0) && (!retried)) { + retried = 1; + ima_lsm_update_rules(); + goto retry; + } if (!rc) return false; } return true; } +/* + * In addition to knowing that we need to appraise the file in general, + * we need to differentiate between calling hooks, for hook specific rules. + */ +static int get_subaction(struct ima_rule_entry *rule, int func) +{ + if (!(rule->flags & IMA_FUNC)) + return IMA_FILE_APPRAISE; + + switch (func) { + case MMAP_CHECK: + return IMA_MMAP_APPRAISE; + case BPRM_CHECK: + return IMA_BPRM_APPRAISE; + case MODULE_CHECK: + return IMA_MODULE_APPRAISE; + case FILE_CHECK: + default: + return IMA_FILE_APPRAISE; + } +} + /** * ima_match_policy - decision based on LSM and other conditions * @inode: pointer to an inode for which the policy decision is being made @@ -159,39 +260,66 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule, * as elements in the list are never deleted, nor does the list * change.) 
*/ -int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask) +int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask, + int flags) { - struct ima_measure_rule_entry *entry; + struct ima_rule_entry *entry; + int action = 0, actmask = flags | (flags << 1); + + list_for_each_entry(entry, ima_rules, list) { + + if (!(entry->action & actmask)) + continue; + + if (!ima_match_rules(entry, inode, func, mask)) + continue; + + action |= entry->flags & IMA_ACTION_FLAGS; - list_for_each_entry(entry, ima_measure, list) { - bool rc; + action |= entry->action & IMA_DO_MASK; + if (entry->action & IMA_APPRAISE) + action |= get_subaction(entry, func); - rc = ima_match_rules(entry, inode, func, mask); - if (rc) - return entry->action; + if (entry->action & IMA_DO_MASK) + actmask &= ~(entry->action | entry->action << 1); + else + actmask &= ~(entry->action | entry->action >> 1); + + if (!actmask) + break; } - return 0; + + return action; } /** * ima_init_policy - initialize the default measure rules. * - * ima_measure points to either the measure_default_rules or the - * the new measure_policy_rules. + * ima_rules points to either the ima_default_rules or the + * the new ima_policy_rules. */ void __init ima_init_policy(void) { - int i, entries; + int i, measure_entries, appraise_entries; /* if !ima_use_tcb set entries = 0 so we load NO default rules */ - if (ima_use_tcb) - entries = ARRAY_SIZE(default_rules); - else - entries = 0; - - for (i = 0; i < entries; i++) - list_add_tail(&default_rules[i].list, &measure_default_rules); - ima_measure = &measure_default_rules; + measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0; + appraise_entries = ima_use_appraise_tcb ? + ARRAY_SIZE(default_appraise_rules) : 0; + + for (i = 0; i < measure_entries + appraise_entries; i++) { + if (i < measure_entries) + list_add_tail(&default_rules[i].list, + &ima_default_rules); + else { + int j = i - measure_entries; + + list_add_tail(&default_appraise_rules[j].list, + &ima_default_rules); + } + } + + ima_rules = &ima_default_rules; } /** @@ -203,13 +331,13 @@ void __init ima_init_policy(void) */ void ima_update_policy(void) { - const char *op = "policy_update"; + static const char op[] = "policy_update"; const char *cause = "already exists"; int result = 1; int audit_info = 0; - if (ima_measure == &measure_default_rules) { - ima_measure = &measure_policy_rules; + if (ima_rules == &ima_default_rules) { + ima_rules = &ima_policy_rules; cause = "complete"; result = 0; } @@ -220,14 +348,20 @@ void ima_update_policy(void) enum { Opt_err = -1, Opt_measure = 1, Opt_dont_measure, + Opt_appraise, Opt_dont_appraise, + Opt_audit, Opt_obj_user, Opt_obj_role, Opt_obj_type, Opt_subj_user, Opt_subj_role, Opt_subj_type, - Opt_func, Opt_mask, Opt_fsmagic, Opt_uid + Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner, + Opt_appraise_type, Opt_fsuuid, Opt_permit_directio }; static match_table_t policy_tokens = { {Opt_measure, "measure"}, {Opt_dont_measure, "dont_measure"}, + {Opt_appraise, "appraise"}, + {Opt_dont_appraise, "dont_appraise"}, + {Opt_audit, "audit"}, {Opt_obj_user, "obj_user=%s"}, {Opt_obj_role, "obj_role=%s"}, {Opt_obj_type, "obj_type=%s"}, @@ -237,22 +371,36 @@ static match_table_t policy_tokens = { {Opt_func, "func=%s"}, {Opt_mask, "mask=%s"}, {Opt_fsmagic, "fsmagic=%s"}, + {Opt_fsuuid, "fsuuid=%s"}, {Opt_uid, "uid=%s"}, + {Opt_fowner, "fowner=%s"}, + {Opt_appraise_type, "appraise_type=%s"}, + {Opt_permit_directio, "permit_directio"}, {Opt_err, NULL} }; -static int 
ima_lsm_rule_init(struct ima_measure_rule_entry *entry, - char *args, int lsm_rule, int audit_type) +static int ima_lsm_rule_init(struct ima_rule_entry *entry, + substring_t *args, int lsm_rule, int audit_type) { int result; if (entry->lsm[lsm_rule].rule) return -EINVAL; + entry->lsm[lsm_rule].args_p = match_strdup(args); + if (!entry->lsm[lsm_rule].args_p) + return -ENOMEM; + entry->lsm[lsm_rule].type = audit_type; result = security_filter_rule_init(entry->lsm[lsm_rule].type, - Audit_equal, args, + Audit_equal, + entry->lsm[lsm_rule].args_p, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) { + kfree(entry->lsm[lsm_rule].args_p); + return -EINVAL; + } + return result; } @@ -263,7 +411,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value) audit_log_format(ab, " "); } -static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry) +static int ima_parse_rule(char *rule, struct ima_rule_entry *entry) { struct audit_buffer *ab; char *p; @@ -271,7 +419,8 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry) ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE); - entry->uid = -1; + entry->uid = INVALID_UID; + entry->fowner = INVALID_UID; entry->action = UNKNOWN; while ((p = strsep(&rule, " \t")) != NULL) { substring_t args[MAX_OPT_ARGS]; @@ -300,19 +449,46 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry) entry->action = DONT_MEASURE; break; + case Opt_appraise: + ima_log_string(ab, "action", "appraise"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = APPRAISE; + break; + case Opt_dont_appraise: + ima_log_string(ab, "action", "dont_appraise"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = DONT_APPRAISE; + break; + case Opt_audit: + ima_log_string(ab, "action", "audit"); + + if (entry->action != UNKNOWN) + result = -EINVAL; + + entry->action = AUDIT; + break; case Opt_func: ima_log_string(ab, "func", args[0].from); if (entry->func) - result = -EINVAL; + result = -EINVAL; if (strcmp(args[0].from, "FILE_CHECK") == 0) entry->func = FILE_CHECK; /* PATH_CHECK is for backwards compat */ else if (strcmp(args[0].from, "PATH_CHECK") == 0) entry->func = FILE_CHECK; - else if (strcmp(args[0].from, "FILE_MMAP") == 0) - entry->func = FILE_MMAP; + else if (strcmp(args[0].from, "MODULE_CHECK") == 0) + entry->func = MODULE_CHECK; + else if ((strcmp(args[0].from, "FILE_MMAP") == 0) + || (strcmp(args[0].from, "MMAP_CHECK") == 0)) + entry->func = MMAP_CHECK; else if (strcmp(args[0].from, "BPRM_CHECK") == 0) entry->func = BPRM_CHECK; else @@ -347,64 +523,109 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry) break; } - result = strict_strtoul(args[0].from, 16, - &entry->fsmagic); + result = kstrtoul(args[0].from, 16, &entry->fsmagic); if (!result) entry->flags |= IMA_FSMAGIC; break; + case Opt_fsuuid: + ima_log_string(ab, "fsuuid", args[0].from); + + if (memchr_inv(entry->fsuuid, 0x00, + sizeof(entry->fsuuid))) { + result = -EINVAL; + break; + } + + result = blk_part_pack_uuid(args[0].from, + entry->fsuuid); + if (!result) + entry->flags |= IMA_FSUUID; + break; case Opt_uid: ima_log_string(ab, "uid", args[0].from); - if (entry->uid != -1) { + if (uid_valid(entry->uid)) { result = -EINVAL; break; } - result = strict_strtoul(args[0].from, 10, &lnum); + result = kstrtoul(args[0].from, 10, &lnum); if (!result) { - entry->uid = (uid_t) lnum; - if (entry->uid != lnum) + entry->uid = make_kuid(current_user_ns(), (uid_t)lnum); + 
if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum)) result = -EINVAL; else entry->flags |= IMA_UID; } break; + case Opt_fowner: + ima_log_string(ab, "fowner", args[0].from); + + if (uid_valid(entry->fowner)) { + result = -EINVAL; + break; + } + + result = kstrtoul(args[0].from, 10, &lnum); + if (!result) { + entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum); + if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum)) + result = -EINVAL; + else + entry->flags |= IMA_FOWNER; + } + break; case Opt_obj_user: ima_log_string(ab, "obj_user", args[0].from); - result = ima_lsm_rule_init(entry, args[0].from, + result = ima_lsm_rule_init(entry, args, LSM_OBJ_USER, AUDIT_OBJ_USER); break; case Opt_obj_role: ima_log_string(ab, "obj_role", args[0].from); - result = ima_lsm_rule_init(entry, args[0].from, + result = ima_lsm_rule_init(entry, args, LSM_OBJ_ROLE, AUDIT_OBJ_ROLE); break; case Opt_obj_type: ima_log_string(ab, "obj_type", args[0].from); - result = ima_lsm_rule_init(entry, args[0].from, + result = ima_lsm_rule_init(entry, args, LSM_OBJ_TYPE, AUDIT_OBJ_TYPE); break; case Opt_subj_user: ima_log_string(ab, "subj_user", args[0].from); - result = ima_lsm_rule_init(entry, args[0].from, + result = ima_lsm_rule_init(entry, args, LSM_SUBJ_USER, AUDIT_SUBJ_USER); break; case Opt_subj_role: ima_log_string(ab, "subj_role", args[0].from); - result = ima_lsm_rule_init(entry, args[0].from, + result = ima_lsm_rule_init(entry, args, LSM_SUBJ_ROLE, AUDIT_SUBJ_ROLE); break; case Opt_subj_type: ima_log_string(ab, "subj_type", args[0].from); - result = ima_lsm_rule_init(entry, args[0].from, + result = ima_lsm_rule_init(entry, args, LSM_SUBJ_TYPE, AUDIT_SUBJ_TYPE); break; + case Opt_appraise_type: + if (entry->action != APPRAISE) { + result = -EINVAL; + break; + } + + ima_log_string(ab, "appraise_type", args[0].from); + if ((strcmp(args[0].from, "imasig")) == 0) + entry->flags |= IMA_DIGSIG_REQUIRED; + else + result = -EINVAL; + break; + case Opt_permit_directio: + entry->flags |= IMA_PERMIT_DIRECTIO; + break; case Opt_err: ima_log_string(ab, "UNKNOWN", p); result = -EINVAL; @@ -413,14 +634,15 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry) } if (!result && (entry->action == UNKNOWN)) result = -EINVAL; - - audit_log_format(ab, "res=%d", !!result); + else if (entry->func == MODULE_CHECK) + ima_appraise |= IMA_APPRAISE_MODULES; + audit_log_format(ab, "res=%d", !result); audit_log_end(ab); return result; } /** - * ima_parse_add_rule - add a rule to measure_policy_rules + * ima_parse_add_rule - add a rule to ima_policy_rules * @rule - ima measurement policy rule * * Uses a mutex to protect the policy list from multiple concurrent writers. 
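The action bookkeeping in the hunks above (actmask = flags | (flags << 1) in ima_match_policy(), and action &= ~((iint->flags & IMA_DONE_MASK) >> 1) in process_measurement()) only works because each "do" flag is defined exactly one bit below its matching "done" flag. The following is a minimal userspace sketch of that invariant; the flag values are assumptions chosen to mirror the usual integrity.h layout and are not taken from this diff.

#include <stdio.h>

#define DO_MEASURE    0x0001   /* assumed value, mirrors IMA_MEASURE   */
#define DONE_MEASURE  0x0002   /* assumed value, mirrors IMA_MEASURED  */
#define DO_APPRAISE   0x0004   /* assumed value, mirrors IMA_APPRAISE  */
#define DONE_APPRAISE 0x0008   /* assumed value, mirrors IMA_APPRAISED */
#define DONE_MASK     (DONE_MEASURE | DONE_APPRAISE)

int main(void)
{
	unsigned int iint_flags = DONE_MEASURE;          /* file already measured */
	unsigned int action = DO_MEASURE | DO_APPRAISE;  /* policy asks for both  */

	/* same shift trick as process_measurement(): "done" bits knock out "do" bits */
	action &= ~((iint_flags & DONE_MASK) >> 1);

	printf("remaining action: %#x\n", action);       /* 0x4: only appraise left */
	return 0;
}

With the file already measured, only the appraise bit survives, which is how a cached result short-circuits repeat work without a separate per-hook comparison.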
@@ -428,14 +650,14 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry) */ ssize_t ima_parse_add_rule(char *rule) { - const char *op = "update_policy"; + static const char op[] = "update_policy"; char *p; - struct ima_measure_rule_entry *entry; + struct ima_rule_entry *entry; ssize_t result, len; int audit_info = 0; /* Prevent installed policy from changing */ - if (ima_measure != &measure_default_rules) { + if (ima_rules != &ima_default_rules) { integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL, NULL, op, "already exists", -EACCES, audit_info); @@ -468,9 +690,9 @@ ssize_t ima_parse_add_rule(char *rule) return result; } - mutex_lock(&ima_measure_mutex); - list_add_tail(&entry->list, &measure_policy_rules); - mutex_unlock(&ima_measure_mutex); + mutex_lock(&ima_rules_mutex); + list_add_tail(&entry->list, &ima_policy_rules); + mutex_unlock(&ima_rules_mutex); return len; } @@ -478,12 +700,16 @@ ssize_t ima_parse_add_rule(char *rule) /* ima_delete_rules called to cleanup invalid policy */ void ima_delete_rules(void) { - struct ima_measure_rule_entry *entry, *tmp; + struct ima_rule_entry *entry, *tmp; + int i; + + mutex_lock(&ima_rules_mutex); + list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) { + for (i = 0; i < MAX_LSM_RULES; i++) + kfree(entry->lsm[i].args_p); - mutex_lock(&ima_measure_mutex); - list_for_each_entry_safe(entry, tmp, &measure_policy_rules, list) { list_del(&entry->list); kfree(entry); } - mutex_unlock(&ima_measure_mutex); + mutex_unlock(&ima_rules_mutex); } diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c index 8e28f04a5e2..552705d5a78 100644 --- a/security/integrity/ima/ima_queue.c +++ b/security/integrity/ima/ima_queue.c @@ -18,11 +18,16 @@ * The measurement list is append-only. No entry is * ever removed or changed during the boot-cycle. */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + #include <linux/module.h> #include <linux/rculist.h> #include <linux/slab.h> #include "ima.h" +#define AUDIT_CAUSE_LEN_MAX 32 + LIST_HEAD(ima_measurements); /* list of all measurements */ /* key: inode (before secure-hashing a file) */ @@ -43,13 +48,12 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value) { struct ima_queue_entry *qe, *ret = NULL; unsigned int key; - struct hlist_node *pos; int rc; key = ima_hash_key(digest_value); rcu_read_lock(); - hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) { - rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE); + hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) { + rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE); if (rc == 0) { ret = qe; break; @@ -71,7 +75,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry) qe = kmalloc(sizeof(*qe), GFP_KERNEL); if (qe == NULL) { - pr_err("IMA: OUT OF MEMORY ERROR creating queue entry.\n"); + pr_err("OUT OF MEMORY ERROR creating queue entry\n"); return -ENOMEM; } qe->entry = entry; @@ -94,7 +98,7 @@ static int ima_pcr_extend(const u8 *hash) result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash); if (result != 0) - pr_err("IMA: Error Communicating to TPM chip\n"); + pr_err("Error Communicating to TPM chip, result: %d\n", result); return result; } @@ -102,18 +106,21 @@ static int ima_pcr_extend(const u8 *hash) * and extend the pcr. 
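ima_lookup_digest_entry() above hashes the TPM-sized digest into a bucket of ima_htable and memcmp()s each candidate against the full digest; a compressed userspace analogue of that bucket-then-compare lookup (bucket count, key derivation and digest size are invented for illustration) could look like this.

    /* Hedged analogue of the digest-keyed duplicate check performed by
     * ima_lookup_digest_entry(); bucket count and key function are made up. */
    #include <stdio.h>
    #include <string.h>

    #define DIGEST_SIZE 20
    #define BUCKETS     128

    struct entry {
        unsigned char digest[DIGEST_SIZE];
        struct entry *next;
    };

    static struct entry *table[BUCKETS];

    static unsigned int hash_key(const unsigned char *digest)
    {
        return digest[0] % BUCKETS;           /* stand-in for ima_hash_key() */
    }

    static struct entry *lookup(const unsigned char *digest)
    {
        struct entry *e;

        for (e = table[hash_key(digest)]; e; e = e->next)
            if (memcmp(e->digest, digest, DIGEST_SIZE) == 0)
                return e;                     /* measurement already recorded */
        return NULL;
    }

    int main(void)
    {
        static struct entry one = { .digest = { 0xab, 0xcd } };

        table[hash_key(one.digest)] = &one;
        printf("found: %s\n", lookup(one.digest) ? "yes" : "no");
        return 0;
    }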
*/ int ima_add_template_entry(struct ima_template_entry *entry, int violation, - const char *op, struct inode *inode) + const char *op, struct inode *inode, + const unsigned char *filename) { - u8 digest[IMA_DIGEST_SIZE]; + u8 digest[TPM_DIGEST_SIZE]; const char *audit_cause = "hash_added"; + char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX]; int audit_info = 1; - int result = 0; + int result = 0, tpmresult = 0; mutex_lock(&ima_extend_list_mutex); if (!violation) { - memcpy(digest, entry->digest, sizeof digest); + memcpy(digest, entry->digest, sizeof(digest)); if (ima_lookup_digest_entry(digest)) { audit_cause = "hash_exists"; + result = -EEXIST; goto out; } } @@ -126,17 +133,18 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation, } if (violation) /* invalidate pcr */ - memset(digest, 0xff, sizeof digest); + memset(digest, 0xff, sizeof(digest)); - result = ima_pcr_extend(digest); - if (result != 0) { - audit_cause = "TPM error"; + tpmresult = ima_pcr_extend(digest); + if (tpmresult != 0) { + snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)", + tpmresult); + audit_cause = tpm_audit_cause; audit_info = 0; } out: mutex_unlock(&ima_extend_list_mutex); - integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, - entry->template.file_name, + integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename, op, audit_cause, result, audit_info); return result; } diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c new file mode 100644 index 00000000000..a076a967ec4 --- /dev/null +++ b/security/integrity/ima/ima_template.c @@ -0,0 +1,190 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template.c + * Helpers to manage template descriptors. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <crypto/hash_info.h> + +#include "ima.h" +#include "ima_template_lib.h" + +static struct ima_template_desc defined_templates[] = { + {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT}, + {.name = "ima-ng", .fmt = "d-ng|n-ng"}, + {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"}, +}; + +static struct ima_template_field supported_fields[] = { + {.field_id = "d", .field_init = ima_eventdigest_init, + .field_show = ima_show_template_digest}, + {.field_id = "n", .field_init = ima_eventname_init, + .field_show = ima_show_template_string}, + {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init, + .field_show = ima_show_template_digest_ng}, + {.field_id = "n-ng", .field_init = ima_eventname_ng_init, + .field_show = ima_show_template_string}, + {.field_id = "sig", .field_init = ima_eventsig_init, + .field_show = ima_show_template_sig}, +}; + +static struct ima_template_desc *ima_template; +static struct ima_template_desc *lookup_template_desc(const char *name); + +static int __init ima_template_setup(char *str) +{ + struct ima_template_desc *template_desc; + int template_len = strlen(str); + + /* + * Verify that a template with the supplied name exists. + * If not, use CONFIG_IMA_DEFAULT_TEMPLATE. + */ + template_desc = lookup_template_desc(str); + if (!template_desc) + return 1; + + /* + * Verify whether the current hash algorithm is supported + * by the 'ima' template. 
+ */ + if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 && + ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) { + pr_err("template does not support hash alg\n"); + return 1; + } + + ima_template = template_desc; + return 1; +} +__setup("ima_template=", ima_template_setup); + +static struct ima_template_desc *lookup_template_desc(const char *name) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(defined_templates); i++) { + if (strcmp(defined_templates[i].name, name) == 0) + return defined_templates + i; + } + + return NULL; +} + +static struct ima_template_field *lookup_template_field(const char *field_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(supported_fields); i++) + if (strncmp(supported_fields[i].field_id, field_id, + IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0) + return &supported_fields[i]; + return NULL; +} + +static int template_fmt_size(const char *template_fmt) +{ + char c; + int template_fmt_len = strlen(template_fmt); + int i = 0, j = 0; + + while (i < template_fmt_len) { + c = template_fmt[i]; + if (c == '|') + j++; + i++; + } + + return j + 1; +} + +static int template_desc_init_fields(const char *template_fmt, + struct ima_template_field ***fields, + int *num_fields) +{ + char *c, *template_fmt_copy, *template_fmt_ptr; + int template_num_fields = template_fmt_size(template_fmt); + int i, result = 0; + + if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX) + return -EINVAL; + + /* copying is needed as strsep() modifies the original buffer */ + template_fmt_copy = kstrdup(template_fmt, GFP_KERNEL); + if (template_fmt_copy == NULL) + return -ENOMEM; + + *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL); + if (*fields == NULL) { + result = -ENOMEM; + goto out; + } + + template_fmt_ptr = template_fmt_copy; + for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL && + i < template_num_fields; i++) { + struct ima_template_field *f = lookup_template_field(c); + + if (!f) { + result = -ENOENT; + goto out; + } + (*fields)[i] = f; + } + *num_fields = i; +out: + if (result < 0) { + kfree(*fields); + *fields = NULL; + } + kfree(template_fmt_copy); + return result; +} + +static int init_defined_templates(void) +{ + int i = 0; + int result = 0; + + /* Init defined templates. */ + for (i = 0; i < ARRAY_SIZE(defined_templates); i++) { + struct ima_template_desc *template = &defined_templates[i]; + + result = template_desc_init_fields(template->fmt, + &(template->fields), + &(template->num_fields)); + if (result < 0) + return result; + } + return result; +} + +struct ima_template_desc *ima_template_desc_current(void) +{ + if (!ima_template) + ima_template = + lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE); + return ima_template; +} + +int ima_init_template(void) +{ + int result; + + result = init_defined_templates(); + if (result < 0) + return result; + + return 0; +} diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c new file mode 100644 index 00000000000..1506f024857 --- /dev/null +++ b/security/integrity/ima/ima_template_lib.c @@ -0,0 +1,342 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template_lib.c + * Library of supported template fields. 
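template_desc_init_fields() duplicates the format string (strsep() modifies its argument) and splits it on '|', resolving each field id against supported_fields; a small userspace sketch of that split-and-lookup, with a stand-in field table, might be:

    /* Hedged sketch of splitting an IMA template format such as
     * "d-ng|n-ng|sig" and resolving each field id, mirroring
     * template_desc_init_fields(); the field table is illustrative. */
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static const char *known_fields[] = { "d", "n", "d-ng", "n-ng", "sig" };

    static int field_known(const char *id)
    {
        size_t i;

        for (i = 0; i < sizeof(known_fields) / sizeof(known_fields[0]); i++)
            if (strcmp(known_fields[i], id) == 0)
                return 1;
        return 0;
    }

    int main(void)
    {
        char *fmt = strdup("d-ng|n-ng|sig");   /* strsep() modifies the buffer */
        char *cursor = fmt, *field;

        while ((field = strsep(&cursor, "|")) != NULL)
            printf("%-5s %s\n", field,
                   field_known(field) ? "supported" : "unknown");
        free(fmt);
        return 0;
    }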
+ */ +#include <crypto/hash_info.h> + +#include "ima_template_lib.h" + +static bool ima_template_hash_algo_allowed(u8 algo) +{ + if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5) + return true; + + return false; +} + +enum data_formats { + DATA_FMT_DIGEST = 0, + DATA_FMT_DIGEST_WITH_ALGO, + DATA_FMT_STRING, + DATA_FMT_HEX +}; + +static int ima_write_template_field_data(const void *data, const u32 datalen, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + u8 *buf, *buf_ptr; + u32 buflen = datalen; + + if (datafmt == DATA_FMT_STRING) + buflen = datalen + 1; + + buf = kzalloc(buflen, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + memcpy(buf, data, datalen); + + /* + * Replace all space characters with underscore for event names and + * strings. This avoid that, during the parsing of a measurements list, + * filenames with spaces or that end with the suffix ' (deleted)' are + * split into multiple template fields (the space is the delimitator + * character for measurements lists in ASCII format). + */ + if (datafmt == DATA_FMT_STRING) { + for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++) + if (*buf_ptr == ' ') + *buf_ptr = '_'; + } + + field_data->data = buf; + field_data->len = buflen; + return 0; +} + +static void ima_show_template_data_ascii(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + u8 *buf_ptr = field_data->data, buflen = field_data->len; + + switch (datafmt) { + case DATA_FMT_DIGEST_WITH_ALGO: + buf_ptr = strnchr(field_data->data, buflen, ':'); + if (buf_ptr != field_data->data) + seq_printf(m, "%s", field_data->data); + + /* skip ':' and '\0' */ + buf_ptr += 2; + buflen -= buf_ptr - field_data->data; + case DATA_FMT_DIGEST: + case DATA_FMT_HEX: + if (!buflen) + break; + ima_print_digest(m, buf_ptr, buflen); + break; + case DATA_FMT_STRING: + seq_printf(m, "%s", buf_ptr); + break; + default: + break; + } +} + +static void ima_show_template_data_binary(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ? 
+ strlen(field_data->data) : field_data->len; + + if (show != IMA_SHOW_BINARY_NO_FIELD_LEN) + ima_putc(m, &len, sizeof(len)); + + if (!len) + return; + + ima_putc(m, field_data->data, len); +} + +static void ima_show_template_field_data(struct seq_file *m, + enum ima_show_type show, + enum data_formats datafmt, + struct ima_field_data *field_data) +{ + switch (show) { + case IMA_SHOW_ASCII: + ima_show_template_data_ascii(m, show, datafmt, field_data); + break; + case IMA_SHOW_BINARY: + case IMA_SHOW_BINARY_NO_FIELD_LEN: + case IMA_SHOW_BINARY_OLD_STRING_FMT: + ima_show_template_data_binary(m, show, datafmt, field_data); + break; + default: + break; + } +} + +void ima_show_template_digest(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data); +} + +void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO, + field_data); +} + +void ima_show_template_string(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data); +} + +void ima_show_template_sig(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data) +{ + ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data); +} + +static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo, + struct ima_field_data *field_data) +{ + /* + * digest formats: + * - DATA_FMT_DIGEST: digest + * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest, + * where <hash algo> is provided if the hash algoritm is not + * SHA1 or MD5 + */ + u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 }; + enum data_formats fmt = DATA_FMT_DIGEST; + u32 offset = 0; + + if (hash_algo < HASH_ALGO__LAST) { + fmt = DATA_FMT_DIGEST_WITH_ALGO; + offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1, "%s", + hash_algo_name[hash_algo]); + buffer[offset] = ':'; + offset += 2; + } + + if (digest) + memcpy(buffer + offset, digest, digestsize); + else + /* + * If digest is NULL, the event being recorded is a violation. + * Make room for the digest by increasing the offset of + * IMA_DIGEST_SIZE. + */ + offset += IMA_DIGEST_SIZE; + + return ima_write_template_field_data(buffer, offset + digestsize, + fmt, field_data); +} + +/* + * This function writes the digest of an event (with size limit). + */ +int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data) +{ + struct { + struct ima_digest_data hdr; + char digest[IMA_MAX_DIGEST_SIZE]; + } hash; + u8 *cur_digest = NULL; + u32 cur_digestsize = 0; + struct inode *inode; + int result; + + memset(&hash, 0, sizeof(hash)); + + if (!iint) /* recording a violation. */ + goto out; + + if (ima_template_hash_algo_allowed(iint->ima_hash->algo)) { + cur_digest = iint->ima_hash->digest; + cur_digestsize = iint->ima_hash->length; + goto out; + } + + if (!file) /* missing info to re-calculate the digest */ + return -EINVAL; + + inode = file_inode(file); + hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ? 
+ ima_hash_algo : HASH_ALGO_SHA1; + result = ima_calc_file_hash(file, &hash.hdr); + if (result) { + integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, + filename, "collect_data", + "failed", result, 0); + return result; + } + cur_digest = hash.hdr.digest; + cur_digestsize = hash.hdr.length; +out: + return ima_eventdigest_init_common(cur_digest, cur_digestsize, + HASH_ALGO__LAST, field_data); +} + +/* + * This function writes the digest of an event (without size limit). + */ +int ima_eventdigest_ng_init(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_field_data *field_data) +{ + u8 *cur_digest = NULL, hash_algo = HASH_ALGO_SHA1; + u32 cur_digestsize = 0; + + /* If iint is NULL, we are recording a violation. */ + if (!iint) + goto out; + + cur_digest = iint->ima_hash->digest; + cur_digestsize = iint->ima_hash->length; + + hash_algo = iint->ima_hash->algo; +out: + return ima_eventdigest_init_common(cur_digest, cur_digestsize, + hash_algo, field_data); +} + +static int ima_eventname_init_common(struct integrity_iint_cache *iint, + struct file *file, + const unsigned char *filename, + struct ima_field_data *field_data, + bool size_limit) +{ + const char *cur_filename = NULL; + u32 cur_filename_len = 0; + + BUG_ON(filename == NULL && file == NULL); + + if (filename) { + cur_filename = filename; + cur_filename_len = strlen(filename); + + if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX) + goto out; + } + + if (file) { + cur_filename = file->f_dentry->d_name.name; + cur_filename_len = strlen(cur_filename); + } else + /* + * Truncate filename if the latter is too long and + * the file descriptor is not available. + */ + cur_filename_len = IMA_EVENT_NAME_LEN_MAX; +out: + return ima_write_template_field_data(cur_filename, cur_filename_len, + DATA_FMT_STRING, field_data); +} + +/* + * This function writes the name of an event (with size limit). + */ +int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data) +{ + return ima_eventname_init_common(iint, file, filename, + field_data, true); +} + +/* + * This function writes the name of an event (without size limit). 
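File names recorded through these helpers end up as DATA_FMT_STRING fields, where ima_write_template_field_data() turns spaces into underscores so a name such as "foo (deleted)" cannot split into extra columns of the ASCII measurement list; a trivial standalone sketch of that sanitization:

    /* Sketch of the space-to-underscore sanitization applied to string
     * fields before they enter the ASCII measurement list, mirroring
     * ima_write_template_field_data() for DATA_FMT_STRING. */
    #include <stdio.h>

    static void sanitize(char *s)
    {
        for (; *s; s++)
            if (*s == ' ')
                *s = '_';
    }

    int main(void)
    {
        char name[] = "/tmp/a file (deleted)";

        sanitize(name);
        printf("%s\n", name);   /* prints "/tmp/a_file_(deleted)" */
        return 0;
    }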
+ */ +int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data) +{ + return ima_eventname_init_common(iint, file, filename, + field_data, false); +} + +/* + * ima_eventsig_init - include the file signature as part of the template data + */ +int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data) +{ + enum data_formats fmt = DATA_FMT_HEX; + int rc = 0; + + if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG)) + goto out; + + rc = ima_write_template_field_data(xattr_value, xattr_len, fmt, + field_data); +out: + return rc; +} diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h new file mode 100644 index 00000000000..63f6b52cb1c --- /dev/null +++ b/security/integrity/ima/ima_template_lib.h @@ -0,0 +1,49 @@ +/* + * Copyright (C) 2013 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Author: Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + * File: ima_template_lib.h + * Header for the library of supported template fields. + */ +#ifndef __LINUX_IMA_TEMPLATE_LIB_H +#define __LINUX_IMA_TEMPLATE_LIB_H + +#include <linux/seq_file.h> +#include "ima.h" + +void ima_show_template_digest(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_string(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +void ima_show_template_sig(struct seq_file *m, enum ima_show_type show, + struct ima_field_data *field_data); +int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data); +int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data); +int ima_eventdigest_ng_init(struct integrity_iint_cache *iint, + struct file *file, const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, + int xattr_len, struct ima_field_data *field_data); +int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data); +int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file, + const unsigned char *filename, + struct evm_ima_xattr_data *xattr_value, int xattr_len, + struct ima_field_data *field_data); +#endif /* __LINUX_IMA_TEMPLATE_LIB_H */ diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h new file mode 100644 index 00000000000..33c0a70f6b1 --- /dev/null +++ b/security/integrity/integrity.h @@ -0,0 +1,164 @@ +/* + * Copyright (C) 2009-2010 IBM Corporation + * + * Authors: + * Mimi Zohar <zohar@us.ibm.com> + * + * This program is free software; you can redistribute it 
and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. + * + */ + +#include <linux/types.h> +#include <linux/integrity.h> +#include <crypto/sha.h> +#include <linux/key.h> + +/* iint action cache flags */ +#define IMA_MEASURE 0x00000001 +#define IMA_MEASURED 0x00000002 +#define IMA_APPRAISE 0x00000004 +#define IMA_APPRAISED 0x00000008 +/*#define IMA_COLLECT 0x00000010 do not use this flag */ +#define IMA_COLLECTED 0x00000020 +#define IMA_AUDIT 0x00000040 +#define IMA_AUDITED 0x00000080 + +/* iint cache flags */ +#define IMA_ACTION_FLAGS 0xff000000 +#define IMA_DIGSIG 0x01000000 +#define IMA_DIGSIG_REQUIRED 0x02000000 +#define IMA_PERMIT_DIRECTIO 0x04000000 + +#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \ + IMA_APPRAISE_SUBMASK) +#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \ + IMA_COLLECTED | IMA_APPRAISED_SUBMASK) + +/* iint subaction appraise cache flags */ +#define IMA_FILE_APPRAISE 0x00000100 +#define IMA_FILE_APPRAISED 0x00000200 +#define IMA_MMAP_APPRAISE 0x00000400 +#define IMA_MMAP_APPRAISED 0x00000800 +#define IMA_BPRM_APPRAISE 0x00001000 +#define IMA_BPRM_APPRAISED 0x00002000 +#define IMA_MODULE_APPRAISE 0x00004000 +#define IMA_MODULE_APPRAISED 0x00008000 +#define IMA_APPRAISE_SUBMASK (IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \ + IMA_BPRM_APPRAISE | IMA_MODULE_APPRAISE) +#define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \ + IMA_BPRM_APPRAISED | IMA_MODULE_APPRAISED) + +enum evm_ima_xattr_type { + IMA_XATTR_DIGEST = 0x01, + EVM_XATTR_HMAC, + EVM_IMA_XATTR_DIGSIG, + IMA_XATTR_DIGEST_NG, +}; + +struct evm_ima_xattr_data { + u8 type; + u8 digest[SHA1_DIGEST_SIZE]; +} __packed; + +#define IMA_MAX_DIGEST_SIZE 64 + +struct ima_digest_data { + u8 algo; + u8 length; + union { + struct { + u8 unused; + u8 type; + } sha1; + struct { + u8 type; + u8 algo; + } ng; + u8 data[2]; + } xattr; + u8 digest[0]; +} __packed; + +/* + * signature format v2 - for using with asymmetric keys + */ +struct signature_v2_hdr { + uint8_t type; /* xattr type */ + uint8_t version; /* signature format version */ + uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */ + uint32_t keyid; /* IMA key identifier - not X509/PGP specific */ + uint16_t sig_size; /* signature size */ + uint8_t sig[0]; /* signature payload */ +} __packed; + +/* integrity data associated with an inode */ +struct integrity_iint_cache { + struct rb_node rb_node; /* rooted in integrity_iint_tree */ + struct inode *inode; /* back pointer to inode in question */ + u64 version; /* track inode changes */ + unsigned long flags; + enum integrity_status ima_file_status:4; + enum integrity_status ima_mmap_status:4; + enum integrity_status ima_bprm_status:4; + enum integrity_status ima_module_status:4; + enum integrity_status evm_status:4; + struct ima_digest_data *ima_hash; +}; + +/* rbtree tree calls to lookup, insert, delete + * integrity data associated with an inode. 
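The security.ima xattr payload declared above is deliberately tiny: one type byte followed by a SHA1-sized digest. A userspace mirror of that packed layout (values are examples, and only the legacy IMA_XATTR_DIGEST case is shown) is:

    /* Hedged mirror of the evm_ima_xattr_data layout: a one-byte type
     * followed by a SHA1 digest, 21 bytes when packed. */
    #include <stdio.h>
    #include <stdint.h>

    #define SHA1_DIGEST_SIZE 20

    enum { IMA_XATTR_DIGEST = 0x01, EVM_XATTR_HMAC, EVM_IMA_XATTR_DIGSIG,
           IMA_XATTR_DIGEST_NG };

    struct evm_ima_xattr_data {
        uint8_t type;
        uint8_t digest[SHA1_DIGEST_SIZE];
    } __attribute__((packed));

    int main(void)
    {
        struct evm_ima_xattr_data x = { .type = IMA_XATTR_DIGEST };

        printf("xattr blob is %zu bytes (type 0x%02x + %d-byte digest)\n",
               sizeof(x), x.type, SHA1_DIGEST_SIZE);
        return 0;
    }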
+ */ +struct integrity_iint_cache *integrity_iint_insert(struct inode *inode); +struct integrity_iint_cache *integrity_iint_find(struct inode *inode); + +#define INTEGRITY_KEYRING_EVM 0 +#define INTEGRITY_KEYRING_MODULE 1 +#define INTEGRITY_KEYRING_IMA 2 +#define INTEGRITY_KEYRING_MAX 3 + +#ifdef CONFIG_INTEGRITY_SIGNATURE + +int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen, + const char *digest, int digestlen); + +#else + +static inline int integrity_digsig_verify(const unsigned int id, + const char *sig, int siglen, + const char *digest, int digestlen) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_INTEGRITY_SIGNATURE */ + +#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS +int asymmetric_verify(struct key *keyring, const char *sig, + int siglen, const char *data, int datalen); +#else +static inline int asymmetric_verify(struct key *keyring, const char *sig, + int siglen, const char *data, int datalen) +{ + return -EOPNOTSUPP; +} +#endif + +#ifdef CONFIG_INTEGRITY_AUDIT +/* declarations */ +void integrity_audit_msg(int audit_msgno, struct inode *inode, + const unsigned char *fname, const char *op, + const char *cause, int result, int info); +#else +static inline void integrity_audit_msg(int audit_msgno, struct inode *inode, + const unsigned char *fname, + const char *op, const char *cause, + int result, int info) +{ +} +#endif + +/* set during initialization */ +extern int iint_initialized; diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/integrity_audit.c index c5c5a72c30b..90987d15b6f 100644 --- a/security/integrity/ima/ima_audit.c +++ b/security/integrity/integrity_audit.c @@ -7,43 +7,42 @@ * the Free Software Foundation, version 2 of the License. * * File: integrity_audit.c - * Audit calls for the integrity subsystem + * Audit calls for the integrity subsystem */ #include <linux/fs.h> #include <linux/gfp.h> #include <linux/audit.h> -#include "ima.h" +#include "integrity.h" -static int ima_audit; - -#ifdef CONFIG_IMA_AUDIT +static int integrity_audit_info; /* ima_audit_setup - enable informational auditing messages */ -static int __init ima_audit_setup(char *str) +static int __init integrity_audit_setup(char *str) { unsigned long audit; - if (!strict_strtoul(str, 0, &audit)) - ima_audit = audit ? 1 : 0; + if (!kstrtoul(str, 0, &audit)) + integrity_audit_info = audit ? 
1 : 0; return 1; } -__setup("ima_audit=", ima_audit_setup); -#endif +__setup("integrity_audit=", integrity_audit_setup); void integrity_audit_msg(int audit_msgno, struct inode *inode, const unsigned char *fname, const char *op, const char *cause, int result, int audit_info) { struct audit_buffer *ab; + char name[TASK_COMM_LEN]; - if (!ima_audit && audit_info == 1) /* Skip informational messages */ + if (!integrity_audit_info && audit_info == 1) /* Skip info messages */ return; ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno); audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u", - current->pid, current_cred()->uid, - audit_get_loginuid(current), + task_pid_nr(current), + from_kuid(&init_user_ns, current_cred()->uid), + from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); audit_log_task_context(ab); audit_log_format(ab, " op="); @@ -51,14 +50,16 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode, audit_log_format(ab, " cause="); audit_log_string(ab, cause); audit_log_format(ab, " comm="); - audit_log_untrustedstring(ab, current->comm); + audit_log_untrustedstring(ab, get_task_comm(name, current)); if (fname) { audit_log_format(ab, " name="); audit_log_untrustedstring(ab, fname); } - if (inode) - audit_log_format(ab, " dev=%s ino=%lu", - inode->i_sb->s_id, inode->i_ino); - audit_log_format(ab, " res=%d", !result ? 0 : 1); + if (inode) { + audit_log_format(ab, " dev="); + audit_log_untrustedstring(ab, inode->i_sb->s_id); + audit_log_format(ab, " ino=%lu", inode->i_ino); + } + audit_log_format(ab, " res=%d", !result); audit_log_end(ab); } diff --git a/security/keys/Kconfig b/security/keys/Kconfig new file mode 100644 index 00000000000..a4f3f8c48d6 --- /dev/null +++ b/security/keys/Kconfig @@ -0,0 +1,100 @@ +# +# Key management configuration +# + +config KEYS + bool "Enable access key retention support" + select ASSOCIATIVE_ARRAY + help + This option provides support for retaining authentication tokens and + access keys in the kernel. + + It also includes provision of methods by which such keys might be + associated with a process so that network filesystems, encryption + support and the like can find them. + + Furthermore, a special type of key is available that acts as keyring: + a searchable sequence of keys. Each process is equipped with access + to five standard keyrings: UID-specific, GID-specific, session, + process and thread. + + If you are unsure as to whether this is required, answer N. + +config PERSISTENT_KEYRINGS + bool "Enable register of persistent per-UID keyrings" + depends on KEYS + help + This option provides a register of persistent per-UID keyrings, + primarily aimed at Kerberos key storage. The keyrings are persistent + in the sense that they stay around after all processes of that UID + have exited, not that they survive the machine being rebooted. + + A particular keyring may be accessed by either the user whose keyring + it is or by a process with administrative privileges. The active + LSMs gets to rule on which admin-level processes get to access the + cache. + + Keyrings are created and added into the register upon demand and get + removed if they expire (a default timeout is set upon creation). + +config BIG_KEYS + bool "Large payload keys" + depends on KEYS + depends on TMPFS + help + This option provides support for holding large keys within the kernel + (for example Kerberos ticket caches). The data may be stored out to + swapspace by tmpfs. 
+ + If you are unsure as to whether this is required, answer N. + +config TRUSTED_KEYS + tristate "TRUSTED KEYS" + depends on KEYS && TCG_TPM + select CRYPTO + select CRYPTO_HMAC + select CRYPTO_SHA1 + help + This option provides support for creating, sealing, and unsealing + keys in the kernel. Trusted keys are random number symmetric keys, + generated and RSA-sealed by the TPM. The TPM only unseals the keys, + if the boot PCRs and other criteria match. Userspace will only ever + see encrypted blobs. + + If you are unsure as to whether this is required, answer N. + +config ENCRYPTED_KEYS + tristate "ENCRYPTED KEYS" + depends on KEYS + select CRYPTO + select CRYPTO_HMAC + select CRYPTO_AES + select CRYPTO_CBC + select CRYPTO_SHA256 + select CRYPTO_RNG + help + This option provides support for create/encrypting/decrypting keys + in the kernel. Encrypted keys are kernel generated random numbers, + which are encrypted/decrypted with a 'master' symmetric key. The + 'master' key can be either a trusted-key or user-key type. + Userspace only ever sees/stores encrypted blobs. + + If you are unsure as to whether this is required, answer N. + +config KEYS_DEBUG_PROC_KEYS + bool "Enable the /proc/keys file by which keys may be viewed" + depends on KEYS + help + This option turns on support for the /proc/keys file - through which + can be listed all the keys on the system that are viewable by the + reading process. + + The only keys included in the list are those that grant View + permission to the reading process whether or not it possesses them. + Note that LSM security checks are still performed, and may further + filter out keys that the current process is not authorised to view. + + Only key attributes are listed here; key payloads are not included in + the resulting table. + + If you are unsure as to whether this is required, answer N. diff --git a/security/keys/Makefile b/security/keys/Makefile index 74d5447d7df..dfb3a7beded 100644 --- a/security/keys/Makefile +++ b/security/keys/Makefile @@ -2,6 +2,9 @@ # Makefile for key management # +# +# Core +# obj-y := \ gc.o \ key.o \ @@ -12,7 +15,14 @@ obj-y := \ request_key.o \ request_key_auth.o \ user_defined.o - obj-$(CONFIG_KEYS_COMPAT) += compat.o obj-$(CONFIG_PROC_FS) += proc.o obj-$(CONFIG_SYSCTL) += sysctl.o +obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o + +# +# Key types +# +obj-$(CONFIG_BIG_KEYS) += big_key.o +obj-$(CONFIG_TRUSTED_KEYS) += trusted.o +obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/ diff --git a/security/keys/big_key.c b/security/keys/big_key.c new file mode 100644 index 00000000000..8137b27d641 --- /dev/null +++ b/security/keys/big_key.c @@ -0,0 +1,207 @@ +/* Large capacity key type + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. + * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/module.h> +#include <linux/init.h> +#include <linux/seq_file.h> +#include <linux/file.h> +#include <linux/shmem_fs.h> +#include <linux/err.h> +#include <keys/user-type.h> +#include <keys/big_key-type.h> + +MODULE_LICENSE("GPL"); + +/* + * If the data is under this limit, there's no point creating a shm file to + * hold it as the permanently resident metadata for the shmem fs will be at + * least as large as the data. 
+ */ +#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry)) + +/* + * big_key defined keys take an arbitrary string as the description and an + * arbitrary blob of data as the payload + */ +struct key_type key_type_big_key = { + .name = "big_key", + .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, + .instantiate = big_key_instantiate, + .match = user_match, + .revoke = big_key_revoke, + .destroy = big_key_destroy, + .describe = big_key_describe, + .read = big_key_read, +}; + +/* + * Instantiate a big key + */ +int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep) +{ + struct path *path = (struct path *)&key->payload.data2; + struct file *file; + ssize_t written; + size_t datalen = prep->datalen; + int ret; + + ret = -EINVAL; + if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data) + goto error; + + /* Set an arbitrary quota */ + ret = key_payload_reserve(key, 16); + if (ret < 0) + goto error; + + key->type_data.x[1] = datalen; + + if (datalen > BIG_KEY_FILE_THRESHOLD) { + /* Create a shmem file to store the data in. This will permit the data + * to be swapped out if needed. + * + * TODO: Encrypt the stored data with a temporary key. + */ + file = shmem_kernel_file_setup("", datalen, 0); + if (IS_ERR(file)) { + ret = PTR_ERR(file); + goto err_quota; + } + + written = kernel_write(file, prep->data, prep->datalen, 0); + if (written != datalen) { + ret = written; + if (written >= 0) + ret = -ENOMEM; + goto err_fput; + } + + /* Pin the mount and dentry to the key so that we can open it again + * later + */ + *path = file->f_path; + path_get(path); + fput(file); + } else { + /* Just store the data in a buffer */ + void *data = kmalloc(datalen, GFP_KERNEL); + if (!data) { + ret = -ENOMEM; + goto err_quota; + } + + key->payload.data = memcpy(data, prep->data, prep->datalen); + } + return 0; + +err_fput: + fput(file); +err_quota: + key_payload_reserve(key, 0); +error: + return ret; +} + +/* + * dispose of the links from a revoked keyring + * - called with the key sem write-locked + */ +void big_key_revoke(struct key *key) +{ + struct path *path = (struct path *)&key->payload.data2; + + /* clear the quota */ + key_payload_reserve(key, 0); + if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) + vfs_truncate(path, 0); +} + +/* + * dispose of the data dangling from the corpse of a big_key key + */ +void big_key_destroy(struct key *key) +{ + if (key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) { + struct path *path = (struct path *)&key->payload.data2; + path_put(path); + path->mnt = NULL; + path->dentry = NULL; + } else { + kfree(key->payload.data); + key->payload.data = NULL; + } +} + +/* + * describe the big_key key + */ +void big_key_describe(const struct key *key, struct seq_file *m) +{ + unsigned long datalen = key->type_data.x[1]; + + seq_puts(m, key->description); + + if (key_is_instantiated(key)) + seq_printf(m, ": %lu [%s]", + datalen, + datalen > BIG_KEY_FILE_THRESHOLD ? 
"file" : "buff"); +} + +/* + * read the key data + * - the key's semaphore is read-locked + */ +long big_key_read(const struct key *key, char __user *buffer, size_t buflen) +{ + unsigned long datalen = key->type_data.x[1]; + long ret; + + if (!buffer || buflen < datalen) + return datalen; + + if (datalen > BIG_KEY_FILE_THRESHOLD) { + struct path *path = (struct path *)&key->payload.data2; + struct file *file; + loff_t pos; + + file = dentry_open(path, O_RDONLY, current_cred()); + if (IS_ERR(file)) + return PTR_ERR(file); + + pos = 0; + ret = vfs_read(file, buffer, datalen, &pos); + fput(file); + if (ret >= 0 && ret != datalen) + ret = -EIO; + } else { + ret = datalen; + if (copy_to_user(buffer, key->payload.data, datalen) != 0) + ret = -EFAULT; + } + + return ret; +} + +/* + * Module stuff + */ +static int __init big_key_init(void) +{ + return register_key_type(&key_type_big_key); +} + +static void __exit big_key_cleanup(void) +{ + unregister_key_type(&key_type_big_key); +} + +module_init(big_key_init); +module_exit(big_key_cleanup); diff --git a/security/keys/compat.c b/security/keys/compat.c index 792c0a611a6..347896548ad 100644 --- a/security/keys/compat.c +++ b/security/keys/compat.c @@ -1,4 +1,4 @@ -/* compat.c: 32-bit compatibility syscall for 64-bit systems +/* 32-bit compatibility syscall for 64-bit systems * * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -12,18 +12,61 @@ #include <linux/syscalls.h> #include <linux/keyctl.h> #include <linux/compat.h> +#include <linux/slab.h> #include "internal.h" -/*****************************************************************************/ /* - * the key control system call, 32-bit compatibility version for 64-bit archs - * - this should only be called if the 64-bit arch uses weird pointers in - * 32-bit mode or doesn't guarantee that the top 32-bits of the argument - * registers on taking a 32-bit syscall are zero - * - if you can, you should call sys_keyctl directly + * Instantiate a key with the specified compatibility multipart payload and + * link the key into the destination keyring if one is given. + * + * The caller must have the appropriate instantiation permit set for this to + * work (see keyctl_assume_authority). No other permissions are required. + * + * If successful, 0 will be returned. + */ +static long compat_keyctl_instantiate_key_iov( + key_serial_t id, + const struct compat_iovec __user *_payload_iov, + unsigned ioc, + key_serial_t ringid) +{ + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; + long ret; + + if (!_payload_iov || !ioc) + goto no_payload; + + ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc, + ARRAY_SIZE(iovstack), + iovstack, &iov); + if (ret < 0) + goto err; + if (ret == 0) + goto no_payload_free; + + ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); +err: + if (iov != iovstack) + kfree(iov); + return ret; + +no_payload_free: + if (iov != iovstack) + kfree(iov); +no_payload: + return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid); +} + +/* + * The key control system call, 32-bit compatibility version for 64-bit archs + * + * This should only be called if the 64-bit arch uses weird pointers in 32-bit + * mode or doesn't guarantee that the top 32-bits of the argument registers on + * taking a 32-bit syscall are zero. If you can, you should call sys_keyctl() + * directly. 
*/ -asmlinkage long compat_sys_keyctl(u32 option, - u32 arg2, u32 arg3, u32 arg4, u32 arg5) +COMPAT_SYSCALL_DEFINE5(keyctl, u32, option, + u32, arg2, u32, arg3, u32, arg4, u32, arg5) { switch (option) { case KEYCTL_GET_KEYRING_ID: @@ -85,8 +128,20 @@ asmlinkage long compat_sys_keyctl(u32 option, case KEYCTL_SESSION_TO_PARENT: return keyctl_session_to_parent(); + case KEYCTL_REJECT: + return keyctl_reject_key(arg2, arg3, arg4, arg5); + + case KEYCTL_INSTANTIATE_IOV: + return compat_keyctl_instantiate_key_iov( + arg2, compat_ptr(arg3), arg4, arg5); + + case KEYCTL_INVALIDATE: + return keyctl_invalidate_key(arg2); + + case KEYCTL_GET_PERSISTENT: + return keyctl_get_persistent(arg2, arg3); + default: return -EOPNOTSUPP; } - -} /* end compat_sys_keyctl() */ +} diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile new file mode 100644 index 00000000000..d6f8433250a --- /dev/null +++ b/security/keys/encrypted-keys/Makefile @@ -0,0 +1,10 @@ +# +# Makefile for encrypted keys +# + +obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys.o + +encrypted-keys-y := encrypted.o ecryptfs_format.o +masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o +masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o +encrypted-keys-y += $(masterkey-y) $(masterkey-m-m) diff --git a/security/keys/encrypted-keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c new file mode 100644 index 00000000000..6daa3b6ff9e --- /dev/null +++ b/security/keys/encrypted-keys/ecryptfs_format.c @@ -0,0 +1,81 @@ +/* + * ecryptfs_format.c: helper functions for the encrypted key type + * + * Copyright (C) 2006 International Business Machines Corp. + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Michael A. Halcrow <mahalcro@us.ibm.com> + * Tyler Hicks <tyhicks@ou.edu> + * Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#include <linux/module.h> +#include "ecryptfs_format.h" + +u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok) +{ + return auth_tok->token.password.session_key_encryption_key; +} +EXPORT_SYMBOL(ecryptfs_get_auth_tok_key); + +/* + * ecryptfs_get_versions() + * + * Source code taken from the software 'ecryptfs-utils' version 83. + * + */ +void ecryptfs_get_versions(int *major, int *minor, int *file_version) +{ + *major = ECRYPTFS_VERSION_MAJOR; + *minor = ECRYPTFS_VERSION_MINOR; + if (file_version) + *file_version = ECRYPTFS_SUPPORTED_FILE_VERSION; +} +EXPORT_SYMBOL(ecryptfs_get_versions); + +/* + * ecryptfs_fill_auth_tok - fill the ecryptfs_auth_tok structure + * + * Fill the ecryptfs_auth_tok structure with required ecryptfs data. + * The source code is inspired to the original function generate_payload() + * shipped with the software 'ecryptfs-utils' version 83. 
+ * + */ +int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok, + const char *key_desc) +{ + int major, minor; + + ecryptfs_get_versions(&major, &minor, NULL); + auth_tok->version = (((uint16_t)(major << 8) & 0xFF00) + | ((uint16_t)minor & 0x00FF)); + auth_tok->token_type = ECRYPTFS_PASSWORD; + strncpy((char *)auth_tok->token.password.signature, key_desc, + ECRYPTFS_PASSWORD_SIG_SIZE); + auth_tok->token.password.session_key_encryption_key_bytes = + ECRYPTFS_MAX_KEY_BYTES; + /* + * Removed auth_tok->token.password.salt and + * auth_tok->token.password.session_key_encryption_key + * initialization from the original code + */ + /* TODO: Make the hash parameterizable via policy */ + auth_tok->token.password.flags |= + ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET; + /* The kernel code will encrypt the session key. */ + auth_tok->session_key.encrypted_key[0] = 0; + auth_tok->session_key.encrypted_key_size = 0; + /* Default; subject to change by kernel eCryptfs */ + auth_tok->token.password.hash_algo = PGP_DIGEST_ALGO_SHA512; + auth_tok->token.password.flags &= ~(ECRYPTFS_PERSISTENT_PASSWORD); + return 0; +} +EXPORT_SYMBOL(ecryptfs_fill_auth_tok); + +MODULE_LICENSE("GPL"); diff --git a/security/keys/encrypted-keys/ecryptfs_format.h b/security/keys/encrypted-keys/ecryptfs_format.h new file mode 100644 index 00000000000..40294de238b --- /dev/null +++ b/security/keys/encrypted-keys/ecryptfs_format.h @@ -0,0 +1,30 @@ +/* + * ecryptfs_format.h: helper functions for the encrypted key type + * + * Copyright (C) 2006 International Business Machines Corp. + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Michael A. Halcrow <mahalcro@us.ibm.com> + * Tyler Hicks <tyhicks@ou.edu> + * Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + */ + +#ifndef __KEYS_ECRYPTFS_H +#define __KEYS_ECRYPTFS_H + +#include <linux/ecryptfs.h> + +#define PGP_DIGEST_ALGO_SHA512 10 + +u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok); +void ecryptfs_get_versions(int *major, int *minor, int *file_version); +int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok, + const char *key_desc); + +#endif /* __KEYS_ECRYPTFS_H */ diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c new file mode 100644 index 00000000000..5fe443d120a --- /dev/null +++ b/security/keys/encrypted-keys/encrypted.c @@ -0,0 +1,1040 @@ +/* + * Copyright (C) 2010 IBM Corporation + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Mimi Zohar <zohar@us.ibm.com> + * Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. 
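ecryptfs_fill_auth_tok() packs the eCryptfs major and minor versions into the 16-bit auth_tok->version field; a short sketch of that packing and the corresponding unpacking (the version numbers used here are placeholders):

    /* Hedged sketch of the major/minor packing done when filling
     * auth_tok->version; example values only. */
    #include <stdio.h>
    #include <stdint.h>

    static uint16_t pack_version(int major, int minor)
    {
        return ((uint16_t)(major << 8) & 0xFF00) | ((uint16_t)minor & 0x00FF);
    }

    int main(void)
    {
        uint16_t v = pack_version(0, 4);

        printf("packed 0x%04x -> major %u, minor %u\n",
               v, (unsigned)(v >> 8), (unsigned)(v & 0xFF));
        return 0;
    }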
+ * + * See Documentation/security/keys-trusted-encrypted.txt + */ + +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/parser.h> +#include <linux/string.h> +#include <linux/err.h> +#include <keys/user-type.h> +#include <keys/trusted-type.h> +#include <keys/encrypted-type.h> +#include <linux/key-type.h> +#include <linux/random.h> +#include <linux/rcupdate.h> +#include <linux/scatterlist.h> +#include <linux/crypto.h> +#include <linux/ctype.h> +#include <crypto/hash.h> +#include <crypto/sha.h> +#include <crypto/aes.h> + +#include "encrypted.h" +#include "ecryptfs_format.h" + +static const char KEY_TRUSTED_PREFIX[] = "trusted:"; +static const char KEY_USER_PREFIX[] = "user:"; +static const char hash_alg[] = "sha256"; +static const char hmac_alg[] = "hmac(sha256)"; +static const char blkcipher_alg[] = "cbc(aes)"; +static const char key_format_default[] = "default"; +static const char key_format_ecryptfs[] = "ecryptfs"; +static unsigned int ivsize; +static int blksize; + +#define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1) +#define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1) +#define KEY_ECRYPTFS_DESC_LEN 16 +#define HASH_SIZE SHA256_DIGEST_SIZE +#define MAX_DATA_SIZE 4096 +#define MIN_DATA_SIZE 20 + +struct sdesc { + struct shash_desc shash; + char ctx[]; +}; + +static struct crypto_shash *hashalg; +static struct crypto_shash *hmacalg; + +enum { + Opt_err = -1, Opt_new, Opt_load, Opt_update +}; + +enum { + Opt_error = -1, Opt_default, Opt_ecryptfs +}; + +static const match_table_t key_format_tokens = { + {Opt_default, "default"}, + {Opt_ecryptfs, "ecryptfs"}, + {Opt_error, NULL} +}; + +static const match_table_t key_tokens = { + {Opt_new, "new"}, + {Opt_load, "load"}, + {Opt_update, "update"}, + {Opt_err, NULL} +}; + +static int aes_get_sizes(void) +{ + struct crypto_blkcipher *tfm; + + tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(tfm)) { + pr_err("encrypted_key: failed to alloc_cipher (%ld)\n", + PTR_ERR(tfm)); + return PTR_ERR(tfm); + } + ivsize = crypto_blkcipher_ivsize(tfm); + blksize = crypto_blkcipher_blocksize(tfm); + crypto_free_blkcipher(tfm); + return 0; +} + +/* + * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key + * + * The description of a encrypted key with format 'ecryptfs' must contain + * exactly 16 hexadecimal characters. + * + */ +static int valid_ecryptfs_desc(const char *ecryptfs_desc) +{ + int i; + + if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) { + pr_err("encrypted_key: key description must be %d hexadecimal " + "characters long\n", KEY_ECRYPTFS_DESC_LEN); + return -EINVAL; + } + + for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) { + if (!isxdigit(ecryptfs_desc[i])) { + pr_err("encrypted_key: key description must contain " + "only hexadecimal characters\n"); + return -EINVAL; + } + } + + return 0; +} + +/* + * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key + * + * key-type:= "trusted:" | "user:" + * desc:= master-key description + * + * Verify that 'key-type' is valid and that 'desc' exists. On key update, + * only the master key description is permitted to change, not the key-type. + * The key-type remains constant. + * + * On success returns 0, otherwise -EINVAL. 
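The helpers in this area enforce the shape of an encrypted key's description and of its master-key reference: valid_ecryptfs_desc() (shown above) requires exactly 16 hexadecimal characters, and valid_master_desc() (whose body follows) requires a "trusted:" or "user:" prefix. A simplified userspace rendition of those two checks, returning a boolean rather than 0/-EINVAL and skipping the update-time comparison against the original description, could be:

    /* Hedged, simplified rendition of valid_ecryptfs_desc() and
     * valid_master_desc(); test strings are examples. */
    #include <ctype.h>
    #include <stdio.h>
    #include <string.h>

    static int valid_ecryptfs_desc(const char *desc)
    {
        size_t i, len = strlen(desc);

        if (len != 16)
            return 0;
        for (i = 0; i < len; i++)
            if (!isxdigit((unsigned char)desc[i]))
                return 0;
        return 1;
    }

    static int valid_master_desc(const char *desc)
    {
        return strncmp(desc, "trusted:", 8) == 0 ||
               strncmp(desc, "user:", 5) == 0;
    }

    int main(void)
    {
        printf("%d %d\n", valid_ecryptfs_desc("0123456789abcdef"),
               valid_master_desc("user:kmk"));
        return 0;
    }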
+ */ +static int valid_master_desc(const char *new_desc, const char *orig_desc) +{ + if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) { + if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN) + goto out; + if (orig_desc) + if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN)) + goto out; + } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) { + if (strlen(new_desc) == KEY_USER_PREFIX_LEN) + goto out; + if (orig_desc) + if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN)) + goto out; + } else + goto out; + return 0; +out: + return -EINVAL; +} + +/* + * datablob_parse - parse the keyctl data + * + * datablob format: + * new [<format>] <master-key name> <decrypted data length> + * load [<format>] <master-key name> <decrypted data length> + * <encrypted iv + data> + * update <new-master-key name> + * + * Tokenizes a copy of the keyctl data, returning a pointer to each token, + * which is null terminated. + * + * On success returns 0, otherwise -EINVAL. + */ +static int datablob_parse(char *datablob, const char **format, + char **master_desc, char **decrypted_datalen, + char **hex_encoded_iv) +{ + substring_t args[MAX_OPT_ARGS]; + int ret = -EINVAL; + int key_cmd; + int key_format; + char *p, *keyword; + + keyword = strsep(&datablob, " \t"); + if (!keyword) { + pr_info("encrypted_key: insufficient parameters specified\n"); + return ret; + } + key_cmd = match_token(keyword, key_tokens, args); + + /* Get optional format: default | ecryptfs */ + p = strsep(&datablob, " \t"); + if (!p) { + pr_err("encrypted_key: insufficient parameters specified\n"); + return ret; + } + + key_format = match_token(p, key_format_tokens, args); + switch (key_format) { + case Opt_ecryptfs: + case Opt_default: + *format = p; + *master_desc = strsep(&datablob, " \t"); + break; + case Opt_error: + *master_desc = p; + break; + } + + if (!*master_desc) { + pr_info("encrypted_key: master key parameter is missing\n"); + goto out; + } + + if (valid_master_desc(*master_desc, NULL) < 0) { + pr_info("encrypted_key: master key parameter \'%s\' " + "is invalid\n", *master_desc); + goto out; + } + + if (decrypted_datalen) { + *decrypted_datalen = strsep(&datablob, " \t"); + if (!*decrypted_datalen) { + pr_info("encrypted_key: keylen parameter is missing\n"); + goto out; + } + } + + switch (key_cmd) { + case Opt_new: + if (!decrypted_datalen) { + pr_info("encrypted_key: keyword \'%s\' not allowed " + "when called from .update method\n", keyword); + break; + } + ret = 0; + break; + case Opt_load: + if (!decrypted_datalen) { + pr_info("encrypted_key: keyword \'%s\' not allowed " + "when called from .update method\n", keyword); + break; + } + *hex_encoded_iv = strsep(&datablob, " \t"); + if (!*hex_encoded_iv) { + pr_info("encrypted_key: hex blob is missing\n"); + break; + } + ret = 0; + break; + case Opt_update: + if (decrypted_datalen) { + pr_info("encrypted_key: keyword \'%s\' not allowed " + "when called from .instantiate method\n", + keyword); + break; + } + ret = 0; + break; + case Opt_err: + pr_info("encrypted_key: keyword \'%s\' not recognized\n", + keyword); + break; + } +out: + return ret; +} + +/* + * datablob_format - format as an ascii string, before copying to userspace + */ +static char *datablob_format(struct encrypted_key_payload *epayload, + size_t asciiblob_len) +{ + char *ascii_buf, *bufp; + u8 *iv = epayload->iv; + int len; + int i; + + ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL); + if (!ascii_buf) + goto out; + + ascii_buf[asciiblob_len] = '\0'; + + /* copy datablob master_desc 
and datalen strings */ + len = sprintf(ascii_buf, "%s %s %s ", epayload->format, + epayload->master_desc, epayload->datalen); + + /* convert the hex encoded iv, encrypted-data and HMAC to ascii */ + bufp = &ascii_buf[len]; + for (i = 0; i < (asciiblob_len - len) / 2; i++) + bufp = hex_byte_pack(bufp, iv[i]); +out: + return ascii_buf; +} + +/* + * request_user_key - request the user key + * + * Use a user provided key to encrypt/decrypt an encrypted-key. + */ +static struct key *request_user_key(const char *master_desc, u8 **master_key, + size_t *master_keylen) +{ + struct user_key_payload *upayload; + struct key *ukey; + + ukey = request_key(&key_type_user, master_desc, NULL); + if (IS_ERR(ukey)) + goto error; + + down_read(&ukey->sem); + upayload = ukey->payload.data; + *master_key = upayload->data; + *master_keylen = upayload->datalen; +error: + return ukey; +} + +static struct sdesc *alloc_sdesc(struct crypto_shash *alg) +{ + struct sdesc *sdesc; + int size; + + size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); + sdesc = kmalloc(size, GFP_KERNEL); + if (!sdesc) + return ERR_PTR(-ENOMEM); + sdesc->shash.tfm = alg; + sdesc->shash.flags = 0x0; + return sdesc; +} + +static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen, + const u8 *buf, unsigned int buflen) +{ + struct sdesc *sdesc; + int ret; + + sdesc = alloc_sdesc(hmacalg); + if (IS_ERR(sdesc)) { + pr_info("encrypted_key: can't alloc %s\n", hmac_alg); + return PTR_ERR(sdesc); + } + + ret = crypto_shash_setkey(hmacalg, key, keylen); + if (!ret) + ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest); + kfree(sdesc); + return ret; +} + +static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen) +{ + struct sdesc *sdesc; + int ret; + + sdesc = alloc_sdesc(hashalg); + if (IS_ERR(sdesc)) { + pr_info("encrypted_key: can't alloc %s\n", hash_alg); + return PTR_ERR(sdesc); + } + + ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest); + kfree(sdesc); + return ret; +} + +enum derived_key_type { ENC_KEY, AUTH_KEY }; + +/* Derive authentication/encryption key from trusted key */ +static int get_derived_key(u8 *derived_key, enum derived_key_type key_type, + const u8 *master_key, size_t master_keylen) +{ + u8 *derived_buf; + unsigned int derived_buf_len; + int ret; + + derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen; + if (derived_buf_len < HASH_SIZE) + derived_buf_len = HASH_SIZE; + + derived_buf = kzalloc(derived_buf_len, GFP_KERNEL); + if (!derived_buf) { + pr_err("encrypted_key: out of memory\n"); + return -ENOMEM; + } + if (key_type) + strcpy(derived_buf, "AUTH_KEY"); + else + strcpy(derived_buf, "ENC_KEY"); + + memcpy(derived_buf + strlen(derived_buf) + 1, master_key, + master_keylen); + ret = calc_hash(derived_key, derived_buf, derived_buf_len); + kfree(derived_buf); + return ret; +} + +static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key, + unsigned int key_len, const u8 *iv, + unsigned int ivsize) +{ + int ret; + + desc->tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(desc->tfm)) { + pr_err("encrypted_key: failed to load %s transform (%ld)\n", + blkcipher_alg, PTR_ERR(desc->tfm)); + return PTR_ERR(desc->tfm); + } + desc->flags = 0; + + ret = crypto_blkcipher_setkey(desc->tfm, key, key_len); + if (ret < 0) { + pr_err("encrypted_key: failed to setkey (%d)\n", ret); + crypto_free_blkcipher(desc->tfm); + return ret; + } + crypto_blkcipher_set_iv(desc->tfm, iv, ivsize); + return 0; +} + +static struct key *request_master_key(struct 
encrypted_key_payload *epayload, + u8 **master_key, size_t *master_keylen) +{ + struct key *mkey = NULL; + + if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX, + KEY_TRUSTED_PREFIX_LEN)) { + mkey = request_trusted_key(epayload->master_desc + + KEY_TRUSTED_PREFIX_LEN, + master_key, master_keylen); + } else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX, + KEY_USER_PREFIX_LEN)) { + mkey = request_user_key(epayload->master_desc + + KEY_USER_PREFIX_LEN, + master_key, master_keylen); + } else + goto out; + + if (IS_ERR(mkey)) { + int ret = PTR_ERR(mkey); + + if (ret == -ENOTSUPP) + pr_info("encrypted_key: key %s not supported", + epayload->master_desc); + else + pr_info("encrypted_key: key %s not found", + epayload->master_desc); + goto out; + } + + dump_master_key(*master_key, *master_keylen); +out: + return mkey; +} + +/* Before returning data to userspace, encrypt decrypted data. */ +static int derived_key_encrypt(struct encrypted_key_payload *epayload, + const u8 *derived_key, + unsigned int derived_keylen) +{ + struct scatterlist sg_in[2]; + struct scatterlist sg_out[1]; + struct blkcipher_desc desc; + unsigned int encrypted_datalen; + unsigned int padlen; + char pad[16]; + int ret; + + encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); + padlen = encrypted_datalen - epayload->decrypted_datalen; + + ret = init_blkcipher_desc(&desc, derived_key, derived_keylen, + epayload->iv, ivsize); + if (ret < 0) + goto out; + dump_decrypted_data(epayload); + + memset(pad, 0, sizeof pad); + sg_init_table(sg_in, 2); + sg_set_buf(&sg_in[0], epayload->decrypted_data, + epayload->decrypted_datalen); + sg_set_buf(&sg_in[1], pad, padlen); + + sg_init_table(sg_out, 1); + sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen); + + ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen); + crypto_free_blkcipher(desc.tfm); + if (ret < 0) + pr_err("encrypted_key: failed to encrypt (%d)\n", ret); + else + dump_encrypted_data(epayload, encrypted_datalen); +out: + return ret; +} + +static int datablob_hmac_append(struct encrypted_key_payload *epayload, + const u8 *master_key, size_t master_keylen) +{ + u8 derived_key[HASH_SIZE]; + u8 *digest; + int ret; + + ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen); + if (ret < 0) + goto out; + + digest = epayload->format + epayload->datablob_len; + ret = calc_hmac(digest, derived_key, sizeof derived_key, + epayload->format, epayload->datablob_len); + if (!ret) + dump_hmac(NULL, digest, HASH_SIZE); +out: + return ret; +} + +/* verify HMAC before decrypting encrypted key */ +static int datablob_hmac_verify(struct encrypted_key_payload *epayload, + const u8 *format, const u8 *master_key, + size_t master_keylen) +{ + u8 derived_key[HASH_SIZE]; + u8 digest[HASH_SIZE]; + int ret; + char *p; + unsigned short len; + + ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen); + if (ret < 0) + goto out; + + len = epayload->datablob_len; + if (!format) { + p = epayload->master_desc; + len -= strlen(epayload->format) + 1; + } else + p = epayload->format; + + ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len); + if (ret < 0) + goto out; + ret = memcmp(digest, epayload->format + epayload->datablob_len, + sizeof digest); + if (ret) { + ret = -EINVAL; + dump_hmac("datablob", + epayload->format + epayload->datablob_len, + HASH_SIZE); + dump_hmac("calc", digest, HASH_SIZE); + } +out: + return ret; +} + +static int derived_key_decrypt(struct encrypted_key_payload *epayload, + const u8 
*derived_key, + unsigned int derived_keylen) +{ + struct scatterlist sg_in[1]; + struct scatterlist sg_out[2]; + struct blkcipher_desc desc; + unsigned int encrypted_datalen; + char pad[16]; + int ret; + + encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); + ret = init_blkcipher_desc(&desc, derived_key, derived_keylen, + epayload->iv, ivsize); + if (ret < 0) + goto out; + dump_encrypted_data(epayload, encrypted_datalen); + + memset(pad, 0, sizeof pad); + sg_init_table(sg_in, 1); + sg_init_table(sg_out, 2); + sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen); + sg_set_buf(&sg_out[0], epayload->decrypted_data, + epayload->decrypted_datalen); + sg_set_buf(&sg_out[1], pad, sizeof pad); + + ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, encrypted_datalen); + crypto_free_blkcipher(desc.tfm); + if (ret < 0) + goto out; + dump_decrypted_data(epayload); +out: + return ret; +} + +/* Allocate memory for decrypted key and datablob. */ +static struct encrypted_key_payload *encrypted_key_alloc(struct key *key, + const char *format, + const char *master_desc, + const char *datalen) +{ + struct encrypted_key_payload *epayload = NULL; + unsigned short datablob_len; + unsigned short decrypted_datalen; + unsigned short payload_datalen; + unsigned int encrypted_datalen; + unsigned int format_len; + long dlen; + int ret; + + ret = kstrtol(datalen, 10, &dlen); + if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE) + return ERR_PTR(-EINVAL); + + format_len = (!format) ? strlen(key_format_default) : strlen(format); + decrypted_datalen = dlen; + payload_datalen = decrypted_datalen; + if (format && !strcmp(format, key_format_ecryptfs)) { + if (dlen != ECRYPTFS_MAX_KEY_BYTES) { + pr_err("encrypted_key: keylen for the ecryptfs format " + "must be equal to %d bytes\n", + ECRYPTFS_MAX_KEY_BYTES); + return ERR_PTR(-EINVAL); + } + decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES; + payload_datalen = sizeof(struct ecryptfs_auth_tok); + } + + encrypted_datalen = roundup(decrypted_datalen, blksize); + + datablob_len = format_len + 1 + strlen(master_desc) + 1 + + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen; + + ret = key_payload_reserve(key, payload_datalen + datablob_len + + HASH_SIZE + 1); + if (ret < 0) + return ERR_PTR(ret); + + epayload = kzalloc(sizeof(*epayload) + payload_datalen + + datablob_len + HASH_SIZE + 1, GFP_KERNEL); + if (!epayload) + return ERR_PTR(-ENOMEM); + + epayload->payload_datalen = payload_datalen; + epayload->decrypted_datalen = decrypted_datalen; + epayload->datablob_len = datablob_len; + return epayload; +} + +static int encrypted_key_decrypt(struct encrypted_key_payload *epayload, + const char *format, const char *hex_encoded_iv) +{ + struct key *mkey; + u8 derived_key[HASH_SIZE]; + u8 *master_key; + u8 *hmac; + const char *hex_encoded_data; + unsigned int encrypted_datalen; + size_t master_keylen; + size_t asciilen; + int ret; + + encrypted_datalen = roundup(epayload->decrypted_datalen, blksize); + asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2; + if (strlen(hex_encoded_iv) != asciilen) + return -EINVAL; + + hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2; + ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize); + if (ret < 0) + return -EINVAL; + ret = hex2bin(epayload->encrypted_data, hex_encoded_data, + encrypted_datalen); + if (ret < 0) + return -EINVAL; + + hmac = epayload->format + epayload->datablob_len; + ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2), + HASH_SIZE); + if (ret < 0) + return -EINVAL; + + mkey = 
request_master_key(epayload, &master_key, &master_keylen); + if (IS_ERR(mkey)) + return PTR_ERR(mkey); + + ret = datablob_hmac_verify(epayload, format, master_key, master_keylen); + if (ret < 0) { + pr_err("encrypted_key: bad hmac (%d)\n", ret); + goto out; + } + + ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen); + if (ret < 0) + goto out; + + ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key); + if (ret < 0) + pr_err("encrypted_key: failed to decrypt key (%d)\n", ret); +out: + up_read(&mkey->sem); + key_put(mkey); + return ret; +} + +static void __ekey_init(struct encrypted_key_payload *epayload, + const char *format, const char *master_desc, + const char *datalen) +{ + unsigned int format_len; + + format_len = (!format) ? strlen(key_format_default) : strlen(format); + epayload->format = epayload->payload_data + epayload->payload_datalen; + epayload->master_desc = epayload->format + format_len + 1; + epayload->datalen = epayload->master_desc + strlen(master_desc) + 1; + epayload->iv = epayload->datalen + strlen(datalen) + 1; + epayload->encrypted_data = epayload->iv + ivsize + 1; + epayload->decrypted_data = epayload->payload_data; + + if (!format) + memcpy(epayload->format, key_format_default, format_len); + else { + if (!strcmp(format, key_format_ecryptfs)) + epayload->decrypted_data = + ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data); + + memcpy(epayload->format, format, format_len); + } + + memcpy(epayload->master_desc, master_desc, strlen(master_desc)); + memcpy(epayload->datalen, datalen, strlen(datalen)); +} + +/* + * encrypted_init - initialize an encrypted key + * + * For a new key, use a random number for both the iv and data + * itself. For an old key, decrypt the hex encoded data. + */ +static int encrypted_init(struct encrypted_key_payload *epayload, + const char *key_desc, const char *format, + const char *master_desc, const char *datalen, + const char *hex_encoded_iv) +{ + int ret = 0; + + if (format && !strcmp(format, key_format_ecryptfs)) { + ret = valid_ecryptfs_desc(key_desc); + if (ret < 0) + return ret; + + ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data, + key_desc); + } + + __ekey_init(epayload, format, master_desc, datalen); + if (!hex_encoded_iv) { + get_random_bytes(epayload->iv, ivsize); + + get_random_bytes(epayload->decrypted_data, + epayload->decrypted_datalen); + } else + ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv); + return ret; +} + +/* + * encrypted_instantiate - instantiate an encrypted key + * + * Decrypt an existing encrypted datablob or create a new encrypted key + * based on a kernel random number. + * + * On success, return 0. Otherwise return errno. 
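
As a hedged illustration of the instantiate path described above (not part of this patch): the "new" command word parsed by datablob_parse() would typically reach encrypted_instantiate() via add_key(2). The sketch below assumes libkeyutils; the description "evm-key", the trusted master key "kmk" and the 32-byte length are illustrative values only, and the master key must already exist.

	#include <keyutils.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		/* "new [<format>] <master-key name> <decrypted data length>" */
		const char payload[] = "new trusted:kmk 32";
		key_serial_t key;

		key = add_key("encrypted", "evm-key", payload, strlen(payload),
			      KEY_SPEC_USER_KEYRING);
		if (key < 0) {
			perror("add_key");
			return 1;
		}
		printf("encrypted key created: %d\n", key);
		return 0;
	}

The same payload with the "load" keyword and a previously saved hex blob would re-create an existing key instead of generating a new random one.
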
+ */ +static int encrypted_instantiate(struct key *key, + struct key_preparsed_payload *prep) +{ + struct encrypted_key_payload *epayload = NULL; + char *datablob = NULL; + const char *format = NULL; + char *master_desc = NULL; + char *decrypted_datalen = NULL; + char *hex_encoded_iv = NULL; + size_t datalen = prep->datalen; + int ret; + + if (datalen <= 0 || datalen > 32767 || !prep->data) + return -EINVAL; + + datablob = kmalloc(datalen + 1, GFP_KERNEL); + if (!datablob) + return -ENOMEM; + datablob[datalen] = 0; + memcpy(datablob, prep->data, datalen); + ret = datablob_parse(datablob, &format, &master_desc, + &decrypted_datalen, &hex_encoded_iv); + if (ret < 0) + goto out; + + epayload = encrypted_key_alloc(key, format, master_desc, + decrypted_datalen); + if (IS_ERR(epayload)) { + ret = PTR_ERR(epayload); + goto out; + } + ret = encrypted_init(epayload, key->description, format, master_desc, + decrypted_datalen, hex_encoded_iv); + if (ret < 0) { + kfree(epayload); + goto out; + } + + rcu_assign_keypointer(key, epayload); +out: + kfree(datablob); + return ret; +} + +static void encrypted_rcu_free(struct rcu_head *rcu) +{ + struct encrypted_key_payload *epayload; + + epayload = container_of(rcu, struct encrypted_key_payload, rcu); + memset(epayload->decrypted_data, 0, epayload->decrypted_datalen); + kfree(epayload); +} + +/* + * encrypted_update - update the master key description + * + * Change the master key description for an existing encrypted key. + * The next read will return an encrypted datablob using the new + * master key description. + * + * On success, return 0. Otherwise return errno. + */ +static int encrypted_update(struct key *key, struct key_preparsed_payload *prep) +{ + struct encrypted_key_payload *epayload = key->payload.data; + struct encrypted_key_payload *new_epayload; + char *buf; + char *new_master_desc = NULL; + const char *format = NULL; + size_t datalen = prep->datalen; + int ret = 0; + + if (datalen <= 0 || datalen > 32767 || !prep->data) + return -EINVAL; + + buf = kmalloc(datalen + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf[datalen] = 0; + memcpy(buf, prep->data, datalen); + ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL); + if (ret < 0) + goto out; + + ret = valid_master_desc(new_master_desc, epayload->master_desc); + if (ret < 0) + goto out; + + new_epayload = encrypted_key_alloc(key, epayload->format, + new_master_desc, epayload->datalen); + if (IS_ERR(new_epayload)) { + ret = PTR_ERR(new_epayload); + goto out; + } + + __ekey_init(new_epayload, epayload->format, new_master_desc, + epayload->datalen); + + memcpy(new_epayload->iv, epayload->iv, ivsize); + memcpy(new_epayload->payload_data, epayload->payload_data, + epayload->payload_datalen); + + rcu_assign_keypointer(key, new_epayload); + call_rcu(&epayload->rcu, encrypted_rcu_free); +out: + kfree(buf); + return ret; +} + +/* + * encrypted_read - format and copy the encrypted data to userspace + * + * The resulting datablob format is: + * <master-key name> <decrypted data length> <encrypted iv> <encrypted data> + * + * On success, return to userspace the encrypted key datablob size. 
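
A matching hedged sketch for the read path documented above (again illustrative, not part of the patch): the ASCII datablob produced by encrypted_read() can be fetched from userspace with keyctl_read_alloc() and saved, so that the key can later be reloaded with the "load" keyword. The description "evm-key" and libkeyutils are assumptions.

	#include <keyutils.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		key_serial_t key;
		void *blob;
		long len;

		key = request_key("encrypted", "evm-key", NULL,
				  KEY_SPEC_USER_KEYRING);
		if (key < 0) {
			perror("request_key");
			return 1;
		}

		/* the encrypted key's ASCII datablob, as documented above */
		len = keyctl_read_alloc(key, &blob);
		if (len < 0) {
			perror("keyctl_read_alloc");
			return 1;
		}
		printf("%.*s\n", (int)len, (char *)blob);
		free(blob);
		return 0;
	}
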
+ */ +static long encrypted_read(const struct key *key, char __user *buffer, + size_t buflen) +{ + struct encrypted_key_payload *epayload; + struct key *mkey; + u8 *master_key; + size_t master_keylen; + char derived_key[HASH_SIZE]; + char *ascii_buf; + size_t asciiblob_len; + int ret; + + epayload = rcu_dereference_key(key); + + /* returns the hex encoded iv, encrypted-data, and hmac as ascii */ + asciiblob_len = epayload->datablob_len + ivsize + 1 + + roundup(epayload->decrypted_datalen, blksize) + + (HASH_SIZE * 2); + + if (!buffer || buflen < asciiblob_len) + return asciiblob_len; + + mkey = request_master_key(epayload, &master_key, &master_keylen); + if (IS_ERR(mkey)) + return PTR_ERR(mkey); + + ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen); + if (ret < 0) + goto out; + + ret = derived_key_encrypt(epayload, derived_key, sizeof derived_key); + if (ret < 0) + goto out; + + ret = datablob_hmac_append(epayload, master_key, master_keylen); + if (ret < 0) + goto out; + + ascii_buf = datablob_format(epayload, asciiblob_len); + if (!ascii_buf) { + ret = -ENOMEM; + goto out; + } + + up_read(&mkey->sem); + key_put(mkey); + + if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0) + ret = -EFAULT; + kfree(ascii_buf); + + return asciiblob_len; +out: + up_read(&mkey->sem); + key_put(mkey); + return ret; +} + +/* + * encrypted_destroy - before freeing the key, clear the decrypted data + * + * Before freeing the key, clear the memory containing the decrypted + * key data. + */ +static void encrypted_destroy(struct key *key) +{ + struct encrypted_key_payload *epayload = key->payload.data; + + if (!epayload) + return; + + memset(epayload->decrypted_data, 0, epayload->decrypted_datalen); + kfree(key->payload.data); +} + +struct key_type key_type_encrypted = { + .name = "encrypted", + .instantiate = encrypted_instantiate, + .update = encrypted_update, + .match = user_match, + .destroy = encrypted_destroy, + .describe = user_describe, + .read = encrypted_read, +}; +EXPORT_SYMBOL_GPL(key_type_encrypted); + +static void encrypted_shash_release(void) +{ + if (hashalg) + crypto_free_shash(hashalg); + if (hmacalg) + crypto_free_shash(hmacalg); +} + +static int __init encrypted_shash_alloc(void) +{ + int ret; + + hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hmacalg)) { + pr_info("encrypted_key: could not allocate crypto %s\n", + hmac_alg); + return PTR_ERR(hmacalg); + } + + hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hashalg)) { + pr_info("encrypted_key: could not allocate crypto %s\n", + hash_alg); + ret = PTR_ERR(hashalg); + goto hashalg_fail; + } + + return 0; + +hashalg_fail: + crypto_free_shash(hmacalg); + return ret; +} + +static int __init init_encrypted(void) +{ + int ret; + + ret = encrypted_shash_alloc(); + if (ret < 0) + return ret; + ret = register_key_type(&key_type_encrypted); + if (ret < 0) + goto out; + return aes_get_sizes(); +out: + encrypted_shash_release(); + return ret; + +} + +static void __exit cleanup_encrypted(void) +{ + encrypted_shash_release(); + unregister_key_type(&key_type_encrypted); +} + +late_initcall(init_encrypted); +module_exit(cleanup_encrypted); + +MODULE_LICENSE("GPL"); diff --git a/security/keys/encrypted-keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h new file mode 100644 index 00000000000..8136a2d44c6 --- /dev/null +++ b/security/keys/encrypted-keys/encrypted.h @@ -0,0 +1,66 @@ +#ifndef __ENCRYPTED_KEY_H +#define __ENCRYPTED_KEY_H + +#define ENCRYPTED_DEBUG 0 +#if 
defined(CONFIG_TRUSTED_KEYS) || \ + (defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE)) +extern struct key *request_trusted_key(const char *trusted_desc, + u8 **master_key, size_t *master_keylen); +#else +static inline struct key *request_trusted_key(const char *trusted_desc, + u8 **master_key, + size_t *master_keylen) +{ + return ERR_PTR(-EOPNOTSUPP); +} +#endif + +#if ENCRYPTED_DEBUG +static inline void dump_master_key(const u8 *master_key, size_t master_keylen) +{ + print_hex_dump(KERN_ERR, "master key: ", DUMP_PREFIX_NONE, 32, 1, + master_key, master_keylen, 0); +} + +static inline void dump_decrypted_data(struct encrypted_key_payload *epayload) +{ + print_hex_dump(KERN_ERR, "decrypted data: ", DUMP_PREFIX_NONE, 32, 1, + epayload->decrypted_data, + epayload->decrypted_datalen, 0); +} + +static inline void dump_encrypted_data(struct encrypted_key_payload *epayload, + unsigned int encrypted_datalen) +{ + print_hex_dump(KERN_ERR, "encrypted data: ", DUMP_PREFIX_NONE, 32, 1, + epayload->encrypted_data, encrypted_datalen, 0); +} + +static inline void dump_hmac(const char *str, const u8 *digest, + unsigned int hmac_size) +{ + if (str) + pr_info("encrypted_key: %s", str); + print_hex_dump(KERN_ERR, "hmac: ", DUMP_PREFIX_NONE, 32, 1, digest, + hmac_size, 0); +} +#else +static inline void dump_master_key(const u8 *master_key, size_t master_keylen) +{ +} + +static inline void dump_decrypted_data(struct encrypted_key_payload *epayload) +{ +} + +static inline void dump_encrypted_data(struct encrypted_key_payload *epayload, + unsigned int encrypted_datalen) +{ +} + +static inline void dump_hmac(const char *str, const u8 *digest, + unsigned int hmac_size) +{ +} +#endif +#endif diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c new file mode 100644 index 00000000000..013f7e5d3a2 --- /dev/null +++ b/security/keys/encrypted-keys/masterkey_trusted.c @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2010 IBM Corporation + * Copyright (C) 2010 Politecnico di Torino, Italy + * TORSEC group -- http://security.polito.it + * + * Authors: + * Mimi Zohar <zohar@us.ibm.com> + * Roberto Sassu <roberto.sassu@polito.it> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + * See Documentation/security/keys-trusted-encrypted.txt + */ + +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/err.h> +#include <keys/trusted-type.h> +#include <keys/encrypted-type.h> +#include "encrypted.h" + +/* + * request_trusted_key - request the trusted key + * + * Trusted keys are sealed to PCRs and other metadata. Although userspace + * manages both trusted/encrypted key-types, like the encrypted key type + * data, trusted key type data is not visible decrypted from userspace. 
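
A hedged aside on the master-key side (illustrative only, not part of this patch): before an encrypted key can name "trusted:kmk" as its master key, a trusted key called "kmk" must already exist for request_trusted_key() below to find. Assuming libkeyutils and a TPM-backed trusted-keys setup, it might be created as shown; the name "kmk" and the 32-byte length are arbitrary examples.

	#include <keyutils.h>
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		const char payload[] = "new 32";	/* 32-byte TPM-sealed random key */
		key_serial_t kmk;

		kmk = add_key("trusted", "kmk", payload, strlen(payload),
			      KEY_SPEC_USER_KEYRING);
		if (kmk < 0) {
			perror("add_key(trusted)");
			return 1;
		}
		printf("trusted master key: %d\n", kmk);
		return 0;
	}
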
+ */ +struct key *request_trusted_key(const char *trusted_desc, + u8 **master_key, size_t *master_keylen) +{ + struct trusted_key_payload *tpayload; + struct key *tkey; + + tkey = request_key(&key_type_trusted, trusted_desc, NULL); + if (IS_ERR(tkey)) + goto error; + + down_read(&tkey->sem); + tpayload = tkey->payload.data; + *master_key = tpayload->key; + *master_keylen = tpayload->key_len; +error: + return tkey; +} diff --git a/security/keys/gc.c b/security/keys/gc.c index a46e825cbf0..d3222b6d7d5 100644 --- a/security/keys/gc.c +++ b/security/keys/gc.c @@ -1,6 +1,6 @@ /* Key garbage collector * - * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -10,6 +10,8 @@ */ #include <linux/module.h> +#include <linux/slab.h> +#include <linux/security.h> #include <keys/keyring-type.h> #include "internal.h" @@ -19,21 +21,37 @@ unsigned key_gc_delay = 5 * 60; /* - * Reaper + * Reaper for unused keys. + */ +static void key_garbage_collector(struct work_struct *work); +DECLARE_WORK(key_gc_work, key_garbage_collector); + +/* + * Reaper for links from keyrings to dead keys. */ static void key_gc_timer_func(unsigned long); -static void key_garbage_collector(struct work_struct *); static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0); -static DECLARE_WORK(key_gc_work, key_garbage_collector); -static key_serial_t key_gc_cursor; /* the last key the gc considered */ -static bool key_gc_again; -static unsigned long key_gc_executing; + static time_t key_gc_next_run = LONG_MAX; -static time_t key_gc_new_timer; +static struct key_type *key_gc_dead_keytype; + +static unsigned long key_gc_flags; +#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */ +#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */ +#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */ + /* - * Schedule a garbage collection run - * - precision isn't particularly important + * Any key whose type gets unregistered will be re-typed to this if it can't be + * immediately unlinked. + */ +struct key_type key_type_dead = { + .name = "dead", +}; + +/* + * Schedule a garbage collection run. + * - time precision isn't particularly important */ void key_schedule_gc(time_t gc_at) { @@ -42,181 +60,308 @@ void key_schedule_gc(time_t gc_at) kenter("%ld", gc_at - now); - if (gc_at <= now) { + if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) { + kdebug("IMMEDIATE"); schedule_work(&key_gc_work); } else if (gc_at < key_gc_next_run) { + kdebug("DEFERRED"); + key_gc_next_run = gc_at; expires = jiffies + (gc_at - now) * HZ; mod_timer(&key_gc_timer, expires); } } /* - * The garbage collector timer kicked off + * Schedule a dead links collection run. + */ +void key_schedule_gc_links(void) +{ + set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags); + schedule_work(&key_gc_work); +} + +/* + * Some key's cleanup time was met after it expired, so we need to get the + * reaper to go through a cycle finding expired keys. */ static void key_gc_timer_func(unsigned long data) { kenter(""); key_gc_next_run = LONG_MAX; + key_schedule_gc_links(); +} + +/* + * wait_on_bit() sleep function for uninterruptible waiting + */ +static int key_gc_wait_bit(void *flags) +{ + schedule(); + return 0; +} + +/* + * Reap keys of dead type. 
+ * + * We use three flags to make sure we see three complete cycles of the garbage + * collector: the first to mark keys of that type as being dead, the second to + * collect dead links and the third to clean up the dead keys. We have to be + * careful as there may already be a cycle in progress. + * + * The caller must be holding key_types_sem. + */ +void key_gc_keytype(struct key_type *ktype) +{ + kenter("%s", ktype->name); + + key_gc_dead_keytype = ktype; + set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags); + smp_mb(); + set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags); + + kdebug("schedule"); schedule_work(&key_gc_work); + + kdebug("sleep"); + wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit, + TASK_UNINTERRUPTIBLE); + + key_gc_dead_keytype = NULL; + kleave(""); } /* - * Garbage collect pointers from a keyring - * - return true if we altered the keyring + * Garbage collect a list of unreferenced, detached keys */ -static bool key_gc_keyring(struct key *keyring, time_t limit) - __releases(key_serial_lock) +static noinline void key_gc_unused_keys(struct list_head *keys) { - struct keyring_list *klist; - struct key *key; - int loop; + while (!list_empty(keys)) { + struct key *key = + list_entry(keys->next, struct key, graveyard_link); + list_del(&key->graveyard_link); - kenter("%x", key_serial(keyring)); + kdebug("- %u", key->serial); + key_check(key); - if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) - goto dont_gc; + security_key_free(key); - /* scan the keyring looking for dead keys */ - rcu_read_lock(); - klist = rcu_dereference(keyring->payload.subscriptions); - if (!klist) - goto unlock_dont_gc; + /* deal with the user's key tracking and quota */ + if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { + spin_lock(&key->user->lock); + key->user->qnkeys--; + key->user->qnbytes -= key->quotalen; + spin_unlock(&key->user->lock); + } - for (loop = klist->nkeys - 1; loop >= 0; loop--) { - key = klist->keys[loop]; - if (test_bit(KEY_FLAG_DEAD, &key->flags) || - (key->expiry > 0 && key->expiry <= limit)) - goto do_gc; - } + atomic_dec(&key->user->nkeys); + if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) + atomic_dec(&key->user->nikeys); -unlock_dont_gc: - rcu_read_unlock(); -dont_gc: - kleave(" = false"); - return false; + key_user_put(key->user); -do_gc: - rcu_read_unlock(); - key_gc_cursor = keyring->serial; - key_get(keyring); - spin_unlock(&key_serial_lock); - keyring_gc(keyring, limit); - key_put(keyring); - kleave(" = true"); - return true; + /* now throw away the key memory */ + if (key->type->destroy) + key->type->destroy(key); + + kfree(key->description); + +#ifdef KEY_DEBUGGING + key->magic = KEY_DEBUG_MAGIC_X; +#endif + kmem_cache_free(key_jar, key); + } } /* - * Garbage collector for keys - * - this involves scanning the keyrings for dead, expired and revoked keys - * that have overstayed their welcome + * Garbage collector for unused keys. + * + * This is done in process context so that we don't have to disable interrupts + * all over the place. key_put() schedules this rather than trying to do the + * cleanup itself, which means key_put() doesn't have to sleep. 
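
A hedged sketch of the expected caller of key_gc_keytype() above (simplified, not part of this patch): when a key type is unregistered, the unregistration path is expected to unlink the type under key_types_sem and then block in key_gc_keytype() until the reap cycles described above have destroyed every key of that type. The exact body shown here is an assumption; key_types_sem and key_gc_keytype() are internal to security/keys/.

	/* Assumed, simplified shape of the key-type unregistration path */
	void example_unregister_key_type(struct key_type *ktype)
	{
		down_write(&key_types_sem);
		list_del_init(&ktype->link);	/* no new keys of this type */
		downgrade_write(&key_types_sem);
		key_gc_keytype(ktype);		/* blocks until KEY_GC_REAPING_KEYTYPE clears */
		up_read(&key_types_sem);
	}
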
*/ static void key_garbage_collector(struct work_struct *work) { - struct rb_node *rb; - key_serial_t cursor; - struct key *key, *xkey; - time_t new_timer = LONG_MAX, limit, now; - - now = current_kernel_time().tv_sec; - kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now); - - if (test_and_set_bit(0, &key_gc_executing)) { - key_schedule_gc(current_kernel_time().tv_sec + 1); - kleave(" [busy; deferring]"); - return; - } + static LIST_HEAD(graveyard); + static u8 gc_state; /* Internal persistent state */ +#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */ +#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */ +#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */ +#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */ +#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */ +#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */ +#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */ - limit = now; + struct rb_node *cursor; + struct key *key; + time_t new_timer, limit; + + kenter("[%lx,%x]", key_gc_flags, gc_state); + + limit = current_kernel_time().tv_sec; if (limit > key_gc_delay) limit -= key_gc_delay; else limit = key_gc_delay; + /* Work out what we're going to be doing in this pass */ + gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2; + gc_state <<= 1; + if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags)) + gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER; + + if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) + gc_state |= KEY_GC_REAPING_DEAD_1; + kdebug("new pass %x", gc_state); + + new_timer = LONG_MAX; + + /* As only this function is permitted to remove things from the key + * serial tree, if cursor is non-NULL then it will always point to a + * valid node in the tree - even if lock got dropped. 
+ */ spin_lock(&key_serial_lock); + cursor = rb_first(&key_serial_tree); - if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) { - spin_unlock(&key_serial_lock); - clear_bit(0, &key_gc_executing); - return; - } +continue_scanning: + while (cursor) { + key = rb_entry(cursor, struct key, serial_node); + cursor = rb_next(cursor); - cursor = key_gc_cursor; - if (cursor < 0) - cursor = 0; - if (cursor > 0) - new_timer = key_gc_new_timer; - else - key_gc_again = false; - - /* find the first key above the cursor */ - key = NULL; - rb = key_serial_tree.rb_node; - while (rb) { - xkey = rb_entry(rb, struct key, serial_node); - if (cursor < xkey->serial) { - key = xkey; - rb = rb->rb_left; - } else if (cursor > xkey->serial) { - rb = rb->rb_right; - } else { - rb = rb_next(rb); - if (!rb) - goto reached_the_end; - key = rb_entry(rb, struct key, serial_node); - break; + if (atomic_read(&key->usage) == 0) + goto found_unreferenced_key; + + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) { + if (key->type == key_gc_dead_keytype) { + gc_state |= KEY_GC_FOUND_DEAD_KEY; + set_bit(KEY_FLAG_DEAD, &key->flags); + key->perm = 0; + goto skip_dead_key; + } } - } - if (!key) - goto reached_the_end; + if (gc_state & KEY_GC_SET_TIMER) { + if (key->expiry > limit && key->expiry < new_timer) { + kdebug("will expire %x in %ld", + key_serial(key), key->expiry - limit); + new_timer = key->expiry; + } + } + + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) + if (key->type == key_gc_dead_keytype) + gc_state |= KEY_GC_FOUND_DEAD_KEY; - /* trawl through the keys looking for keyrings */ - for (;;) { - if (key->expiry > limit && key->expiry < new_timer) { - kdebug("will expire %x in %ld", - key_serial(key), key->expiry - limit); - new_timer = key->expiry; + if ((gc_state & KEY_GC_REAPING_LINKS) || + unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) { + if (key->type == &key_type_keyring) + goto found_keyring; } - if (key->type == &key_type_keyring && - key_gc_keyring(key, limit)) - /* the gc had to release our lock so that the keyring - * could be modified, so we have to get it again */ - goto gc_released_our_lock; + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) + if (key->type == key_gc_dead_keytype) + goto destroy_dead_key; - rb = rb_next(&key->serial_node); - if (!rb) - goto reached_the_end; - key = rb_entry(rb, struct key, serial_node); + skip_dead_key: + if (spin_is_contended(&key_serial_lock) || need_resched()) + goto contended; } -gc_released_our_lock: - kdebug("gc_released_our_lock"); - key_gc_new_timer = new_timer; - key_gc_again = true; - clear_bit(0, &key_gc_executing); - schedule_work(&key_gc_work); - kleave(" [continue]"); - return; - - /* when we reach the end of the run, we set the timer for the next one */ -reached_the_end: - kdebug("reached_the_end"); +contended: spin_unlock(&key_serial_lock); - key_gc_new_timer = new_timer; - key_gc_cursor = 0; - clear_bit(0, &key_gc_executing); - - if (key_gc_again) { - /* there may have been a key that expired whilst we were - * scanning, so if we discarded any links we should do another - * scan */ - new_timer = now + 1; - key_schedule_gc(new_timer); - } else if (new_timer < LONG_MAX) { + +maybe_resched: + if (cursor) { + cond_resched(); + spin_lock(&key_serial_lock); + goto continue_scanning; + } + + /* We've completed the pass. Set the timer if we need to and queue a + * new cycle if necessary. We keep executing cycles until we find one + * where we didn't reap any keys. 
+ */ + kdebug("pass complete"); + + if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) { new_timer += key_gc_delay; key_schedule_gc(new_timer); } - kleave(" [end]"); + + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) || + !list_empty(&graveyard)) { + /* Make sure that all pending keyring payload destructions are + * fulfilled and that people aren't now looking at dead or + * dying keys that they don't have a reference upon or a link + * to. + */ + kdebug("gc sync"); + synchronize_rcu(); + } + + if (!list_empty(&graveyard)) { + kdebug("gc keys"); + key_gc_unused_keys(&graveyard); + } + + if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 | + KEY_GC_REAPING_DEAD_2))) { + if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) { + /* No remaining dead keys: short circuit the remaining + * keytype reap cycles. + */ + kdebug("dead short"); + gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2); + gc_state |= KEY_GC_REAPING_DEAD_3; + } else { + gc_state |= KEY_GC_REAP_AGAIN; + } + } + + if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) { + kdebug("dead wake"); + smp_mb(); + clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags); + wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE); + } + + if (gc_state & KEY_GC_REAP_AGAIN) + schedule_work(&key_gc_work); + kleave(" [end %x]", gc_state); + return; + + /* We found an unreferenced key - once we've removed it from the tree, + * we can safely drop the lock. + */ +found_unreferenced_key: + kdebug("unrefd key %d", key->serial); + rb_erase(&key->serial_node, &key_serial_tree); + spin_unlock(&key_serial_lock); + + list_add_tail(&key->graveyard_link, &graveyard); + gc_state |= KEY_GC_REAP_AGAIN; + goto maybe_resched; + + /* We found a keyring and we need to check the payload for links to + * dead or expired keys. We don't flag another reap immediately as we + * have to wait for the old payload to be destroyed by RCU before we + * can reap the keys to which it refers. + */ +found_keyring: + spin_unlock(&key_serial_lock); + keyring_gc(key, limit); + goto maybe_resched; + + /* We found a dead key that is still referenced. Reset its type and + * destroy its payload with its semaphore held. + */ +destroy_dead_key: + spin_unlock(&key_serial_lock); + kdebug("destroy key %d", key->serial); + down_write(&key->sem); + key->type = &key_type_dead; + if (key_gc_dead_keytype->destroy) + key_gc_dead_keytype->destroy(key); + memset(&key->payload, KEY_DESTROY, sizeof(key->payload)); + up_write(&key->sem); + goto maybe_resched; } diff --git a/security/keys/internal.h b/security/keys/internal.h index 56a133d8f37..5f20da01fd8 100644 --- a/security/keys/internal.h +++ b/security/keys/internal.h @@ -1,4 +1,4 @@ -/* internal.h: authentication token and access key management internal defs +/* Authentication token and access key management internal defs * * Copyright (C) 2003-5, 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -14,6 +14,9 @@ #include <linux/sched.h> #include <linux/key-type.h> +#include <linux/task_work.h> + +struct iovec; #ifdef __KDEBUG #define kenter(FMT, ...) 
\ @@ -31,14 +34,18 @@ no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__) #endif +extern struct key_type key_type_dead; extern struct key_type key_type_user; +extern struct key_type key_type_logon; /*****************************************************************************/ /* - * keep track of keys for a user - * - this needs to be separate to user_struct to avoid a refcount-loop - * (user_struct pins some keyrings which pin this struct) - * - this also keeps track of keys under request from userspace for this UID + * Keep track of keys for a user. + * + * This needs to be separate to user_struct to avoid a refcount-loop + * (user_struct pins some keyrings which pin this struct). + * + * We also keep track of keys under request from userspace for this UID here. */ struct key_user { struct rb_node node; @@ -47,8 +54,7 @@ struct key_user { atomic_t usage; /* for accessing qnkeys & qnbytes */ atomic_t nkeys; /* number of keys */ atomic_t nikeys; /* number of instantiated keys */ - uid_t uid; - struct user_namespace *user_ns; + kuid_t uid; int qnkeys; /* number of keys allocated to this user */ int qnbytes; /* number of bytes allocated to this user */ }; @@ -57,12 +63,11 @@ extern struct rb_root key_user_tree; extern spinlock_t key_user_lock; extern struct key_user root_key_user; -extern struct key_user *key_user_lookup(uid_t uid, - struct user_namespace *user_ns); +extern struct key_user *key_user_lookup(kuid_t uid); extern void key_user_put(struct key_user *user); /* - * key quota limits + * Key quota limits. * - root has its own separate limits to everyone else */ extern unsigned key_quota_root_maxkeys; @@ -73,6 +78,7 @@ extern unsigned key_quota_maxbytes; #define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */ +extern struct kmem_cache *key_jar; extern struct rb_root key_serial_tree; extern spinlock_t key_serial_lock; extern struct mutex key_construction_mutex; @@ -83,40 +89,53 @@ extern struct key_type *key_type_lookup(const char *type); extern void key_type_put(struct key_type *ktype); extern int __key_link_begin(struct key *keyring, - const struct key_type *type, - const char *description, - struct keyring_list **_prealloc); + const struct keyring_index_key *index_key, + struct assoc_array_edit **_edit); extern int __key_link_check_live_key(struct key *keyring, struct key *key); -extern void __key_link(struct key *keyring, struct key *key, - struct keyring_list **_prealloc); +extern void __key_link(struct key *key, struct assoc_array_edit **_edit); extern void __key_link_end(struct key *keyring, - struct key_type *type, - struct keyring_list *prealloc); + const struct keyring_index_key *index_key, + struct assoc_array_edit *edit); -extern key_ref_t __keyring_search_one(key_ref_t keyring_ref, - const struct key_type *type, - const char *description, - key_perm_t perm); +extern key_ref_t find_key_to_update(key_ref_t keyring_ref, + const struct keyring_index_key *index_key); extern struct key *keyring_search_instkey(struct key *keyring, key_serial_t target_id); +extern int iterate_over_keyring(const struct key *keyring, + int (*func)(const struct key *key, void *data), + void *data); + typedef int (*key_match_func_t)(const struct key *, const void *); +struct keyring_search_context { + struct keyring_index_key index_key; + const struct cred *cred; + key_match_func_t match; + const void *match_data; + unsigned flags; +#define KEYRING_SEARCH_LOOKUP_TYPE 0x0001 /* [as type->def_lookup_type] */ +#define KEYRING_SEARCH_NO_STATE_CHECK 0x0002 /* Skip state checks */ +#define 
KEYRING_SEARCH_DO_STATE_CHECK 0x0004 /* Override NO_STATE_CHECK */ +#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */ +#define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */ +#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */ + + int (*iterator)(const void *object, void *iterator_data); + + /* Internal stuff */ + int skipped_ret; + bool possessed; + key_ref_t result; + struct timespec now; +}; + extern key_ref_t keyring_search_aux(key_ref_t keyring_ref, - const struct cred *cred, - struct key_type *type, - const void *description, - key_match_func_t match); - -extern key_ref_t search_my_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - const struct cred *cred); -extern key_ref_t search_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - const struct cred *cred); + struct keyring_search_context *ctx); + +extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx); +extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx); extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check); @@ -141,34 +160,29 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags, #define KEY_LOOKUP_FOR_UNLINK 0x04 extern long join_session_keyring(const char *name); +extern void key_change_session_keyring(struct callback_head *twork); +extern struct work_struct key_gc_work; extern unsigned key_gc_delay; extern void keyring_gc(struct key *keyring, time_t limit); -extern void key_schedule_gc(time_t expiry_at); +extern void key_schedule_gc(time_t gc_at); +extern void key_schedule_gc_links(void); +extern void key_gc_keytype(struct key_type *ktype); -/* - * check to see whether permission is granted to use a key in the desired way - */ extern int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key_perm_t perm); -static inline int key_permission(const key_ref_t key_ref, key_perm_t perm) +/* + * Check to see whether permission is granted to use a key in the desired way. + */ +static inline int key_permission(const key_ref_t key_ref, unsigned perm) { return key_task_permission(key_ref, current_cred(), perm); } -/* required permissions */ -#define KEY_VIEW 0x01 /* require permission to view attributes */ -#define KEY_READ 0x02 /* require permission to read content */ -#define KEY_WRITE 0x04 /* require permission to update / modify */ -#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */ -#define KEY_LINK 0x10 /* require permission to link */ -#define KEY_SETATTR 0x20 /* require permission to change attributes */ -#define KEY_ALL 0x3f /* all the above permissions */ - /* - * request_key authorisation + * Authorisation record for request_key(). */ struct request_key_auth { struct key *target_key; @@ -188,7 +202,18 @@ extern struct key *request_key_auth_new(struct key *target, extern struct key *key_get_instantiation_authkey(key_serial_t target_id); /* - * keyctl functions + * Determine whether a key is dead. 
+ */ +static inline bool key_is_dead(const struct key *key, time_t limit) +{ + return + key->flags & ((1 << KEY_FLAG_DEAD) | + (1 << KEY_FLAG_INVALIDATED)) || + (key->expiry > 0 && key->expiry <= limit); +} + +/* + * keyctl() functions */ extern long keyctl_get_keyring_ID(key_serial_t, int); extern long keyctl_join_session_keyring(const char __user *); @@ -212,9 +237,27 @@ extern long keyctl_assume_authority(key_serial_t); extern long keyctl_get_security(key_serial_t keyid, char __user *buffer, size_t buflen); extern long keyctl_session_to_parent(void); +extern long keyctl_reject_key(key_serial_t, unsigned, unsigned, key_serial_t); +extern long keyctl_instantiate_key_iov(key_serial_t, + const struct iovec __user *, + unsigned, key_serial_t); +extern long keyctl_invalidate_key(key_serial_t); + +extern long keyctl_instantiate_key_common(key_serial_t, + const struct iovec *, + unsigned, size_t, key_serial_t); +#ifdef CONFIG_PERSISTENT_KEYRINGS +extern long keyctl_get_persistent(uid_t, key_serial_t); +extern unsigned persistent_keyring_expiry; +#else +static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring) +{ + return -EOPNOTSUPP; +} +#endif /* - * debugging key validation + * Debugging key validation */ #ifdef KEY_DEBUGGING extern void __key_check(const struct key *); diff --git a/security/keys/key.c b/security/keys/key.c index c1eac8084ad..2048a110e7f 100644 --- a/security/keys/key.c +++ b/security/keys/key.c @@ -18,10 +18,9 @@ #include <linux/workqueue.h> #include <linux/random.h> #include <linux/err.h> -#include <linux/user_namespace.h> #include "internal.h" -static struct kmem_cache *key_jar; +struct kmem_cache *key_jar; struct rb_root key_serial_tree; /* tree of keys indexed by serial */ DEFINE_SPINLOCK(key_serial_lock); @@ -36,17 +35,9 @@ unsigned int key_quota_maxbytes = 20000; /* general key space quota */ static LIST_HEAD(key_types_list); static DECLARE_RWSEM(key_types_sem); -static void key_cleanup(struct work_struct *work); -static DECLARE_WORK(key_cleanup_task, key_cleanup); - -/* we serialise key instantiation and link */ +/* We serialise key instantiation and link */ DEFINE_MUTEX(key_construction_mutex); -/* any key who's type gets unegistered will be re-typed to this */ -static struct key_type key_type_dead = { - .name = "dead", -}; - #ifdef KEY_DEBUGGING void __key_check(const struct key *key) { @@ -56,18 +47,17 @@ void __key_check(const struct key *key) } #endif -/*****************************************************************************/ /* - * get the key quota record for a user, allocating a new record if one doesn't - * already exist + * Get the key quota record for a user, allocating a new record if one doesn't + * already exist. 
*/ -struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns) +struct key_user *key_user_lookup(kuid_t uid) { struct key_user *candidate = NULL, *user; struct rb_node *parent = NULL; struct rb_node **p; - try_again: +try_again: p = &key_user_tree.rb_node; spin_lock(&key_user_lock); @@ -76,13 +66,9 @@ struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns) parent = *p; user = rb_entry(parent, struct key_user, node); - if (uid < user->uid) - p = &(*p)->rb_left; - else if (uid > user->uid) - p = &(*p)->rb_right; - else if (user_ns < user->user_ns) + if (uid_lt(uid, user->uid)) p = &(*p)->rb_left; - else if (user_ns > user->user_ns) + else if (uid_gt(uid, user->uid)) p = &(*p)->rb_right; else goto found; @@ -111,7 +97,6 @@ struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns) atomic_set(&candidate->nkeys, 0); atomic_set(&candidate->nikeys, 0); candidate->uid = uid; - candidate->user_ns = get_user_ns(user_ns); candidate->qnkeys = 0; candidate->qnbytes = 0; spin_lock_init(&candidate->lock); @@ -124,36 +109,30 @@ struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns) goto out; /* okay - we found a user record for this UID */ - found: +found: atomic_inc(&user->usage); spin_unlock(&key_user_lock); kfree(candidate); - out: +out: return user; +} -} /* end key_user_lookup() */ - -/*****************************************************************************/ /* - * dispose of a user structure + * Dispose of a user structure */ void key_user_put(struct key_user *user) { if (atomic_dec_and_lock(&user->usage, &key_user_lock)) { rb_erase(&user->node, &key_user_tree); spin_unlock(&key_user_lock); - put_user_ns(user->user_ns); kfree(user); } +} -} /* end key_user_put() */ - -/*****************************************************************************/ /* - * assign a key the next unique serial number - * - these are assigned randomly to avoid security issues through covert - * channel problems + * Allocate a serial number for a key. These are assigned randomly to avoid + * security issues through covert channel problems. */ static inline void key_alloc_serial(struct key *key) { @@ -211,21 +190,39 @@ serial_exists: if (key->serial < xkey->serial) goto attempt_insertion; } +} -} /* end key_alloc_serial() */ - -/*****************************************************************************/ -/* - * allocate a key of the specified type - * - update the user's quota to reflect the existence of the key - * - called from a key-type operation with key_types_sem read-locked by - * key_create_or_update() - * - this prevents unregistration of the key type - * - upon return the key is as yet uninstantiated; the caller needs to either - * instantiate the key or discard it before returning +/** + * key_alloc - Allocate a key of the specified type. + * @type: The type of key to allocate. + * @desc: The key description to allow the key to be searched out. + * @uid: The owner of the new key. + * @gid: The group ID for the new key's group permissions. + * @cred: The credentials specifying UID namespace. + * @perm: The permissions mask of the new key. + * @flags: Flags specifying quota properties. + * + * Allocate a key of the specified type with the attributes given. The key is + * returned in an uninstantiated state and the caller needs to instantiate the + * key before returning. + * + * The user's key count quota is updated to reflect the creation of the key and + * the user's key data quota has the default for the key type reserved. 
The + * instantiation function should amend this as necessary. If insufficient + * quota is available, -EDQUOT will be returned. + * + * The LSM security modules can prevent a key being created, in which case + * -EACCES will be returned. + * + * Returns a pointer to the new key if successful and an error code otherwise. + * + * Note that the caller needs to ensure the key type isn't uninstantiated. + * Internally this can be done by locking key_types_sem. Externally, this can + * be done by either never unregistering the key type, or making sure + * key_alloc() calls don't race with module unloading. */ struct key *key_alloc(struct key_type *type, const char *desc, - uid_t uid, gid_t gid, const struct cred *cred, + kuid_t uid, kgid_t gid, const struct cred *cred, key_perm_t perm, unsigned long flags) { struct key_user *user = NULL; @@ -237,20 +234,28 @@ struct key *key_alloc(struct key_type *type, const char *desc, if (!desc || !*desc) goto error; - desclen = strlen(desc) + 1; - quotalen = desclen + type->def_datalen; + if (type->vet_description) { + ret = type->vet_description(desc); + if (ret < 0) { + key = ERR_PTR(ret); + goto error; + } + } + + desclen = strlen(desc); + quotalen = desclen + 1 + type->def_datalen; /* get hold of the key tracking for this user */ - user = key_user_lookup(uid, cred->user->user_ns); + user = key_user_lookup(uid); if (!user) goto no_memory_1; /* check that the user's quota permits allocation of another key and * its description */ if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) { - unsigned maxkeys = (uid == 0) ? + unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; - unsigned maxbytes = (uid == 0) ? + unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&user->lock); @@ -267,34 +272,32 @@ struct key *key_alloc(struct key_type *type, const char *desc, } /* allocate and initialise the key and its description */ - key = kmem_cache_alloc(key_jar, GFP_KERNEL); + key = kmem_cache_zalloc(key_jar, GFP_KERNEL); if (!key) goto no_memory_2; if (desc) { - key->description = kmemdup(desc, desclen, GFP_KERNEL); + key->index_key.desc_len = desclen; + key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL); if (!key->description) goto no_memory_3; } atomic_set(&key->usage, 1); init_rwsem(&key->sem); - key->type = type; + lockdep_set_class(&key->sem, &type->lock_class); + key->index_key.type = type; key->user = user; key->quotalen = quotalen; key->datalen = type->def_datalen; key->uid = uid; key->gid = gid; key->perm = perm; - key->flags = 0; - key->expiry = 0; - key->payload.data = NULL; - key->security = NULL; if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) key->flags |= 1 << KEY_FLAG_IN_QUOTA; - - memset(&key->type_data, 0, sizeof(key->type_data)); + if (flags & KEY_ALLOC_TRUSTED) + key->flags |= 1 << KEY_FLAG_TRUSTED; #ifdef KEY_DEBUGGING key->magic = KEY_DEBUG_MAGIC; @@ -344,14 +347,19 @@ no_quota: key_user_put(user); key = ERR_PTR(-EDQUOT); goto error; - -} /* end key_alloc() */ - +} EXPORT_SYMBOL(key_alloc); -/*****************************************************************************/ -/* - * reserve an amount of quota for the key's payload +/** + * key_payload_reserve - Adjust data quota reservation for the key's payload + * @key: The key to make the reservation for. + * @datalen: The amount of data payload the caller now wants. + * + * Adjust the amount of the owning user's key data quota that a key reserves. 
+ * If the amount is increased, then -EDQUOT may be returned if there isn't + * enough free quota available. + * + * If successful, 0 is returned. */ int key_payload_reserve(struct key *key, size_t datalen) { @@ -362,7 +370,7 @@ int key_payload_reserve(struct key *key, size_t datalen) /* contemplate the quota adjustment */ if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { - unsigned maxbytes = (key->user->uid == 0) ? + unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&key->user->lock); @@ -384,22 +392,20 @@ int key_payload_reserve(struct key *key, size_t datalen) key->datalen = datalen; return ret; - -} /* end key_payload_reserve() */ - +} EXPORT_SYMBOL(key_payload_reserve); -/*****************************************************************************/ /* - * instantiate a key and link it into the target keyring atomically - * - called with the target keyring's semaphore writelocked + * Instantiate a key and link it into the target keyring atomically. Must be + * called with the target keyring's semaphore writelocked. The target key's + * semaphore need not be locked as instantiation is serialised by + * key_construction_mutex. */ static int __key_instantiate_and_link(struct key *key, - const void *data, - size_t datalen, + struct key_preparsed_payload *prep, struct key *keyring, struct key *authkey, - struct keyring_list **_prealloc) + struct assoc_array_edit **_edit) { int ret, awaken; @@ -414,7 +420,7 @@ static int __key_instantiate_and_link(struct key *key, /* can't instantiate twice */ if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* instantiate the key */ - ret = key->type->instantiate(key, data, datalen); + ret = key->type->instantiate(key, prep); if (ret == 0) { /* mark the key as being instantiated */ @@ -426,7 +432,7 @@ static int __key_instantiate_and_link(struct key *key, /* and link it into the destination keyring */ if (keyring) - __key_link(keyring, key, _prealloc); + __key_link(key, _edit); /* disable the authorisation key */ if (authkey) @@ -441,12 +447,23 @@ static int __key_instantiate_and_link(struct key *key, wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret; +} -} /* end __key_instantiate_and_link() */ - -/*****************************************************************************/ -/* - * instantiate a key and link it into the target keyring atomically +/** + * key_instantiate_and_link - Instantiate a key and link it into the keyring. + * @key: The key to instantiate. + * @data: The data to use to instantiate the keyring. + * @datalen: The length of @data. + * @keyring: Keyring to create a link in on success (or NULL). + * @authkey: The authorisation token permitting instantiation. + * + * Instantiate a key that's in the uninstantiated state using the provided data + * and, if successful, link it in to the destination keyring if one is + * supplied. + * + * If successful, 0 is returned, the authorisation token is revoked and anyone + * waiting for the key is woken up. If the key was already instantiated, + * -EBUSY will be returned. 
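
A hedged kernel-side sketch tying key_alloc() and key_instantiate_and_link() together (illustrative, not part of this patch): an in-kernel user allocates an uninstantiated key and then instantiates it with the data provided. The "user" key type, the description "example:demo" and the permission mask are arbitrary choices for the example.

	#include <linux/key.h>
	#include <linux/cred.h>
	#include <linux/err.h>
	#include <keys/user-type.h>

	static struct key *example_make_key(const void *data, size_t len)
	{
		const struct cred *cred = current_cred();
		struct key *key;
		int ret;

		key = key_alloc(&key_type_user, "example:demo",
				cred->fsuid, cred->fsgid, cred,
				KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
				KEY_ALLOC_NOT_IN_QUOTA);
		if (IS_ERR(key))
			return key;

		ret = key_instantiate_and_link(key, data, len, NULL, NULL);
		if (ret < 0) {
			key_put(key);
			return ERR_PTR(ret);
		}
		return key;	/* caller owns a reference; drop with key_put() */
	}
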
*/ int key_instantiate_and_link(struct key *key, const void *data, @@ -454,38 +471,68 @@ int key_instantiate_and_link(struct key *key, struct key *keyring, struct key *authkey) { - struct keyring_list *prealloc; + struct key_preparsed_payload prep; + struct assoc_array_edit *edit; int ret; + memset(&prep, 0, sizeof(prep)); + prep.data = data; + prep.datalen = datalen; + prep.quotalen = key->type->def_datalen; + if (key->type->preparse) { + ret = key->type->preparse(&prep); + if (ret < 0) + goto error; + } + if (keyring) { - ret = __key_link_begin(keyring, key->type, key->description, - &prealloc); + ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret < 0) - return ret; + goto error_free_preparse; } - ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey, - &prealloc); + ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit); if (keyring) - __key_link_end(keyring, key->type, prealloc); + __key_link_end(keyring, &key->index_key, edit); +error_free_preparse: + if (key->type->preparse) + key->type->free_preparse(&prep); +error: return ret; - -} /* end key_instantiate_and_link() */ +} EXPORT_SYMBOL(key_instantiate_and_link); -/*****************************************************************************/ -/* - * negatively instantiate a key and link it into the target keyring atomically +/** + * key_reject_and_link - Negatively instantiate a key and link it into the keyring. + * @key: The key to instantiate. + * @timeout: The timeout on the negative key. + * @error: The error to return when the key is hit. + * @keyring: Keyring to create a link in on success (or NULL). + * @authkey: The authorisation token permitting instantiation. + * + * Negatively instantiate a key that's in the uninstantiated state and, if + * successful, set its timeout and stored error and link it in to the + * destination keyring if one is supplied. The key and any links to the key + * will be automatically garbage collected after the timeout expires. + * + * Negative keys are used to rate limit repeated request_key() calls by causing + * them to return the stored error code (typically ENOKEY) until the negative + * key expires. + * + * If successful, 0 is returned, the authorisation token is revoked and anyone + * waiting for the key is woken up. If the key was already instantiated, + * -EBUSY will be returned. 
*/ -int key_negate_and_link(struct key *key, +int key_reject_and_link(struct key *key, unsigned timeout, + unsigned error, struct key *keyring, struct key *authkey) { - struct keyring_list *prealloc; + struct assoc_array_edit *edit; struct timespec now; int ret, awaken, link_ret = 0; @@ -496,8 +543,7 @@ int key_negate_and_link(struct key *key, ret = -EBUSY; if (keyring) - link_ret = __key_link_begin(keyring, key->type, - key->description, &prealloc); + link_ret = __key_link_begin(keyring, &key->index_key, &edit); mutex_lock(&key_construction_mutex); @@ -505,6 +551,8 @@ int key_negate_and_link(struct key *key, if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) { /* mark the key as being negatively instantiated */ atomic_inc(&key->user->nikeys); + key->type_data.reject_error = -error; + smp_wmb(); set_bit(KEY_FLAG_NEGATIVE, &key->flags); set_bit(KEY_FLAG_INSTANTIATED, &key->flags); now = current_kernel_time(); @@ -518,7 +566,7 @@ int key_negate_and_link(struct key *key, /* and link it into the destination keyring */ if (keyring && link_ret == 0) - __key_link(keyring, key, &prealloc); + __key_link(key, &edit); /* disable the authorisation key */ if (authkey) @@ -528,87 +576,23 @@ int key_negate_and_link(struct key *key, mutex_unlock(&key_construction_mutex); if (keyring) - __key_link_end(keyring, key->type, prealloc); + __key_link_end(keyring, &key->index_key, edit); /* wake up anyone waiting for a key to be constructed */ if (awaken) wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT); return ret == 0 ? link_ret : ret; +} +EXPORT_SYMBOL(key_reject_and_link); -} /* end key_negate_and_link() */ - -EXPORT_SYMBOL(key_negate_and_link); - -/*****************************************************************************/ -/* - * do cleaning up in process context so that we don't have to disable - * interrupts all over the place - */ -static void key_cleanup(struct work_struct *work) -{ - struct rb_node *_n; - struct key *key; - - go_again: - /* look for a dead key in the tree */ - spin_lock(&key_serial_lock); - - for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { - key = rb_entry(_n, struct key, serial_node); - - if (atomic_read(&key->usage) == 0) - goto found_dead_key; - } - - spin_unlock(&key_serial_lock); - return; - - found_dead_key: - /* we found a dead key - once we've removed it from the tree, we can - * drop the lock */ - rb_erase(&key->serial_node, &key_serial_tree); - spin_unlock(&key_serial_lock); - - key_check(key); - - security_key_free(key); - - /* deal with the user's key tracking and quota */ - if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { - spin_lock(&key->user->lock); - key->user->qnkeys--; - key->user->qnbytes -= key->quotalen; - spin_unlock(&key->user->lock); - } - - atomic_dec(&key->user->nkeys); - if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) - atomic_dec(&key->user->nikeys); - - key_user_put(key->user); - - /* now throw away the key memory */ - if (key->type->destroy) - key->type->destroy(key); - - kfree(key->description); - -#ifdef KEY_DEBUGGING - key->magic = KEY_DEBUG_MAGIC_X; -#endif - kmem_cache_free(key_jar, key); - - /* there may, of course, be more than one key to destroy */ - goto go_again; - -} /* end key_cleanup() */ - -/*****************************************************************************/ -/* - * dispose of a reference to a key - * - when all the references are gone, we schedule the cleanup task to come and - * pull it out of the tree in definite process context +/** + * key_put - Discard a reference to a key. 
+ * @key: The key to discard a reference from. + * + * Discard a reference to a key, and when all the references are gone, we + * schedule the cleanup task to come and pull it out of the tree in process + * context at some later time. */ void key_put(struct key *key) { @@ -616,16 +600,13 @@ void key_put(struct key *key) key_check(key); if (atomic_dec_and_test(&key->usage)) - schedule_work(&key_cleanup_task); + schedule_work(&key_gc_work); } - -} /* end key_put() */ - +} EXPORT_SYMBOL(key_put); -/*****************************************************************************/ /* - * find a key by its serial number + * Find a key by its serial number. */ struct key *key_lookup(key_serial_t id) { @@ -647,11 +628,11 @@ struct key *key_lookup(key_serial_t id) goto found; } - not_found: +not_found: key = ERR_PTR(-ENOKEY); goto error; - found: +found: /* pretend it doesn't exist if it is awaiting deletion */ if (atomic_read(&key->usage) == 0) goto not_found; @@ -659,18 +640,18 @@ struct key *key_lookup(key_serial_t id) /* this races with key_put(), but that doesn't matter since key_put() * doesn't actually change the key */ - atomic_inc(&key->usage); + __key_get(key); - error: +error: spin_unlock(&key_serial_lock); return key; +} -} /* end key_lookup() */ - -/*****************************************************************************/ /* - * find and lock the specified key type against removal - * - we return with the sem readlocked + * Find and lock the specified key type against removal. + * + * We return with the sem read-locked if successful. If the type wasn't + * available -ENOKEY is returned instead. */ struct key_type *key_type_lookup(const char *type) { @@ -688,35 +669,52 @@ struct key_type *key_type_lookup(const char *type) up_read(&key_types_sem); ktype = ERR_PTR(-ENOKEY); - found_kernel_type: +found_kernel_type: return ktype; +} + +void key_set_timeout(struct key *key, unsigned timeout) +{ + struct timespec now; + time_t expiry = 0; + + /* make the changes with the locks held to prevent races */ + down_write(&key->sem); -} /* end key_type_lookup() */ + if (timeout > 0) { + now = current_kernel_time(); + expiry = now.tv_sec + timeout; + } + + key->expiry = expiry; + key_schedule_gc(key->expiry + key_gc_delay); + + up_write(&key->sem); +} +EXPORT_SYMBOL_GPL(key_set_timeout); -/*****************************************************************************/ /* - * unlock a key type + * Unlock a key type locked by key_type_lookup(). */ void key_type_put(struct key_type *ktype) { up_read(&key_types_sem); +} -} /* end key_type_put() */ - -/*****************************************************************************/ /* - * attempt to update an existing key - * - the key has an incremented refcount - * - we need to put the key if we get an error + * Attempt to update an existing key. + * + * The key is given to us with an incremented refcount that we need to discard + * if we get an error. 
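A small sketch of the reference and timeout discipline described here: key_lookup() hands back a usage reference that must be balanced by key_put(), and key_set_timeout() is the newly exported helper. Note that key_lookup() itself is private to security/keys (declared in internal.h), so this only illustrates the contract; the helper name is hypothetical:

#include <linux/key.h>
#include <linux/err.h>

/* Sketch only: give an existing key a five-minute expiry. */
static int example_expire_in_five(key_serial_t serial)
{
        struct key *key = key_lookup(serial);   /* takes a usage ref */

        if (IS_ERR(key))
                return PTR_ERR(key);
        key_set_timeout(key, 5 * 60);           /* 0 would clear the timeout */
        key_put(key);                           /* drop the ref; GC frees at zero */
        return 0;
}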
*/ static inline key_ref_t __key_update(key_ref_t key_ref, - const void *payload, size_t plen) + struct key_preparsed_payload *prep) { struct key *key = key_ref_to_ptr(key_ref); int ret; /* need write permission on the key to update it */ - ret = key_permission(key_ref, KEY_WRITE); + ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; @@ -726,7 +724,7 @@ static inline key_ref_t __key_update(key_ref_t key_ref, down_write(&key->sem); - ret = key->type->update(key, payload, plen); + ret = key->type->update(key, prep); if (ret == 0) /* updating a negative key instantiates it */ clear_bit(KEY_FLAG_NEGATIVE, &key->flags); @@ -742,13 +740,32 @@ error: key_put(key); key_ref = ERR_PTR(ret); goto out; +} -} /* end __key_update() */ - -/*****************************************************************************/ -/* - * search the specified keyring for a key of the same description; if one is - * found, update it, otherwise add a new one +/** + * key_create_or_update - Update or create and instantiate a key. + * @keyring_ref: A pointer to the destination keyring with possession flag. + * @type: The type of key. + * @description: The searchable description for the key. + * @payload: The data to use to instantiate or update the key. + * @plen: The length of @payload. + * @perm: The permissions mask for a new key. + * @flags: The quota flags for a new key. + * + * Search the destination keyring for a key of the same description and if one + * is found, update it, otherwise create and instantiate a new one and create a + * link to it from that keyring. + * + * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be + * concocted. + * + * Returns a pointer to the new key if successful, -ENODEV if the key type + * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the + * caller isn't permitted to modify the keyring or the LSM did not permit + * creation of the key. + * + * On success, the possession flag from the keyring ref will be tacked on to + * the key ref before it is returned. 
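To make the calling convention concrete, a short sketch of creating or updating a key with this function; the keyring variable, the "user" type and the description string are illustrative:

#include <linux/key.h>
#include <linux/err.h>

/* Sketch only: add a "user" key to a keyring we possess, or update the
 * existing key of the same description. */
static int example_stash_blob(struct key *keyring,
                              const void *blob, size_t blob_len)
{
        key_ref_t kref;

        kref = key_create_or_update(make_key_ref(keyring, 1 /* possessed */),
                                    "user", "example:token",
                                    blob, blob_len,
                                    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(kref))
                return PTR_ERR(kref);
        key_ref_put(kref);
        return 0;
}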
*/ key_ref_t key_create_or_update(key_ref_t keyring_ref, const char *type, @@ -758,24 +775,28 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, key_perm_t perm, unsigned long flags) { - struct keyring_list *prealloc; + struct keyring_index_key index_key = { + .description = description, + }; + struct key_preparsed_payload prep; + struct assoc_array_edit *edit; const struct cred *cred = current_cred(); - struct key_type *ktype; struct key *keyring, *key = NULL; key_ref_t key_ref; int ret; /* look up the key type to see if it's one of the registered kernel * types */ - ktype = key_type_lookup(type); - if (IS_ERR(ktype)) { + index_key.type = key_type_lookup(type); + if (IS_ERR(index_key.type)) { key_ref = ERR_PTR(-ENODEV); goto error; } key_ref = ERR_PTR(-EINVAL); - if (!ktype->match || !ktype->instantiate) - goto error_2; + if (!index_key.type->match || !index_key.type->instantiate || + (!index_key.description && !index_key.type->preparse)) + goto error_put_type; keyring = key_ref_to_ptr(keyring_ref); @@ -783,122 +804,173 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref, key_ref = ERR_PTR(-ENOTDIR); if (keyring->type != &key_type_keyring) - goto error_2; + goto error_put_type; + + memset(&prep, 0, sizeof(prep)); + prep.data = payload; + prep.datalen = plen; + prep.quotalen = index_key.type->def_datalen; + prep.trusted = flags & KEY_ALLOC_TRUSTED; + if (index_key.type->preparse) { + ret = index_key.type->preparse(&prep); + if (ret < 0) { + key_ref = ERR_PTR(ret); + goto error_put_type; + } + if (!index_key.description) + index_key.description = prep.description; + key_ref = ERR_PTR(-EINVAL); + if (!index_key.description) + goto error_free_prep; + } + index_key.desc_len = strlen(index_key.description); - ret = __key_link_begin(keyring, ktype, description, &prealloc); - if (ret < 0) - goto error_2; + key_ref = ERR_PTR(-EPERM); + if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags)) + goto error_free_prep; + flags |= prep.trusted ? 
KEY_ALLOC_TRUSTED : 0; + + ret = __key_link_begin(keyring, &index_key, &edit); + if (ret < 0) { + key_ref = ERR_PTR(ret); + goto error_free_prep; + } /* if we're going to allocate a new key, we're going to have * to modify the keyring */ - ret = key_permission(keyring_ref, KEY_WRITE); + ret = key_permission(keyring_ref, KEY_NEED_WRITE); if (ret < 0) { key_ref = ERR_PTR(ret); - goto error_3; + goto error_link_end; } /* if it's possible to update this type of key, search for an existing * key of the same type and description in the destination keyring and * update that instead if possible */ - if (ktype->update) { - key_ref = __keyring_search_one(keyring_ref, ktype, description, - 0); - if (!IS_ERR(key_ref)) + if (index_key.type->update) { + key_ref = find_key_to_update(keyring_ref, &index_key); + if (key_ref) goto found_matching_key; } /* if the client doesn't provide, decide on the permissions we want */ if (perm == KEY_PERM_UNDEF) { perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; - perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR; + perm |= KEY_USR_VIEW; - if (ktype->read) - perm |= KEY_POS_READ | KEY_USR_READ; + if (index_key.type->read) + perm |= KEY_POS_READ; - if (ktype == &key_type_keyring || ktype->update) - perm |= KEY_USR_WRITE; + if (index_key.type == &key_type_keyring || + index_key.type->update) + perm |= KEY_POS_WRITE; } /* allocate a new key */ - key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred, - perm, flags); + key = key_alloc(index_key.type, index_key.description, + cred->fsuid, cred->fsgid, cred, perm, flags); if (IS_ERR(key)) { key_ref = ERR_CAST(key); - goto error_3; + goto error_link_end; } /* instantiate it and link it into the target keyring */ - ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL, - &prealloc); + ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit); if (ret < 0) { key_put(key); key_ref = ERR_PTR(ret); - goto error_3; + goto error_link_end; } key_ref = make_key_ref(key, is_key_possessed(keyring_ref)); - error_3: - __key_link_end(keyring, ktype, prealloc); - error_2: - key_type_put(ktype); - error: +error_link_end: + __key_link_end(keyring, &index_key, edit); +error_free_prep: + if (index_key.type->preparse) + index_key.type->free_preparse(&prep); +error_put_type: + key_type_put(index_key.type); +error: return key_ref; found_matching_key: /* we found a matching key, so we're going to try to update it * - we can drop the locks first as we have the key pinned */ - __key_link_end(keyring, ktype, prealloc); - key_type_put(ktype); - - key_ref = __key_update(key_ref, payload, plen); - goto error; - -} /* end key_create_or_update() */ + __key_link_end(keyring, &index_key, edit); + key_ref = __key_update(key_ref, &prep); + goto error_free_prep; +} EXPORT_SYMBOL(key_create_or_update); -/*****************************************************************************/ -/* - * update a key +/** + * key_update - Update a key's contents. + * @key_ref: The pointer (plus possession flag) to the key. + * @payload: The data to be used to update the key. + * @plen: The length of @payload. + * + * Attempt to update the contents of a key with the given payload data. The + * caller must be granted Write permission on the key. Negative keys can be + * instantiated by this method. + * + * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key + * type does not support updating. The key type may return other errors. 
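A brief sketch of the corresponding call; the key reference is assumed to have been obtained elsewhere, for example from lookup_user_key() or a keyring search:

#include <linux/key.h>
#include <linux/errno.h>

/* Sketch only: swap in a new payload; -EOPNOTSUPP means the key type
 * has no ->update() operation. */
static int example_replace_payload(key_ref_t kref,
                                   const void *data, size_t len)
{
        return key_update(kref, data, len);
}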
*/ int key_update(key_ref_t key_ref, const void *payload, size_t plen) { + struct key_preparsed_payload prep; struct key *key = key_ref_to_ptr(key_ref); int ret; key_check(key); /* the key must be writable */ - ret = key_permission(key_ref, KEY_WRITE); + ret = key_permission(key_ref, KEY_NEED_WRITE); if (ret < 0) goto error; /* attempt to update it if supported */ ret = -EOPNOTSUPP; - if (key->type->update) { - down_write(&key->sem); - - ret = key->type->update(key, payload, plen); - if (ret == 0) - /* updating a negative key instantiates it */ - clear_bit(KEY_FLAG_NEGATIVE, &key->flags); + if (!key->type->update) + goto error; - up_write(&key->sem); + memset(&prep, 0, sizeof(prep)); + prep.data = payload; + prep.datalen = plen; + prep.quotalen = key->type->def_datalen; + if (key->type->preparse) { + ret = key->type->preparse(&prep); + if (ret < 0) + goto error; } - error: - return ret; + down_write(&key->sem); -} /* end key_update() */ + ret = key->type->update(key, &prep); + if (ret == 0) + /* updating a negative key instantiates it */ + clear_bit(KEY_FLAG_NEGATIVE, &key->flags); + up_write(&key->sem); + + if (key->type->preparse) + key->type->free_preparse(&prep); +error: + return ret; +} EXPORT_SYMBOL(key_update); -/*****************************************************************************/ -/* - * revoke a key +/** + * key_revoke - Revoke a key. + * @key: The key to be revoked. + * + * Mark a key as being revoked and ask the type to free up its resources. The + * revocation timeout is set and the key and all its links will be + * automatically garbage collected after key_gc_delay amount of time if they + * are not manually dealt with first. */ void key_revoke(struct key *key) { @@ -926,20 +998,46 @@ void key_revoke(struct key *key) } up_write(&key->sem); +} +EXPORT_SYMBOL(key_revoke); -} /* end key_revoke() */ +/** + * key_invalidate - Invalidate a key. + * @key: The key to be invalidated. + * + * Mark a key as being invalidated and have it cleaned up immediately. The key + * is ignored by all searches and other operations from this point. + */ +void key_invalidate(struct key *key) +{ + kenter("%d", key_serial(key)); -EXPORT_SYMBOL(key_revoke); + key_check(key); -/*****************************************************************************/ -/* - * register a type of key + if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) { + down_write_nested(&key->sem, 1); + if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags)) + key_schedule_gc_links(); + up_write(&key->sem); + } +} +EXPORT_SYMBOL(key_invalidate); + +/** + * register_key_type - Register a type of key. + * @ktype: The new key type. + * + * Register a new key type. + * + * Returns 0 on success or -EEXIST if a type of this name already exists. */ int register_key_type(struct key_type *ktype) { struct key_type *p; int ret; + memset(&ktype->lock_class, 0, sizeof(ktype->lock_class)); + ret = -EEXIST; down_write(&key_types_sem); @@ -951,73 +1049,37 @@ int register_key_type(struct key_type *ktype) /* store the type */ list_add(&ktype->link, &key_types_list); + + pr_notice("Key type %s registered\n", ktype->name); ret = 0; - out: +out: up_write(&key_types_sem); return ret; - -} /* end register_key_type() */ - +} EXPORT_SYMBOL(register_key_type); -/*****************************************************************************/ -/* - * unregister a type of key +/** + * unregister_key_type - Unregister a type of key. + * @ktype: The key type. + * + * Unregister a key type and mark all the extant keys of this type as dead. 
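By way of illustration, a minimal key type module built against the interface as it appears in this patch (struct key_type with ->instantiate() taking a struct key_preparsed_payload); every name here is hypothetical and the type deliberately accepts no payload, like the keyring type:

#include <linux/key-type.h>
#include <linux/seq_file.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/module.h>

/* Sketch only: a trivial key type, registered on module load. */
static int example_instantiate(struct key *key,
                               struct key_preparsed_payload *prep)
{
        /* refuse any payload data */
        return prep->datalen == 0 ? 0 : -EINVAL;
}

static int example_match(const struct key *key, const void *description)
{
        return strcmp(key->description, description) == 0;
}

static void example_describe(const struct key *key, struct seq_file *m)
{
        seq_puts(m, key->description);
}

static struct key_type key_type_example = {
        .name           = "example",
        .instantiate    = example_instantiate,
        .match          = example_match,
        .describe       = example_describe,
};

static int __init example_init(void)
{
        return register_key_type(&key_type_example);
}

static void __exit example_exit(void)
{
        unregister_key_type(&key_type_example);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");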
+ * Those keys of this type are then destroyed to get rid of their payloads and + * they and their links will be garbage collected as soon as possible. */ void unregister_key_type(struct key_type *ktype) { - struct rb_node *_n; - struct key *key; - down_write(&key_types_sem); - - /* withdraw the key type */ list_del_init(&ktype->link); - - /* mark all the keys of this type dead */ - spin_lock(&key_serial_lock); - - for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { - key = rb_entry(_n, struct key, serial_node); - - if (key->type == ktype) { - key->type = &key_type_dead; - set_bit(KEY_FLAG_DEAD, &key->flags); - } - } - - spin_unlock(&key_serial_lock); - - /* make sure everyone revalidates their keys */ - synchronize_rcu(); - - /* we should now be able to destroy the payloads of all the keys of - * this type with impunity */ - spin_lock(&key_serial_lock); - - for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) { - key = rb_entry(_n, struct key, serial_node); - - if (key->type == ktype) { - if (ktype->destroy) - ktype->destroy(key); - memset(&key->payload, KEY_DESTROY, sizeof(key->payload)); - } - } - - spin_unlock(&key_serial_lock); - up_write(&key_types_sem); - - key_schedule_gc(0); - -} /* end unregister_key_type() */ - + downgrade_write(&key_types_sem); + key_gc_keytype(ktype); + pr_notice("Key type %s unregistered\n", ktype->name); + up_read(&key_types_sem); +} EXPORT_SYMBOL(unregister_key_type); -/*****************************************************************************/ /* - * initialise the key management stuff + * Initialise the key management state. */ void __init key_init(void) { @@ -1029,6 +1091,7 @@ void __init key_init(void) list_add_tail(&key_type_keyring.link, &key_types_list); list_add_tail(&key_type_dead.link, &key_types_list); list_add_tail(&key_type_user.link, &key_types_list); + list_add_tail(&key_type_logon.link, &key_types_list); /* record the root user tracking */ rb_link_node(&root_key_user.node, @@ -1037,5 +1100,4 @@ void __init key_init(void) rb_insert_color(&root_key_user.node, &key_user_tree); - -} /* end key_init() */ +} diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c index 60924f6a52d..cd5bd0cef25 100644 --- a/security/keys/keyctl.c +++ b/security/keys/keyctl.c @@ -1,4 +1,4 @@ -/* keyctl.c: userspace keyctl operations +/* Userspace key control operations * * Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved. 
* Written by David Howells (dhowells@redhat.com) @@ -14,6 +14,7 @@ #include <linux/sched.h> #include <linux/slab.h> #include <linux/syscalls.h> +#include <linux/key.h> #include <linux/keyctl.h> #include <linux/fs.h> #include <linux/capability.h> @@ -21,6 +22,7 @@ #include <linux/err.h> #include <linux/vmalloc.h> #include <linux/security.h> +#include <linux/uio.h> #include <asm/uaccess.h> #include "internal.h" @@ -31,28 +33,27 @@ static int key_get_type_from_user(char *type, int ret; ret = strncpy_from_user(type, _type, len); - if (ret < 0) return ret; - if (ret == 0 || ret >= len) return -EINVAL; - if (type[0] == '.') return -EPERM; - type[len - 1] = '\0'; - return 0; } -/*****************************************************************************/ /* - * extract the description of a new key from userspace and either add it as a - * new key to the specified keyring or update a matching key in that keyring - * - the keyring must be writable - * - returns the new key's serial number - * - implements add_key() + * Extract the description of a new key from userspace and either add it as a + * new key to the specified keyring or update a matching key in that keyring. + * + * If the description is NULL or an empty string, the key type is asked to + * generate one from the payload. + * + * The keyring must be writable so that we can attach the key to it. + * + * If successful, the new key's serial number is returned, otherwise an error + * code is returned. */ SYSCALL_DEFINE5(add_key, const char __user *, _type, const char __user *, _description, @@ -75,10 +76,17 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, if (ret < 0) goto error; - description = strndup_user(_description, PAGE_SIZE); - if (IS_ERR(description)) { - ret = PTR_ERR(description); - goto error; + description = NULL; + if (_description) { + description = strndup_user(_description, PAGE_SIZE); + if (IS_ERR(description)) { + ret = PTR_ERR(description); + goto error; + } + if (!*description) { + kfree(description); + description = NULL; + } } /* pull the payload in if one was supplied */ @@ -87,7 +95,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, vm = false; if (_payload) { ret = -ENOMEM; - payload = kmalloc(plen, GFP_KERNEL); + payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN); if (!payload) { if (plen <= PAGE_SIZE) goto error2; @@ -103,7 +111,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, } /* find the target keyring (which must be writable) */ - keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE); + keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error3; @@ -132,19 +140,20 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type, kfree(description); error: return ret; +} -} /* end sys_add_key() */ - -/*****************************************************************************/ /* - * search the process keyrings for a matching key - * - nested keyrings may also be searched if they have Search permission - * - if a key is found, it will be attached to the destination keyring if - * there's one specified - * - /sbin/request-key will be invoked if _callout_info is non-NULL - * - the _callout_info string will be passed to /sbin/request-key - * - if the _callout_info string is empty, it will be rendered as "-" - * - implements request_key() + * Search the process keyrings and keyring trees linked from those for a + * matching key. Keyrings must have appropriate Search permission to be + * searched. 
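From userspace this path is reached through add_key(2); a small sketch using the libkeyutils wrapper (link with -lkeyutils; the description string is made up):

#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char *secret = "hunter2";

        /* add (or update) a "user" key in the session keyring */
        key_serial_t id = add_key("user", "example:token",
                                  secret, strlen(secret),
                                  KEY_SPEC_SESSION_KEYRING);
        if (id == -1) {
                perror("add_key");
                return 1;
        }
        printf("key %d added\n", id);
        return 0;
}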
+ * + * If a key is found, it will be attached to the destination keyring if there's + * one specified and the serial number of the key will be returned. + * + * If no key is found, /sbin/request-key will be invoked if _callout_info is + * non-NULL in an attempt to create a key. The _callout_info string will be + * passed to /sbin/request-key to aid with completing the request. If the + * _callout_info string is "" then it will be changed to "-". */ SYSCALL_DEFINE4(request_key, const char __user *, _type, const char __user *, _description, @@ -186,7 +195,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type, dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, - KEY_WRITE); + KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; @@ -209,8 +218,14 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type, goto error5; } + /* wait for the key to finish being constructed */ + ret = wait_for_key_construction(key, 1); + if (ret < 0) + goto error6; + ret = key->serial; +error6: key_put(key); error5: key_type_put(ktype); @@ -222,14 +237,14 @@ error2: kfree(description); error: return ret; +} -} /* end sys_request_key() */ - -/*****************************************************************************/ /* - * get the ID of the specified process keyring - * - the keyring must have search permission to be found - * - implements keyctl(KEYCTL_GET_KEYRING_ID) + * Get the ID of the specified process keyring. + * + * The requested keyring must have search permission to be found. + * + * If successful, the ID of the requested keyring will be returned. */ long keyctl_get_keyring_ID(key_serial_t id, int create) { @@ -238,7 +253,7 @@ long keyctl_get_keyring_ID(key_serial_t id, int create) long ret; lflags = create ? KEY_LOOKUP_CREATE : 0; - key_ref = lookup_user_key(id, lflags, KEY_SEARCH); + key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; @@ -248,13 +263,17 @@ long keyctl_get_keyring_ID(key_serial_t id, int create) key_ref_put(key_ref); error: return ret; +} -} /* end keyctl_get_keyring_ID() */ - -/*****************************************************************************/ /* - * join the session keyring - * - implements keyctl(KEYCTL_JOIN_SESSION_KEYRING) + * Join a (named) session keyring. + * + * Create and join an anonymous session keyring or join a named session + * keyring, creating it if necessary. A named session keyring must have Search + * permission for it to be joined. Session keyrings without this permit will + * be skipped over. + * + * If successful, the ID of the joined session keyring will be returned. */ long keyctl_join_session_keyring(const char __user *_name) { @@ -277,14 +296,17 @@ long keyctl_join_session_keyring(const char __user *_name) error: return ret; +} -} /* end keyctl_join_session_keyring() */ - -/*****************************************************************************/ /* - * update a key's data payload - * - the key must be writable - * - implements keyctl(KEYCTL_UPDATE) + * Update a key's data payload from the given data. + * + * The key must grant the caller Write permission and the key type must support + * updating for this to work. A negative key can be positively instantiated + * with this call. + * + * If successful, 0 will be returned. If the key type does not support + * updating, then -EOPNOTSUPP will be returned. 
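The userspace entry point here is request_key(2); a sketch with libkeyutils (the description and callout string are illustrative), which also benefits from the wait-for-construction step added above:

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
        /* search our keyrings; upcall /sbin/request-key if nothing matches */
        key_serial_t id = request_key("user", "example:token", "debug:1",
                                      KEY_SPEC_SESSION_KEYRING);
        if (id == -1) {
                perror("request_key");
                return 1;
        }
        printf("got key %d\n", id);
        return 0;
}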
*/ long keyctl_update_key(key_serial_t id, const void __user *_payload, size_t plen) @@ -312,7 +334,7 @@ long keyctl_update_key(key_serial_t id, } /* find the target key (which must be writable) */ - key_ref = lookup_user_key(id, 0, KEY_WRITE); + key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; @@ -326,26 +348,29 @@ error2: kfree(payload); error: return ret; +} -} /* end keyctl_update_key() */ - -/*****************************************************************************/ /* - * revoke a key - * - the key must be writable - * - implements keyctl(KEYCTL_REVOKE) + * Revoke a key. + * + * The key must grant the caller Write or Setattr permission for this to + * work. The key type should give up its quota claim when revoked. The key + * and any links to the key will be automatically garbage collected after a + * certain amount of time (/proc/sys/kernel/keys/gc_delay). + * + * If successful, 0 is returned. */ long keyctl_revoke_key(key_serial_t id) { key_ref_t key_ref; long ret; - key_ref = lookup_user_key(id, 0, KEY_WRITE); + key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); if (ret != -EACCES) goto error; - key_ref = lookup_user_key(id, 0, KEY_SETATTR); + key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; @@ -358,53 +383,100 @@ long keyctl_revoke_key(key_serial_t id) key_ref_put(key_ref); error: return ret; +} + +/* + * Invalidate a key. + * + * The key must grant the caller Invalidate permission for this to work. + * The key and any links to the key will be automatically garbage collected + * immediately. + * + * If successful, 0 is returned. + */ +long keyctl_invalidate_key(key_serial_t id) +{ + key_ref_t key_ref; + long ret; + + kenter("%d", id); + + key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH); + if (IS_ERR(key_ref)) { + ret = PTR_ERR(key_ref); + goto error; + } -} /* end keyctl_revoke_key() */ + key_invalidate(key_ref_to_ptr(key_ref)); + ret = 0; + + key_ref_put(key_ref); +error: + kleave(" = %ld", ret); + return ret; +} -/*****************************************************************************/ /* - * clear the specified process keyring - * - the keyring must be writable - * - implements keyctl(KEYCTL_CLEAR) + * Clear the specified keyring, creating an empty process keyring if one of the + * special keyring IDs is used. + * + * The keyring must grant the caller Write permission for this to work. If + * successful, 0 will be returned.
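For completeness, the userspace counterparts as a sketch with libkeyutils; KEYCTL_INVALIDATE needs a kernel and keyutils recent enough to carry the new operation introduced by this patch:

#include <keyutils.h>
#include <stdio.h>

/* Sketch only: revoke one key, invalidate another. */
static void example_drop(key_serial_t revoke_me, key_serial_t invalidate_me)
{
        if (keyctl_revoke(revoke_me) == -1)
                perror("keyctl_revoke");
        if (keyctl(KEYCTL_INVALIDATE, invalidate_me) == -1)
                perror("KEYCTL_INVALIDATE");
}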
*/ long keyctl_keyring_clear(key_serial_t ringid) { key_ref_t keyring_ref; long ret; - keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE); + keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); + + /* Root is permitted to invalidate certain special keyrings */ + if (capable(CAP_SYS_ADMIN)) { + keyring_ref = lookup_user_key(ringid, 0, 0); + if (IS_ERR(keyring_ref)) + goto error; + if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR, + &key_ref_to_ptr(keyring_ref)->flags)) + goto clear; + goto error_put; + } + goto error; } +clear: ret = keyring_clear(key_ref_to_ptr(keyring_ref)); - +error_put: key_ref_put(keyring_ref); error: return ret; +} -} /* end keyctl_keyring_clear() */ - -/*****************************************************************************/ /* - * link a key into a keyring - * - the keyring must be writable - * - the key must be linkable - * - implements keyctl(KEYCTL_LINK) + * Create a link from a keyring to a key if there's no matching key in the + * keyring, otherwise replace the link to the matching key with a link to the + * new key. + * + * The key must grant the caller Link permission and the keyring must grant + * the caller Write permission. Furthermore, if an additional link is created, + * the keyring's quota will be extended. + * + * If successful, 0 will be returned. */ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; long ret; - keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE); + keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; } - key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_LINK); + key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error2; @@ -417,22 +489,23 @@ error2: key_ref_put(keyring_ref); error: return ret; +} -} /* end keyctl_keyring_link() */ - -/*****************************************************************************/ /* - * unlink the first attachment of a key from a keyring - * - the keyring must be writable - * - we don't need any permissions on the key - * - implements keyctl(KEYCTL_UNLINK) + * Unlink a key from a keyring. + * + * The keyring must grant the caller Write permission for this to work; the key + * itself need not grant the caller anything. If the last link to a key is + * removed then that key will be scheduled for destruction. + * + * If successful, 0 will be returned. */ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid) { key_ref_t keyring_ref, key_ref; long ret; - keyring_ref = lookup_user_key(ringid, 0, KEY_WRITE); + keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error; @@ -451,19 +524,20 @@ error2: key_ref_put(keyring_ref); error: return ret; +} -} /* end keyctl_keyring_unlink() */ - -/*****************************************************************************/ /* - * describe a user key - * - the key must have view permission - * - if there's a buffer, we place up to buflen bytes of data into it - * - unless there's an error, we return the amount of description available, - * irrespective of how much we may have copied - * - the description is formatted thus: + * Return a description of a key to userspace. + * + * The key must grant the caller View permission for this to work.
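A short userspace sketch of the three keyring-manipulation calls documented here, using the libkeyutils wrappers:

#include <keyutils.h>
#include <stdio.h>

/* Sketch only: link a key into a keyring, drop the link again, then
 * empty the keyring entirely. */
static void example_links(key_serial_t key, key_serial_t keyring)
{
        if (keyctl_link(key, keyring) == -1)
                perror("keyctl_link");
        if (keyctl_unlink(key, keyring) == -1)
                perror("keyctl_unlink");
        if (keyctl_clear(keyring) == -1)
                perror("keyctl_clear");
}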
+ * + * If there's a buffer, we place up to buflen bytes of data into it formatted + * in the following way: + * * type;uid;gid;perm;description<NUL> - * - implements keyctl(KEYCTL_DESCRIBE) + * + * If successful, we return the amount of description available, irrespective + * of how much we may have copied into the buffer. */ long keyctl_describe_key(key_serial_t keyid, char __user *buffer, @@ -474,7 +548,7 @@ long keyctl_describe_key(key_serial_t keyid, char *tmpbuf; long ret; - key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW); + key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { /* viewing a key under construction is permitted if we have the * authorisation token handy */ @@ -506,8 +580,8 @@ okay: ret = snprintf(tmpbuf, PAGE_SIZE - 1, "%s;%d;%d;%08x;%s", key->type->name, - key->uid, - key->gid, + from_kuid_munged(current_user_ns(), key->uid), + from_kgid_munged(current_user_ns(), key->gid), key->perm, key->description ?: ""); @@ -531,18 +605,17 @@ error2: key_ref_put(key_ref); error: return ret; +} -} /* end keyctl_describe_key() */ - -/*****************************************************************************/ /* - * search the specified keyring for a matching key - * - the start keyring must be searchable - * - nested keyrings may also be searched if they are searchable - * - only keys with search permission may be found - * - if a key is found, it will be attached to the destination keyring if - * there's one specified - * - implements keyctl(KEYCTL_SEARCH) + * Search the specified keyring and any keyrings it links to for a matching + * key. Only keyrings that grant the caller Search permission will be searched + * (this includes the starting keyring). Only keys with Search permission can + * be found. + * + * If successful, the found key will be linked to the destination keyring if + * supplied and the key has Link permission, and the found key ID will be + * returned. */ long keyctl_keyring_search(key_serial_t ringid, const char __user *_type, @@ -566,7 +639,7 @@ long keyctl_keyring_search(key_serial_t ringid, } /* get the keyring at which to begin the search */ - keyring_ref = lookup_user_key(ringid, 0, KEY_SEARCH); + keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH); if (IS_ERR(keyring_ref)) { ret = PTR_ERR(keyring_ref); goto error2; @@ -576,7 +649,7 @@ long keyctl_keyring_search(key_serial_t ringid, dest_ref = NULL; if (destringid) { dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE, - KEY_WRITE); + KEY_NEED_WRITE); if (IS_ERR(dest_ref)) { ret = PTR_ERR(dest_ref); goto error3; @@ -603,7 +676,7 @@ long keyctl_keyring_search(key_serial_t ringid, /* link the resulting key to the destination keyring if we can */ if (dest_ref) { - ret = key_permission(key_ref, KEY_LINK); + ret = key_permission(key_ref, KEY_NEED_LINK); if (ret < 0) goto error6; @@ -626,18 +699,17 @@ error2: kfree(description); error: return ret; +} -} /* end keyctl_keyring_search() */ - -/*****************************************************************************/ /* - * read a user key's payload - * - the keyring must be readable or the key must be searchable from the - * process's keyrings - * - if there's a buffer, we place up to buflen bytes of data into it - * - unless there's an error, we return the amount of data in the key, - * irrespective of how much we may have copied - * - implements keyctl(KEYCTL_READ) + * Read a key's payload. 
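The "type;uid;gid;perm;description" format above can be inspected with a sketch like this (libkeyutils; keyctl_describe_alloc() allocates the buffer for us):

#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

/* Sketch only: print the "type;uid;gid;perm;description" string of a key. */
static void example_show(key_serial_t id)
{
        char *desc;

        if (keyctl_describe_alloc(id, &desc) == -1) {
                perror("keyctl_describe_alloc");
                return;
        }
        printf("%d: %s\n", id, desc);
        free(desc);
}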
+ * + * The key must either grant the caller Read permission, or it must grant the + * caller Search permission when searched for from the process keyrings. + * + * If successful, we place up to buflen bytes of data into the buffer, if one + * is provided, and return the amount of data that is available in the key, + * irrespective of how much we copied into the buffer. */ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) { @@ -655,7 +727,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen) key = key_ref_to_ptr(key_ref); /* see if we can read it directly */ - ret = key_permission(key_ref, KEY_READ); + ret = key_permission(key_ref, KEY_NEED_READ); if (ret == 0) goto can_read_key; if (ret != -EACCES) @@ -688,29 +760,46 @@ error2: key_put(key); error: return ret; +} -} /* end keyctl_read_key() */ - -/*****************************************************************************/ /* - * change the ownership of a key - * - the keyring owned by the changer - * - if the uid or gid is -1, then that parameter is not changed - * - implements keyctl(KEYCTL_CHOWN) + * Change the ownership of a key + * + * The key must grant the caller Setattr permission for this to work, though + * the key need not be fully instantiated yet. For the UID to be changed, or + * for the GID to be changed to a group the caller is not a member of, the + * caller must have sysadmin capability. If either uid or gid is -1 then that + * attribute is not changed. + * + * If the UID is to be changed, the new user must have sufficient quota to + * accept the key. The quota deduction will be removed from the old user to + * the new user should the attribute be changed. + * + * If successful, 0 will be returned. */ -long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) +long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group) { struct key_user *newowner, *zapowner = NULL; struct key *key; key_ref_t key_ref; long ret; + kuid_t uid; + kgid_t gid; + + uid = make_kuid(current_user_ns(), user); + gid = make_kgid(current_user_ns(), group); + ret = -EINVAL; + if ((user != (uid_t) -1) && !uid_valid(uid)) + goto error; + if ((group != (gid_t) -1) && !gid_valid(gid)) + goto error; ret = 0; - if (uid == (uid_t) -1 && gid == (gid_t) -1) + if (user == (uid_t) -1 && group == (gid_t) -1) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, - KEY_SETATTR); + KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; @@ -724,27 +813,27 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) if (!capable(CAP_SYS_ADMIN)) { /* only the sysadmin can chown a key to some other UID */ - if (uid != (uid_t) -1 && key->uid != uid) + if (user != (uid_t) -1 && !uid_eq(key->uid, uid)) goto error_put; /* only the sysadmin can set the key's GID to a group other * than one of those that the current process subscribes to */ - if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid)) + if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid)) goto error_put; } /* change the UID */ - if (uid != (uid_t) -1 && uid != key->uid) { + if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) { ret = -ENOMEM; - newowner = key_user_lookup(uid, current_user_ns()); + newowner = key_user_lookup(uid); if (!newowner) goto error_put; /* transfer the quota burden to the new user */ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) { - unsigned maxkeys = (uid == 0) ? + unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxkeys : key_quota_maxkeys; - unsigned maxbytes = (uid == 0) ? + unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ? key_quota_root_maxbytes : key_quota_maxbytes; spin_lock(&newowner->lock); @@ -778,7 +867,7 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid) } /* change the GID */ - if (gid != (gid_t) -1) + if (group != (gid_t) -1) key->gid = gid; ret = 0; @@ -796,14 +885,14 @@ quota_overrun: zapowner = newowner; ret = -EDQUOT; goto error_put; +} -} /* end keyctl_chown_key() */ - -/*****************************************************************************/ /* - * change the permission mask on a key - * - the keyring owned by the changer - * - implements keyctl(KEYCTL_SETPERM) + * Change the permission mask on a key. + * + * The key must grant the caller Setattr permission for this to work, though + * the key need not be fully instantiated yet. If the caller does not have + * sysadmin capability, it may only change the permission on keys that it owns. */ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) { @@ -816,7 +905,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) goto error; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, - KEY_SETATTR); + KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { ret = PTR_ERR(key_ref); goto error; @@ -829,7 +918,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) down_write(&key->sem); /* if we're not the sysadmin, we can only change a key that we own */ - if (capable(CAP_SYS_ADMIN) || key->uid == current_fsuid()) { + if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) { key->perm = perm; ret = 0; } @@ -838,11 +927,11 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm) key_put(key); error: return ret; - -} /* end keyctl_setperm_key() */ +} /* - * get the destination keyring for instantiation + * Get the destination keyring for instantiation and check that the caller has + * Write permission on it. */ static long get_instantiation_keyring(key_serial_t ringid, struct request_key_auth *rka, @@ -858,7 +947,7 @@ static long get_instantiation_keyring(key_serial_t ringid, /* if a specific keyring is nominated by ID, then use that */ if (ringid > 0) { - dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE); + dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); if (IS_ERR(dkref)) return PTR_ERR(dkref); *_dest_keyring = key_ref_to_ptr(dkref); @@ -879,7 +968,7 @@ static long get_instantiation_keyring(key_serial_t ringid, } /* - * change the request_key authorisation key on the current process + * Change the request_key authorisation key on the current process. */ static int keyctl_change_reqkey_auth(struct key *key) { @@ -895,15 +984,35 @@ static int keyctl_change_reqkey_auth(struct key *key) return commit_creds(new); } -/*****************************************************************************/ /* - * instantiate the key with the specified payload, and, if one is given, link - * the key into the keyring + * Copy the iovec data from userspace */ -long keyctl_instantiate_key(key_serial_t id, - const void __user *_payload, - size_t plen, - key_serial_t ringid) +static long copy_from_user_iovec(void *buffer, const struct iovec *iov, + unsigned ioc) +{ + for (; ioc > 0; ioc--) { + if (copy_from_user(buffer, iov->iov_base, iov->iov_len) != 0) + return -EFAULT; + buffer += iov->iov_len; + iov++; + } + return 0; +} + +/* + * Instantiate a key with the specified payload and link the key into the + * destination keyring if one is given. 
+ * + * The caller must have the appropriate instantiation permit set for this to + * work (see keyctl_assume_authority). No other permissions are required. + * + * If successful, 0 will be returned. + */ +long keyctl_instantiate_key_common(key_serial_t id, + const struct iovec *payload_iov, + unsigned ioc, + size_t plen, + key_serial_t ringid) { const struct cred *cred = current_cred(); struct request_key_auth *rka; @@ -932,7 +1041,7 @@ long keyctl_instantiate_key(key_serial_t id, /* pull the payload in if one was supplied */ payload = NULL; - if (_payload) { + if (payload_iov) { ret = -ENOMEM; payload = kmalloc(plen, GFP_KERNEL); if (!payload) { @@ -944,8 +1053,8 @@ long keyctl_instantiate_key(key_serial_t id, goto error; } - ret = -EFAULT; - if (copy_from_user(payload, _payload, plen) != 0) + ret = copy_from_user_iovec(payload, payload_iov, ioc); + if (ret < 0) goto error2; } @@ -973,22 +1082,127 @@ error2: vfree(payload); error: return ret; +} + +/* + * Instantiate a key with the specified payload and link the key into the + * destination keyring if one is given. + * + * The caller must have the appropriate instantiation permit set for this to + * work (see keyctl_assume_authority). No other permissions are required. + * + * If successful, 0 will be returned. + */ +long keyctl_instantiate_key(key_serial_t id, + const void __user *_payload, + size_t plen, + key_serial_t ringid) +{ + if (_payload && plen) { + struct iovec iov[1] = { + [0].iov_base = (void __user *)_payload, + [0].iov_len = plen + }; + + return keyctl_instantiate_key_common(id, iov, 1, plen, ringid); + } + + return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid); +} + +/* + * Instantiate a key with the specified multipart payload and link the key into + * the destination keyring if one is given. + * + * The caller must have the appropriate instantiation permit set for this to + * work (see keyctl_assume_authority). No other permissions are required. + * + * If successful, 0 will be returned. + */ +long keyctl_instantiate_key_iov(key_serial_t id, + const struct iovec __user *_payload_iov, + unsigned ioc, + key_serial_t ringid) +{ + struct iovec iovstack[UIO_FASTIOV], *iov = iovstack; + long ret; + + if (!_payload_iov || !ioc) + goto no_payload; + + ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc, + ARRAY_SIZE(iovstack), iovstack, &iov); + if (ret < 0) + goto err; + if (ret == 0) + goto no_payload_free; + + ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid); +err: + if (iov != iovstack) + kfree(iov); + return ret; -} /* end keyctl_instantiate_key() */ +no_payload_free: + if (iov != iovstack) + kfree(iov); +no_payload: + return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid); +} -/*****************************************************************************/ /* - * negatively instantiate the key with the given timeout (in seconds), and, if - * one is given, link the key into the keyring + * Negatively instantiate the key with the given timeout (in seconds) and link + * the key into the destination keyring if one is given. + * + * The caller must have the appropriate instantiation permit set for this to + * work (see keyctl_assume_authority). No other permissions are required. + * + * The key and any links to the key will be automatically garbage collected + * after the timeout expires. + * + * Negative keys are used to rate limit repeated request_key() calls by causing + * them to return -ENOKEY until the negative key expires. + * + * If successful, 0 will be returned. 
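Putting the instantiation pieces together, a sketch of the core of a /sbin/request-key helper using libkeyutils wrappers for the KEYCTL_INSTANTIATE_IOV and KEYCTL_REJECT operations added here; all names and the two-part payload are illustrative:

#include <keyutils.h>
#include <sys/uio.h>
#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

int main(int argc, char *argv[])
{
        struct iovec iov[2] = {
                { .iov_base = "hello ", .iov_len = 6 },
                { .iov_base = "world",  .iov_len = 5 },
        };
        key_serial_t key;

        if (argc < 2)
                return 1;
        key = atoi(argv[1]);    /* key ID passed by the kernel upcall */

        /* adopt the authorisation key so we may instantiate */
        if (keyctl_assume_authority(key) == -1) {
                perror("keyctl_assume_authority");
                return 1;
        }

        /* supply the payload in two pieces, or rate-limit retries on failure */
        if (keyctl_instantiate_iov(key, iov, 2, KEY_SPEC_REQUESTOR_KEYRING) == -1) {
                keyctl_reject(key, 30, ENOKEY, KEY_SPEC_REQUESTOR_KEYRING);
                return 1;
        }
        return 0;
}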
*/ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) { + return keyctl_reject_key(id, timeout, ENOKEY, ringid); +} + +/* + * Negatively instantiate the key with the given timeout (in seconds) and error + * code and link the key into the destination keyring if one is given. + * + * The caller must have the appropriate instantiation permit set for this to + * work (see keyctl_assume_authority). No other permissions are required. + * + * The key and any links to the key will be automatically garbage collected + * after the timeout expires. + * + * Negative keys are used to rate limit repeated request_key() calls by causing + * them to return the specified error code until the negative key expires. + * + * If successful, 0 will be returned. + */ +long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error, + key_serial_t ringid) +{ const struct cred *cred = current_cred(); struct request_key_auth *rka; struct key *instkey, *dest_keyring; long ret; - kenter("%d,%u,%d", id, timeout, ringid); + kenter("%d,%u,%u,%d", id, timeout, error, ringid); + + /* must be a valid error code and mustn't be a kernel special */ + if (error <= 0 || + error >= MAX_ERRNO || + error == ERESTARTSYS || + error == ERESTARTNOINTR || + error == ERESTARTNOHAND || + error == ERESTART_RESTARTBLOCK) + return -EINVAL; /* the appropriate instantiation authorisation key must have been * assumed before calling this */ @@ -1008,7 +1222,7 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) goto error; /* instantiate the key and link it into a keyring */ - ret = key_negate_and_link(rka->target_key, timeout, + ret = key_reject_and_link(rka->target_key, timeout, error, dest_keyring, instkey); key_put(dest_keyring); @@ -1020,13 +1234,14 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid) error: return ret; +} -} /* end keyctl_negate_key() */ - -/*****************************************************************************/ /* - * set the default keyring in which request_key() will cache keys - * - return the old setting + * Read or set the default keyring in which request_key() will cache keys and + * return the old setting. + * + * If a process keyring is specified then this will be created if it doesn't + * yet exist. The old setting will be returned if successful. */ long keyctl_set_reqkey_keyring(int reqkey_defl) { @@ -1079,23 +1294,28 @@ set: error: abort_creds(new); return ret; +} -} /* end keyctl_set_reqkey_keyring() */ - -/*****************************************************************************/ /* - * set or clear the timeout for a key + * Set or clear the timeout on a key. + * + * Either the key must grant the caller Setattr permission or else the caller + * must hold an instantiation authorisation token for the key. + * + * The timeout is either 0 to clear the timeout, or a number of seconds from + * the current time. The key and any links to the key will be automatically + * garbage collected after the timeout expires. + * + * If successful, 0 is returned. 
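The userspace side of this is a one-liner; a sketch with libkeyutils, using an arbitrary ten-minute lifetime:

#include <keyutils.h>
#include <stdio.h>

/* Sketch only: give a key a ten-minute lifetime; 0 clears the timeout. */
static void example_expire(key_serial_t id)
{
        if (keyctl_set_timeout(id, 600) == -1)
                perror("keyctl_set_timeout");
}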
*/ long keyctl_set_timeout(key_serial_t id, unsigned timeout) { - struct timespec now; struct key *key, *instkey; key_ref_t key_ref; - time_t expiry; long ret; key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL, - KEY_SETATTR); + KEY_NEED_SETATTR); if (IS_ERR(key_ref)) { /* setting the timeout on a key under construction is permitted * if we have the authorisation token handy */ @@ -1117,31 +1337,30 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout) okay: key = key_ref_to_ptr(key_ref); - - /* make the changes with the locks held to prevent races */ - down_write(&key->sem); - - expiry = 0; - if (timeout > 0) { - now = current_kernel_time(); - expiry = now.tv_sec + timeout; - } - - key->expiry = expiry; - key_schedule_gc(key->expiry + key_gc_delay); - - up_write(&key->sem); + key_set_timeout(key, timeout); key_put(key); ret = 0; error: return ret; +} -} /* end keyctl_set_timeout() */ - -/*****************************************************************************/ /* - * assume the authority to instantiate the specified key + * Assume (or clear) the authority to instantiate the specified key. + * + * This sets the authoritative token currently in force for key instantiation. + * This must be done for a key to be instantiated. It has the effect of making + * available all the keys from the caller of the request_key() that created a + * key to request_key() calls made by the caller of this function. + * + * The caller must have the instantiation key in their process keyrings with a + * Search permission grant available to the caller. + * + * If the ID given is 0, then the setting will be cleared and 0 returned. + * + * If the ID given has a matching authorisation key, then that key will be + * set and its ID will be returned. The authorisation key can be read to get + * the callout information passed to request_key(). */ long keyctl_assume_authority(key_serial_t id) { @@ -1178,16 +1397,17 @@ long keyctl_assume_authority(key_serial_t id) ret = authkey->serial; error: return ret; - -} /* end keyctl_assume_authority() */ +} /* - * get the security label of a key - * - the key must grant us view permission - * - if there's a buffer, we place up to buflen bytes of data into it - * - unless there's an error, we return the amount of information available, - * irrespective of how much we may have copied (including the terminal NUL) - * - implements keyctl(KEYCTL_GET_SECURITY) + * Get a key's LSM security label. + * + * The key must grant the caller View permission for this to work. + * + * If there's a buffer, then up to buflen bytes of data will be placed into it. + * + * If successful, the amount of information available will be returned, + * irrespective of how much was copied (including the terminal NUL). */ long keyctl_get_security(key_serial_t keyid, char __user *buffer, @@ -1198,7 +1418,7 @@ long keyctl_get_security(key_serial_t keyid, char *context; long ret; - key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW); + key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); if (IS_ERR(key_ref)) { if (PTR_ERR(key_ref) != -EACCES) return PTR_ERR(key_ref); @@ -1242,115 +1462,110 @@ long keyctl_get_security(key_serial_t keyid, } /* - * attempt to install the calling process's session keyring on the process's - * parent process - * - the keyring must exist and must grant us LINK permission - * - implements keyctl(KEYCTL_SESSION_TO_PARENT) + * Attempt to install the calling process's session keyring on the process's + * parent process.
+ * + * The keyring must exist and must grant the caller LINK permission, and the + * parent process must be single-threaded and must have the same effective + * ownership as this process and mustn't be SUID/SGID. + * + * The keyring will be emplaced on the parent when it next resumes userspace. + * + * If successful, 0 will be returned. */ long keyctl_session_to_parent(void) { -#ifdef TIF_NOTIFY_RESUME struct task_struct *me, *parent; const struct cred *mycred, *pcred; - struct cred *cred, *oldcred; + struct callback_head *newwork, *oldwork; key_ref_t keyring_r; + struct cred *cred; int ret; - keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK); + keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK); if (IS_ERR(keyring_r)) return PTR_ERR(keyring_r); + ret = -ENOMEM; + /* our parent is going to need a new cred struct, a new tgcred struct * and new security data, so we allocate them here to prevent ENOMEM in * our parent */ - ret = -ENOMEM; cred = cred_alloc_blank(); if (!cred) goto error_keyring; + newwork = &cred->rcu; - cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r); + cred->session_keyring = key_ref_to_ptr(keyring_r); keyring_r = NULL; + init_task_work(newwork, key_change_session_keyring); me = current; rcu_read_lock(); write_lock_irq(&tasklist_lock); - parent = me->real_parent; ret = -EPERM; + oldwork = NULL; + parent = me->real_parent; /* the parent mustn't be init and mustn't be a kernel thread */ if (parent->pid <= 1 || !parent->mm) - goto not_permitted; + goto unlock; /* the parent must be single threaded */ if (!thread_group_empty(parent)) - goto not_permitted; + goto unlock; /* the parent and the child must have different session keyrings or * there's no point */ mycred = current_cred(); pcred = __task_cred(parent); if (mycred == pcred || - mycred->tgcred->session_keyring == pcred->tgcred->session_keyring) - goto already_same; + mycred->session_keyring == pcred->session_keyring) { + ret = 0; + goto unlock; + } /* the parent must have the same effective ownership and mustn't be * SUID/SGID */ - if (pcred->uid != mycred->euid || - pcred->euid != mycred->euid || - pcred->suid != mycred->euid || - pcred->gid != mycred->egid || - pcred->egid != mycred->egid || - pcred->sgid != mycred->egid) - goto not_permitted; + if (!uid_eq(pcred->uid, mycred->euid) || + !uid_eq(pcred->euid, mycred->euid) || + !uid_eq(pcred->suid, mycred->euid) || + !gid_eq(pcred->gid, mycred->egid) || + !gid_eq(pcred->egid, mycred->egid) || + !gid_eq(pcred->sgid, mycred->egid)) + goto unlock; /* the keyrings must have the same UID */ - if ((pcred->tgcred->session_keyring && - pcred->tgcred->session_keyring->uid != mycred->euid) || - mycred->tgcred->session_keyring->uid != mycred->euid) - goto not_permitted; + if ((pcred->session_keyring && + !uid_eq(pcred->session_keyring->uid, mycred->euid)) || + !uid_eq(mycred->session_keyring->uid, mycred->euid)) + goto unlock; - /* if there's an already pending keyring replacement, then we replace - * that */ - oldcred = parent->replacement_session_keyring; + /* cancel an already pending keyring replacement */ + oldwork = task_work_cancel(parent, key_change_session_keyring); /* the replacement session keyring is applied just prior to userspace * restarting */ - parent->replacement_session_keyring = cred; - cred = NULL; - set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); - - write_unlock_irq(&tasklist_lock); - rcu_read_unlock(); - if (oldcred) - put_cred(oldcred); - return 0; - -already_same: - ret = 0; -not_permitted: 
+ ret = task_work_add(parent, newwork, true); + if (!ret) + newwork = NULL; +unlock: write_unlock_irq(&tasklist_lock); rcu_read_unlock(); - put_cred(cred); + if (oldwork) + put_cred(container_of(oldwork, struct cred, rcu)); + if (newwork) + put_cred(cred); return ret; error_keyring: key_ref_put(keyring_r); return ret; - -#else /* !TIF_NOTIFY_RESUME */ - /* - * To be removed when TIF_NOTIFY_RESUME has been implemented on - * m68k/xtensa - */ -#warning TIF_NOTIFY_RESUME not implemented - return -EOPNOTSUPP; -#endif /* !TIF_NOTIFY_RESUME */ } -/*****************************************************************************/ /* - * the key control system call + * The key control system call */ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, unsigned long, arg4, unsigned long, arg5) @@ -1436,8 +1651,26 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3, case KEYCTL_SESSION_TO_PARENT: return keyctl_session_to_parent(); + case KEYCTL_REJECT: + return keyctl_reject_key((key_serial_t) arg2, + (unsigned) arg3, + (unsigned) arg4, + (key_serial_t) arg5); + + case KEYCTL_INSTANTIATE_IOV: + return keyctl_instantiate_key_iov( + (key_serial_t) arg2, + (const struct iovec __user *) arg3, + (unsigned) arg4, + (key_serial_t) arg5); + + case KEYCTL_INVALIDATE: + return keyctl_invalidate_key((key_serial_t) arg2); + + case KEYCTL_GET_PERSISTENT: + return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3); + default: return -EOPNOTSUPP; } - -} /* end sys_keyctl() */ +} diff --git a/security/keys/keyring.c b/security/keys/keyring.c index d37f713e73c..9cf2575f0d9 100644 --- a/security/keys/keyring.c +++ b/security/keys/keyring.c @@ -1,6 +1,6 @@ /* Keyring handling * - * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or @@ -17,25 +17,44 @@ #include <linux/seq_file.h> #include <linux/err.h> #include <keys/keyring-type.h> +#include <keys/user-type.h> +#include <linux/assoc_array_priv.h> #include <linux/uaccess.h> #include "internal.h" -#define rcu_dereference_locked_keyring(keyring) \ - (rcu_dereference_protected( \ - (keyring)->payload.subscriptions, \ - rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem))) - /* - * when plumbing the depths of the key tree, this sets a hard limit set on how - * deep we're willing to go + * When plumbing the depths of the key tree, this sets a hard limit + * set on how deep we're willing to go. */ #define KEYRING_SEARCH_MAX_DEPTH 6 /* - * we keep all named keyrings in a hash to speed looking them up + * We keep all named keyrings in a hash to speed looking them up. */ #define KEYRING_NAME_HASH_SIZE (1 << 5) +/* + * We mark pointers we pass to the associative array with bit 1 set if + * they're keyrings and clear otherwise. 
+ */ +#define KEYRING_PTR_SUBTYPE 0x2UL + +static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x) +{ + return (unsigned long)x & KEYRING_PTR_SUBTYPE; +} +static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x) +{ + void *object = assoc_array_ptr_to_leaf(x); + return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE); +} +static inline void *keyring_key_to_ptr(struct key *key) +{ + if (key->type == &key_type_keyring) + return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE); + return key; +} + static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE]; static DEFINE_RWLOCK(keyring_name_lock); @@ -50,11 +69,12 @@ static inline unsigned keyring_hash(const char *desc) } /* - * the keyring type definition + * The keyring key type definition. Keyrings are simply keys of this type and + * can be treated as ordinary keys in addition to having their own special + * operations. */ static int keyring_instantiate(struct key *keyring, - const void *data, size_t datalen); -static int keyring_match(const struct key *keyring, const void *criterion); + struct key_preparsed_payload *prep); static void keyring_revoke(struct key *keyring); static void keyring_destroy(struct key *keyring); static void keyring_describe(const struct key *keyring, struct seq_file *m); @@ -63,27 +83,25 @@ static long keyring_read(const struct key *keyring, struct key_type key_type_keyring = { .name = "keyring", - .def_datalen = sizeof(struct keyring_list), + .def_datalen = 0, .instantiate = keyring_instantiate, - .match = keyring_match, + .match = user_match, .revoke = keyring_revoke, .destroy = keyring_destroy, .describe = keyring_describe, .read = keyring_read, }; - EXPORT_SYMBOL(key_type_keyring); /* - * semaphore to serialise link/link calls to prevent two link calls in parallel - * introducing a cycle + * Semaphore to serialise link/link calls to prevent two link calls in parallel + * introducing a cycle. */ static DECLARE_RWSEM(keyring_serialise_link_sem); -/*****************************************************************************/ /* - * publish the name of a keyring so that it can be found by name (if it has - * one) + * Publish the name of a keyring so that it can be found by name (if it has + * one). */ static void keyring_publish_name(struct key *keyring) { @@ -102,50 +120,259 @@ static void keyring_publish_name(struct key *keyring) write_unlock(&keyring_name_lock); } +} -} /* end keyring_publish_name() */ - -/*****************************************************************************/ /* - * initialise a keyring - * - we object if we were given any data + * Initialise a keyring. + * + * Returns 0 on success, -EINVAL if given any data. */ static int keyring_instantiate(struct key *keyring, - const void *data, size_t datalen) + struct key_preparsed_payload *prep) { int ret; ret = -EINVAL; - if (datalen == 0) { + if (prep->datalen == 0) { + assoc_array_init(&keyring->keys); /* make the keyring available by name if it has one */ keyring_publish_name(keyring); ret = 0; } return ret; +} -} /* end keyring_instantiate() */ +/* + * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd + * fold the carry back too, but that requires inline asm. 
+ */ +static u64 mult_64x32_and_fold(u64 x, u32 y) +{ + u64 hi = (u64)(u32)(x >> 32) * y; + u64 lo = (u64)(u32)(x) * y; + return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32); +} -/*****************************************************************************/ /* - * match keyrings on their name + * Hash a key type and description. */ -static int keyring_match(const struct key *keyring, const void *description) +static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key) { - return keyring->description && - strcmp(keyring->description, description) == 0; + const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP; + const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK; + const char *description = index_key->description; + unsigned long hash, type; + u32 piece; + u64 acc; + int n, desc_len = index_key->desc_len; + + type = (unsigned long)index_key->type; + + acc = mult_64x32_and_fold(type, desc_len + 13); + acc = mult_64x32_and_fold(acc, 9207); + for (;;) { + n = desc_len; + if (n <= 0) + break; + if (n > 4) + n = 4; + piece = 0; + memcpy(&piece, description, n); + description += n; + desc_len -= n; + acc = mult_64x32_and_fold(acc, piece); + acc = mult_64x32_and_fold(acc, 9207); + } -} /* end keyring_match() */ + /* Fold the hash down to 32 bits if need be. */ + hash = acc; + if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32) + hash ^= acc >> 32; + + /* Squidge all the keyrings into a separate part of the tree to + * ordinary keys by making sure the lowest level segment in the hash is + * zero for keyrings and non-zero otherwise. + */ + if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0) + return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1; + if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0) + return (hash + (hash << level_shift)) & ~fan_mask; + return hash; +} -/*****************************************************************************/ /* - * dispose of the data dangling from the corpse of a keyring + * Build the next index key chunk. + * + * On 32-bit systems the index key is laid out as: + * + * 0 4 5 9... + * hash desclen typeptr desc[] + * + * On 64-bit systems: + * + * 0 8 9 17... + * hash desclen typeptr desc[] + * + * We return it one word-sized chunk at a time. 
*/ -static void keyring_destroy(struct key *keyring) +static unsigned long keyring_get_key_chunk(const void *data, int level) +{ + const struct keyring_index_key *index_key = data; + unsigned long chunk = 0; + long offset = 0; + int desc_len = index_key->desc_len, n = sizeof(chunk); + + level /= ASSOC_ARRAY_KEY_CHUNK_SIZE; + switch (level) { + case 0: + return hash_key_type_and_desc(index_key); + case 1: + return ((unsigned long)index_key->type << 8) | desc_len; + case 2: + if (desc_len == 0) + return (u8)((unsigned long)index_key->type >> + (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); + n--; + offset = 1; + default: + offset += sizeof(chunk) - 1; + offset += (level - 3) * sizeof(chunk); + if (offset >= desc_len) + return 0; + desc_len -= offset; + if (desc_len > n) + desc_len = n; + offset += desc_len; + do { + chunk <<= 8; + chunk |= ((u8*)index_key->description)[--offset]; + } while (--desc_len > 0); + + if (level == 2) { + chunk <<= 8; + chunk |= (u8)((unsigned long)index_key->type >> + (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8)); + } + return chunk; + } +} + +static unsigned long keyring_get_object_key_chunk(const void *object, int level) +{ + const struct key *key = keyring_ptr_to_key(object); + return keyring_get_key_chunk(&key->index_key, level); +} + +static bool keyring_compare_object(const void *object, const void *data) +{ + const struct keyring_index_key *index_key = data; + const struct key *key = keyring_ptr_to_key(object); + + return key->index_key.type == index_key->type && + key->index_key.desc_len == index_key->desc_len && + memcmp(key->index_key.description, index_key->description, + index_key->desc_len) == 0; +} + +/* + * Compare the index keys of a pair of objects and determine the bit position + * at which they differ - if they differ. + */ +static int keyring_diff_objects(const void *object, const void *data) +{ + const struct key *key_a = keyring_ptr_to_key(object); + const struct keyring_index_key *a = &key_a->index_key; + const struct keyring_index_key *b = data; + unsigned long seg_a, seg_b; + int level, i; + + level = 0; + seg_a = hash_key_type_and_desc(a); + seg_b = hash_key_type_and_desc(b); + if ((seg_a ^ seg_b) != 0) + goto differ; + + /* The number of bits contributed by the hash is controlled by a + * constant in the assoc_array headers. Everything else thereafter we + * can deal with as being machine word-size dependent. + */ + level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8; + seg_a = a->desc_len; + seg_b = b->desc_len; + if ((seg_a ^ seg_b) != 0) + goto differ; + + /* The next bit may not work on big endian */ + level++; + seg_a = (unsigned long)a->type; + seg_b = (unsigned long)b->type; + if ((seg_a ^ seg_b) != 0) + goto differ; + + level += sizeof(unsigned long); + if (a->desc_len == 0) + goto same; + + i = 0; + if (((unsigned long)a->description | (unsigned long)b->description) & + (sizeof(unsigned long) - 1)) { + do { + seg_a = *(unsigned long *)(a->description + i); + seg_b = *(unsigned long *)(b->description + i); + if ((seg_a ^ seg_b) != 0) + goto differ_plus_i; + i += sizeof(unsigned long); + } while (i < (a->desc_len & (sizeof(unsigned long) - 1))); + } + + for (; i < a->desc_len; i++) { + seg_a = *(unsigned char *)(a->description + i); + seg_b = *(unsigned char *)(b->description + i); + if ((seg_a ^ seg_b) != 0) + goto differ_plus_i; + } + +same: + return -1; + +differ_plus_i: + level += i; +differ: + i = level * 8 + __ffs(seg_a ^ seg_b); + return i; +} + +/* + * Free an object after stripping the keyring flag off of the pointer. 
+ */ +static void keyring_free_object(void *object) { - struct keyring_list *klist; - int loop; + key_put(keyring_ptr_to_key(object)); +} + +/* + * Operations for keyring management by the index-tree routines. + */ +static const struct assoc_array_ops keyring_assoc_array_ops = { + .get_key_chunk = keyring_get_key_chunk, + .get_object_key_chunk = keyring_get_object_key_chunk, + .compare_object = keyring_compare_object, + .diff_objects = keyring_diff_objects, + .free_object = keyring_free_object, +}; +/* + * Clean up a keyring when it is destroyed. Unpublish its name if it had one + * and dispose of its data. + * + * The garbage collector detects the final key_put(), removes the keyring from + * the serial number tree and then does RCU synchronisation before coming here, + * so we shouldn't need to worry about code poking around here with the RCU + * readlock held by this time. + */ +static void keyring_destroy(struct key *keyring) +{ if (keyring->description) { write_lock(&keyring_name_lock); @@ -156,110 +383,110 @@ static void keyring_destroy(struct key *keyring) write_unlock(&keyring_name_lock); } - klist = rcu_dereference_check(keyring->payload.subscriptions, - rcu_read_lock_held() || - atomic_read(&keyring->usage) == 0); - if (klist) { - for (loop = klist->nkeys - 1; loop >= 0; loop--) - key_put(klist->keys[loop]); - kfree(klist); - } - -} /* end keyring_destroy() */ + assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops); +} -/*****************************************************************************/ /* - * describe the keyring + * Describe a keyring for /proc. */ static void keyring_describe(const struct key *keyring, struct seq_file *m) { - struct keyring_list *klist; - if (keyring->description) seq_puts(m, keyring->description); else seq_puts(m, "[anon]"); - rcu_read_lock(); - klist = rcu_dereference(keyring->payload.subscriptions); - if (klist) - seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys); - else - seq_puts(m, ": empty"); - rcu_read_unlock(); + if (key_is_instantiated(keyring)) { + if (keyring->keys.nr_leaves_on_tree != 0) + seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree); + else + seq_puts(m, ": empty"); + } +} + +struct keyring_read_iterator_context { + size_t qty; + size_t count; + key_serial_t __user *buffer; +}; -} /* end keyring_describe() */ +static int keyring_read_iterator(const void *object, void *data) +{ + struct keyring_read_iterator_context *ctx = data; + const struct key *key = keyring_ptr_to_key(object); + int ret; + + kenter("{%s,%d},,{%zu/%zu}", + key->type->name, key->serial, ctx->count, ctx->qty); + + if (ctx->count >= ctx->qty) + return 1; + + ret = put_user(key->serial, ctx->buffer); + if (ret < 0) + return ret; + ctx->buffer++; + ctx->count += sizeof(key->serial); + return 0; +} -/*****************************************************************************/ /* - * read a list of key IDs from the keyring's contents - * - the keyring's semaphore is read-locked + * Read a list of key IDs from the keyring's contents in binary form + * + * The keyring's semaphore is read-locked by the caller. This prevents someone + * from modifying it under us - which could cause us to read key IDs multiple + * times. 
*/ static long keyring_read(const struct key *keyring, char __user *buffer, size_t buflen) { - struct keyring_list *klist; - struct key *key; - size_t qty, tmp; - int loop, ret; + struct keyring_read_iterator_context ctx; + unsigned long nr_keys; + int ret; - ret = 0; - klist = rcu_dereference_locked_keyring(keyring); - if (klist) { - /* calculate how much data we could return */ - qty = klist->nkeys * sizeof(key_serial_t); - - if (buffer && buflen > 0) { - if (buflen > qty) - buflen = qty; - - /* copy the IDs of the subscribed keys into the - * buffer */ - ret = -EFAULT; - - for (loop = 0; loop < klist->nkeys; loop++) { - key = klist->keys[loop]; - - tmp = sizeof(key_serial_t); - if (tmp > buflen) - tmp = buflen; - - if (copy_to_user(buffer, - &key->serial, - tmp) != 0) - goto error; - - buflen -= tmp; - if (buflen == 0) - break; - buffer += tmp; - } - } + kenter("{%d},,%zu", key_serial(keyring), buflen); - ret = qty; - } + if (buflen & (sizeof(key_serial_t) - 1)) + return -EINVAL; -error: - return ret; + nr_keys = keyring->keys.nr_leaves_on_tree; + if (nr_keys == 0) + return 0; -} /* end keyring_read() */ + /* Calculate how much data we could return */ + ctx.qty = nr_keys * sizeof(key_serial_t); + + if (!buffer || !buflen) + return ctx.qty; + + if (buflen > ctx.qty) + ctx.qty = buflen; + + /* Copy the IDs of the subscribed keys into the buffer */ + ctx.buffer = (key_serial_t __user *)buffer; + ctx.count = 0; + ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx); + if (ret < 0) { + kleave(" = %d [iterate]", ret); + return ret; + } + + kleave(" = %zu [ok]", ctx.count); + return ctx.count; +} -/*****************************************************************************/ /* - * allocate a keyring and link into the destination keyring + * Allocate a keyring and link into the destination keyring. */ -struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, - const struct cred *cred, unsigned long flags, - struct key *dest) +struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid, + const struct cred *cred, key_perm_t perm, + unsigned long flags, struct key *dest) { struct key *keyring; int ret; keyring = key_alloc(&key_type_keyring, description, - uid, gid, cred, - (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL, - flags); - + uid, gid, cred, perm, flags); if (!IS_ERR(keyring)) { ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL); if (ret < 0) { @@ -269,259 +496,452 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid, } return keyring; +} +EXPORT_SYMBOL(keyring_alloc); -} /* end keyring_alloc() */ - -/*****************************************************************************/ /* - * search the supplied keyring tree for a key that matches the criterion - * - perform a breadth-then-depth search up to the prescribed limit - * - we only find keys on which we have search permission - * - we use the supplied match function to see if the description (or other - * feature of interest) matches - * - we rely on RCU to prevent the keyring lists from disappearing on us - * - we return -EAGAIN if we didn't find any matching key - * - we return -ENOKEY if we only found negative matching keys - * - we propagate the possession attribute from the keyring ref to the key ref + * Iteration function to consider each key found. 
*/ -key_ref_t keyring_search_aux(key_ref_t keyring_ref, - const struct cred *cred, - struct key_type *type, - const void *description, - key_match_func_t match) +static int keyring_search_iterator(const void *object, void *iterator_data) { - struct { - struct keyring_list *keylist; - int kix; - } stack[KEYRING_SEARCH_MAX_DEPTH]; + struct keyring_search_context *ctx = iterator_data; + const struct key *key = keyring_ptr_to_key(object); + unsigned long kflags = key->flags; - struct keyring_list *keylist; - struct timespec now; - unsigned long possessed, kflags; - struct key *keyring, *key; - key_ref_t key_ref; - long err; - int sp, kix; - - keyring = key_ref_to_ptr(keyring_ref); - possessed = is_key_possessed(keyring_ref); - key_check(keyring); + kenter("{%d}", key->serial); - /* top keyring must have search permission to begin the search */ - err = key_task_permission(keyring_ref, cred, KEY_SEARCH); - if (err < 0) { - key_ref = ERR_PTR(err); - goto error; + /* ignore keys not of this type */ + if (key->type != ctx->index_key.type) { + kleave(" = 0 [!type]"); + return 0; } - key_ref = ERR_PTR(-ENOTDIR); - if (keyring->type != &key_type_keyring) - goto error; + /* skip invalidated, revoked and expired keys */ + if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { + if (kflags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED))) { + ctx->result = ERR_PTR(-EKEYREVOKED); + kleave(" = %d [invrev]", ctx->skipped_ret); + goto skipped; + } - rcu_read_lock(); + if (key->expiry && ctx->now.tv_sec >= key->expiry) { + ctx->result = ERR_PTR(-EKEYEXPIRED); + kleave(" = %d [expire]", ctx->skipped_ret); + goto skipped; + } + } - now = current_kernel_time(); - err = -EAGAIN; - sp = 0; - - /* firstly we should check to see if this top-level keyring is what we - * are looking for */ - key_ref = ERR_PTR(-EAGAIN); - kflags = keyring->flags; - if (keyring->type == type && match(keyring, description)) { - key = keyring; - - /* check it isn't negative and hasn't expired or been - * revoked */ - if (kflags & (1 << KEY_FLAG_REVOKED)) - goto error_2; - if (key->expiry && now.tv_sec >= key->expiry) - goto error_2; - key_ref = ERR_PTR(-ENOKEY); - if (kflags & (1 << KEY_FLAG_NEGATIVE)) - goto error_2; - goto found; + /* keys that don't match */ + if (!ctx->match(key, ctx->match_data)) { + kleave(" = 0 [!match]"); + return 0; } - /* otherwise, the top keyring must not be revoked, expired, or - * negatively instantiated if we are to search it */ - key_ref = ERR_PTR(-EAGAIN); - if (kflags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_NEGATIVE)) || - (keyring->expiry && now.tv_sec >= keyring->expiry)) - goto error_2; + /* key must have search permissions */ + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && + key_task_permission(make_key_ref(key, ctx->possessed), + ctx->cred, KEY_NEED_SEARCH) < 0) { + ctx->result = ERR_PTR(-EACCES); + kleave(" = %d [!perm]", ctx->skipped_ret); + goto skipped; + } - /* start processing a new keyring */ -descend: - if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) - goto not_this_keyring; + if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) { + /* we set a different error code if we pass a negative key */ + if (kflags & (1 << KEY_FLAG_NEGATIVE)) { + smp_rmb(); + ctx->result = ERR_PTR(key->type_data.reject_error); + kleave(" = %d [neg]", ctx->skipped_ret); + goto skipped; + } + } - keylist = rcu_dereference(keyring->payload.subscriptions); - if (!keylist) - goto not_this_keyring; + /* Found */ + ctx->result = make_key_ref(key, ctx->possessed); + kleave(" = 1 [found]"); + return 1; - /* 
iterate through the keys in this keyring first */ - for (kix = 0; kix < keylist->nkeys; kix++) { - key = keylist->keys[kix]; - kflags = key->flags; +skipped: + return ctx->skipped_ret; +} - /* ignore keys not of this type */ - if (key->type != type) - continue; +/* + * Search inside a keyring for a key. We can search by walking to it + * directly based on its index-key or we can iterate over the entire + * tree looking for it, based on the match function. + */ +static int search_keyring(struct key *keyring, struct keyring_search_context *ctx) +{ + if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) == + KEYRING_SEARCH_LOOKUP_DIRECT) { + const void *object; + + object = assoc_array_find(&keyring->keys, + &keyring_assoc_array_ops, + &ctx->index_key); + return object ? ctx->iterator(object, ctx) : 0; + } + return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx); +} - /* skip revoked keys and expired keys */ - if (kflags & (1 << KEY_FLAG_REVOKED)) - continue; +/* + * Search a tree of keyrings that point to other keyrings up to the maximum + * depth. + */ +static bool search_nested_keyrings(struct key *keyring, + struct keyring_search_context *ctx) +{ + struct { + struct key *keyring; + struct assoc_array_node *node; + int slot; + } stack[KEYRING_SEARCH_MAX_DEPTH]; - if (key->expiry && now.tv_sec >= key->expiry) - continue; + struct assoc_array_shortcut *shortcut; + struct assoc_array_node *node; + struct assoc_array_ptr *ptr; + struct key *key; + int sp = 0, slot; - /* keys that don't match */ - if (!match(key, description)) - continue; + kenter("{%d},{%s,%s}", + keyring->serial, + ctx->index_key.type->name, + ctx->index_key.description); - /* key must have search permissions */ - if (key_task_permission(make_key_ref(key, possessed), - cred, KEY_SEARCH) < 0) - continue; + if (ctx->index_key.description) + ctx->index_key.desc_len = strlen(ctx->index_key.description); - /* we set a different error code if we pass a negative key */ - if (kflags & (1 << KEY_FLAG_NEGATIVE)) { - err = -ENOKEY; - continue; + /* Check to see if this top-level keyring is what we are looking for + * and whether it is valid or not. + */ + if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE || + keyring_compare_object(keyring, &ctx->index_key)) { + ctx->skipped_ret = 2; + ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK; + switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { + case 1: + goto found; + case 2: + return false; + default: + break; } + } + + ctx->skipped_ret = 0; + if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK) + ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK; + /* Start processing a new keyring */ +descend_to_keyring: + kdebug("descend to %d", keyring->serial); + if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED))) + goto not_this_keyring; + + /* Search through the keys in this keyring before its searching its + * subtrees. + */ + if (search_keyring(keyring, ctx)) goto found; - } - /* search through the keyrings nested in this one */ - kix = 0; -ascend: - for (; kix < keylist->nkeys; kix++) { - key = keylist->keys[kix]; - if (key->type != &key_type_keyring) - continue; + /* Then manually iterate through the keyrings nested in this one. + * + * Start from the root node of the index tree. Because of the way the + * hash function has been set up, keyrings cluster on the leftmost + * branch of the root node (root slot 0) or in the root node itself. + * Non-keyrings avoid the leftmost branch of the root entirely (root + * slots 1-15). 
+ */ + ptr = ACCESS_ONCE(keyring->keys.root); + if (!ptr) + goto not_this_keyring; - /* recursively search nested keyrings - * - only search keyrings for which we have search permission + if (assoc_array_ptr_is_shortcut(ptr)) { + /* If the root is a shortcut, either the keyring only contains + * keyring pointers (everything clusters behind root slot 0) or + * doesn't contain any keyring pointers. */ - if (sp >= KEYRING_SEARCH_MAX_DEPTH) + shortcut = assoc_array_ptr_to_shortcut(ptr); + smp_read_barrier_depends(); + if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0) + goto not_this_keyring; + + ptr = ACCESS_ONCE(shortcut->next_node); + node = assoc_array_ptr_to_node(ptr); + goto begin_node; + } + + node = assoc_array_ptr_to_node(ptr); + smp_read_barrier_depends(); + + ptr = node->slots[0]; + if (!assoc_array_ptr_is_meta(ptr)) + goto begin_node; + +descend_to_node: + /* Descend to a more distal node in this keyring's content tree and go + * through that. + */ + kdebug("descend"); + if (assoc_array_ptr_is_shortcut(ptr)) { + shortcut = assoc_array_ptr_to_shortcut(ptr); + smp_read_barrier_depends(); + ptr = ACCESS_ONCE(shortcut->next_node); + BUG_ON(!assoc_array_ptr_is_node(ptr)); + } + node = assoc_array_ptr_to_node(ptr); + +begin_node: + kdebug("begin_node"); + smp_read_barrier_depends(); + slot = 0; +ascend_to_node: + /* Go through the slots in a node */ + for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) { + ptr = ACCESS_ONCE(node->slots[slot]); + + if (assoc_array_ptr_is_meta(ptr) && node->back_pointer) + goto descend_to_node; + + if (!keyring_ptr_is_keyring(ptr)) continue; - if (key_task_permission(make_key_ref(key, possessed), - cred, KEY_SEARCH) < 0) + key = keyring_ptr_to_key(ptr); + + if (sp >= KEYRING_SEARCH_MAX_DEPTH) { + if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) { + ctx->result = ERR_PTR(-ELOOP); + return false; + } + goto not_this_keyring; + } + + /* Search a nested keyring */ + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) && + key_task_permission(make_key_ref(key, ctx->possessed), + ctx->cred, KEY_NEED_SEARCH) < 0) continue; /* stack the current position */ - stack[sp].keylist = keylist; - stack[sp].kix = kix; + stack[sp].keyring = keyring; + stack[sp].node = node; + stack[sp].slot = slot; sp++; /* begin again with the new keyring */ keyring = key; - goto descend; + goto descend_to_keyring; } - /* the keyring we're looking at was disqualified or didn't contain a - * matching key */ + /* We've dealt with all the slots in the current node, so now we need + * to ascend to the parent and continue processing there. + */ + ptr = ACCESS_ONCE(node->back_pointer); + slot = node->parent_slot; + + if (ptr && assoc_array_ptr_is_shortcut(ptr)) { + shortcut = assoc_array_ptr_to_shortcut(ptr); + smp_read_barrier_depends(); + ptr = ACCESS_ONCE(shortcut->back_pointer); + slot = shortcut->parent_slot; + } + if (!ptr) + goto not_this_keyring; + node = assoc_array_ptr_to_node(ptr); + smp_read_barrier_depends(); + slot++; + + /* If we've ascended to the root (zero backpointer), we must have just + * finished processing the leftmost branch rather than the root slots - + * so there can't be any more keyrings for us to find. + */ + if (node->back_pointer) { + kdebug("ascend %d", slot); + goto ascend_to_node; + } + + /* The keyring we're looking at was disqualified or didn't contain a + * matching key. 
+ */ not_this_keyring: - if (sp > 0) { - /* resume the processing of a keyring higher up in the tree */ - sp--; - keylist = stack[sp].keylist; - kix = stack[sp].kix + 1; - goto ascend; + kdebug("not_this_keyring %d", sp); + if (sp <= 0) { + kleave(" = false"); + return false; } - key_ref = ERR_PTR(err); - goto error_2; + /* Resume the processing of a keyring higher up in the tree */ + sp--; + keyring = stack[sp].keyring; + node = stack[sp].node; + slot = stack[sp].slot + 1; + kdebug("ascend to %d [%d]", keyring->serial, slot); + goto ascend_to_node; - /* we found a viable match */ + /* We found a viable match */ found: - atomic_inc(&key->usage); + key = key_ref_to_ptr(ctx->result); key_check(key); - key_ref = make_key_ref(key, possessed); -error_2: - rcu_read_unlock(); -error: - return key_ref; + if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) { + key->last_used_at = ctx->now.tv_sec; + keyring->last_used_at = ctx->now.tv_sec; + while (sp > 0) + stack[--sp].keyring->last_used_at = ctx->now.tv_sec; + } + kleave(" = true"); + return true; +} + +/** + * keyring_search_aux - Search a keyring tree for a key matching some criteria + * @keyring_ref: A pointer to the keyring with possession indicator. + * @ctx: The keyring search context. + * + * Search the supplied keyring tree for a key that matches the criteria given. + * The root keyring and any linked keyrings must grant Search permission to the + * caller to be searchable and keys can only be found if they too grant Search + * to the caller. The possession flag on the root keyring pointer controls use + * of the possessor bits in permissions checking of the entire tree. In + * addition, the LSM gets to forbid keyring searches and key matches. + * + * The search is performed as a breadth-then-depth search up to the prescribed + * limit (KEYRING_SEARCH_MAX_DEPTH). + * + * Keys are matched to the type provided and are then filtered by the match + * function, which is given the description to use in any way it sees fit. The + * match function may use any attributes of a key that it wishes to to + * determine the match. Normally the match function from the key type would be + * used. + * + * RCU can be used to prevent the keyring key lists from disappearing without + * the need to take lots of locks. + * + * Returns a pointer to the found key and increments the key usage count if + * successful; -EAGAIN if no matching keys were found, or if expired or revoked + * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the + * specified keyring wasn't a keyring. + * + * In the case of a successful return, the possession attribute from + * @keyring_ref is propagated to the returned key reference. 
+ */ +key_ref_t keyring_search_aux(key_ref_t keyring_ref, + struct keyring_search_context *ctx) +{ + struct key *keyring; + long err; -} /* end keyring_search_aux() */ + ctx->iterator = keyring_search_iterator; + ctx->possessed = is_key_possessed(keyring_ref); + ctx->result = ERR_PTR(-EAGAIN); -/*****************************************************************************/ -/* - * search the supplied keyring tree for a key that matches the criterion - * - perform a breadth-then-depth search up to the prescribed limit - * - we only find keys on which we have search permission - * - we readlock the keyrings as we search down the tree - * - we return -EAGAIN if we didn't find any matching key - * - we return -ENOKEY if we only found negative matching keys + keyring = key_ref_to_ptr(keyring_ref); + key_check(keyring); + + if (keyring->type != &key_type_keyring) + return ERR_PTR(-ENOTDIR); + + if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) { + err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH); + if (err < 0) + return ERR_PTR(err); + } + + rcu_read_lock(); + ctx->now = current_kernel_time(); + if (search_nested_keyrings(keyring, ctx)) + __key_get(key_ref_to_ptr(ctx->result)); + rcu_read_unlock(); + return ctx->result; +} + +/** + * keyring_search - Search the supplied keyring tree for a matching key + * @keyring: The root of the keyring tree to be searched. + * @type: The type of keyring we want to find. + * @description: The name of the keyring we want to find. + * + * As keyring_search_aux() above, but using the current task's credentials and + * type's default matching function and preferred search method. */ key_ref_t keyring_search(key_ref_t keyring, struct key_type *type, const char *description) { - if (!type->match) + struct keyring_search_context ctx = { + .index_key.type = type, + .index_key.description = description, + .cred = current_cred(), + .match = type->match, + .match_data = description, + .flags = (type->def_lookup_type | + KEYRING_SEARCH_DO_STATE_CHECK), + }; + + if (!ctx.match) return ERR_PTR(-ENOKEY); - return keyring_search_aux(keyring, current->cred, - type, description, type->match); - -} /* end keyring_search() */ - + return keyring_search_aux(keyring, &ctx); +} EXPORT_SYMBOL(keyring_search); -/*****************************************************************************/ /* - * search the given keyring only (no recursion) - * - keyring must be locked by caller - * - caller must guarantee that the keyring is a keyring + * Search the given keyring for a key that might be updated. + * + * The caller must guarantee that the keyring is a keyring and that the + * permission is granted to modify the keyring as no check is made here. The + * caller must also hold a lock on the keyring semaphore. + * + * Returns a pointer to the found key with usage count incremented if + * successful and returns NULL if not found. Revoked and invalidated keys are + * skipped over. + * + * If successful, the possession indicator is propagated from the keyring ref + * to the returned key reference. 
*/ -key_ref_t __keyring_search_one(key_ref_t keyring_ref, - const struct key_type *ktype, - const char *description, - key_perm_t perm) +key_ref_t find_key_to_update(key_ref_t keyring_ref, + const struct keyring_index_key *index_key) { - struct keyring_list *klist; - unsigned long possessed; struct key *keyring, *key; - int loop; + const void *object; keyring = key_ref_to_ptr(keyring_ref); - possessed = is_key_possessed(keyring_ref); - rcu_read_lock(); + kenter("{%d},{%s,%s}", + keyring->serial, index_key->type->name, index_key->description); - klist = rcu_dereference(keyring->payload.subscriptions); - if (klist) { - for (loop = 0; loop < klist->nkeys; loop++) { - key = klist->keys[loop]; - - if (key->type == ktype && - (!key->type->match || - key->type->match(key, description)) && - key_permission(make_key_ref(key, possessed), - perm) == 0 && - !test_bit(KEY_FLAG_REVOKED, &key->flags) - ) - goto found; - } - } + object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops, + index_key); - rcu_read_unlock(); - return ERR_PTR(-ENOKEY); + if (object) + goto found; -found: - atomic_inc(&key->usage); - rcu_read_unlock(); - return make_key_ref(key, possessed); + kleave(" = NULL"); + return NULL; -} /* end __keyring_search_one() */ +found: + key = keyring_ptr_to_key(object); + if (key->flags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED))) { + kleave(" = NULL [x]"); + return NULL; + } + __key_get(key); + kleave(" = {%d}", key->serial); + return make_key_ref(key, is_key_possessed(keyring_ref)); +} -/*****************************************************************************/ /* - * find a keyring with the specified name - * - all named keyrings are searched - * - normally only finds keyrings with search permission for the current process + * Find a keyring with the specified name. + * + * All named keyrings in the current user namespace are searched, provided they + * grant Search permission directly to the caller (unless this check is + * skipped). Keyrings whose usage points have reached zero or who have been + * revoked are skipped. + * + * Returns a pointer to the keyring with the keyring's refcount having being + * incremented on success. -ENOKEY is returned if a key could not be found. */ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) { @@ -542,7 +962,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) &keyring_name_hash[bucket], type_data.link ) { - if (keyring->user->user_ns != current_user_ns()) + if (!kuid_has_mapping(current_user_ns(), keyring->user->uid)) continue; if (test_bit(KEY_FLAG_REVOKED, &keyring->flags)) @@ -553,7 +973,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) if (!skip_perm_check && key_permission(make_key_ref(keyring, 0), - KEY_SEARCH) < 0) + KEY_NEED_SEARCH) < 0) continue; /* we've got a match but we might end up racing with @@ -561,6 +981,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) * (ie. 
it has a zero usage count) */ if (!atomic_inc_not_zero(&keyring->usage)) continue; + keyring->last_used_at = current_kernel_time().tv_sec; goto out; } } @@ -569,125 +990,67 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check) out: read_unlock(&keyring_name_lock); return keyring; +} -} /* end find_keyring_by_name() */ - -/*****************************************************************************/ -/* - * see if a cycle will will be created by inserting acyclic tree B in acyclic - * tree A at the topmost level (ie: as a direct child of A) - * - since we are adding B to A at the top level, checking for cycles should - * just be a matter of seeing if node A is somewhere in tree B - */ -static int keyring_detect_cycle(struct key *A, struct key *B) +static int keyring_detect_cycle_iterator(const void *object, + void *iterator_data) { - struct { - struct keyring_list *keylist; - int kix; - } stack[KEYRING_SEARCH_MAX_DEPTH]; - - struct keyring_list *keylist; - struct key *subtree, *key; - int sp, kix, ret; - - rcu_read_lock(); - - ret = -EDEADLK; - if (A == B) - goto cycle_detected; - - subtree = B; - sp = 0; - - /* start processing a new keyring */ -descend: - if (test_bit(KEY_FLAG_REVOKED, &subtree->flags)) - goto not_this_keyring; - - keylist = rcu_dereference(subtree->payload.subscriptions); - if (!keylist) - goto not_this_keyring; - kix = 0; - -ascend: - /* iterate through the remaining keys in this keyring */ - for (; kix < keylist->nkeys; kix++) { - key = keylist->keys[kix]; - - if (key == A) - goto cycle_detected; - - /* recursively check nested keyrings */ - if (key->type == &key_type_keyring) { - if (sp >= KEYRING_SEARCH_MAX_DEPTH) - goto too_deep; - - /* stack the current position */ - stack[sp].keylist = keylist; - stack[sp].kix = kix; - sp++; - - /* begin again with the new keyring */ - subtree = key; - goto descend; - } - } - - /* the keyring we're looking at was disqualified or didn't contain a - * matching key */ -not_this_keyring: - if (sp > 0) { - /* resume the checking of a keyring higher up in the tree */ - sp--; - keylist = stack[sp].keylist; - kix = stack[sp].kix + 1; - goto ascend; - } - - ret = 0; /* no cycles detected */ + struct keyring_search_context *ctx = iterator_data; + const struct key *key = keyring_ptr_to_key(object); -error: - rcu_read_unlock(); - return ret; + kenter("{%d}", key->serial); -too_deep: - ret = -ELOOP; - goto error; + /* We might get a keyring with matching index-key that is nonetheless a + * different keyring. */ + if (key != ctx->match_data) + return 0; -cycle_detected: - ret = -EDEADLK; - goto error; - -} /* end keyring_detect_cycle() */ + ctx->result = ERR_PTR(-EDEADLK); + return 1; +} /* - * dispose of a keyring list after the RCU grace period, freeing the unlinked - * key + * See if a cycle will will be created by inserting acyclic tree B in acyclic + * tree A at the topmost level (ie: as a direct child of A). + * + * Since we are adding B to A at the top level, checking for cycles should just + * be a matter of seeing if node A is somewhere in tree B. 
*/ -static void keyring_unlink_rcu_disposal(struct rcu_head *rcu) +static int keyring_detect_cycle(struct key *A, struct key *B) { - struct keyring_list *klist = - container_of(rcu, struct keyring_list, rcu); + struct keyring_search_context ctx = { + .index_key = A->index_key, + .match_data = A, + .iterator = keyring_detect_cycle_iterator, + .flags = (KEYRING_SEARCH_LOOKUP_DIRECT | + KEYRING_SEARCH_NO_STATE_CHECK | + KEYRING_SEARCH_NO_UPDATE_TIME | + KEYRING_SEARCH_NO_CHECK_PERM | + KEYRING_SEARCH_DETECT_TOO_DEEP), + }; - if (klist->delkey != USHRT_MAX) - key_put(klist->keys[klist->delkey]); - kfree(klist); + rcu_read_lock(); + search_nested_keyrings(B, &ctx); + rcu_read_unlock(); + return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result); } /* - * preallocate memory so that a key can be linked into to a keyring + * Preallocate memory so that a key can be linked into to a keyring. */ -int __key_link_begin(struct key *keyring, const struct key_type *type, - const char *description, - struct keyring_list **_prealloc) +int __key_link_begin(struct key *keyring, + const struct keyring_index_key *index_key, + struct assoc_array_edit **_edit) __acquires(&keyring->sem) + __acquires(&keyring_serialise_link_sem) { - struct keyring_list *klist, *nklist; - unsigned max; - size_t size; - int loop, ret; + struct assoc_array_edit *edit; + int ret; + + kenter("%d,%s,%s,", + keyring->serial, index_key->type->name, index_key->description); - kenter("%d,%s,%s,", key_serial(keyring), type->name, description); + BUG_ON(index_key->desc_len == 0); if (keyring->type != &key_type_keyring) return -ENOTDIR; @@ -700,90 +1063,39 @@ int __key_link_begin(struct key *keyring, const struct key_type *type, /* serialise link/link calls to prevent parallel calls causing a cycle * when linking two keyring in opposite orders */ - if (type == &key_type_keyring) + if (index_key->type == &key_type_keyring) down_write(&keyring_serialise_link_sem); - klist = rcu_dereference_locked_keyring(keyring); - - /* see if there's a matching key we can displace */ - if (klist && klist->nkeys > 0) { - for (loop = klist->nkeys - 1; loop >= 0; loop--) { - if (klist->keys[loop]->type == type && - strcmp(klist->keys[loop]->description, - description) == 0 - ) { - /* found a match - we'll replace this one with - * the new key */ - size = sizeof(struct key *) * klist->maxkeys; - size += sizeof(*klist); - BUG_ON(size > PAGE_SIZE); - - ret = -ENOMEM; - nklist = kmemdup(klist, size, GFP_KERNEL); - if (!nklist) - goto error_sem; - - /* note replacement slot */ - klist->delkey = nklist->delkey = loop; - goto done; - } - } - } - - /* check that we aren't going to overrun the user's quota */ - ret = key_payload_reserve(keyring, - keyring->datalen + KEYQUOTA_LINK_BYTES); - if (ret < 0) + /* Create an edit script that will insert/replace the key in the + * keyring tree. 
+ */ + edit = assoc_array_insert(&keyring->keys, + &keyring_assoc_array_ops, + index_key, + NULL); + if (IS_ERR(edit)) { + ret = PTR_ERR(edit); goto error_sem; + } - if (klist && klist->nkeys < klist->maxkeys) { - /* there's sufficient slack space to append directly */ - nklist = NULL; - } else { - /* grow the key list */ - max = 4; - if (klist) - max += klist->maxkeys; - - ret = -ENFILE; - if (max > USHRT_MAX - 1) - goto error_quota; - size = sizeof(*klist) + sizeof(struct key *) * max; - if (size > PAGE_SIZE) - goto error_quota; - - ret = -ENOMEM; - nklist = kmalloc(size, GFP_KERNEL); - if (!nklist) - goto error_quota; - - nklist->maxkeys = max; - if (klist) { - memcpy(nklist->keys, klist->keys, - sizeof(struct key *) * klist->nkeys); - nklist->delkey = klist->nkeys; - nklist->nkeys = klist->nkeys + 1; - klist->delkey = USHRT_MAX; - } else { - nklist->nkeys = 1; - nklist->delkey = 0; - } - - /* add the key into the new space */ - nklist->keys[nklist->delkey] = NULL; + /* If we're not replacing a link in-place then we're going to need some + * extra quota. + */ + if (!edit->dead_leaf) { + ret = key_payload_reserve(keyring, + keyring->datalen + KEYQUOTA_LINK_BYTES); + if (ret < 0) + goto error_cancel; } -done: - *_prealloc = nklist; + *_edit = edit; kleave(" = 0"); return 0; -error_quota: - /* undo the quota changes */ - key_payload_reserve(keyring, - keyring->datalen - KEYQUOTA_LINK_BYTES); +error_cancel: + assoc_array_cancel_edit(edit); error_sem: - if (type == &key_type_keyring) + if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); error_krsem: up_write(&keyring->sem); @@ -792,10 +1104,10 @@ error_krsem: } /* - * check already instantiated keys aren't going to be a problem - * - the caller must have called __key_link_begin() - * - don't need to call this for keys that were created since __key_link_begin() - * was called + * Check already instantiated keys aren't going to be a problem. + * + * The caller must have called __key_link_begin(). Don't need to call this for + * keys that were created since __key_link_begin() was called. */ int __key_link_check_live_key(struct key *keyring, struct key *key) { @@ -807,347 +1119,246 @@ int __key_link_check_live_key(struct key *keyring, struct key *key) } /* - * link a key into to a keyring - * - must be called with __key_link_begin() having being called - * - discard already extant link to matching key if there is one + * Link a key into to a keyring. + * + * Must be called with __key_link_begin() having being called. Discards any + * already extant link to matching key if there is one, so that each keyring + * holds at most one link to any given key of a particular type+description + * combination. 
*/ -void __key_link(struct key *keyring, struct key *key, - struct keyring_list **_prealloc) +void __key_link(struct key *key, struct assoc_array_edit **_edit) { - struct keyring_list *klist, *nklist; - - nklist = *_prealloc; - *_prealloc = NULL; - - kenter("%d,%d,%p", keyring->serial, key->serial, nklist); - - klist = rcu_dereference_protected(keyring->payload.subscriptions, - rwsem_is_locked(&keyring->sem)); - - atomic_inc(&key->usage); - - /* there's a matching key we can displace or an empty slot in a newly - * allocated list we can fill */ - if (nklist) { - kdebug("replace %hu/%hu/%hu", - nklist->delkey, nklist->nkeys, nklist->maxkeys); - - nklist->keys[nklist->delkey] = key; - - rcu_assign_pointer(keyring->payload.subscriptions, nklist); - - /* dispose of the old keyring list and, if there was one, the - * displaced key */ - if (klist) { - kdebug("dispose %hu/%hu/%hu", - klist->delkey, klist->nkeys, klist->maxkeys); - call_rcu(&klist->rcu, keyring_unlink_rcu_disposal); - } - } else { - /* there's sufficient slack space to append directly */ - klist->keys[klist->nkeys] = key; - smp_wmb(); - klist->nkeys++; - } + __key_get(key); + assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key)); + assoc_array_apply_edit(*_edit); + *_edit = NULL; } /* - * finish linking a key into to a keyring - * - must be called with __key_link_begin() having being called + * Finish linking a key into to a keyring. + * + * Must be called with __key_link_begin() having being called. */ -void __key_link_end(struct key *keyring, struct key_type *type, - struct keyring_list *prealloc) +void __key_link_end(struct key *keyring, + const struct keyring_index_key *index_key, + struct assoc_array_edit *edit) __releases(&keyring->sem) + __releases(&keyring_serialise_link_sem) { - BUG_ON(type == NULL); - BUG_ON(type->name == NULL); - kenter("%d,%s,%p", keyring->serial, type->name, prealloc); + BUG_ON(index_key->type == NULL); + kenter("%d,%s,", keyring->serial, index_key->type->name); - if (type == &key_type_keyring) + if (index_key->type == &key_type_keyring) up_write(&keyring_serialise_link_sem); - if (prealloc) { - kfree(prealloc); + if (edit && !edit->dead_leaf) { key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); + assoc_array_cancel_edit(edit); } up_write(&keyring->sem); } -/* - * link a key to a keyring +/** + * key_link - Link a key to a keyring + * @keyring: The keyring to make the link in. + * @key: The key to link to. + * + * Make a link in a keyring to a key, such that the keyring holds a reference + * on that key and the key can potentially be found by searching that keyring. + * + * This function will write-lock the keyring's semaphore and will consume some + * of the user's key data quota to hold the link. + * + * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, + * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is + * full, -EDQUOT if there is insufficient key data quota remaining to add + * another link or -ENOMEM if there's insufficient memory. + * + * It is assumed that the caller has checked that it is permitted for a link to + * be made (the keyring should have Write permission and the key Link + * permission). 
*/ int key_link(struct key *keyring, struct key *key) { - struct keyring_list *prealloc; + struct assoc_array_edit *edit; int ret; + kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage)); + key_check(keyring); key_check(key); - ret = __key_link_begin(keyring, key->type, key->description, &prealloc); + if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) && + !test_bit(KEY_FLAG_TRUSTED, &key->flags)) + return -EPERM; + + ret = __key_link_begin(keyring, &key->index_key, &edit); if (ret == 0) { + kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage)); ret = __key_link_check_live_key(keyring, key); if (ret == 0) - __key_link(keyring, key, &prealloc); - __key_link_end(keyring, key->type, prealloc); + __key_link(key, &edit); + __key_link_end(keyring, &key->index_key, edit); } + kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage)); return ret; } - EXPORT_SYMBOL(key_link); -/*****************************************************************************/ -/* - * unlink the first link to a key from a keyring +/** + * key_unlink - Unlink the first link to a key from a keyring. + * @keyring: The keyring to remove the link from. + * @key: The key the link is to. + * + * Remove a link from a keyring to a key. + * + * This function will write-lock the keyring's semaphore. + * + * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if + * the key isn't linked to by the keyring or -ENOMEM if there's insufficient + * memory. + * + * It is assumed that the caller has checked that it is permitted for a link to + * be removed (the keyring should have Write permission; no permissions are + * required on the key). */ int key_unlink(struct key *keyring, struct key *key) { - struct keyring_list *klist, *nklist; - int loop, ret; + struct assoc_array_edit *edit; + int ret; key_check(keyring); key_check(key); - ret = -ENOTDIR; if (keyring->type != &key_type_keyring) - goto error; + return -ENOTDIR; down_write(&keyring->sem); - klist = rcu_dereference_locked_keyring(keyring); - if (klist) { - /* search the keyring for the key */ - for (loop = 0; loop < klist->nkeys; loop++) - if (klist->keys[loop] == key) - goto key_is_present; + edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops, + &key->index_key); + if (IS_ERR(edit)) { + ret = PTR_ERR(edit); + goto error; } - - up_write(&keyring->sem); ret = -ENOENT; - goto error; - -key_is_present: - /* we need to copy the key list for RCU purposes */ - nklist = kmalloc(sizeof(*klist) + - sizeof(struct key *) * klist->maxkeys, - GFP_KERNEL); - if (!nklist) - goto nomem; - nklist->maxkeys = klist->maxkeys; - nklist->nkeys = klist->nkeys - 1; - - if (loop > 0) - memcpy(&nklist->keys[0], - &klist->keys[0], - loop * sizeof(struct key *)); - - if (loop < nklist->nkeys) - memcpy(&nklist->keys[loop], - &klist->keys[loop + 1], - (nklist->nkeys - loop) * sizeof(struct key *)); - - /* adjust the user's quota */ - key_payload_reserve(keyring, - keyring->datalen - KEYQUOTA_LINK_BYTES); - - rcu_assign_pointer(keyring->payload.subscriptions, nklist); - - up_write(&keyring->sem); - - /* schedule for later cleanup */ - klist->delkey = loop; - call_rcu(&klist->rcu, keyring_unlink_rcu_disposal); + if (edit == NULL) + goto error; + assoc_array_apply_edit(edit); + key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES); ret = 0; error: - return ret; -nomem: - ret = -ENOMEM; up_write(&keyring->sem); - goto error; - -} /* end key_unlink() */ - + return ret; +} EXPORT_SYMBOL(key_unlink); 
-/*****************************************************************************/ -/* - * dispose of a keyring list after the RCU grace period, releasing the keys it - * links to - */ -static void keyring_clear_rcu_disposal(struct rcu_head *rcu) -{ - struct keyring_list *klist; - int loop; - - klist = container_of(rcu, struct keyring_list, rcu); - - for (loop = klist->nkeys - 1; loop >= 0; loop--) - key_put(klist->keys[loop]); - - kfree(klist); - -} /* end keyring_clear_rcu_disposal() */ - -/*****************************************************************************/ -/* - * clear the specified process keyring - * - implements keyctl(KEYCTL_CLEAR) +/** + * keyring_clear - Clear a keyring + * @keyring: The keyring to clear. + * + * Clear the contents of the specified keyring. + * + * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring. */ int keyring_clear(struct key *keyring) { - struct keyring_list *klist; + struct assoc_array_edit *edit; int ret; - ret = -ENOTDIR; - if (keyring->type == &key_type_keyring) { - /* detach the pointer block with the locks held */ - down_write(&keyring->sem); - - klist = rcu_dereference_locked_keyring(keyring); - if (klist) { - /* adjust the quota */ - key_payload_reserve(keyring, - sizeof(struct keyring_list)); - - rcu_assign_pointer(keyring->payload.subscriptions, - NULL); - } - - up_write(&keyring->sem); + if (keyring->type != &key_type_keyring) + return -ENOTDIR; - /* free the keys after the locks have been dropped */ - if (klist) - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); + down_write(&keyring->sem); + edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); + if (IS_ERR(edit)) { + ret = PTR_ERR(edit); + } else { + if (edit) + assoc_array_apply_edit(edit); + key_payload_reserve(keyring, 0); ret = 0; } + up_write(&keyring->sem); return ret; - -} /* end keyring_clear() */ - +} EXPORT_SYMBOL(keyring_clear); -/*****************************************************************************/ /* - * dispose of the links from a revoked keyring - * - called with the key sem write-locked + * Dispose of the links from a revoked keyring. + * + * This is called with the key sem write-locked. */ static void keyring_revoke(struct key *keyring) { - struct keyring_list *klist; + struct assoc_array_edit *edit; - klist = rcu_dereference_locked_keyring(keyring); - - /* adjust the quota */ - key_payload_reserve(keyring, 0); - - if (klist) { - rcu_assign_pointer(keyring->payload.subscriptions, NULL); - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); + edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops); + if (!IS_ERR(edit)) { + if (edit) + assoc_array_apply_edit(edit); + key_payload_reserve(keyring, 0); } +} -} /* end keyring_revoke() */ +static bool keyring_gc_select_iterator(void *object, void *iterator_data) +{ + struct key *key = keyring_ptr_to_key(object); + time_t *limit = iterator_data; -/* - * Determine whether a key is dead - */ -static bool key_is_dead(struct key *key, time_t limit) + if (key_is_dead(key, *limit)) + return false; + key_get(key); + return true; +} + +static int keyring_gc_check_iterator(const void *object, void *iterator_data) { - return test_bit(KEY_FLAG_DEAD, &key->flags) || - (key->expiry > 0 && key->expiry <= limit); + const struct key *key = keyring_ptr_to_key(object); + time_t *limit = iterator_data; + + key_check(key); + return key_is_dead(key, *limit); } /* - * Collect garbage from the contents of a keyring + * Garbage collect pointers from a keyring. + * + * Not called with any locks held. 
The keyring's key struct will not be + * deallocated under us as only our caller may deallocate it. */ void keyring_gc(struct key *keyring, time_t limit) { - struct keyring_list *klist, *new; - struct key *key; - int loop, keep, max; - - kenter("{%x,%s}", key_serial(keyring), keyring->description); - - down_write(&keyring->sem); - - klist = rcu_dereference_locked_keyring(keyring); - if (!klist) - goto no_klist; - - /* work out how many subscriptions we're keeping */ - keep = 0; - for (loop = klist->nkeys - 1; loop >= 0; loop--) - if (!key_is_dead(klist->keys[loop], limit)) - keep++; - - if (keep == klist->nkeys) - goto just_return; - - /* allocate a new keyring payload */ - max = roundup(keep, 4); - new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *), - GFP_KERNEL); - if (!new) - goto nomem; - new->maxkeys = max; - new->nkeys = 0; - new->delkey = 0; - - /* install the live keys - * - must take care as expired keys may be updated back to life - */ - keep = 0; - for (loop = klist->nkeys - 1; loop >= 0; loop--) { - key = klist->keys[loop]; - if (!key_is_dead(key, limit)) { - if (keep >= max) - goto discard_new; - new->keys[keep++] = key_get(key); - } - } - new->nkeys = keep; - - /* adjust the quota */ - key_payload_reserve(keyring, - sizeof(struct keyring_list) + - KEYQUOTA_LINK_BYTES * keep); + int result; - if (keep == 0) { - rcu_assign_pointer(keyring->payload.subscriptions, NULL); - kfree(new); - } else { - rcu_assign_pointer(keyring->payload.subscriptions, new); - } + kenter("%x{%s}", keyring->serial, keyring->description ?: ""); - up_write(&keyring->sem); + if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) | + (1 << KEY_FLAG_REVOKED))) + goto dont_gc; - call_rcu(&klist->rcu, keyring_clear_rcu_disposal); - kleave(" [yes]"); - return; - -discard_new: - new->nkeys = keep; - keyring_clear_rcu_disposal(&new->rcu); - up_write(&keyring->sem); - kleave(" [discard]"); - return; - -just_return: - up_write(&keyring->sem); - kleave(" [no dead]"); - return; + /* scan the keyring looking for dead keys */ + rcu_read_lock(); + result = assoc_array_iterate(&keyring->keys, + keyring_gc_check_iterator, &limit); + rcu_read_unlock(); + if (result == true) + goto do_gc; -no_klist: - up_write(&keyring->sem); - kleave(" [no_klist]"); +dont_gc: + kleave(" [no gc]"); return; -nomem: +do_gc: + down_write(&keyring->sem); + assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops, + keyring_gc_select_iterator, &limit); up_write(&keyring->sem); - kleave(" [oom]"); + kleave(" [gc]"); } diff --git a/security/keys/permission.c b/security/keys/permission.c index 28645502cd0..732cc0beffd 100644 --- a/security/keys/permission.c +++ b/security/keys/permission.c @@ -1,4 +1,4 @@ -/* permission.c: key permission determination +/* Key permission checking * * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -13,21 +13,22 @@ #include <linux/security.h> #include "internal.h" -/*****************************************************************************/ /** * key_task_permission - Check a key can be used - * @key_ref: The key to check - * @cred: The credentials to use - * @perm: The permissions to check for + * @key_ref: The key to check. + * @cred: The credentials to use. + * @perm: The permissions to check for. * * Check to see whether permission is granted to use a key in the desired way, * but permit the security modules to override. * - * The caller must hold either a ref on cred or must hold the RCU readlock or a - * spinlock. 
+ * The caller must hold either a ref on cred or must hold the RCU readlock. + * + * Returns 0 if successful, -EACCES if access is denied based on the + * permissions bits or the LSM check. */ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, - key_perm_t perm) + unsigned perm) { struct key *key; key_perm_t kperm; @@ -35,19 +36,16 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, key = key_ref_to_ptr(key_ref); - if (key->user->user_ns != cred->user->user_ns) - goto use_other_perms; - /* use the second 8-bits of permissions for keys the caller owns */ - if (key->uid == cred->fsuid) { + if (uid_eq(key->uid, cred->fsuid)) { kperm = key->perm >> 16; goto use_these_perms; } /* use the third 8-bits of permissions for keys the caller has a group * membership in common with */ - if (key->gid != -1 && key->perm & KEY_GRP_ALL) { - if (key->gid == cred->fsgid) { + if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) { + if (gid_eq(key->gid, cred->fsgid)) { kperm = key->perm >> 8; goto use_these_perms; } @@ -59,8 +57,6 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred, } } -use_other_perms: - /* otherwise use the least-significant 8-bits */ kperm = key->perm; @@ -72,46 +68,43 @@ use_these_perms: if (is_key_possessed(key_ref)) kperm |= key->perm >> 24; - kperm = kperm & perm & KEY_ALL; + kperm = kperm & perm & KEY_NEED_ALL; if (kperm != perm) return -EACCES; /* let LSM be the final arbiter */ return security_key_permission(key_ref, cred, perm); - -} /* end key_task_permission() */ - +} EXPORT_SYMBOL(key_task_permission); -/*****************************************************************************/ -/* - * validate a key +/** + * key_validate - Validate a key. + * @key: The key to be validated. + * + * Check that a key is valid, returning 0 if the key is okay, -ENOKEY if the + * key is invalidated, -EKEYREVOKED if the key's type has been removed or if + * the key has been revoked or -EKEYEXPIRED if the key has expired. */ -int key_validate(struct key *key) +int key_validate(const struct key *key) { - struct timespec now; - int ret = 0; - - if (key) { - /* check it's still accessible */ - ret = -EKEYREVOKED; - if (test_bit(KEY_FLAG_REVOKED, &key->flags) || - test_bit(KEY_FLAG_DEAD, &key->flags)) - goto error; - - /* check it hasn't expired */ - ret = 0; - if (key->expiry) { - now = current_kernel_time(); - if (now.tv_sec >= key->expiry) - ret = -EKEYEXPIRED; - } - } + unsigned long flags = key->flags; -error: - return ret; + if (flags & (1 << KEY_FLAG_INVALIDATED)) + return -ENOKEY; -} /* end key_validate() */ + /* check it's still accessible */ + if (flags & ((1 << KEY_FLAG_REVOKED) | + (1 << KEY_FLAG_DEAD))) + return -EKEYREVOKED; + + /* check it hasn't expired */ + if (key->expiry) { + struct timespec now = current_kernel_time(); + if (now.tv_sec >= key->expiry) + return -EKEYEXPIRED; + } + return 0; +} EXPORT_SYMBOL(key_validate); diff --git a/security/keys/persistent.c b/security/keys/persistent.c new file mode 100644 index 00000000000..c9fae5ea89f --- /dev/null +++ b/security/keys/persistent.c @@ -0,0 +1,167 @@ +/* General persistent per-UID keyrings register + * + * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved. 
+ * Written by David Howells (dhowells@redhat.com) + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public Licence + * as published by the Free Software Foundation; either version + * 2 of the Licence, or (at your option) any later version. + */ + +#include <linux/user_namespace.h> +#include "internal.h" + +unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */ + +/* + * Create the persistent keyring register for the current user namespace. + * + * Called with the namespace's sem locked for writing. + */ +static int key_create_persistent_register(struct user_namespace *ns) +{ + struct key *reg = keyring_alloc(".persistent_register", + KUIDT_INIT(0), KGIDT_INIT(0), + current_cred(), + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA, NULL); + if (IS_ERR(reg)) + return PTR_ERR(reg); + + ns->persistent_keyring_register = reg; + return 0; +} + +/* + * Create the persistent keyring for the specified user. + * + * Called with the namespace's sem locked for writing. + */ +static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid, + struct keyring_index_key *index_key) +{ + struct key *persistent; + key_ref_t reg_ref, persistent_ref; + + if (!ns->persistent_keyring_register) { + long err = key_create_persistent_register(ns); + if (err < 0) + return ERR_PTR(err); + } else { + reg_ref = make_key_ref(ns->persistent_keyring_register, true); + persistent_ref = find_key_to_update(reg_ref, index_key); + if (persistent_ref) + return persistent_ref; + } + + persistent = keyring_alloc(index_key->description, + uid, INVALID_GID, current_cred(), + ((KEY_POS_ALL & ~KEY_POS_SETATTR) | + KEY_USR_VIEW | KEY_USR_READ), + KEY_ALLOC_NOT_IN_QUOTA, + ns->persistent_keyring_register); + if (IS_ERR(persistent)) + return ERR_CAST(persistent); + + return make_key_ref(persistent, true); +} + +/* + * Get the persistent keyring for a specific UID and link it to the nominated + * keyring. + */ +static long key_get_persistent(struct user_namespace *ns, kuid_t uid, + key_ref_t dest_ref) +{ + struct keyring_index_key index_key; + struct key *persistent; + key_ref_t reg_ref, persistent_ref; + char buf[32]; + long ret; + + /* Look in the register if it exists */ + index_key.type = &key_type_keyring; + index_key.description = buf; + index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid)); + + if (ns->persistent_keyring_register) { + reg_ref = make_key_ref(ns->persistent_keyring_register, true); + down_read(&ns->persistent_keyring_register_sem); + persistent_ref = find_key_to_update(reg_ref, &index_key); + up_read(&ns->persistent_keyring_register_sem); + + if (persistent_ref) + goto found; + } + + /* It wasn't in the register, so we'll need to create it. We might + * also need to create the register. 
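The helpers above create the per-namespace register keyring and the per-UID "_persistent.<uid>" keyring inside it; keyctl_get_persistent(), continued below, exposes this to userspace and treats a UID of -1 as "the current user". A minimal, hedged sketch of exercising that path follows; it assumes a kernel built with CONFIG_PERSISTENT_KEYRINGS, and the fallback value 22 for KEYCTL_GET_PERSISTENT is an assumption for headers that predate this feature.

/* Sketch: fetch (lazily creating, if needed) the caller's persistent keyring
 * and link it into the session keyring.
 */
#include <keyutils.h>
#include <stdio.h>

#ifndef KEYCTL_GET_PERSISTENT
#define KEYCTL_GET_PERSISTENT 22   /* assumption: matches <linux/keyctl.h> */
#endif

int main(void)
{
    long serial;

    serial = keyctl(KEYCTL_GET_PERSISTENT, (uid_t)-1,
                    KEY_SPEC_SESSION_KEYRING);
    if (serial < 0) {
        perror("keyctl(KEYCTL_GET_PERSISTENT)");
        return 1;
    }

    printf("persistent keyring serial: %ld\n", serial);
    return 0;
}

Each successful fetch re-arms the keyring's timeout via the key_set_timeout() call in key_get_persistent() above, governed by the persistent_keyring_expiry sysctl added later in this patch.
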
+ */ + down_write(&ns->persistent_keyring_register_sem); + persistent_ref = key_create_persistent(ns, uid, &index_key); + up_write(&ns->persistent_keyring_register_sem); + if (!IS_ERR(persistent_ref)) + goto found; + + return PTR_ERR(persistent_ref); + +found: + ret = key_task_permission(persistent_ref, current_cred(), KEY_NEED_LINK); + if (ret == 0) { + persistent = key_ref_to_ptr(persistent_ref); + ret = key_link(key_ref_to_ptr(dest_ref), persistent); + if (ret == 0) { + key_set_timeout(persistent, persistent_keyring_expiry); + ret = persistent->serial; + } + } + + key_ref_put(persistent_ref); + return ret; +} + +/* + * Get the persistent keyring for a specific UID and link it to the nominated + * keyring. + */ +long keyctl_get_persistent(uid_t _uid, key_serial_t destid) +{ + struct user_namespace *ns = current_user_ns(); + key_ref_t dest_ref; + kuid_t uid; + long ret; + + /* -1 indicates the current user */ + if (_uid == (uid_t)-1) { + uid = current_uid(); + } else { + uid = make_kuid(ns, _uid); + if (!uid_valid(uid)) + return -EINVAL; + + /* You can only see your own persistent cache if you're not + * sufficiently privileged. + */ + if (!uid_eq(uid, current_uid()) && + !uid_eq(uid, current_euid()) && + !ns_capable(ns, CAP_SETUID)) + return -EPERM; + } + + /* There must be a destination keyring */ + dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE); + if (IS_ERR(dest_ref)) + return PTR_ERR(dest_ref); + if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) { + ret = -ENOTDIR; + goto out_put_dest; + } + + ret = key_get_persistent(ns, uid, dest_ref); + +out_put_dest: + key_ref_put(dest_ref); + return ret; +} diff --git a/security/keys/proc.c b/security/keys/proc.c index 70373966816..d3f6f2fd21d 100644 --- a/security/keys/proc.c +++ b/security/keys/proc.c @@ -1,4 +1,4 @@ -/* proc.c: proc files for key database enumeration +/* procfs files for key database enumeration * * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -60,9 +60,8 @@ static const struct file_operations proc_key_users_fops = { .release = seq_release, }; -/*****************************************************************************/ /* - * declare the /proc files + * Declare the /proc files. */ static int __init key_proc_init(void) { @@ -79,25 +78,24 @@ static int __init key_proc_init(void) panic("Cannot create /proc/key-users\n"); return 0; - -} /* end key_proc_init() */ +} __initcall(key_proc_init); -/*****************************************************************************/ /* - * implement "/proc/keys" to provides a list of the keys on the system + * Implement "/proc/keys" to provide a list of the keys on the system that + * grant View permission to the caller. 
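The comment above introduces /proc/keys as a listing of every key that grants View permission to the reading process; proc_keys_show() below prints one line per key, and this patch widens its flag column with an 'i' character for invalidated keys. A small, hedged sketch of a reader that picks those out (the column layout assumed here follows the seq_printf() format below):

/* Sketch: scan /proc/keys and report keys carrying the 'i' (invalidated)
 * flag added by this patch.  Assumes CONFIG_KEYS_DEBUG_PROC_KEYS=y.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[512], serial[16], flags[16];
    FILE *f = fopen("/proc/keys", "r");

    if (!f) {
        perror("/proc/keys");
        return 1;
    }
    /* Each line: serial, flags, usage, timeout, perm, uid, gid, type, desc */
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, "%15s %15s", serial, flags) == 2 &&
            strchr(flags, 'i'))
            printf("invalidated: %s", line);
    }
    fclose(f);
    return 0;
}
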
*/ #ifdef CONFIG_KEYS_DEBUG_PROC_KEYS -static struct rb_node *key_serial_next(struct rb_node *n) +static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n) { - struct user_namespace *user_ns = current_user_ns(); + struct user_namespace *user_ns = seq_user_ns(p); n = rb_next(n); while (n) { struct key *key = rb_entry(n, struct key, serial_node); - if (key->user->user_ns == user_ns) + if (kuid_has_mapping(user_ns, key->user->uid)) break; n = rb_next(n); } @@ -109,9 +107,9 @@ static int proc_keys_open(struct inode *inode, struct file *file) return seq_open(file, &proc_keys_ops); } -static struct key *find_ge_key(key_serial_t id) +static struct key *find_ge_key(struct seq_file *p, key_serial_t id) { - struct user_namespace *user_ns = current_user_ns(); + struct user_namespace *user_ns = seq_user_ns(p); struct rb_node *n = key_serial_tree.rb_node; struct key *minkey = NULL; @@ -134,7 +132,7 @@ static struct key *find_ge_key(key_serial_t id) return NULL; for (;;) { - if (minkey->user->user_ns == user_ns) + if (kuid_has_mapping(user_ns, minkey->user->uid)) return minkey; n = rb_next(&minkey->serial_node); if (!n) @@ -153,7 +151,7 @@ static void *proc_keys_start(struct seq_file *p, loff_t *_pos) if (*_pos > INT_MAX) return NULL; - key = find_ge_key(pos); + key = find_ge_key(p, pos); if (!key) return NULL; *_pos = key->serial; @@ -170,7 +168,7 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos) { struct rb_node *n; - n = key_serial_next(v); + n = key_serial_next(p, v); if (n) *_pos = key_node_serial(n); return n; @@ -184,7 +182,6 @@ static void proc_keys_stop(struct seq_file *p, void *v) static int proc_keys_show(struct seq_file *m, void *v) { - const struct cred *cred = current_cred(); struct rb_node *_p = v; struct key *key = rb_entry(_p, struct key, serial_node); struct timespec now; @@ -193,15 +190,23 @@ static int proc_keys_show(struct seq_file *m, void *v) char xbuf[12]; int rc; + struct keyring_search_context ctx = { + .index_key.type = key->type, + .index_key.description = key->description, + .cred = current_cred(), + .match = lookup_user_key_possessed, + .match_data = key, + .flags = (KEYRING_SEARCH_NO_STATE_CHECK | + KEYRING_SEARCH_LOOKUP_DIRECT), + }; + key_ref = make_key_ref(key, 0); /* determine if the key is possessed by this process (a test we can * skip if the key does not indicate the possessor can view it */ if (key->perm & KEY_POS_VIEW) { - skey_ref = search_my_process_keyrings(key->type, key, - lookup_user_key_possessed, - cred); + skey_ref = search_my_process_keyrings(&ctx); if (!IS_ERR(skey_ref)) { key_ref_put(skey_ref); key_ref = make_key_ref(key, 1); @@ -213,7 +218,7 @@ static int proc_keys_show(struct seq_file *m, void *v) * - the caller holds a spinlock, and thus the RCU read lock, making our * access to __current_cred() safe */ - rc = key_task_permission(key_ref, cred, KEY_VIEW); + rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW); if (rc < 0) return 0; @@ -244,7 +249,7 @@ static int proc_keys_show(struct seq_file *m, void *v) #define showflag(KEY, LETTER, FLAG) \ (test_bit(FLAG, &(KEY)->flags) ? 
LETTER : '-') - seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", + seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ", key->serial, showflag(key, 'I', KEY_FLAG_INSTANTIATED), showflag(key, 'R', KEY_FLAG_REVOKED), @@ -252,11 +257,12 @@ static int proc_keys_show(struct seq_file *m, void *v) showflag(key, 'Q', KEY_FLAG_IN_QUOTA), showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT), showflag(key, 'N', KEY_FLAG_NEGATIVE), + showflag(key, 'i', KEY_FLAG_INVALIDATED), atomic_read(&key->usage), xbuf, key->perm, - key->uid, - key->gid, + from_kuid_munged(seq_user_ns(m), key->uid), + from_kgid_munged(seq_user_ns(m), key->gid), key->type->name); #undef showflag @@ -271,31 +277,31 @@ static int proc_keys_show(struct seq_file *m, void *v) #endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */ -static struct rb_node *__key_user_next(struct rb_node *n) +static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n) { while (n) { struct key_user *user = rb_entry(n, struct key_user, node); - if (user->user_ns == current_user_ns()) + if (kuid_has_mapping(user_ns, user->uid)) break; n = rb_next(n); } return n; } -static struct rb_node *key_user_next(struct rb_node *n) +static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n) { - return __key_user_next(rb_next(n)); + return __key_user_next(user_ns, rb_next(n)); } -static struct rb_node *key_user_first(struct rb_root *r) +static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r) { struct rb_node *n = rb_first(r); - return __key_user_next(n); + return __key_user_next(user_ns, n); } -/*****************************************************************************/ /* - * implement "/proc/key-users" to provides a list of the key users + * Implement "/proc/key-users" to provides a list of the key users and their + * quotas. */ static int proc_key_users_open(struct inode *inode, struct file *file) { @@ -310,10 +316,10 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos) spin_lock(&key_user_lock); - _p = key_user_first(&key_user_tree); + _p = key_user_first(seq_user_ns(p), &key_user_tree); while (pos > 0 && _p) { pos--; - _p = key_user_next(_p); + _p = key_user_next(seq_user_ns(p), _p); } return _p; @@ -322,7 +328,7 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos) static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos) { (*_pos)++; - return key_user_next((struct rb_node *)v); + return key_user_next(seq_user_ns(p), (struct rb_node *)v); } static void proc_key_users_stop(struct seq_file *p, void *v) @@ -335,13 +341,13 @@ static int proc_key_users_show(struct seq_file *m, void *v) { struct rb_node *_p = v; struct key_user *user = rb_entry(_p, struct key_user, node); - unsigned maxkeys = (user->uid == 0) ? + unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ? key_quota_root_maxkeys : key_quota_maxkeys; - unsigned maxbytes = (user->uid == 0) ? + unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ? 
key_quota_root_maxbytes : key_quota_maxbytes; seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n", - user->uid, + from_kuid_munged(seq_user_ns(m), user->uid), atomic_read(&user->usage), atomic_read(&user->nkeys), atomic_read(&user->nikeys), @@ -351,5 +357,4 @@ static int proc_key_users_show(struct seq_file *m, void *v) maxbytes); return 0; - } diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 504bdd2452b..0cf8a130a26 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c @@ -1,4 +1,4 @@ -/* Management of a process's keyrings +/* Manage a process's keyrings * * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -21,41 +21,43 @@ #include <asm/uaccess.h> #include "internal.h" -/* session keyring create vs join semaphore */ +/* Session keyring create vs join semaphore */ static DEFINE_MUTEX(key_session_mutex); -/* user keyring creation semaphore */ +/* User keyring creation semaphore */ static DEFINE_MUTEX(key_user_keyring_mutex); -/* the root user's tracking struct */ +/* The root user's tracking struct */ struct key_user root_key_user = { .usage = ATOMIC_INIT(3), .cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock), .lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock), .nkeys = ATOMIC_INIT(2), .nikeys = ATOMIC_INIT(2), - .uid = 0, - .user_ns = &init_user_ns, + .uid = GLOBAL_ROOT_UID, }; -/*****************************************************************************/ /* - * install user and user session keyrings for a particular UID + * Install the user and user session keyrings for the current process's UID. */ int install_user_keyrings(void) { struct user_struct *user; const struct cred *cred; struct key *uid_keyring, *session_keyring; + key_perm_t user_keyring_perm; char buf[20]; int ret; + uid_t uid; + user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL; cred = current_cred(); user = cred->user; + uid = from_kuid(cred->user_ns, user->uid); - kenter("%p{%u}", user, user->uid); + kenter("%p{%u}", user, uid); - if (user->uid_keyring) { + if (user->uid_keyring && user->session_keyring) { kleave(" = 0 [exist]"); return 0; } @@ -68,13 +70,13 @@ int install_user_keyrings(void) * - there may be one in existence already as it may have been * pinned by a session, but the user_struct pointing to it * may have been destroyed by setuid */ - sprintf(buf, "_uid.%u", user->uid); + sprintf(buf, "_uid.%u", uid); uid_keyring = find_keyring_by_name(buf, true); if (IS_ERR(uid_keyring)) { - uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1, - cred, KEY_ALLOC_IN_QUOTA, - NULL); + uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID, + cred, user_keyring_perm, + KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(uid_keyring)) { ret = PTR_ERR(uid_keyring); goto error; @@ -83,13 +85,14 @@ int install_user_keyrings(void) /* get a default session keyring (which might also exist * already) */ - sprintf(buf, "_uid_ses.%u", user->uid); + sprintf(buf, "_uid_ses.%u", uid); session_keyring = find_keyring_by_name(buf, true); if (IS_ERR(session_keyring)) { session_keyring = - keyring_alloc(buf, user->uid, (gid_t) -1, - cred, KEY_ALLOC_IN_QUOTA, NULL); + keyring_alloc(buf, user->uid, INVALID_GID, + cred, user_keyring_perm, + KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(session_keyring)) { ret = PTR_ERR(session_keyring); goto error_release; @@ -122,13 +125,15 @@ error: } /* - * install a fresh thread keyring directly to new credentials + * Install a fresh thread keyring directly to new credentials. 
This keyring is + * allowed to overrun the quota. */ int install_thread_keyring_to_cred(struct cred *new) { struct key *keyring; keyring = keyring_alloc("_tid", new->uid, new->gid, new, + KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_QUOTA_OVERRUN, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); @@ -138,7 +143,7 @@ int install_thread_keyring_to_cred(struct cred *new) } /* - * install a fresh thread keyring, discarding the old one + * Install a fresh thread keyring, discarding the old one. */ static int install_thread_keyring(void) { @@ -161,39 +166,34 @@ static int install_thread_keyring(void) } /* - * install a process keyring directly to a credentials struct - * - returns -EEXIST if there was already a process keyring, 0 if one installed, - * and other -ve on any other error + * Install a process keyring directly to a credentials struct. + * + * Returns -EEXIST if there was already a process keyring, 0 if one installed, + * and other value on any other error */ int install_process_keyring_to_cred(struct cred *new) { struct key *keyring; - int ret; - if (new->tgcred->process_keyring) + if (new->process_keyring) return -EEXIST; - keyring = keyring_alloc("_pid", new->uid, new->gid, - new, KEY_ALLOC_QUOTA_OVERRUN, NULL); + keyring = keyring_alloc("_pid", new->uid, new->gid, new, + KEY_POS_ALL | KEY_USR_VIEW, + KEY_ALLOC_QUOTA_OVERRUN, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); - spin_lock_irq(&new->tgcred->lock); - if (!new->tgcred->process_keyring) { - new->tgcred->process_keyring = keyring; - keyring = NULL; - ret = 0; - } else { - ret = -EEXIST; - } - spin_unlock_irq(&new->tgcred->lock); - key_put(keyring); - return ret; + new->process_keyring = keyring; + return 0; } /* - * make sure a process keyring is installed - * - we + * Make sure a process keyring is installed for the current process. The + * existing process keyring is not replaced. + * + * Returns 0 if there is a process keyring by the end of this function, some + * error otherwise. */ static int install_process_keyring(void) { @@ -214,7 +214,7 @@ static int install_process_keyring(void) } /* - * install a session keyring directly to a credentials struct + * Install a session keyring directly to a credentials struct. 
*/ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) { @@ -226,36 +226,31 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring) /* create an empty session keyring */ if (!keyring) { flags = KEY_ALLOC_QUOTA_OVERRUN; - if (cred->tgcred->session_keyring) + if (cred->session_keyring) flags = KEY_ALLOC_IN_QUOTA; - keyring = keyring_alloc("_ses", cred->uid, cred->gid, - cred, flags, NULL); + keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred, + KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, + flags, NULL); if (IS_ERR(keyring)) return PTR_ERR(keyring); } else { - atomic_inc(&keyring->usage); + __key_get(keyring); } /* install the keyring */ - spin_lock_irq(&cred->tgcred->lock); - old = cred->tgcred->session_keyring; - rcu_assign_pointer(cred->tgcred->session_keyring, keyring); - spin_unlock_irq(&cred->tgcred->lock); - - /* we're using RCU on the pointer, but there's no point synchronising - * on it if it didn't previously point to anything */ - if (old) { - synchronize_rcu(); + old = cred->session_keyring; + rcu_assign_pointer(cred->session_keyring, keyring); + + if (old) key_put(old); - } return 0; } /* - * install a session keyring, discarding the old one - * - if a keyring is not supplied, an empty one is invented + * Install a session keyring, discarding the old one. If a keyring is not + * supplied, an empty one is invented. */ static int install_session_keyring(struct key *keyring) { @@ -266,7 +261,7 @@ static int install_session_keyring(struct key *keyring) if (!new) return -ENOMEM; - ret = install_session_keyring_to_cred(new, NULL); + ret = install_session_keyring_to_cred(new, keyring); if (ret < 0) { abort_creds(new); return ret; @@ -275,9 +270,8 @@ static int install_session_keyring(struct key *keyring) return commit_creds(new); } -/*****************************************************************************/ /* - * the filesystem user ID changed + * Handle the fsuid changing. */ void key_fsuid_changed(struct task_struct *tsk) { @@ -288,12 +282,10 @@ void key_fsuid_changed(struct task_struct *tsk) tsk->cred->thread_keyring->uid = tsk->cred->fsuid; up_write(&tsk->cred->thread_keyring->sem); } +} -} /* end key_fsuid_changed() */ - -/*****************************************************************************/ /* - * the filesystem group ID changed + * Handle the fsgid changing. */ void key_fsgid_changed(struct task_struct *tsk) { @@ -304,21 +296,30 @@ void key_fsgid_changed(struct task_struct *tsk) tsk->cred->thread_keyring->gid = tsk->cred->fsgid; up_write(&tsk->cred->thread_keyring->sem); } +} -} /* end key_fsgid_changed() */ - -/*****************************************************************************/ /* - * search only my process keyrings for the first matching key - * - we use the supplied match function to see if the description (or other - * feature of interest) matches - * - we return -EAGAIN if we didn't find any matching key - * - we return -ENOKEY if we found only negative matching keys + * Search the process keyrings attached to the supplied cred for the first + * matching key. + * + * The search criteria are the type and the match function. The description is + * given to the match function as a parameter, but doesn't otherwise influence + * the search. Typically the match function will compare the description + * parameter to the key's description. + * + * This can only search keyrings that grant Search permission to the supplied + * credentials. 
Keyrings linked to searched keyrings will also be searched if + * they grant Search permission too. Keys can only be found if they grant + * Search permission to the credentials. + * + * Returns a pointer to the key with the key usage count incremented if + * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only + * matched negative keys. + * + * In the case of a successful return, the possession attribute is set on the + * returned key reference. */ -key_ref_t search_my_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - const struct cred *cred) +key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx) { key_ref_t key_ref, ret, err; @@ -334,17 +335,14 @@ key_ref_t search_my_process_keyrings(struct key_type *type, err = ERR_PTR(-EAGAIN); /* search the thread keyring first */ - if (cred->thread_keyring) { + if (ctx->cred->thread_keyring) { key_ref = keyring_search_aux( - make_key_ref(cred->thread_keyring, 1), - cred, type, description, match); + make_key_ref(ctx->cred->thread_keyring, 1), ctx); if (!IS_ERR(key_ref)) goto found; switch (PTR_ERR(key_ref)) { case -EAGAIN: /* no key */ - if (ret) - break; case -ENOKEY: /* negative key */ ret = key_ref; break; @@ -355,10 +353,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type, } /* search the process keyring second */ - if (cred->tgcred->process_keyring) { + if (ctx->cred->process_keyring) { key_ref = keyring_search_aux( - make_key_ref(cred->tgcred->process_keyring, 1), - cred, type, description, match); + make_key_ref(ctx->cred->process_keyring, 1), ctx); if (!IS_ERR(key_ref)) goto found; @@ -376,13 +373,11 @@ key_ref_t search_my_process_keyrings(struct key_type *type, } /* search the session keyring */ - if (cred->tgcred->session_keyring) { + if (ctx->cred->session_keyring) { rcu_read_lock(); key_ref = keyring_search_aux( - make_key_ref(rcu_dereference( - cred->tgcred->session_keyring), - 1), - cred, type, description, match); + make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1), + ctx); rcu_read_unlock(); if (!IS_ERR(key_ref)) @@ -401,10 +396,10 @@ key_ref_t search_my_process_keyrings(struct key_type *type, } } /* or search the user-session keyring */ - else if (cred->user->session_keyring) { + else if (ctx->cred->user->session_keyring) { key_ref = keyring_search_aux( - make_key_ref(cred->user->session_keyring, 1), - cred, type, description, match); + make_key_ref(ctx->cred->user->session_keyring, 1), + ctx); if (!IS_ERR(key_ref)) goto found; @@ -428,25 +423,22 @@ found: return key_ref; } -/*****************************************************************************/ /* - * search the process keyrings for the first matching key - * - we use the supplied match function to see if the description (or other - * feature of interest) matches - * - we return -EAGAIN if we didn't find any matching key - * - we return -ENOKEY if we found only negative matching keys + * Search the process keyrings attached to the supplied cred for the first + * matching key in the manner of search_my_process_keyrings(), but also search + * the keys attached to the assumed authorisation key using its credentials if + * one is available. + * + * Return same as search_my_process_keyrings(). 
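As the function body below shows, search_my_process_keyrings() tries the thread keyring first, then the process keyring, then the session keyring (or the user-session keyring if no session keyring is set). A hedged userspace sketch of that precedence, using two "user" keys with the same description; the description string is arbitrary and libkeyutils is assumed.

/* Sketch: demonstrate thread-before-session search order.  request_key()
 * should return the thread keyring's copy, mirroring the order in
 * search_my_process_keyrings().
 */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
    key_serial_t in_session, in_thread, found;

    in_session = add_key("user", "example:order", "session-copy", 12,
                         KEY_SPEC_SESSION_KEYRING);
    in_thread  = add_key("user", "example:order", "thread-copy", 11,
                         KEY_SPEC_THREAD_KEYRING);
    if (in_session < 0 || in_thread < 0) {
        perror("add_key");
        return 1;
    }

    found = request_key("user", "example:order", NULL, 0);
    if (found < 0) {
        perror("request_key");
        return 1;
    }

    printf("session=%d thread=%d found=%d (expect thread)\n",
           in_session, in_thread, found);
    return 0;
}
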
*/ -key_ref_t search_process_keyrings(struct key_type *type, - const void *description, - key_match_func_t match, - const struct cred *cred) +key_ref_t search_process_keyrings(struct keyring_search_context *ctx) { struct request_key_auth *rka; key_ref_t key_ref, ret = ERR_PTR(-EACCES), err; might_sleep(); - key_ref = search_my_process_keyrings(type, description, match, cred); + key_ref = search_my_process_keyrings(ctx); if (!IS_ERR(key_ref)) goto found; err = key_ref; @@ -455,18 +447,21 @@ key_ref_t search_process_keyrings(struct key_type *type, * search the keyrings of the process mentioned there * - we don't permit access to request_key auth keys via this method */ - if (cred->request_key_auth && - cred == current_cred() && - type != &key_type_request_key_auth + if (ctx->cred->request_key_auth && + ctx->cred == current_cred() && + ctx->index_key.type != &key_type_request_key_auth ) { + const struct cred *cred = ctx->cred; + /* defend against the auth key being revoked */ down_read(&cred->request_key_auth->sem); - if (key_validate(cred->request_key_auth) == 0) { - rka = cred->request_key_auth->payload.data; + if (key_validate(ctx->cred->request_key_auth) == 0) { + rka = ctx->cred->request_key_auth->payload.data; - key_ref = search_process_keyrings(type, description, - match, rka->cred); + ctx->cred = rka->cred; + key_ref = search_process_keyrings(ctx); + ctx->cred = cred; up_read(&cred->request_key_auth->sem); @@ -489,41 +484,54 @@ key_ref_t search_process_keyrings(struct key_type *type, found: return key_ref; +} -} /* end search_process_keyrings() */ - -/*****************************************************************************/ /* - * see if the key we're looking at is the target key + * See if the key we're looking at is the target key. */ int lookup_user_key_possessed(const struct key *key, const void *target) { return key == target; +} -} /* end lookup_user_key_possessed() */ - -/*****************************************************************************/ /* - * lookup a key given a key ID from userspace with a given permissions mask - * - don't create special keyrings unless so requested - * - partially constructed keys aren't found unless requested + * Look up a key ID given us by userspace with a given permissions mask to get + * the key it refers to. + * + * Flags can be passed to request that special keyrings be created if referred + * to directly, to permit partially constructed keys to be found and to skip + * validity and permission checks on the found key. + * + * Returns a pointer to the key with an incremented usage count if successful; + * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond + * to a key or the best found key was a negative key; -EKEYREVOKED or + * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the + * found key doesn't grant the requested permit or the LSM denied access to it; + * or -ENOMEM if a special keyring couldn't be created. + * + * In the case of a successful return, the possession attribute is set on the + * returned key reference. 
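The kernel-doc above for lookup_user_key() covers resolution of the KEY_SPEC_* placeholder IDs, including on-demand creation of missing keyrings when KEY_LOOKUP_CREATE is passed (see the switch below). From userspace the same paths can be reached through keyctl_get_keyring_ID() with a non-zero create argument; a brief, hedged sketch:

/* Sketch: resolve the special keyring IDs the way lookup_user_key() does,
 * asking for on-demand creation where a keyring does not exist yet.
 */
#include <keyutils.h>
#include <stdio.h>

static void show(const char *name, key_serial_t spec)
{
    key_serial_t real = keyctl_get_keyring_ID(spec, 1 /* create */);

    if (real < 0)
        perror(name);
    else
        printf("%-24s -> %d\n", name, real);
}

int main(void)
{
    show("thread keyring", KEY_SPEC_THREAD_KEYRING);
    show("process keyring", KEY_SPEC_PROCESS_KEYRING);
    show("session keyring", KEY_SPEC_SESSION_KEYRING);
    show("user keyring", KEY_SPEC_USER_KEYRING);
    show("user-session keyring", KEY_SPEC_USER_SESSION_KEYRING);
    return 0;
}
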
*/ key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags, key_perm_t perm) { + struct keyring_search_context ctx = { + .match = lookup_user_key_possessed, + .flags = (KEYRING_SEARCH_NO_STATE_CHECK | + KEYRING_SEARCH_LOOKUP_DIRECT), + }; struct request_key_auth *rka; - const struct cred *cred; struct key *key; key_ref_t key_ref, skey_ref; int ret; try_again: - cred = get_current_cred(); + ctx.cred = get_current_cred(); key_ref = ERR_PTR(-ENOKEY); switch (id) { case KEY_SPEC_THREAD_KEYRING: - if (!cred->thread_keyring) { + if (!ctx.cred->thread_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; @@ -535,13 +543,13 @@ try_again: goto reget_creds; } - key = cred->thread_keyring; - atomic_inc(&key->usage); + key = ctx.cred->thread_keyring; + __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_PROCESS_KEYRING: - if (!cred->tgcred->process_keyring) { + if (!ctx.cred->process_keyring) { if (!(lflags & KEY_LOOKUP_CREATE)) goto error; @@ -553,54 +561,64 @@ try_again: goto reget_creds; } - key = cred->tgcred->process_keyring; - atomic_inc(&key->usage); + key = ctx.cred->process_keyring; + __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_SESSION_KEYRING: - if (!cred->tgcred->session_keyring) { + if (!ctx.cred->session_keyring) { /* always install a session keyring upon access if one * doesn't exist yet */ ret = install_user_keyrings(); if (ret < 0) goto error; - ret = install_session_keyring( - cred->user->session_keyring); + if (lflags & KEY_LOOKUP_CREATE) + ret = join_session_keyring(NULL); + else + ret = install_session_keyring( + ctx.cred->user->session_keyring); if (ret < 0) goto error; goto reget_creds; + } else if (ctx.cred->session_keyring == + ctx.cred->user->session_keyring && + lflags & KEY_LOOKUP_CREATE) { + ret = join_session_keyring(NULL); + if (ret < 0) + goto error; + goto reget_creds; } rcu_read_lock(); - key = rcu_dereference(cred->tgcred->session_keyring); - atomic_inc(&key->usage); + key = rcu_dereference(ctx.cred->session_keyring); + __key_get(key); rcu_read_unlock(); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_KEYRING: - if (!cred->user->uid_keyring) { + if (!ctx.cred->user->uid_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } - key = cred->user->uid_keyring; - atomic_inc(&key->usage); + key = ctx.cred->user->uid_keyring; + __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_USER_SESSION_KEYRING: - if (!cred->user->session_keyring) { + if (!ctx.cred->user->session_keyring) { ret = install_user_keyrings(); if (ret < 0) goto error; } - key = cred->user->session_keyring; - atomic_inc(&key->usage); + key = ctx.cred->user->session_keyring; + __key_get(key); key_ref = make_key_ref(key, 1); break; @@ -610,28 +628,29 @@ try_again: goto error; case KEY_SPEC_REQKEY_AUTH_KEY: - key = cred->request_key_auth; + key = ctx.cred->request_key_auth; if (!key) goto error; - atomic_inc(&key->usage); + __key_get(key); key_ref = make_key_ref(key, 1); break; case KEY_SPEC_REQUESTOR_KEYRING: - if (!cred->request_key_auth) + if (!ctx.cred->request_key_auth) goto error; - down_read(&cred->request_key_auth->sem); - if (cred->request_key_auth->flags & KEY_FLAG_REVOKED) { + down_read(&ctx.cred->request_key_auth->sem); + if (test_bit(KEY_FLAG_REVOKED, + &ctx.cred->request_key_auth->flags)) { key_ref = ERR_PTR(-EKEYREVOKED); key = NULL; } else { - rka = cred->request_key_auth->payload.data; + rka = ctx.cred->request_key_auth->payload.data; key = rka->dest_keyring; - atomic_inc(&key->usage); + __key_get(key); 
} - up_read(&cred->request_key_auth->sem); + up_read(&ctx.cred->request_key_auth->sem); if (!key) goto error; key_ref = make_key_ref(key, 1); @@ -651,9 +670,13 @@ try_again: key_ref = make_key_ref(key, 0); /* check to see if we possess the key */ - skey_ref = search_process_keyrings(key->type, key, - lookup_user_key_possessed, - cred); + ctx.index_key.type = key->type; + ctx.index_key.description = key->description; + ctx.index_key.desc_len = strlen(key->description); + ctx.match_data = key; + kdebug("check possessed"); + skey_ref = search_process_keyrings(&ctx); + kdebug("possessed=%p", skey_ref); if (!IS_ERR(skey_ref)) { key_put(key); @@ -693,12 +716,14 @@ try_again: goto invalid_key; /* check the permissions */ - ret = key_task_permission(key_ref, cred, perm); + ret = key_task_permission(key_ref, ctx.cred, perm); if (ret < 0) goto invalid_key; + key->last_used_at = current_kernel_time().tv_sec; + error: - put_cred(cred); + put_cred(ctx.cred); return key_ref; invalid_key: @@ -709,17 +734,20 @@ invalid_key: /* if we attempted to install a keyring, then it may have caused new * creds to be installed */ reget_creds: - put_cred(cred); + put_cred(ctx.cred); goto try_again; +} -} /* end lookup_user_key() */ - -/*****************************************************************************/ /* - * join the named keyring as the session keyring if possible, or attempt to - * create a new one of that name if not - * - if the name is NULL, an empty anonymous keyring is installed instead - * - named session keyring joining is done with a semaphore held + * Join the named keyring as the session keyring if possible else attempt to + * create a new one of that name and join that. + * + * If the name is NULL, an empty anonymous keyring will be installed as the + * session keyring. + * + * Named session keyrings are joined with a semaphore held to prevent the + * keyrings from going away whilst the attempt is made to going them and also + * to prevent a race in creating compatible session keyrings. 
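The comment above describes join_session_keyring(): join the named session keyring if it exists, create it otherwise, or install an anonymous keyring for a NULL name. From userspace this is reached via keyctl_join_session_keyring(); a minimal, hedged sketch (the name "example-session" is arbitrary):

/* Sketch: join (or create) a named session keyring, as serviced by
 * join_session_keyring() in the hunk below.
 */
#include <keyutils.h>
#include <stdio.h>

int main(void)
{
    key_serial_t serial = keyctl_join_session_keyring("example-session");

    if (serial < 0) {
        perror("keyctl_join_session_keyring");
        return 1;
    }
    printf("joined session keyring %d\n", serial);

    /* Passing NULL instead would install a fresh anonymous session keyring. */
    return 0;
}

Note that the hunk below also makes re-joining the keyring that is already the session keyring return success without installing new credentials.
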
*/ long join_session_keyring(const char *name) { @@ -728,12 +756,6 @@ long join_session_keyring(const char *name) struct key *keyring; long ret, serial; - /* only permit this if there's a single thread in the thread group - - * this avoids us having to adjust the creds on all threads and risking - * ENOMEM */ - if (!current_is_single_threaded()) - return -EMLINK; - new = prepare_creds(); if (!new) return -ENOMEM; @@ -745,7 +767,7 @@ long join_session_keyring(const char *name) if (ret < 0) goto error; - serial = new->tgcred->session_keyring->serial; + serial = new->session_keyring->serial; ret = commit_creds(new); if (ret == 0) ret = serial; @@ -759,8 +781,10 @@ long join_session_keyring(const char *name) keyring = find_keyring_by_name(name, false); if (PTR_ERR(keyring) == -ENOKEY) { /* not found - try and create a new one */ - keyring = keyring_alloc(name, old->uid, old->gid, old, - KEY_ALLOC_IN_QUOTA, NULL); + keyring = keyring_alloc( + name, old->uid, old->gid, old, + KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK, + KEY_ALLOC_IN_QUOTA, NULL); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error2; @@ -768,6 +792,9 @@ long join_session_keyring(const char *name) } else if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto error2; + } else if (keyring == new->session_keyring) { + ret = 0; + goto error2; } /* we've got a keyring - now to install it */ @@ -791,26 +818,19 @@ error: } /* - * Replace a process's session keyring when that process resumes userspace on - * behalf of one of its children + * Replace a process's session keyring on behalf of one of its children when + * the target process is about to resume userspace execution. */ -void key_replace_session_keyring(void) +void key_change_session_keyring(struct callback_head *twork) { - const struct cred *old; - struct cred *new; + const struct cred *old = current_cred(); + struct cred *new = container_of(twork, struct cred, rcu); - if (!current->replacement_session_keyring) - return; - - write_lock_irq(&tasklist_lock); - new = current->replacement_session_keyring; - current->replacement_session_keyring = NULL; - write_unlock_irq(&tasklist_lock); - - if (!new) + if (unlikely(current->flags & PF_EXITING)) { + put_cred(new); return; + } - old = current_cred(); new-> uid = old-> uid; new-> euid = old-> euid; new-> suid = old-> suid; @@ -820,6 +840,7 @@ void key_replace_session_keyring(void) new-> sgid = old-> sgid; new->fsgid = old->fsgid; new->user = get_uid(old->user); + new->user_ns = get_user_ns(old->user_ns); new->group_info = get_group_info(old->group_info); new->securebits = old->securebits; @@ -830,10 +851,19 @@ void key_replace_session_keyring(void) new->jit_keyring = old->jit_keyring; new->thread_keyring = key_get(old->thread_keyring); - new->tgcred->tgid = old->tgcred->tgid; - new->tgcred->process_keyring = key_get(old->tgcred->process_keyring); + new->process_keyring = key_get(old->process_keyring); security_transfer_creds(new, old); commit_creds(new); } + +/* + * Make sure that root's user and user-session keyrings exist. + */ +static int __init init_root_keyring(void) +{ + return install_user_keyrings(); +} + +late_initcall(init_root_keyring); diff --git a/security/keys/request_key.c b/security/keys/request_key.c index 0088dd8bf68..381411941cc 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c @@ -8,7 +8,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
* - * See Documentation/keys-request-key.txt + * See Documentation/security/keys-request-key.txt */ #include <linux/module.h> @@ -39,8 +39,14 @@ static int key_wait_bit_intr(void *flags) return signal_pending(current) ? -ERESTARTSYS : 0; } -/* - * call to complete the construction of a key +/** + * complete_request_key - Complete the construction of a key. + * @cons: The key construction record. + * @error: The success or failute of the construction. + * + * Complete the attempt to construct a key. The key will be negated + * if an error is indicated. The authorisation key will be revoked + * unconditionally. */ void complete_request_key(struct key_construction *cons, int error) { @@ -58,40 +64,49 @@ void complete_request_key(struct key_construction *cons, int error) } EXPORT_SYMBOL(complete_request_key); -static int umh_keys_init(struct subprocess_info *info) +/* + * Initialise a usermode helper that is going to have a specific session + * keyring. + * + * This is called in context of freshly forked kthread before kernel_execve(), + * so we can simply install the desired session_keyring at this point. + */ +static int umh_keys_init(struct subprocess_info *info, struct cred *cred) { - struct cred *cred = (struct cred*)current_cred(); struct key *keyring = info->data; - /* - * This is called in context of freshly forked kthread before - * kernel_execve(), we can just change our ->session_keyring. - */ + return install_session_keyring_to_cred(cred, keyring); } +/* + * Clean up a usermode helper with session keyring. + */ static void umh_keys_cleanup(struct subprocess_info *info) { struct key *keyring = info->data; key_put(keyring); } +/* + * Call a usermode helper with a specific session keyring. + */ static int call_usermodehelper_keys(char *path, char **argv, char **envp, - struct key *session_keyring, enum umh_wait wait) + struct key *session_keyring, int wait) { - gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; - struct subprocess_info *info = - call_usermodehelper_setup(path, argv, envp, gfp_mask); + struct subprocess_info *info; + info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL, + umh_keys_init, umh_keys_cleanup, + session_keyring); if (!info) return -ENOMEM; - call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup, - key_get(session_keyring)); + key_get(session_keyring); return call_usermodehelper_exec(info, wait); } /* - * request userspace finish the construction of a key + * Request userspace finish the construction of a key * - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" */ static int call_sbin_request_key(struct key_construction *cons, @@ -118,6 +133,7 @@ static int call_sbin_request_key(struct key_construction *cons, cred = get_current_cred(); keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred, + KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_QUOTA_OVERRUN, NULL); put_cred(cred); if (IS_ERR(keyring)) { @@ -131,8 +147,8 @@ static int call_sbin_request_key(struct key_construction *cons, goto error_link; /* record the UID and GID */ - sprintf(uid_str, "%d", cred->fsuid); - sprintf(gid_str, "%d", cred->fsgid); + sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid)); + sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid)); /* we say which key is under construction */ sprintf(key_str, "%d", key->serial); @@ -142,12 +158,12 @@ static int call_sbin_request_key(struct key_construction *cons, cred->thread_keyring ? 
cred->thread_keyring->serial : 0); prkey = 0; - if (cred->tgcred->process_keyring) - prkey = cred->tgcred->process_keyring->serial; + if (cred->process_keyring) + prkey = cred->process_keyring->serial; sprintf(keyring_str[1], "%d", prkey); rcu_read_lock(); - session = rcu_dereference(cred->tgcred->session_keyring); + session = rcu_dereference(cred->session_keyring); if (!session) session = cred->user->session_keyring; sskey = session->serial; @@ -198,8 +214,9 @@ error_alloc: } /* - * call out to userspace for key construction - * - we ignore program failure and go on key status instead + * Call out to userspace for key construction. + * + * Program failure is ignored in favour of key status. */ static int construct_key(struct key *key, const void *callout_info, size_t callout_len, void *aux, @@ -246,9 +263,10 @@ static int construct_key(struct key *key, const void *callout_info, } /* - * get the appropriate destination keyring for the request - * - we return whatever keyring we select with an extra reference upon it which - * the caller must release + * Get the appropriate destination keyring for the request. + * + * The keyring selected is returned with an extra reference upon it which the + * caller must release. */ static void construct_get_dest_keyring(struct key **_dest_keyring) { @@ -287,14 +305,14 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) break; case KEY_REQKEY_DEFL_PROCESS_KEYRING: - dest_keyring = key_get(cred->tgcred->process_keyring); + dest_keyring = key_get(cred->process_keyring); if (dest_keyring) break; case KEY_REQKEY_DEFL_SESSION_KEYRING: rcu_read_lock(); dest_keyring = key_get( - rcu_dereference(cred->tgcred->session_keyring)); + rcu_dereference(cred->session_keyring)); rcu_read_unlock(); if (dest_keyring) @@ -321,38 +339,48 @@ static void construct_get_dest_keyring(struct key **_dest_keyring) } /* - * allocate a new key in under-construction state and attempt to link it in to - * the requested place - * - may return a key that's already under construction instead + * Allocate a new key in under-construction state and attempt to link it in to + * the requested keyring. + * + * May return a key that's already under construction instead if there was a + * race between two thread calling request_key(). 
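Earlier in this file, call_sbin_request_key() execs "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>" to have userspace finish the key, and construct_alloc_key() above handles two threads racing to build the same key. A rough, hedged sketch of what such a handler can look like follows; the payload is a stand-in, real handlers are normally dispatched through /etc/request-key.conf, and the authority to instantiate is assumed to come from the authorisation key the kernel places in the handler's session keyring.

/* Sketch of a /sbin/request-key style handler: argv follows the
 * "<op> <key> <uid> <gid> <keyring> <keyring> <keyring>" layout built by
 * call_sbin_request_key().  Instantiates the key with a dummy payload.
 */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char *argv[])
{
    key_serial_t key;

    if (argc < 8 || strcmp(argv[1], "create") != 0)
        return 1;

    key = (key_serial_t)strtol(argv[2], NULL, 10);

    /* Dummy payload; a real handler would derive or fetch key material. */
    if (keyctl_instantiate(key, "opaque-data", 11,
                           KEY_SPEC_REQUESTOR_KEYRING) < 0) {
        perror("keyctl_instantiate");
        keyctl_negate(key, 30, KEY_SPEC_REQUESTOR_KEYRING);
        return 1;
    }
    return 0;
}
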
*/ -static int construct_alloc_key(struct key_type *type, - const char *description, +static int construct_alloc_key(struct keyring_search_context *ctx, struct key *dest_keyring, unsigned long flags, struct key_user *user, struct key **_key) { - struct keyring_list *prealloc; - const struct cred *cred = current_cred(); + struct assoc_array_edit *edit; struct key *key; + key_perm_t perm; key_ref_t key_ref; int ret; - kenter("%s,%s,,,", type->name, description); + kenter("%s,%s,,,", + ctx->index_key.type->name, ctx->index_key.description); *_key = NULL; mutex_lock(&user->cons_lock); - key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred, - KEY_POS_ALL, flags); + perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR; + perm |= KEY_USR_VIEW; + if (ctx->index_key.type->read) + perm |= KEY_POS_READ; + if (ctx->index_key.type == &key_type_keyring || + ctx->index_key.type->update) + perm |= KEY_POS_WRITE; + + key = key_alloc(ctx->index_key.type, ctx->index_key.description, + ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred, + perm, flags); if (IS_ERR(key)) goto alloc_failed; set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags); if (dest_keyring) { - ret = __key_link_begin(dest_keyring, type, description, - &prealloc); + ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit); if (ret < 0) goto link_prealloc_failed; } @@ -362,16 +390,16 @@ static int construct_alloc_key(struct key_type *type, * waited for locks */ mutex_lock(&key_construction_mutex); - key_ref = search_process_keyrings(type, description, type->match, cred); + key_ref = search_process_keyrings(ctx); if (!IS_ERR(key_ref)) goto key_already_present; if (dest_keyring) - __key_link(dest_keyring, key, &prealloc); + __key_link(key, &edit); mutex_unlock(&key_construction_mutex); if (dest_keyring) - __key_link_end(dest_keyring, type, prealloc); + __key_link_end(dest_keyring, &ctx->index_key, edit); mutex_unlock(&user->cons_lock); *_key = key; kleave(" = 0 [%d]", key_serial(key)); @@ -386,8 +414,8 @@ key_already_present: if (dest_keyring) { ret = __key_link_check_live_key(dest_keyring, key); if (ret == 0) - __key_link(dest_keyring, key, &prealloc); - __key_link_end(dest_keyring, type, prealloc); + __key_link(key, &edit); + __key_link_end(dest_keyring, &ctx->index_key, edit); if (ret < 0) goto link_check_failed; } @@ -403,7 +431,6 @@ link_check_failed: return ret; link_prealloc_failed: - up_write(&dest_keyring->sem); mutex_unlock(&user->cons_lock); kleave(" = %d [prelink]", ret); return ret; @@ -415,10 +442,9 @@ alloc_failed: } /* - * commence key construction + * Commence key construction. 
*/ -static struct key *construct_key_and_link(struct key_type *type, - const char *description, +static struct key *construct_key_and_link(struct keyring_search_context *ctx, const char *callout_info, size_t callout_len, void *aux, @@ -431,14 +457,13 @@ static struct key *construct_key_and_link(struct key_type *type, kenter(""); - user = key_user_lookup(current_fsuid(), current_user_ns()); + user = key_user_lookup(current_fsuid()); if (!user) return ERR_PTR(-ENOMEM); construct_get_dest_keyring(&dest_keyring); - ret = construct_alloc_key(type, description, dest_keyring, flags, user, - &key); + ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key); key_user_put(user); if (ret == 0) { @@ -451,7 +476,7 @@ static struct key *construct_key_and_link(struct key_type *type, } else if (ret == -EINPROGRESS) { ret = 0; } else { - key = ERR_PTR(ret); + goto couldnt_alloc_key; } key_put(dest_keyring); @@ -461,17 +486,38 @@ static struct key *construct_key_and_link(struct key_type *type, construction_failed: key_negate_and_link(key, key_negative_timeout, NULL, NULL); key_put(key); +couldnt_alloc_key: key_put(dest_keyring); kleave(" = %d", ret); return ERR_PTR(ret); } -/* - * request a key - * - search the process's keyrings - * - check the list of keys being created or updated - * - call out to userspace for a key if supplementary info was provided - * - cache the key in an appropriate keyring +/** + * request_key_and_link - Request a key and cache it in a keyring. + * @type: The type of key we want. + * @description: The searchable description of the key. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * @callout_len: The length of callout_info. + * @aux: Auxiliary data for the upcall. + * @dest_keyring: Where to cache the key. + * @flags: Flags to key_alloc(). + * + * A key matching the specified criteria is searched for in the process's + * keyrings and returned with its usage count incremented if found. Otherwise, + * if callout_info is not NULL, a key will be allocated and some service + * (probably in userspace) will be asked to instantiate it. + * + * If successfully found or created, the key will be linked to the destination + * keyring if one is provided. + * + * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED + * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was + * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT + * if insufficient key quota was available to create a new key; or -ENOMEM if + * insufficient memory was available. + * + * If the returned key was created, then it may still be under construction, + * and wait_for_key_construction() should be used to wait for that to complete. 
*/ struct key *request_key_and_link(struct key_type *type, const char *description, @@ -481,18 +527,24 @@ struct key *request_key_and_link(struct key_type *type, struct key *dest_keyring, unsigned long flags) { - const struct cred *cred = current_cred(); + struct keyring_search_context ctx = { + .index_key.type = type, + .index_key.description = description, + .cred = current_cred(), + .match = type->match, + .match_data = description, + .flags = KEYRING_SEARCH_LOOKUP_DIRECT, + }; struct key *key; key_ref_t key_ref; int ret; kenter("%s,%s,%p,%zu,%p,%p,%lx", - type->name, description, callout_info, callout_len, aux, - dest_keyring, flags); + ctx.index_key.type->name, ctx.index_key.description, + callout_info, callout_len, aux, dest_keyring, flags); /* search all the process keyrings for a key */ - key_ref = search_process_keyrings(type, description, type->match, - cred); + key_ref = search_process_keyrings(&ctx); if (!IS_ERR(key_ref)) { key = key_ref_to_ptr(key_ref); @@ -515,9 +567,8 @@ struct key *request_key_and_link(struct key_type *type, if (!callout_info) goto error; - key = construct_key_and_link(type, description, callout_info, - callout_len, aux, dest_keyring, - flags); + key = construct_key_and_link(&ctx, callout_info, callout_len, + aux, dest_keyring, flags); } error: @@ -525,8 +576,16 @@ error: return key; } -/* - * wait for construction of a key to complete +/** + * wait_for_key_construction - Wait for construction of a key to complete + * @key: The key being waited for. + * @intr: Whether to wait interruptibly. + * + * Wait for a key to finish being constructed. + * + * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY + * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was + * revoked or expired. */ int wait_for_key_construction(struct key *key, bool intr) { @@ -537,18 +596,27 @@ int wait_for_key_construction(struct key *key, bool intr) intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE); if (ret < 0) return ret; - if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) - return -ENOKEY; + if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) { + smp_rmb(); + return key->type_data.reject_error; + } return key_validate(key); } EXPORT_SYMBOL(wait_for_key_construction); -/* - * request a key - * - search the process's keyrings - * - check the list of keys being created or updated - * - call out to userspace for a key if supplementary info was provided - * - waits uninterruptible for creation to complete +/** + * request_key - Request a key and wait for construction + * @type: Type of key. + * @description: The searchable description of the key. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * + * As for request_key_and_link() except that it does not add the returned key + * to a keyring if found, new keys are always allocated in the user's quota, + * the callout_info must be a NUL-terminated string and no auxiliary data can + * be passed. + * + * Furthermore, it then works as wait_for_key_construction() to wait for the + * completion of keys undergoing construction with a non-interruptible wait. 
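request_key(), documented above, is the search-then-upcall-then-wait front end: it searches the caller's keyrings, upcalls if callout_info is supplied, and waits non-interruptibly for construction. Below is a hedged sketch of a hypothetical in-kernel caller (not part of this patch) that requests a "user" key and copies its payload out under the key semaphore; the struct user_key_payload layout is assumed to be the <keys/user-type.h> definition of this era.

/* Sketch (hypothetical kernel-side caller): look up a "user" key by
 * description, then read its payload while holding the key semaphore.
 */
#include <linux/key.h>
#include <linux/err.h>
#include <linux/string.h>
#include <keys/user-type.h>

static int example_fetch_secret(char *buf, size_t buflen)
{
    const struct user_key_payload *payload;
    struct key *key;
    int ret = 0;

    key = request_key(&key_type_user, "example:secret", NULL);
    if (IS_ERR(key))
        return PTR_ERR(key);

    down_read(&key->sem);
    payload = key->payload.data;
    if (payload->datalen >= buflen)
        ret = -EINVAL;
    else
        memcpy(buf, payload->data, payload->datalen);
    up_read(&key->sem);

    key_put(key);
    return ret;
}
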
*/ struct key *request_key(struct key_type *type, const char *description, @@ -573,12 +641,19 @@ struct key *request_key(struct key_type *type, } EXPORT_SYMBOL(request_key); -/* - * request a key with auxiliary data for the upcaller - * - search the process's keyrings - * - check the list of keys being created or updated - * - call out to userspace for a key if supplementary info was provided - * - waits uninterruptible for creation to complete +/** + * request_key_with_auxdata - Request a key with auxiliary data for the upcaller + * @type: The type of key we want. + * @description: The searchable description of the key. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * @callout_len: The length of callout_info. + * @aux: Auxiliary data for the upcall. + * + * As for request_key_and_link() except that it does not add the returned key + * to a keyring if found and new keys are always allocated in the user's quota. + * + * Furthermore, it then works as wait_for_key_construction() to wait for the + * completion of keys undergoing construction with a non-interruptible wait. */ struct key *request_key_with_auxdata(struct key_type *type, const char *description, @@ -603,10 +678,18 @@ struct key *request_key_with_auxdata(struct key_type *type, EXPORT_SYMBOL(request_key_with_auxdata); /* - * request a key (allow async construction) - * - search the process's keyrings - * - check the list of keys being created or updated - * - call out to userspace for a key if supplementary info was provided + * request_key_async - Request a key (allow async construction) + * @type: Type of key. + * @description: The searchable description of the key. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * @callout_len: The length of callout_info. + * + * As for request_key_and_link() except that it does not add the returned key + * to a keyring if found, new keys are always allocated in the user's quota and + * no auxiliary data can be passed. + * + * The caller should call wait_for_key_construction() to wait for the + * completion of the returned key if it is still undergoing construction. */ struct key *request_key_async(struct key_type *type, const char *description, @@ -621,9 +704,17 @@ EXPORT_SYMBOL(request_key_async); /* * request a key with auxiliary data for the upcaller (allow async construction) - * - search the process's keyrings - * - check the list of keys being created or updated - * - call out to userspace for a key if supplementary info was provided + * @type: Type of key. + * @description: The searchable description of the key. + * @callout_info: The data to pass to the instantiation upcall (or NULL). + * @callout_len: The length of callout_info. + * @aux: Auxiliary data for the upcall. + * + * As for request_key_and_link() except that it does not add the returned key + * to a keyring if found and new keys are always allocated in the user's quota. + * + * The caller should call wait_for_key_construction() to wait for the + * completion of the returned key if it is still undergoing construction. */ struct key *request_key_async_with_auxdata(struct key_type *type, const char *description, diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index 86747151ee5..7495a93b4b9 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c @@ -1,4 +1,4 @@ -/* request_key_auth.c: request key authorisation controlling key def +/* Request key authorisation token key definition. 
* * Copyright (C) 2005 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) @@ -8,7 +8,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * - * See Documentation/keys-request-key.txt + * See Documentation/security/keys-request-key.txt */ #include <linux/module.h> @@ -18,15 +18,17 @@ #include <linux/slab.h> #include <asm/uaccess.h> #include "internal.h" +#include <keys/user-type.h> -static int request_key_auth_instantiate(struct key *, const void *, size_t); +static int request_key_auth_instantiate(struct key *, + struct key_preparsed_payload *); static void request_key_auth_describe(const struct key *, struct seq_file *); static void request_key_auth_revoke(struct key *); static void request_key_auth_destroy(struct key *); static long request_key_auth_read(const struct key *, char __user *, size_t); /* - * the request-key authorisation key type definition + * The request-key authorisation key type definition. */ struct key_type key_type_request_key_auth = { .name = ".request_key_auth", @@ -38,22 +40,18 @@ struct key_type key_type_request_key_auth = { .read = request_key_auth_read, }; -/*****************************************************************************/ /* - * instantiate a request-key authorisation key + * Instantiate a request-key authorisation key. */ static int request_key_auth_instantiate(struct key *key, - const void *data, - size_t datalen) + struct key_preparsed_payload *prep) { - key->payload.data = (struct request_key_auth *) data; + key->payload.data = (struct request_key_auth *)prep->data; return 0; +} -} /* end request_key_auth_instantiate() */ - -/*****************************************************************************/ /* - * reading a request-key authorisation key retrieves the callout information + * Describe an authorisation token. */ static void request_key_auth_describe(const struct key *key, struct seq_file *m) @@ -62,13 +60,12 @@ static void request_key_auth_describe(const struct key *key, seq_puts(m, "key:"); seq_puts(m, key->description); - seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); - -} /* end request_key_auth_describe() */ + if (key_is_instantiated(key)) + seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len); +} -/*****************************************************************************/ /* - * read the callout_info data + * Read the callout_info data (retrieves the callout information). * - the key's semaphore is read-locked */ static long request_key_auth_read(const struct key *key, @@ -91,13 +88,12 @@ static long request_key_auth_read(const struct key *key, } return ret; +} -} /* end request_key_auth_read() */ - -/*****************************************************************************/ /* - * handle revocation of an authorisation token key - * - called with the key sem write-locked + * Handle revocation of an authorisation token key. + * + * Called with the key sem write-locked. */ static void request_key_auth_revoke(struct key *key) { @@ -109,12 +105,10 @@ static void request_key_auth_revoke(struct key *key) put_cred(rka->cred); rka->cred = NULL; } +} -} /* end request_key_auth_revoke() */ - -/*****************************************************************************/ /* - * destroy an instantiation authorisation token key + * Destroy an instantiation authorisation token key. 
*/ static void request_key_auth_destroy(struct key *key) { @@ -131,13 +125,11 @@ static void request_key_auth_destroy(struct key *key) key_put(rka->dest_keyring); kfree(rka->callout_info); kfree(rka); +} -} /* end request_key_auth_destroy() */ - -/*****************************************************************************/ /* - * create an authorisation token for /sbin/request-key or whoever to gain - * access to the caller's security data + * Create an authorisation token for /sbin/request-key or whoever to gain + * access to the caller's security data. */ struct key *request_key_auth_new(struct key *target, const void *callout_info, size_t callout_len, struct key *dest_keyring) @@ -228,45 +220,34 @@ error_alloc: kfree(rka); kleave("= %d", ret); return ERR_PTR(ret); +} -} /* end request_key_auth_new() */ - -/*****************************************************************************/ -/* - * see if an authorisation key is associated with a particular key - */ -static int key_get_instantiation_authkey_match(const struct key *key, - const void *_id) -{ - struct request_key_auth *rka = key->payload.data; - key_serial_t id = (key_serial_t)(unsigned long) _id; - - return rka->target_key->serial == id; - -} /* end key_get_instantiation_authkey_match() */ - -/*****************************************************************************/ /* - * get the authorisation key for instantiation of a specific key if attached to - * the current process's keyrings - * - this key is inserted into a keyring and that is set as /sbin/request-key's - * session keyring - * - a target_id of zero specifies any valid token + * Search the current process's keyrings for the authorisation key for + * instantiation of a key. */ struct key *key_get_instantiation_authkey(key_serial_t target_id) { - const struct cred *cred = current_cred(); + char description[16]; + struct keyring_search_context ctx = { + .index_key.type = &key_type_request_key_auth, + .index_key.description = description, + .cred = current_cred(), + .match = user_match, + .match_data = description, + .flags = KEYRING_SEARCH_LOOKUP_DIRECT, + }; struct key *authkey; key_ref_t authkey_ref; - authkey_ref = search_process_keyrings( - &key_type_request_key_auth, - (void *) (unsigned long) target_id, - key_get_instantiation_authkey_match, - cred); + sprintf(description, "%x", target_id); + + authkey_ref = search_process_keyrings(&ctx); if (IS_ERR(authkey_ref)) { authkey = ERR_CAST(authkey_ref); + if (authkey == ERR_PTR(-EAGAIN)) + authkey = ERR_PTR(-ENOKEY); goto error; } @@ -278,5 +259,4 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id) error: return authkey; - -} /* end key_get_instantiation_authkey() */ +} diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c index ee32d181764..b68faa1a5cf 100644 --- a/security/keys/sysctl.c +++ b/security/keys/sysctl.c @@ -15,7 +15,7 @@ static const int zero, one = 1, max = INT_MAX; -ctl_table key_sysctls[] = { +struct ctl_table key_sysctls[] = { { .procname = "maxkeys", .data = &key_quota_maxkeys, @@ -61,5 +61,16 @@ ctl_table key_sysctls[] = { .extra1 = (void *) &zero, .extra2 = (void *) &max, }, +#ifdef CONFIG_PERSISTENT_KEYRINGS + { + .procname = "persistent_keyring_expiry", + .data = &persistent_keyring_expiry, + .maxlen = sizeof(unsigned), + .mode = 0644, + .proc_handler = proc_dointvec_minmax, + .extra1 = (void *) &zero, + .extra2 = (void *) &max, + }, +#endif { } }; diff --git a/security/keys/trusted.c b/security/keys/trusted.c new file mode 100644 index 00000000000..6b804aa4529 --- 
/dev/null +++ b/security/keys/trusted.c @@ -0,0 +1,1163 @@ +/* + * Copyright (C) 2010 IBM Corporation + * + * Author: + * David Safford <safford@us.ibm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation, version 2 of the License. + * + * See Documentation/security/keys-trusted-encrypted.txt + */ + +#include <linux/uaccess.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/slab.h> +#include <linux/parser.h> +#include <linux/string.h> +#include <linux/err.h> +#include <keys/user-type.h> +#include <keys/trusted-type.h> +#include <linux/key-type.h> +#include <linux/rcupdate.h> +#include <linux/crypto.h> +#include <crypto/hash.h> +#include <crypto/sha.h> +#include <linux/capability.h> +#include <linux/tpm.h> +#include <linux/tpm_command.h> + +#include "trusted.h" + +static const char hmac_alg[] = "hmac(sha1)"; +static const char hash_alg[] = "sha1"; + +struct sdesc { + struct shash_desc shash; + char ctx[]; +}; + +static struct crypto_shash *hashalg; +static struct crypto_shash *hmacalg; + +static struct sdesc *init_sdesc(struct crypto_shash *alg) +{ + struct sdesc *sdesc; + int size; + + size = sizeof(struct shash_desc) + crypto_shash_descsize(alg); + sdesc = kmalloc(size, GFP_KERNEL); + if (!sdesc) + return ERR_PTR(-ENOMEM); + sdesc->shash.tfm = alg; + sdesc->shash.flags = 0x0; + return sdesc; +} + +static int TSS_sha1(const unsigned char *data, unsigned int datalen, + unsigned char *digest) +{ + struct sdesc *sdesc; + int ret; + + sdesc = init_sdesc(hashalg); + if (IS_ERR(sdesc)) { + pr_info("trusted_key: can't alloc %s\n", hash_alg); + return PTR_ERR(sdesc); + } + + ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest); + kfree(sdesc); + return ret; +} + +static int TSS_rawhmac(unsigned char *digest, const unsigned char *key, + unsigned int keylen, ...) +{ + struct sdesc *sdesc; + va_list argp; + unsigned int dlen; + unsigned char *data; + int ret; + + sdesc = init_sdesc(hmacalg); + if (IS_ERR(sdesc)) { + pr_info("trusted_key: can't alloc %s\n", hmac_alg); + return PTR_ERR(sdesc); + } + + ret = crypto_shash_setkey(hmacalg, key, keylen); + if (ret < 0) + goto out; + ret = crypto_shash_init(&sdesc->shash); + if (ret < 0) + goto out; + + va_start(argp, keylen); + for (;;) { + dlen = va_arg(argp, unsigned int); + if (dlen == 0) + break; + data = va_arg(argp, unsigned char *); + if (data == NULL) { + ret = -EINVAL; + break; + } + ret = crypto_shash_update(&sdesc->shash, data, dlen); + if (ret < 0) + break; + } + va_end(argp); + if (!ret) + ret = crypto_shash_final(&sdesc->shash, digest); +out: + kfree(sdesc); + return ret; +} + +/* + * calculate authorization info fields to send to TPM + */ +static int TSS_authhmac(unsigned char *digest, const unsigned char *key, + unsigned int keylen, unsigned char *h1, + unsigned char *h2, unsigned char h3, ...) 
+{ + unsigned char paramdigest[SHA1_DIGEST_SIZE]; + struct sdesc *sdesc; + unsigned int dlen; + unsigned char *data; + unsigned char c; + int ret; + va_list argp; + + sdesc = init_sdesc(hashalg); + if (IS_ERR(sdesc)) { + pr_info("trusted_key: can't alloc %s\n", hash_alg); + return PTR_ERR(sdesc); + } + + c = h3; + ret = crypto_shash_init(&sdesc->shash); + if (ret < 0) + goto out; + va_start(argp, h3); + for (;;) { + dlen = va_arg(argp, unsigned int); + if (dlen == 0) + break; + data = va_arg(argp, unsigned char *); + if (!data) { + ret = -EINVAL; + break; + } + ret = crypto_shash_update(&sdesc->shash, data, dlen); + if (ret < 0) + break; + } + va_end(argp); + if (!ret) + ret = crypto_shash_final(&sdesc->shash, paramdigest); + if (!ret) + ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE, + paramdigest, TPM_NONCE_SIZE, h1, + TPM_NONCE_SIZE, h2, 1, &c, 0, 0); +out: + kfree(sdesc); + return ret; +} + +/* + * verify the AUTH1_COMMAND (Seal) result from TPM + */ +static int TSS_checkhmac1(unsigned char *buffer, + const uint32_t command, + const unsigned char *ononce, + const unsigned char *key, + unsigned int keylen, ...) +{ + uint32_t bufsize; + uint16_t tag; + uint32_t ordinal; + uint32_t result; + unsigned char *enonce; + unsigned char *continueflag; + unsigned char *authdata; + unsigned char testhmac[SHA1_DIGEST_SIZE]; + unsigned char paramdigest[SHA1_DIGEST_SIZE]; + struct sdesc *sdesc; + unsigned int dlen; + unsigned int dpos; + va_list argp; + int ret; + + bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); + tag = LOAD16(buffer, 0); + ordinal = command; + result = LOAD32N(buffer, TPM_RETURN_OFFSET); + if (tag == TPM_TAG_RSP_COMMAND) + return 0; + if (tag != TPM_TAG_RSP_AUTH1_COMMAND) + return -EINVAL; + authdata = buffer + bufsize - SHA1_DIGEST_SIZE; + continueflag = authdata - 1; + enonce = continueflag - TPM_NONCE_SIZE; + + sdesc = init_sdesc(hashalg); + if (IS_ERR(sdesc)) { + pr_info("trusted_key: can't alloc %s\n", hash_alg); + return PTR_ERR(sdesc); + } + ret = crypto_shash_init(&sdesc->shash); + if (ret < 0) + goto out; + ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, + sizeof result); + if (ret < 0) + goto out; + ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, + sizeof ordinal); + if (ret < 0) + goto out; + va_start(argp, keylen); + for (;;) { + dlen = va_arg(argp, unsigned int); + if (dlen == 0) + break; + dpos = va_arg(argp, unsigned int); + ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); + if (ret < 0) + break; + } + va_end(argp); + if (!ret) + ret = crypto_shash_final(&sdesc->shash, paramdigest); + if (ret < 0) + goto out; + + ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest, + TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce, + 1, continueflag, 0, 0); + if (ret < 0) + goto out; + + if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE)) + ret = -EINVAL; +out: + kfree(sdesc); + return ret; +} + +/* + * verify the AUTH2_COMMAND (unseal) result from TPM + */ +static int TSS_checkhmac2(unsigned char *buffer, + const uint32_t command, + const unsigned char *ononce, + const unsigned char *key1, + unsigned int keylen1, + const unsigned char *key2, + unsigned int keylen2, ...) 
+{ + uint32_t bufsize; + uint16_t tag; + uint32_t ordinal; + uint32_t result; + unsigned char *enonce1; + unsigned char *continueflag1; + unsigned char *authdata1; + unsigned char *enonce2; + unsigned char *continueflag2; + unsigned char *authdata2; + unsigned char testhmac1[SHA1_DIGEST_SIZE]; + unsigned char testhmac2[SHA1_DIGEST_SIZE]; + unsigned char paramdigest[SHA1_DIGEST_SIZE]; + struct sdesc *sdesc; + unsigned int dlen; + unsigned int dpos; + va_list argp; + int ret; + + bufsize = LOAD32(buffer, TPM_SIZE_OFFSET); + tag = LOAD16(buffer, 0); + ordinal = command; + result = LOAD32N(buffer, TPM_RETURN_OFFSET); + + if (tag == TPM_TAG_RSP_COMMAND) + return 0; + if (tag != TPM_TAG_RSP_AUTH2_COMMAND) + return -EINVAL; + authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1 + + SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE); + authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE); + continueflag1 = authdata1 - 1; + continueflag2 = authdata2 - 1; + enonce1 = continueflag1 - TPM_NONCE_SIZE; + enonce2 = continueflag2 - TPM_NONCE_SIZE; + + sdesc = init_sdesc(hashalg); + if (IS_ERR(sdesc)) { + pr_info("trusted_key: can't alloc %s\n", hash_alg); + return PTR_ERR(sdesc); + } + ret = crypto_shash_init(&sdesc->shash); + if (ret < 0) + goto out; + ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result, + sizeof result); + if (ret < 0) + goto out; + ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal, + sizeof ordinal); + if (ret < 0) + goto out; + + va_start(argp, keylen2); + for (;;) { + dlen = va_arg(argp, unsigned int); + if (dlen == 0) + break; + dpos = va_arg(argp, unsigned int); + ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen); + if (ret < 0) + break; + } + va_end(argp); + if (!ret) + ret = crypto_shash_final(&sdesc->shash, paramdigest); + if (ret < 0) + goto out; + + ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE, + paramdigest, TPM_NONCE_SIZE, enonce1, + TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0); + if (ret < 0) + goto out; + if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) { + ret = -EINVAL; + goto out; + } + ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE, + paramdigest, TPM_NONCE_SIZE, enonce2, + TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0); + if (ret < 0) + goto out; + if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE)) + ret = -EINVAL; +out: + kfree(sdesc); + return ret; +} + +/* + * For key specific tpm requests, we will generate and send our + * own TPM command packets using the drivers send function. + */ +static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd, + size_t buflen) +{ + int rc; + + dump_tpm_buf(cmd); + rc = tpm_send(chip_num, cmd, buflen); + dump_tpm_buf(cmd); + if (rc > 0) + /* Can't return positive return codes values to keyctl */ + rc = -EPERM; + return rc; +} + +/* + * Lock a trusted key, by extending a selected PCR. + * + * Prevents a trusted key that is sealed to PCRs from being accessed. + * This uses the tpm driver's extend function. + */ +static int pcrlock(const int pcrnum) +{ + unsigned char hash[SHA1_DIGEST_SIZE]; + int ret; + + if (!capable(CAP_SYS_ADMIN)) + return -EPERM; + ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE); + if (ret != SHA1_DIGEST_SIZE) + return ret; + return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? 
-EINVAL : 0; +} + +/* + * Create an object specific authorisation protocol (OSAP) session + */ +static int osap(struct tpm_buf *tb, struct osapsess *s, + const unsigned char *key, uint16_t type, uint32_t handle) +{ + unsigned char enonce[TPM_NONCE_SIZE]; + unsigned char ononce[TPM_NONCE_SIZE]; + int ret; + + ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE); + if (ret != TPM_NONCE_SIZE) + return ret; + + INIT_BUF(tb); + store16(tb, TPM_TAG_RQU_COMMAND); + store32(tb, TPM_OSAP_SIZE); + store32(tb, TPM_ORD_OSAP); + store16(tb, type); + store32(tb, handle); + storebytes(tb, ononce, TPM_NONCE_SIZE); + + ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); + if (ret < 0) + return ret; + + s->handle = LOAD32(tb->data, TPM_DATA_OFFSET); + memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]), + TPM_NONCE_SIZE); + memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) + + TPM_NONCE_SIZE]), TPM_NONCE_SIZE); + return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE, + enonce, TPM_NONCE_SIZE, ononce, 0, 0); +} + +/* + * Create an object independent authorisation protocol (oiap) session + */ +static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce) +{ + int ret; + + INIT_BUF(tb); + store16(tb, TPM_TAG_RQU_COMMAND); + store32(tb, TPM_OIAP_SIZE); + store32(tb, TPM_ORD_OIAP); + ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); + if (ret < 0) + return ret; + + *handle = LOAD32(tb->data, TPM_DATA_OFFSET); + memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)], + TPM_NONCE_SIZE); + return 0; +} + +struct tpm_digests { + unsigned char encauth[SHA1_DIGEST_SIZE]; + unsigned char pubauth[SHA1_DIGEST_SIZE]; + unsigned char xorwork[SHA1_DIGEST_SIZE * 2]; + unsigned char xorhash[SHA1_DIGEST_SIZE]; + unsigned char nonceodd[TPM_NONCE_SIZE]; +}; + +/* + * Have the TPM seal(encrypt) the trusted key, possibly based on + * Platform Configuration Registers (PCRs). AUTH1 for sealing key. 
+ */ +static int tpm_seal(struct tpm_buf *tb, uint16_t keytype, + uint32_t keyhandle, const unsigned char *keyauth, + const unsigned char *data, uint32_t datalen, + unsigned char *blob, uint32_t *bloblen, + const unsigned char *blobauth, + const unsigned char *pcrinfo, uint32_t pcrinfosize) +{ + struct osapsess sess; + struct tpm_digests *td; + unsigned char cont; + uint32_t ordinal; + uint32_t pcrsize; + uint32_t datsize; + int sealinfosize; + int encdatasize; + int storedsize; + int ret; + int i; + + /* alloc some work space for all the hashes */ + td = kmalloc(sizeof *td, GFP_KERNEL); + if (!td) + return -ENOMEM; + + /* get session for sealing key */ + ret = osap(tb, &sess, keyauth, keytype, keyhandle); + if (ret < 0) + goto out; + dump_sess(&sess); + + /* calculate encrypted authorization value */ + memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE); + memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE); + ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash); + if (ret < 0) + goto out; + + ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE); + if (ret != TPM_NONCE_SIZE) + goto out; + ordinal = htonl(TPM_ORD_SEAL); + datsize = htonl(datalen); + pcrsize = htonl(pcrinfosize); + cont = 0; + + /* encrypt data authorization key */ + for (i = 0; i < SHA1_DIGEST_SIZE; ++i) + td->encauth[i] = td->xorhash[i] ^ blobauth[i]; + + /* calculate authorization HMAC value */ + if (pcrinfosize == 0) { + /* no pcr info specified */ + ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, + sess.enonce, td->nonceodd, cont, + sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, + td->encauth, sizeof(uint32_t), &pcrsize, + sizeof(uint32_t), &datsize, datalen, data, 0, + 0); + } else { + /* pcr info specified */ + ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE, + sess.enonce, td->nonceodd, cont, + sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE, + td->encauth, sizeof(uint32_t), &pcrsize, + pcrinfosize, pcrinfo, sizeof(uint32_t), + &datsize, datalen, data, 0, 0); + } + if (ret < 0) + goto out; + + /* build and send the TPM request packet */ + INIT_BUF(tb); + store16(tb, TPM_TAG_RQU_AUTH1_COMMAND); + store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen); + store32(tb, TPM_ORD_SEAL); + store32(tb, keyhandle); + storebytes(tb, td->encauth, SHA1_DIGEST_SIZE); + store32(tb, pcrinfosize); + storebytes(tb, pcrinfo, pcrinfosize); + store32(tb, datalen); + storebytes(tb, data, datalen); + store32(tb, sess.handle); + storebytes(tb, td->nonceodd, TPM_NONCE_SIZE); + store8(tb, cont); + storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE); + + ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); + if (ret < 0) + goto out; + + /* calculate the size of the returned Blob */ + sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t)); + encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) + + sizeof(uint32_t) + sealinfosize); + storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize + + sizeof(uint32_t) + encdatasize; + + /* check the HMAC in the response */ + ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret, + SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0, + 0); + + /* copy the returned blob to caller */ + if (!ret) { + memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize); + *bloblen = storedsize; + } +out: + kfree(td); + return ret; +} + +/* + * use the AUTH2_COMMAND form of unseal, to authorize both key and blob + */ +static int tpm_unseal(struct tpm_buf *tb, + uint32_t keyhandle, const unsigned char *keyauth, + const 
unsigned char *blob, int bloblen, + const unsigned char *blobauth, + unsigned char *data, unsigned int *datalen) +{ + unsigned char nonceodd[TPM_NONCE_SIZE]; + unsigned char enonce1[TPM_NONCE_SIZE]; + unsigned char enonce2[TPM_NONCE_SIZE]; + unsigned char authdata1[SHA1_DIGEST_SIZE]; + unsigned char authdata2[SHA1_DIGEST_SIZE]; + uint32_t authhandle1 = 0; + uint32_t authhandle2 = 0; + unsigned char cont = 0; + uint32_t ordinal; + uint32_t keyhndl; + int ret; + + /* sessions for unsealing key and data */ + ret = oiap(tb, &authhandle1, enonce1); + if (ret < 0) { + pr_info("trusted_key: oiap failed (%d)\n", ret); + return ret; + } + ret = oiap(tb, &authhandle2, enonce2); + if (ret < 0) { + pr_info("trusted_key: oiap failed (%d)\n", ret); + return ret; + } + + ordinal = htonl(TPM_ORD_UNSEAL); + keyhndl = htonl(SRKHANDLE); + ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE); + if (ret != TPM_NONCE_SIZE) { + pr_info("trusted_key: tpm_get_random failed (%d)\n", ret); + return ret; + } + ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE, + enonce1, nonceodd, cont, sizeof(uint32_t), + &ordinal, bloblen, blob, 0, 0); + if (ret < 0) + return ret; + ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE, + enonce2, nonceodd, cont, sizeof(uint32_t), + &ordinal, bloblen, blob, 0, 0); + if (ret < 0) + return ret; + + /* build and send TPM request packet */ + INIT_BUF(tb); + store16(tb, TPM_TAG_RQU_AUTH2_COMMAND); + store32(tb, TPM_UNSEAL_SIZE + bloblen); + store32(tb, TPM_ORD_UNSEAL); + store32(tb, keyhandle); + storebytes(tb, blob, bloblen); + store32(tb, authhandle1); + storebytes(tb, nonceodd, TPM_NONCE_SIZE); + store8(tb, cont); + storebytes(tb, authdata1, SHA1_DIGEST_SIZE); + store32(tb, authhandle2); + storebytes(tb, nonceodd, TPM_NONCE_SIZE); + store8(tb, cont); + storebytes(tb, authdata2, SHA1_DIGEST_SIZE); + + ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE); + if (ret < 0) { + pr_info("trusted_key: authhmac failed (%d)\n", ret); + return ret; + } + + *datalen = LOAD32(tb->data, TPM_DATA_OFFSET); + ret = TSS_checkhmac2(tb->data, ordinal, nonceodd, + keyauth, SHA1_DIGEST_SIZE, + blobauth, SHA1_DIGEST_SIZE, + sizeof(uint32_t), TPM_DATA_OFFSET, + *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0, + 0); + if (ret < 0) { + pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret); + return ret; + } + memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen); + return 0; +} + +/* + * Have the TPM seal(encrypt) the symmetric key + */ +static int key_seal(struct trusted_key_payload *p, + struct trusted_key_options *o) +{ + struct tpm_buf *tb; + int ret; + + tb = kzalloc(sizeof *tb, GFP_KERNEL); + if (!tb) + return -ENOMEM; + + /* include migratable flag at end of sealed key */ + p->key[p->key_len] = p->migratable; + + ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth, + p->key, p->key_len + 1, p->blob, &p->blob_len, + o->blobauth, o->pcrinfo, o->pcrinfo_len); + if (ret < 0) + pr_info("trusted_key: srkseal failed (%d)\n", ret); + + kfree(tb); + return ret; +} + +/* + * Have the TPM unseal(decrypt) the symmetric key + */ +static int key_unseal(struct trusted_key_payload *p, + struct trusted_key_options *o) +{ + struct tpm_buf *tb; + int ret; + + tb = kzalloc(sizeof *tb, GFP_KERNEL); + if (!tb) + return -ENOMEM; + + ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len, + o->blobauth, p->key, &p->key_len); + if (ret < 0) + pr_info("trusted_key: srkunseal failed (%d)\n", ret); + else + /* pull migratable flag out of sealed key */ + p->migratable = 
p->key[--p->key_len]; + + kfree(tb); + return ret; +} + +enum { + Opt_err = -1, + Opt_new, Opt_load, Opt_update, + Opt_keyhandle, Opt_keyauth, Opt_blobauth, + Opt_pcrinfo, Opt_pcrlock, Opt_migratable +}; + +static const match_table_t key_tokens = { + {Opt_new, "new"}, + {Opt_load, "load"}, + {Opt_update, "update"}, + {Opt_keyhandle, "keyhandle=%s"}, + {Opt_keyauth, "keyauth=%s"}, + {Opt_blobauth, "blobauth=%s"}, + {Opt_pcrinfo, "pcrinfo=%s"}, + {Opt_pcrlock, "pcrlock=%s"}, + {Opt_migratable, "migratable=%s"}, + {Opt_err, NULL} +}; + +/* can have zero or more token= options */ +static int getoptions(char *c, struct trusted_key_payload *pay, + struct trusted_key_options *opt) +{ + substring_t args[MAX_OPT_ARGS]; + char *p = c; + int token; + int res; + unsigned long handle; + unsigned long lock; + + while ((p = strsep(&c, " \t"))) { + if (*p == '\0' || *p == ' ' || *p == '\t') + continue; + token = match_token(p, key_tokens, args); + + switch (token) { + case Opt_pcrinfo: + opt->pcrinfo_len = strlen(args[0].from) / 2; + if (opt->pcrinfo_len > MAX_PCRINFO_SIZE) + return -EINVAL; + res = hex2bin(opt->pcrinfo, args[0].from, + opt->pcrinfo_len); + if (res < 0) + return -EINVAL; + break; + case Opt_keyhandle: + res = kstrtoul(args[0].from, 16, &handle); + if (res < 0) + return -EINVAL; + opt->keytype = SEAL_keytype; + opt->keyhandle = handle; + break; + case Opt_keyauth: + if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) + return -EINVAL; + res = hex2bin(opt->keyauth, args[0].from, + SHA1_DIGEST_SIZE); + if (res < 0) + return -EINVAL; + break; + case Opt_blobauth: + if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE) + return -EINVAL; + res = hex2bin(opt->blobauth, args[0].from, + SHA1_DIGEST_SIZE); + if (res < 0) + return -EINVAL; + break; + case Opt_migratable: + if (*args[0].from == '0') + pay->migratable = 0; + else + return -EINVAL; + break; + case Opt_pcrlock: + res = kstrtoul(args[0].from, 10, &lock); + if (res < 0) + return -EINVAL; + opt->pcrlock = lock; + break; + default: + return -EINVAL; + } + } + return 0; +} + +/* + * datablob_parse - parse the keyctl data and fill in the + * payload and options structures + * + * On success returns 0, otherwise -EINVAL. 
+ */ +static int datablob_parse(char *datablob, struct trusted_key_payload *p, + struct trusted_key_options *o) +{ + substring_t args[MAX_OPT_ARGS]; + long keylen; + int ret = -EINVAL; + int key_cmd; + char *c; + + /* main command */ + c = strsep(&datablob, " \t"); + if (!c) + return -EINVAL; + key_cmd = match_token(c, key_tokens, args); + switch (key_cmd) { + case Opt_new: + /* first argument is key size */ + c = strsep(&datablob, " \t"); + if (!c) + return -EINVAL; + ret = kstrtol(c, 10, &keylen); + if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE) + return -EINVAL; + p->key_len = keylen; + ret = getoptions(datablob, p, o); + if (ret < 0) + return ret; + ret = Opt_new; + break; + case Opt_load: + /* first argument is sealed blob */ + c = strsep(&datablob, " \t"); + if (!c) + return -EINVAL; + p->blob_len = strlen(c) / 2; + if (p->blob_len > MAX_BLOB_SIZE) + return -EINVAL; + ret = hex2bin(p->blob, c, p->blob_len); + if (ret < 0) + return -EINVAL; + ret = getoptions(datablob, p, o); + if (ret < 0) + return ret; + ret = Opt_load; + break; + case Opt_update: + /* all arguments are options */ + ret = getoptions(datablob, p, o); + if (ret < 0) + return ret; + ret = Opt_update; + break; + case Opt_err: + return -EINVAL; + break; + } + return ret; +} + +static struct trusted_key_options *trusted_options_alloc(void) +{ + struct trusted_key_options *options; + + options = kzalloc(sizeof *options, GFP_KERNEL); + if (options) { + /* set any non-zero defaults */ + options->keytype = SRK_keytype; + options->keyhandle = SRKHANDLE; + } + return options; +} + +static struct trusted_key_payload *trusted_payload_alloc(struct key *key) +{ + struct trusted_key_payload *p = NULL; + int ret; + + ret = key_payload_reserve(key, sizeof *p); + if (ret < 0) + return p; + p = kzalloc(sizeof *p, GFP_KERNEL); + if (p) + p->migratable = 1; /* migratable by default */ + return p; +} + +/* + * trusted_instantiate - create a new trusted key + * + * Unseal an existing trusted blob or, for a new key, get a + * random key, then seal and create a trusted key-type key, + * adding it to the specified keyring. + * + * On success, return 0. Otherwise return errno. 
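Illustrative aside, not part of the patch: from userspace the payload grammar parsed by datablob_parse() and getoptions() above is passed straight through add_key(). A sketch assuming libkeyutils; the description "kmk" is arbitrary, the default SRK keyhandle is used, and options such as keyhandle=, pcrinfo= or pcrlock= would simply be appended to the payload string.

#include <keyutils.h>
#include <string.h>

int main(void)
{
        key_serial_t id;
        const char *payload = "new 32";         /* 32-byte random key */

        /* trusted_instantiate() generates the key and seals it in the TPM. */
        id = add_key("trusted", "kmk", payload, strlen(payload),
                     KEY_SPEC_USER_KEYRING);
        return id < 0 ? 1 : 0;
}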
+ */ +static int trusted_instantiate(struct key *key, + struct key_preparsed_payload *prep) +{ + struct trusted_key_payload *payload = NULL; + struct trusted_key_options *options = NULL; + size_t datalen = prep->datalen; + char *datablob; + int ret = 0; + int key_cmd; + size_t key_len; + + if (datalen <= 0 || datalen > 32767 || !prep->data) + return -EINVAL; + + datablob = kmalloc(datalen + 1, GFP_KERNEL); + if (!datablob) + return -ENOMEM; + memcpy(datablob, prep->data, datalen); + datablob[datalen] = '\0'; + + options = trusted_options_alloc(); + if (!options) { + ret = -ENOMEM; + goto out; + } + payload = trusted_payload_alloc(key); + if (!payload) { + ret = -ENOMEM; + goto out; + } + + key_cmd = datablob_parse(datablob, payload, options); + if (key_cmd < 0) { + ret = key_cmd; + goto out; + } + + dump_payload(payload); + dump_options(options); + + switch (key_cmd) { + case Opt_load: + ret = key_unseal(payload, options); + dump_payload(payload); + dump_options(options); + if (ret < 0) + pr_info("trusted_key: key_unseal failed (%d)\n", ret); + break; + case Opt_new: + key_len = payload->key_len; + ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len); + if (ret != key_len) { + pr_info("trusted_key: key_create failed (%d)\n", ret); + goto out; + } + ret = key_seal(payload, options); + if (ret < 0) + pr_info("trusted_key: key_seal failed (%d)\n", ret); + break; + default: + ret = -EINVAL; + goto out; + } + if (!ret && options->pcrlock) + ret = pcrlock(options->pcrlock); +out: + kfree(datablob); + kfree(options); + if (!ret) + rcu_assign_keypointer(key, payload); + else + kfree(payload); + return ret; +} + +static void trusted_rcu_free(struct rcu_head *rcu) +{ + struct trusted_key_payload *p; + + p = container_of(rcu, struct trusted_key_payload, rcu); + memset(p->key, 0, p->key_len); + kfree(p); +} + +/* + * trusted_update - reseal an existing key with new PCR values + */ +static int trusted_update(struct key *key, struct key_preparsed_payload *prep) +{ + struct trusted_key_payload *p = key->payload.data; + struct trusted_key_payload *new_p; + struct trusted_key_options *new_o; + size_t datalen = prep->datalen; + char *datablob; + int ret = 0; + + if (!p->migratable) + return -EPERM; + if (datalen <= 0 || datalen > 32767 || !prep->data) + return -EINVAL; + + datablob = kmalloc(datalen + 1, GFP_KERNEL); + if (!datablob) + return -ENOMEM; + new_o = trusted_options_alloc(); + if (!new_o) { + ret = -ENOMEM; + goto out; + } + new_p = trusted_payload_alloc(key); + if (!new_p) { + ret = -ENOMEM; + goto out; + } + + memcpy(datablob, prep->data, datalen); + datablob[datalen] = '\0'; + ret = datablob_parse(datablob, new_p, new_o); + if (ret != Opt_update) { + ret = -EINVAL; + kfree(new_p); + goto out; + } + /* copy old key values, and reseal with new pcrs */ + new_p->migratable = p->migratable; + new_p->key_len = p->key_len; + memcpy(new_p->key, p->key, p->key_len); + dump_payload(p); + dump_payload(new_p); + + ret = key_seal(new_p, new_o); + if (ret < 0) { + pr_info("trusted_key: key_seal failed (%d)\n", ret); + kfree(new_p); + goto out; + } + if (new_o->pcrlock) { + ret = pcrlock(new_o->pcrlock); + if (ret < 0) { + pr_info("trusted_key: pcrlock failed (%d)\n", ret); + kfree(new_p); + goto out; + } + } + rcu_assign_keypointer(key, new_p); + call_rcu(&p->rcu, trusted_rcu_free); +out: + kfree(datablob); + kfree(new_o); + return ret; +} + +/* + * trusted_read - copy the sealed blob data to userspace in hex. + * On success, return to userspace the trusted key datablob size. 
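Illustrative aside, not part of the patch: trusted_read() only ever exposes the sealed blob, hex-encoded, and that same hex string is what the "load" command accepts later. A sketch assuming libkeyutils and the key created in the previous sketch; error handling is minimal.

#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
        key_serial_t id;
        void *hexblob;
        char *payload;
        long len;

        id = request_key("trusted", "kmk", NULL, KEY_SPEC_USER_KEYRING);
        if (id < 0)
                return 1;

        /* Hex-encoded sealed blob from trusted_read(); never the raw key. */
        len = keyctl_read_alloc(id, &hexblob);
        if (len < 0)
                return 1;

        /* On a later boot the blob recreates the key via the "load" path. */
        payload = malloc(len + sizeof("load "));
        if (!payload)
                return 1;
        sprintf(payload, "load %.*s", (int)len, (char *)hexblob);
        id = add_key("trusted", "kmk", payload, strlen(payload),
                     KEY_SPEC_USER_KEYRING);

        free(payload);
        free(hexblob);
        return id < 0 ? 1 : 0;
}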
+ */ +static long trusted_read(const struct key *key, char __user *buffer, + size_t buflen) +{ + struct trusted_key_payload *p; + char *ascii_buf; + char *bufp; + int i; + + p = rcu_dereference_key(key); + if (!p) + return -EINVAL; + if (!buffer || buflen <= 0) + return 2 * p->blob_len; + ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL); + if (!ascii_buf) + return -ENOMEM; + + bufp = ascii_buf; + for (i = 0; i < p->blob_len; i++) + bufp = hex_byte_pack(bufp, p->blob[i]); + if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) { + kfree(ascii_buf); + return -EFAULT; + } + kfree(ascii_buf); + return 2 * p->blob_len; +} + +/* + * trusted_destroy - before freeing the key, clear the decrypted data + */ +static void trusted_destroy(struct key *key) +{ + struct trusted_key_payload *p = key->payload.data; + + if (!p) + return; + memset(p->key, 0, p->key_len); + kfree(key->payload.data); +} + +struct key_type key_type_trusted = { + .name = "trusted", + .instantiate = trusted_instantiate, + .update = trusted_update, + .match = user_match, + .destroy = trusted_destroy, + .describe = user_describe, + .read = trusted_read, +}; + +EXPORT_SYMBOL_GPL(key_type_trusted); + +static void trusted_shash_release(void) +{ + if (hashalg) + crypto_free_shash(hashalg); + if (hmacalg) + crypto_free_shash(hmacalg); +} + +static int __init trusted_shash_alloc(void) +{ + int ret; + + hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hmacalg)) { + pr_info("trusted_key: could not allocate crypto %s\n", + hmac_alg); + return PTR_ERR(hmacalg); + } + + hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC); + if (IS_ERR(hashalg)) { + pr_info("trusted_key: could not allocate crypto %s\n", + hash_alg); + ret = PTR_ERR(hashalg); + goto hashalg_fail; + } + + return 0; + +hashalg_fail: + crypto_free_shash(hmacalg); + return ret; +} + +static int __init init_trusted(void) +{ + int ret; + + ret = trusted_shash_alloc(); + if (ret < 0) + return ret; + ret = register_key_type(&key_type_trusted); + if (ret < 0) + trusted_shash_release(); + return ret; +} + +static void __exit cleanup_trusted(void) +{ + trusted_shash_release(); + unregister_key_type(&key_type_trusted); +} + +late_initcall(init_trusted); +module_exit(cleanup_trusted); + +MODULE_LICENSE("GPL"); diff --git a/security/keys/trusted.h b/security/keys/trusted.h new file mode 100644 index 00000000000..3249fbd2b65 --- /dev/null +++ b/security/keys/trusted.h @@ -0,0 +1,134 @@ +#ifndef __TRUSTED_KEY_H +#define __TRUSTED_KEY_H + +/* implementation specific TPM constants */ +#define MAX_PCRINFO_SIZE 64 +#define MAX_BUF_SIZE 512 +#define TPM_GETRANDOM_SIZE 14 +#define TPM_OSAP_SIZE 36 +#define TPM_OIAP_SIZE 10 +#define TPM_SEAL_SIZE 87 +#define TPM_UNSEAL_SIZE 104 +#define TPM_SIZE_OFFSET 2 +#define TPM_RETURN_OFFSET 6 +#define TPM_DATA_OFFSET 10 + +#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset])) +#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset]) +#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset])) + +struct tpm_buf { + int len; + unsigned char data[MAX_BUF_SIZE]; +}; + +#define INIT_BUF(tb) (tb->len = 0) + +struct osapsess { + uint32_t handle; + unsigned char secret[SHA1_DIGEST_SIZE]; + unsigned char enonce[TPM_NONCE_SIZE]; +}; + +/* discrete values, but have to store in uint16_t for TPM use */ +enum { + SEAL_keytype = 1, + SRK_keytype = 4 +}; + +struct trusted_key_options { + uint16_t keytype; + uint32_t keyhandle; + unsigned char keyauth[SHA1_DIGEST_SIZE]; + unsigned char 
blobauth[SHA1_DIGEST_SIZE]; + uint32_t pcrinfo_len; + unsigned char pcrinfo[MAX_PCRINFO_SIZE]; + int pcrlock; +}; + +#define TPM_DEBUG 0 + +#if TPM_DEBUG +static inline void dump_options(struct trusted_key_options *o) +{ + pr_info("trusted_key: sealing key type %d\n", o->keytype); + pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle); + pr_info("trusted_key: pcrlock %d\n", o->pcrlock); + pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len); + print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE, + 16, 1, o->pcrinfo, o->pcrinfo_len, 0); +} + +static inline void dump_payload(struct trusted_key_payload *p) +{ + pr_info("trusted_key: key_len %d\n", p->key_len); + print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE, + 16, 1, p->key, p->key_len, 0); + pr_info("trusted_key: bloblen %d\n", p->blob_len); + print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE, + 16, 1, p->blob, p->blob_len, 0); + pr_info("trusted_key: migratable %d\n", p->migratable); +} + +static inline void dump_sess(struct osapsess *s) +{ + print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE, + 16, 1, &s->handle, 4, 0); + pr_info("trusted-key: secret:\n"); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, + 16, 1, &s->secret, SHA1_DIGEST_SIZE, 0); + pr_info("trusted-key: enonce:\n"); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, + 16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0); +} + +static inline void dump_tpm_buf(unsigned char *buf) +{ + int len; + + pr_info("\ntrusted-key: tpm buffer\n"); + len = LOAD32(buf, TPM_SIZE_OFFSET); + print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0); +} +#else +static inline void dump_options(struct trusted_key_options *o) +{ +} + +static inline void dump_payload(struct trusted_key_payload *p) +{ +} + +static inline void dump_sess(struct osapsess *s) +{ +} + +static inline void dump_tpm_buf(unsigned char *buf) +{ +} +#endif + +static inline void store8(struct tpm_buf *buf, const unsigned char value) +{ + buf->data[buf->len++] = value; +} + +static inline void store16(struct tpm_buf *buf, const uint16_t value) +{ + *(uint16_t *) & buf->data[buf->len] = htons(value); + buf->len += sizeof value; +} + +static inline void store32(struct tpm_buf *buf, const uint32_t value) +{ + *(uint32_t *) & buf->data[buf->len] = htonl(value); + buf->len += sizeof value; +} + +static inline void storebytes(struct tpm_buf *buf, const unsigned char *in, + const int len) +{ + memcpy(buf->data + buf->len, in, len); + buf->len += len; +} +#endif diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c index e9aa0792965..faa2caeb593 100644 --- a/security/keys/user_defined.c +++ b/security/keys/user_defined.c @@ -18,34 +18,56 @@ #include <asm/uaccess.h> #include "internal.h" +static int logon_vet_description(const char *desc); + /* * user defined keys take an arbitrary string as the description and an * arbitrary blob of data as the payload */ struct key_type key_type_user = { - .name = "user", - .instantiate = user_instantiate, - .update = user_update, - .match = user_match, - .revoke = user_revoke, - .destroy = user_destroy, - .describe = user_describe, - .read = user_read, + .name = "user", + .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, + .instantiate = user_instantiate, + .update = user_update, + .match = user_match, + .revoke = user_revoke, + .destroy = user_destroy, + .describe = user_describe, + .read = user_read, }; EXPORT_SYMBOL_GPL(key_type_user); -/*****************************************************************************/ +/* + * This key type is 
essentially the same as key_type_user, but it does + * not define a .read op. This is suitable for storing username and + * password pairs in the keyring that you do not want to be readable + * from userspace. + */ +struct key_type key_type_logon = { + .name = "logon", + .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, + .instantiate = user_instantiate, + .update = user_update, + .match = user_match, + .revoke = user_revoke, + .destroy = user_destroy, + .describe = user_describe, + .vet_description = logon_vet_description, +}; +EXPORT_SYMBOL_GPL(key_type_logon); + /* * instantiate a user defined key */ -int user_instantiate(struct key *key, const void *data, size_t datalen) +int user_instantiate(struct key *key, struct key_preparsed_payload *prep) { struct user_key_payload *upayload; + size_t datalen = prep->datalen; int ret; ret = -EINVAL; - if (datalen <= 0 || datalen > 32767 || !data) + if (datalen <= 0 || datalen > 32767 || !prep->data) goto error; ret = key_payload_reserve(key, datalen); @@ -59,43 +81,28 @@ int user_instantiate(struct key *key, const void *data, size_t datalen) /* attach the data */ upayload->datalen = datalen; - memcpy(upayload->data, data, datalen); - rcu_assign_pointer(key->payload.data, upayload); + memcpy(upayload->data, prep->data, datalen); + rcu_assign_keypointer(key, upayload); ret = 0; error: return ret; - -} /* end user_instantiate() */ +} EXPORT_SYMBOL_GPL(user_instantiate); -/*****************************************************************************/ -/* - * dispose of the old data from an updated user defined key - */ -static void user_update_rcu_disposal(struct rcu_head *rcu) -{ - struct user_key_payload *upayload; - - upayload = container_of(rcu, struct user_key_payload, rcu); - - kfree(upayload); - -} /* end user_update_rcu_disposal() */ - -/*****************************************************************************/ /* * update a user defined key * - the key's semaphore is write-locked */ -int user_update(struct key *key, const void *data, size_t datalen) +int user_update(struct key *key, struct key_preparsed_payload *prep) { struct user_key_payload *upayload, *zap; + size_t datalen = prep->datalen; int ret; ret = -EINVAL; - if (datalen <= 0 || datalen > 32767 || !data) + if (datalen <= 0 || datalen > 32767 || !prep->data) goto error; /* construct a replacement payload */ @@ -105,7 +112,7 @@ int user_update(struct key *key, const void *data, size_t datalen) goto error; upayload->datalen = datalen; - memcpy(upayload->data, data, datalen); + memcpy(upayload->data, prep->data, datalen); /* check the quota and attach the new data */ zap = upayload; @@ -115,32 +122,29 @@ int user_update(struct key *key, const void *data, size_t datalen) if (ret == 0) { /* attach the new data, displacing the old */ zap = key->payload.data; - rcu_assign_pointer(key->payload.data, upayload); + rcu_assign_keypointer(key, upayload); key->expiry = 0; } - call_rcu(&zap->rcu, user_update_rcu_disposal); + if (zap) + kfree_rcu(zap, rcu); error: return ret; - -} /* end user_update() */ +} EXPORT_SYMBOL_GPL(user_update); -/*****************************************************************************/ /* * match users on their name */ int user_match(const struct key *key, const void *description) { return strcmp(key->description, description) == 0; - -} /* end user_match() */ +} EXPORT_SYMBOL_GPL(user_match); -/*****************************************************************************/ /* * dispose of the links from a revoked keyring * - called with the key sem write-locked @@ 
-153,15 +157,13 @@ void user_revoke(struct key *key) key_payload_reserve(key, 0); if (upayload) { - rcu_assign_pointer(key->payload.data, NULL); - call_rcu(&upayload->rcu, user_update_rcu_disposal); + rcu_assign_keypointer(key, NULL); + kfree_rcu(upayload, rcu); } - -} /* end user_revoke() */ +} EXPORT_SYMBOL(user_revoke); -/*****************************************************************************/ /* * dispose of the data dangling from the corpse of a user key */ @@ -170,26 +172,22 @@ void user_destroy(struct key *key) struct user_key_payload *upayload = key->payload.data; kfree(upayload); - -} /* end user_destroy() */ +} EXPORT_SYMBOL_GPL(user_destroy); -/*****************************************************************************/ /* * describe the user key */ void user_describe(const struct key *key, struct seq_file *m) { seq_puts(m, key->description); - - seq_printf(m, ": %u", key->datalen); - -} /* end user_describe() */ + if (key_is_instantiated(key)) + seq_printf(m, ": %u", key->datalen); +} EXPORT_SYMBOL_GPL(user_describe); -/*****************************************************************************/ /* * read the key data * - the key's semaphore is read-locked @@ -199,8 +197,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) struct user_key_payload *upayload; long ret; - upayload = rcu_dereference_protected( - key->payload.data, rwsem_is_locked(&((struct key *)key)->sem)); + upayload = rcu_dereference_key(key); ret = upayload->datalen; /* we can return the data as is */ @@ -213,7 +210,23 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen) } return ret; - -} /* end user_read() */ +} EXPORT_SYMBOL_GPL(user_read); + +/* Vet the description for a "logon" key */ +static int logon_vet_description(const char *desc) +{ + char *p; + + /* require a "qualified" description string */ + p = strchr(desc, ':'); + if (!p) + return -EINVAL; + + /* also reject description with ':' as first char */ + if (p == desc) + return -EINVAL; + + return 0; +} diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 908aa712816..69fdf3bc765 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -49,8 +49,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb, if (ih == NULL) return -EINVAL; - ad->u.net.v4info.saddr = ih->saddr; - ad->u.net.v4info.daddr = ih->daddr; + ad->u.net->v4info.saddr = ih->saddr; + ad->u.net->v4info.daddr = ih->daddr; if (proto) *proto = ih->protocol; @@ -64,8 +64,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb, if (th == NULL) break; - ad->u.net.sport = th->source; - ad->u.net.dport = th->dest; + ad->u.net->sport = th->source; + ad->u.net->dport = th->dest; break; } case IPPROTO_UDP: { @@ -73,8 +73,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb, if (uh == NULL) break; - ad->u.net.sport = uh->source; - ad->u.net.dport = uh->dest; + ad->u.net->sport = uh->source; + ad->u.net->dport = uh->dest; break; } case IPPROTO_DCCP: { @@ -82,16 +82,16 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb, if (dh == NULL) break; - ad->u.net.sport = dh->dccph_sport; - ad->u.net.dport = dh->dccph_dport; + ad->u.net->sport = dh->dccph_sport; + ad->u.net->dport = dh->dccph_dport; break; } case IPPROTO_SCTP: { struct sctphdr *sh = sctp_hdr(skb); if (sh == NULL) break; - ad->u.net.sport = sh->source; - ad->u.net.dport = sh->dest; + ad->u.net->sport = sh->source; + ad->u.net->dport = sh->dest; break; } default: @@ -114,19 +114,20 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb, int offset, ret = 0; struct ipv6hdr *ip6; u8 nexthdr; + 
__be16 frag_off; ip6 = ipv6_hdr(skb); if (ip6 == NULL) return -EINVAL; - ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr); - ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr); + ad->u.net->v6info.saddr = ip6->saddr; + ad->u.net->v6info.daddr = ip6->daddr; ret = 0; /* IPv6 can have several extension header before the Transport header * skip them */ offset = skb_network_offset(skb); offset += sizeof(*ip6); nexthdr = ip6->nexthdr; - offset = ipv6_skip_exthdr(skb, offset, &nexthdr); + offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); if (offset < 0) return 0; if (proto) @@ -139,8 +140,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb, if (th == NULL) break; - ad->u.net.sport = th->source; - ad->u.net.dport = th->dest; + ad->u.net->sport = th->source; + ad->u.net->dport = th->dest; break; } case IPPROTO_UDP: { @@ -150,8 +151,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb, if (uh == NULL) break; - ad->u.net.sport = uh->source; - ad->u.net.dport = uh->dest; + ad->u.net->sport = uh->source; + ad->u.net->dport = uh->dest; break; } case IPPROTO_DCCP: { @@ -161,8 +162,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb, if (dh == NULL) break; - ad->u.net.sport = dh->dccph_sport; - ad->u.net.dport = dh->dccph_dport; + ad->u.net->sport = dh->dccph_sport; + ad->u.net->dport = dh->dccph_dport; break; } case IPPROTO_SCTP: { @@ -171,8 +172,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb, sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph); if (sh == NULL) break; - ad->u.net.sport = sh->source; - ad->u.net.dport = sh->dest; + ad->u.net->sport = sh->source; + ad->u.net->dport = sh->dest; break; } default: @@ -210,15 +211,17 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr, static void dump_common_audit_data(struct audit_buffer *ab, struct common_audit_data *a) { - struct inode *inode = NULL; struct task_struct *tsk = current; - if (a->tsk) - tsk = a->tsk; - if (tsk && tsk->pid) { - audit_log_format(ab, " pid=%d comm=", tsk->pid); - audit_log_untrustedstring(ab, tsk->comm); - } + /* + * To keep stack sizes in check force programers to notice if they + * start making this union too large! See struct lsm_network_audit + * as an example of how to deal with large data. 
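Illustrative aside, not part of the patch: on the caller side, the pattern this comment asks for keeps the bulky network details in a stack-local struct lsm_network_audit, with the union carrying only a pointer to it. A sketch of one way a hook might use the reworked interface; the hook and the pre/post callbacks are placeholders.

#include <linux/lsm_audit.h>
#include <net/sock.h>

static void example_pre_audit(struct audit_buffer *ab, void *a)  { }
static void example_post_audit(struct audit_buffer *ab, void *a) { }

static void example_audit_sock(struct sock *sk)
{
        struct lsm_network_audit net = { .sk = sk, .family = sk->sk_family };
        struct common_audit_data ad;

        /* Only a pointer goes into the union, keeping it two words wide. */
        ad.type = LSM_AUDIT_DATA_NET;
        ad.u.net = &net;

        common_lsm_audit(&ad, example_pre_audit, example_post_audit);
}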
+ */ + BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2); + + audit_log_format(ab, " pid=%d comm=", task_pid_nr(tsk)); + audit_log_untrustedstring(ab, tsk->comm); switch (a->type) { case LSM_AUDIT_DATA_NONE: @@ -229,43 +232,63 @@ static void dump_common_audit_data(struct audit_buffer *ab, case LSM_AUDIT_DATA_CAP: audit_log_format(ab, " capability=%d ", a->u.cap); break; - case LSM_AUDIT_DATA_FS: - if (a->u.fs.path.dentry) { - struct dentry *dentry = a->u.fs.path.dentry; - if (a->u.fs.path.mnt) { - audit_log_d_path(ab, "path=", &a->u.fs.path); - } else { - audit_log_format(ab, " name="); - audit_log_untrustedstring(ab, - dentry->d_name.name); - } - inode = dentry->d_inode; - } else if (a->u.fs.inode) { - struct dentry *dentry; - inode = a->u.fs.inode; - dentry = d_find_alias(inode); - if (dentry) { - audit_log_format(ab, " name="); - audit_log_untrustedstring(ab, - dentry->d_name.name); - dput(dentry); - } + case LSM_AUDIT_DATA_PATH: { + struct inode *inode; + + audit_log_d_path(ab, " path=", &a->u.path); + + inode = a->u.path.dentry->d_inode; + if (inode) { + audit_log_format(ab, " dev="); + audit_log_untrustedstring(ab, inode->i_sb->s_id); + audit_log_format(ab, " ino=%lu", inode->i_ino); } - if (inode) - audit_log_format(ab, " dev=%s ino=%lu", - inode->i_sb->s_id, - inode->i_ino); break; + } + case LSM_AUDIT_DATA_DENTRY: { + struct inode *inode; + + audit_log_format(ab, " name="); + audit_log_untrustedstring(ab, a->u.dentry->d_name.name); + + inode = a->u.dentry->d_inode; + if (inode) { + audit_log_format(ab, " dev="); + audit_log_untrustedstring(ab, inode->i_sb->s_id); + audit_log_format(ab, " ino=%lu", inode->i_ino); + } + break; + } + case LSM_AUDIT_DATA_INODE: { + struct dentry *dentry; + struct inode *inode; + + inode = a->u.inode; + dentry = d_find_alias(inode); + if (dentry) { + audit_log_format(ab, " name="); + audit_log_untrustedstring(ab, + dentry->d_name.name); + dput(dentry); + } + audit_log_format(ab, " dev="); + audit_log_untrustedstring(ab, inode->i_sb->s_id); + audit_log_format(ab, " ino=%lu", inode->i_ino); + break; + } case LSM_AUDIT_DATA_TASK: tsk = a->u.tsk; - if (tsk && tsk->pid) { - audit_log_format(ab, " pid=%d comm=", tsk->pid); - audit_log_untrustedstring(ab, tsk->comm); + if (tsk) { + pid_t pid = task_pid_nr(tsk); + if (pid) { + audit_log_format(ab, " pid=%d comm=", pid); + audit_log_untrustedstring(ab, tsk->comm); + } } break; case LSM_AUDIT_DATA_NET: - if (a->u.net.sk) { - struct sock *sk = a->u.net.sk; + if (a->u.net->sk) { + struct sock *sk = a->u.net->sk; struct unix_sock *u; int len = 0; char *p = NULL; @@ -282,26 +305,23 @@ static void dump_common_audit_data(struct audit_buffer *ab, "faddr", "fport"); break; } +#if IS_ENABLED(CONFIG_IPV6) case AF_INET6: { struct inet_sock *inet = inet_sk(sk); - struct ipv6_pinfo *inet6 = inet6_sk(sk); - print_ipv6_addr(ab, &inet6->rcv_saddr, + print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr, inet->inet_sport, "laddr", "lport"); - print_ipv6_addr(ab, &inet6->daddr, + print_ipv6_addr(ab, &sk->sk_v6_daddr, inet->inet_dport, "faddr", "fport"); break; } +#endif case AF_UNIX: u = unix_sk(sk); - if (u->dentry) { - struct path path = { - .dentry = u->dentry, - .mnt = u->mnt - }; - audit_log_d_path(ab, "path=", &path); + if (u->path.dentry) { + audit_log_d_path(ab, " path=", &u->path); break; } if (!u->addr) @@ -317,29 +337,29 @@ static void dump_common_audit_data(struct audit_buffer *ab, } } - switch (a->u.net.family) { + switch (a->u.net->family) { case AF_INET: - print_ipv4_addr(ab, a->u.net.v4info.saddr, - a->u.net.sport, + 
print_ipv4_addr(ab, a->u.net->v4info.saddr, + a->u.net->sport, "saddr", "src"); - print_ipv4_addr(ab, a->u.net.v4info.daddr, - a->u.net.dport, + print_ipv4_addr(ab, a->u.net->v4info.daddr, + a->u.net->dport, "daddr", "dest"); break; case AF_INET6: - print_ipv6_addr(ab, &a->u.net.v6info.saddr, - a->u.net.sport, + print_ipv6_addr(ab, &a->u.net->v6info.saddr, + a->u.net->sport, "saddr", "src"); - print_ipv6_addr(ab, &a->u.net.v6info.daddr, - a->u.net.dport, + print_ipv6_addr(ab, &a->u.net->v6info.daddr, + a->u.net->dport, "daddr", "dest"); break; } - if (a->u.net.netif > 0) { + if (a->u.net->netif > 0) { struct net_device *dev; /* NOTE: we always use init's namespace */ - dev = dev_get_by_index(&init_net, a->u.net.netif); + dev = dev_get_by_index(&init_net, a->u.net->netif); if (dev) { audit_log_format(ab, " netif=%s", dev->name); dev_put(dev); @@ -365,29 +385,34 @@ static void dump_common_audit_data(struct audit_buffer *ab, /** * common_lsm_audit - generic LSM auditing function * @a: auxiliary audit data + * @pre_audit: lsm-specific pre-audit callback + * @post_audit: lsm-specific post-audit callback * * setup the audit buffer for common security information * uses callback to print LSM specific information */ -void common_lsm_audit(struct common_audit_data *a) +void common_lsm_audit(struct common_audit_data *a, + void (*pre_audit)(struct audit_buffer *, void *), + void (*post_audit)(struct audit_buffer *, void *)) { struct audit_buffer *ab; if (a == NULL) return; /* we use GFP_ATOMIC so we won't sleep */ - ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC); + ab = audit_log_start(current->audit_context, GFP_ATOMIC | __GFP_NOWARN, + AUDIT_AVC); if (ab == NULL) return; - if (a->lsm_pre_audit) - a->lsm_pre_audit(ab, a); + if (pre_audit) + pre_audit(ab, a); dump_common_audit_data(ab, a); - if (a->lsm_post_audit) - a->lsm_post_audit(ab, a); + if (post_audit) + post_audit(ab, a); audit_log_end(ab); } diff --git a/security/security.c b/security/security.c index 3ef5e2a7a74..31614e9e96e 100644 --- a/security/security.c +++ b/security/security.c @@ -12,19 +12,27 @@ */ #include <linux/capability.h> +#include <linux/dcache.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/security.h> +#include <linux/integrity.h> #include <linux/ima.h> +#include <linux/evm.h> +#include <linux/fsnotify.h> +#include <linux/mman.h> +#include <linux/mount.h> +#include <linux/personality.h> +#include <linux/backing-dev.h> +#include <net/flow.h> + +#define MAX_LSM_EVM_XATTR 2 /* Boot-time LSM user choice */ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = CONFIG_DEFAULT_SECURITY; -/* things that live in capability.c */ -extern void __init security_fixup_ops(struct security_operations *ops); - static struct security_operations *security_ops; static struct security_operations default_security_ops = { .name = "default", @@ -129,11 +137,23 @@ int __init register_security(struct security_operations *ops) int security_ptrace_access_check(struct task_struct *child, unsigned int mode) { +#ifdef CONFIG_SECURITY_YAMA_STACKED + int rc; + rc = yama_ptrace_access_check(child, mode); + if (rc) + return rc; +#endif return security_ops->ptrace_access_check(child, mode); } int security_ptrace_traceme(struct task_struct *parent) { +#ifdef CONFIG_SECURITY_YAMA_STACKED + int rc; + rc = yama_ptrace_traceme(parent); + if (rc) + return rc; +#endif return security_ops->ptrace_traceme(parent); } @@ -154,37 +174,16 @@ int security_capset(struct cred *new, const struct cred *old, 
effective, inheritable, permitted); } -int security_capable(int cap) -{ - return security_ops->capable(current, current_cred(), cap, - SECURITY_CAP_AUDIT); -} - -int security_real_capable(struct task_struct *tsk, int cap) +int security_capable(const struct cred *cred, struct user_namespace *ns, + int cap) { - const struct cred *cred; - int ret; - - cred = get_task_cred(tsk); - ret = security_ops->capable(tsk, cred, cap, SECURITY_CAP_AUDIT); - put_cred(cred); - return ret; -} - -int security_real_capable_noaudit(struct task_struct *tsk, int cap) -{ - const struct cred *cred; - int ret; - - cred = get_task_cred(tsk); - ret = security_ops->capable(tsk, cred, cap, SECURITY_CAP_NOAUDIT); - put_cred(cred); - return ret; + return security_ops->capable(cred, ns, cap, SECURITY_CAP_AUDIT); } -int security_sysctl(struct ctl_table *table, int op) +int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns, + int cap) { - return security_ops->sysctl(table, op); + return security_ops->capable(cred, ns, cap, SECURITY_CAP_NOAUDIT); } int security_quotactl(int cmds, int type, int id, struct super_block *sb) @@ -197,35 +196,21 @@ int security_quota_on(struct dentry *dentry) return security_ops->quota_on(dentry); } -int security_syslog(int type, bool from_file) +int security_syslog(int type) { - return security_ops->syslog(type, from_file); + return security_ops->syslog(type); } -int security_settime(struct timespec *ts, struct timezone *tz) +int security_settime(const struct timespec *ts, const struct timezone *tz) { return security_ops->settime(ts, tz); } -int security_vm_enough_memory(long pages) -{ - WARN_ON(current->mm == NULL); - return security_ops->vm_enough_memory(current->mm, pages); -} - int security_vm_enough_memory_mm(struct mm_struct *mm, long pages) { - WARN_ON(mm == NULL); return security_ops->vm_enough_memory(mm, pages); } -int security_vm_enough_memory_kern(long pages) -{ - /* If current->mm is a kernel thread then we will pass NULL, - for this specific case that is fine */ - return security_ops->vm_enough_memory(current->mm, pages); -} - int security_bprm_set_creds(struct linux_binprm *bprm) { return security_ops->bprm_set_creds(bprm); @@ -272,6 +257,11 @@ int security_sb_copy_data(char *orig, char *copy) } EXPORT_SYMBOL(security_sb_copy_data); +int security_sb_remount(struct super_block *sb, void *data) +{ + return security_ops->sb_remount(sb, data); +} + int security_sb_kern_mount(struct super_block *sb, int flags, void *data) { return security_ops->sb_kern_mount(sb, flags, data); @@ -287,8 +277,8 @@ int security_sb_statfs(struct dentry *dentry) return security_ops->sb_statfs(dentry); } -int security_sb_mount(char *dev_name, struct path *path, - char *type, unsigned long flags, void *data) +int security_sb_mount(const char *dev_name, struct path *path, + const char *type, unsigned long flags, void *data) { return security_ops->sb_mount(dev_name, path, type, flags, data); } @@ -304,16 +294,19 @@ int security_sb_pivotroot(struct path *old_path, struct path *new_path) } int security_sb_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts) + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags) { - return security_ops->sb_set_mnt_opts(sb, opts); + return security_ops->sb_set_mnt_opts(sb, opts, kern_flags, + set_kern_flags); } EXPORT_SYMBOL(security_sb_set_mnt_opts); -void security_sb_clone_mnt_opts(const struct super_block *oldsb, +int security_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block 
*newsb) { - security_ops->sb_clone_mnt_opts(oldsb, newsb); + return security_ops->sb_clone_mnt_opts(oldsb, newsb); } EXPORT_SYMBOL(security_sb_clone_mnt_opts); @@ -331,21 +324,67 @@ int security_inode_alloc(struct inode *inode) void security_inode_free(struct inode *inode) { - ima_inode_free(inode); + integrity_inode_free(inode); security_ops->inode_free_security(inode); } +int security_dentry_init_security(struct dentry *dentry, int mode, + struct qstr *name, void **ctx, + u32 *ctxlen) +{ + return security_ops->dentry_init_security(dentry, mode, name, + ctx, ctxlen); +} +EXPORT_SYMBOL(security_dentry_init_security); + int security_inode_init_security(struct inode *inode, struct inode *dir, - char **name, void **value, size_t *len) + const struct qstr *qstr, + const initxattrs initxattrs, void *fs_data) { + struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1]; + struct xattr *lsm_xattr, *evm_xattr, *xattr; + int ret; + if (unlikely(IS_PRIVATE(inode))) - return -EOPNOTSUPP; - return security_ops->inode_init_security(inode, dir, name, value, len); + return 0; + + if (!initxattrs) + return security_ops->inode_init_security(inode, dir, qstr, + NULL, NULL, NULL); + memset(new_xattrs, 0, sizeof(new_xattrs)); + lsm_xattr = new_xattrs; + ret = security_ops->inode_init_security(inode, dir, qstr, + &lsm_xattr->name, + &lsm_xattr->value, + &lsm_xattr->value_len); + if (ret) + goto out; + + evm_xattr = lsm_xattr + 1; + ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr); + if (ret) + goto out; + ret = initxattrs(inode, new_xattrs, fs_data); +out: + for (xattr = new_xattrs; xattr->value != NULL; xattr++) + kfree(xattr->value); + return (ret == -EOPNOTSUPP) ? 0 : ret; } EXPORT_SYMBOL(security_inode_init_security); +int security_old_inode_init_security(struct inode *inode, struct inode *dir, + const struct qstr *qstr, const char **name, + void **value, size_t *len) +{ + if (unlikely(IS_PRIVATE(inode))) + return -EOPNOTSUPP; + return security_ops->inode_init_security(inode, dir, qstr, name, value, + len); +} +EXPORT_SYMBOL(security_old_inode_init_security); + #ifdef CONFIG_SECURITY_PATH -int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, +int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode, unsigned int dev) { if (unlikely(IS_PRIVATE(dir->dentry->d_inode))) @@ -354,12 +393,13 @@ int security_path_mknod(struct path *dir, struct dentry *dentry, int mode, } EXPORT_SYMBOL(security_path_mknod); -int security_path_mkdir(struct path *dir, struct dentry *dentry, int mode) +int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir->dentry->d_inode))) return 0; return security_ops->path_mkdir(dir, dentry, mode); } +EXPORT_SYMBOL(security_path_mkdir); int security_path_rmdir(struct path *dir, struct dentry *dentry) { @@ -374,6 +414,7 @@ int security_path_unlink(struct path *dir, struct dentry *dentry) return 0; return security_ops->path_unlink(dir, dentry); } +EXPORT_SYMBOL(security_path_unlink); int security_path_symlink(struct path *dir, struct dentry *dentry, const char *old_name) @@ -392,14 +433,24 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir, } int security_path_rename(struct path *old_dir, struct dentry *old_dentry, - struct path *new_dir, struct dentry *new_dentry) + struct path *new_dir, struct dentry *new_dentry, + unsigned int flags) { if (unlikely(IS_PRIVATE(old_dentry->d_inode) || (new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode)))) return 0; + + if (flags & 
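Aside (illustrative, not part of this patch): with the callback form of security_inode_init_security() above, the LSM and EVM xattrs are gathered into one array and handed to a filesystem-supplied initxattrs routine so they can be written in a single transaction. A hedged sketch of such a callback; myfs_write_xattr() is a hypothetical stand-in for the filesystem's on-disk xattr writer:

static int myfs_initxattrs(struct inode *inode,
			   const struct xattr *xattr_array, void *fs_data)
{
	const struct xattr *xattr;
	int err = 0;

	/* the array built above is zero-terminated */
	for (xattr = xattr_array; xattr->name != NULL; xattr++) {
		err = myfs_write_xattr(inode, xattr->name,
				       xattr->value, xattr->value_len,
				       fs_data);
		if (err < 0)
			break;
	}
	return err;
}

/* at inode-creation time the filesystem would then call:
 *	security_inode_init_security(inode, dir, qstr, myfs_initxattrs, fs_data);
 */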
RENAME_EXCHANGE) { + int err = security_ops->path_rename(new_dir, new_dentry, + old_dir, old_dentry); + if (err) + return err; + } + return security_ops->path_rename(old_dir, old_dentry, new_dir, new_dentry); } +EXPORT_SYMBOL(security_path_rename); int security_path_truncate(struct path *path) { @@ -408,15 +459,14 @@ int security_path_truncate(struct path *path) return security_ops->path_truncate(path); } -int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt, - mode_t mode) +int security_path_chmod(struct path *path, umode_t mode) { - if (unlikely(IS_PRIVATE(dentry->d_inode))) + if (unlikely(IS_PRIVATE(path->dentry->d_inode))) return 0; - return security_ops->path_chmod(dentry, mnt, mode); + return security_ops->path_chmod(path, mode); } -int security_path_chown(struct path *path, uid_t uid, gid_t gid) +int security_path_chown(struct path *path, kuid_t uid, kgid_t gid) { if (unlikely(IS_PRIVATE(path->dentry->d_inode))) return 0; @@ -429,7 +479,7 @@ int security_path_chroot(struct path *path) } #endif -int security_inode_create(struct inode *dir, struct dentry *dentry, int mode) +int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; @@ -460,7 +510,7 @@ int security_inode_symlink(struct inode *dir, struct dentry *dentry, return security_ops->inode_symlink(dir, dentry, old_name); } -int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode) +int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { if (unlikely(IS_PRIVATE(dir))) return 0; @@ -475,7 +525,7 @@ int security_inode_rmdir(struct inode *dir, struct dentry *dentry) return security_ops->inode_rmdir(dir, dentry); } -int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) +int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { if (unlikely(IS_PRIVATE(dir))) return 0; @@ -483,11 +533,20 @@ int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev } int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry, - struct inode *new_dir, struct dentry *new_dentry) + struct inode *new_dir, struct dentry *new_dentry, + unsigned int flags) { if (unlikely(IS_PRIVATE(old_dentry->d_inode) || (new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode)))) return 0; + + if (flags & RENAME_EXCHANGE) { + int err = security_ops->inode_rename(new_dir, new_dentry, + old_dir, old_dentry); + if (err) + return err; + } + return security_ops->inode_rename(old_dir, old_dentry, new_dir, new_dentry); } @@ -515,9 +574,14 @@ int security_inode_permission(struct inode *inode, int mask) int security_inode_setattr(struct dentry *dentry, struct iattr *attr) { + int ret; + if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; - return security_ops->inode_setattr(dentry, attr); + ret = security_ops->inode_setattr(dentry, attr); + if (ret) + return ret; + return evm_inode_setattr(dentry, attr); } EXPORT_SYMBOL_GPL(security_inode_setattr); @@ -531,9 +595,17 @@ int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) int security_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { + int ret; + if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; - return security_ops->inode_setxattr(dentry, name, value, size, flags); + ret = security_ops->inode_setxattr(dentry, name, value, size, flags); + if (ret) + return ret; + ret = ima_inode_setxattr(dentry, name, value, size); + if (ret) + 
return ret; + return evm_inode_setxattr(dentry, name, value, size); } void security_inode_post_setxattr(struct dentry *dentry, const char *name, @@ -542,6 +614,7 @@ void security_inode_post_setxattr(struct dentry *dentry, const char *name, if (unlikely(IS_PRIVATE(dentry->d_inode))) return; security_ops->inode_post_setxattr(dentry, name, value, size, flags); + evm_inode_post_setxattr(dentry, name, value, size); } int security_inode_getxattr(struct dentry *dentry, const char *name) @@ -560,9 +633,17 @@ int security_inode_listxattr(struct dentry *dentry) int security_inode_removexattr(struct dentry *dentry, const char *name) { + int ret; + if (unlikely(IS_PRIVATE(dentry->d_inode))) return 0; - return security_ops->inode_removexattr(dentry, name); + ret = security_ops->inode_removexattr(dentry, name); + if (ret) + return ret; + ret = ima_inode_removexattr(dentry, name); + if (ret) + return ret; + return evm_inode_removexattr(dentry, name); } int security_inode_need_killpriv(struct dentry *dentry) @@ -595,6 +676,7 @@ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer return 0; return security_ops->inode_listsecurity(inode, buffer, buffer_size); } +EXPORT_SYMBOL(security_inode_listsecurity); void security_inode_getsecid(const struct inode *inode, u32 *secid) { @@ -627,18 +709,56 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return security_ops->file_ioctl(file, cmd, arg); } -int security_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only) +static inline unsigned long mmap_prot(struct file *file, unsigned long prot) { - int ret; + /* + * Does we have PROT_READ and does the application expect + * it to imply PROT_EXEC? If not, nothing to talk about... + */ + if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ) + return prot; + if (!(current->personality & READ_IMPLIES_EXEC)) + return prot; + /* + * if that's an anonymous mapping, let it. 
+ */ + if (!file) + return prot | PROT_EXEC; + /* + * ditto if it's not on noexec mount, except that on !MMU we need + * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case + */ + if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) { +#ifndef CONFIG_MMU + unsigned long caps = 0; + struct address_space *mapping = file->f_mapping; + if (mapping && mapping->backing_dev_info) + caps = mapping->backing_dev_info->capabilities; + if (!(caps & BDI_CAP_EXEC_MAP)) + return prot; +#endif + return prot | PROT_EXEC; + } + /* anything on noexec mount won't get PROT_EXEC */ + return prot; +} - ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only); +int security_mmap_file(struct file *file, unsigned long prot, + unsigned long flags) +{ + int ret; + ret = security_ops->mmap_file(file, prot, + mmap_prot(file, prot), flags); if (ret) return ret; return ima_file_mmap(file, prot); } +int security_mmap_addr(unsigned long addr) +{ + return security_ops->mmap_addr(addr); +} + int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, unsigned long prot) { @@ -671,11 +791,11 @@ int security_file_receive(struct file *file) return security_ops->file_receive(file); } -int security_dentry_open(struct file *file, const struct cred *cred) +int security_file_open(struct file *file, const struct cred *cred) { int ret; - ret = security_ops->dentry_open(file, cred); + ret = security_ops->file_open(file, cred); if (ret) return ret; @@ -687,6 +807,14 @@ int security_task_create(unsigned long clone_flags) return security_ops->task_create(clone_flags); } +void security_task_free(struct task_struct *task) +{ +#ifdef CONFIG_SECURITY_YAMA_STACKED + yama_task_free(task); +#endif + security_ops->task_free(task); +} + int security_cred_alloc_blank(struct cred *cred, gfp_t gfp) { return security_ops->cred_alloc_blank(cred, gfp); @@ -722,6 +850,16 @@ int security_kernel_module_request(char *kmod_name) return security_ops->kernel_module_request(kmod_name); } +int security_kernel_module_from_file(struct file *file) +{ + int ret; + + ret = security_ops->kernel_module_from_file(file); + if (ret) + return ret; + return ima_module_check(file); +} + int security_task_fix_setuid(struct cred *new, const struct cred *old, int flags) { @@ -799,6 +937,12 @@ int security_task_wait(struct task_struct *p) int security_task_prctl(int option, unsigned long arg2, unsigned long arg3, unsigned long arg4, unsigned long arg5) { +#ifdef CONFIG_SECURITY_YAMA_STACKED + int rc; + rc = yama_task_prctl(option, arg2, arg3, arg4, arg5); + if (rc != -ENOSYS) + return rc; +#endif return security_ops->task_prctl(option, arg2, arg3, arg4, arg5); } @@ -933,11 +1077,11 @@ int security_netlink_send(struct sock *sk, struct sk_buff *skb) return security_ops->netlink_send(sk, skb); } -int security_netlink_recv(struct sk_buff *skb, int cap) +int security_ismaclabel(const char *name) { - return security_ops->netlink_recv(skb, cap); + return security_ops->ismaclabel(name); } -EXPORT_SYMBOL(security_netlink_recv); +EXPORT_SYMBOL(security_ismaclabel); int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) { @@ -977,8 +1121,7 @@ EXPORT_SYMBOL(security_inode_getsecctx); #ifdef CONFIG_SECURITY_NETWORK -int security_unix_stream_connect(struct socket *sock, struct socket *other, - struct sock *newsk) +int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk) { return security_ops->unix_stream_connect(sock, other, newsk); } @@ -1090,10 +1233,11 @@ void security_sk_clone(const struct sock *sk, struct sock 
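Aside (illustrative, not part of this patch): the old security_file_mmap() is split above into security_mmap_file(), which checks the file and the protection bits (after mmap_prot() has upgraded PROT_READ to PROT_READ|PROT_EXEC for READ_IMPLIES_EXEC personalities), and security_mmap_addr(), which only vets the requested address. A hedged sketch of how a mapping path might drive the two hooks; do_map_request() is hypothetical, not a kernel function:

static int do_map_request(struct file *file, unsigned long addr,
			  unsigned long prot, unsigned long flags)
{
	int err;

	/* address-only check, e.g. for low fixed mappings */
	err = security_mmap_addr(addr);
	if (err)
		return err;

	/* file- and protection-based checks, plus the IMA hook inside */
	return security_mmap_file(file, prot, flags);
}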
*newsk) { security_ops->sk_clone_security(sk, newsk); } +EXPORT_SYMBOL(security_sk_clone); void security_sk_classify_flow(struct sock *sk, struct flowi *fl) { - security_ops->sk_getsecid(sk, &fl->secid); + security_ops->sk_getsecid(sk, &fl->flowi_secid); } EXPORT_SYMBOL(security_sk_classify_flow); @@ -1146,31 +1290,56 @@ void security_secmark_refcount_dec(void) } EXPORT_SYMBOL(security_secmark_refcount_dec); +int security_tun_dev_alloc_security(void **security) +{ + return security_ops->tun_dev_alloc_security(security); +} +EXPORT_SYMBOL(security_tun_dev_alloc_security); + +void security_tun_dev_free_security(void *security) +{ + security_ops->tun_dev_free_security(security); +} +EXPORT_SYMBOL(security_tun_dev_free_security); + int security_tun_dev_create(void) { return security_ops->tun_dev_create(); } EXPORT_SYMBOL(security_tun_dev_create); -void security_tun_dev_post_create(struct sock *sk) +int security_tun_dev_attach_queue(void *security) { - return security_ops->tun_dev_post_create(sk); + return security_ops->tun_dev_attach_queue(security); } -EXPORT_SYMBOL(security_tun_dev_post_create); +EXPORT_SYMBOL(security_tun_dev_attach_queue); -int security_tun_dev_attach(struct sock *sk) +int security_tun_dev_attach(struct sock *sk, void *security) { - return security_ops->tun_dev_attach(sk); + return security_ops->tun_dev_attach(sk, security); } EXPORT_SYMBOL(security_tun_dev_attach); +int security_tun_dev_open(void *security) +{ + return security_ops->tun_dev_open(security); +} +EXPORT_SYMBOL(security_tun_dev_open); + +void security_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ + security_ops->skb_owned_by(skb, sk); +} + #endif /* CONFIG_SECURITY_NETWORK */ #ifdef CONFIG_SECURITY_NETWORK_XFRM -int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx) +int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *sec_ctx, + gfp_t gfp) { - return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx); + return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp); } EXPORT_SYMBOL(security_xfrm_policy_alloc); @@ -1191,22 +1360,17 @@ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) return security_ops->xfrm_policy_delete_security(ctx); } -int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx) +int security_xfrm_state_alloc(struct xfrm_state *x, + struct xfrm_user_sec_ctx *sec_ctx) { - return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0); + return security_ops->xfrm_state_alloc(x, sec_ctx); } EXPORT_SYMBOL(security_xfrm_state_alloc); int security_xfrm_state_alloc_acquire(struct xfrm_state *x, struct xfrm_sec_ctx *polsec, u32 secid) { - if (!polsec) - return 0; - /* - * We want the context to be taken from secid which is usually - * from the sock. 
- */ - return security_ops->xfrm_state_alloc_security(x, NULL, secid); + return security_ops->xfrm_state_alloc_acquire(x, polsec, secid); } int security_xfrm_state_delete(struct xfrm_state *x) @@ -1226,7 +1390,8 @@ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) } int security_xfrm_state_pol_flow_match(struct xfrm_state *x, - struct xfrm_policy *xp, struct flowi *fl) + struct xfrm_policy *xp, + const struct flowi *fl) { return security_ops->xfrm_state_pol_flow_match(x, xp, fl); } @@ -1238,7 +1403,7 @@ int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid) void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl) { - int rc = security_ops->xfrm_decode_session(skb, &fl->secid, 0); + int rc = security_ops->xfrm_decode_session(skb, &fl->flowi_secid, 0); BUG_ON(rc); } @@ -1260,7 +1425,7 @@ void security_key_free(struct key *key) } int security_key_permission(key_ref_t key_ref, - const struct cred *cred, key_perm_t perm) + const struct cred *cred, unsigned perm) { return security_ops->key_permission(key_ref, cred, perm); } diff --git a/security/selinux/avc.c b/security/selinux/avc.c index 9da6420e205..a18f1fa6440 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c @@ -38,11 +38,7 @@ #define AVC_CACHE_RECLAIM 16 #ifdef CONFIG_SECURITY_SELINUX_AVC_STATS -#define avc_cache_stats_incr(field) \ -do { \ - per_cpu(avc_cache_stats, get_cpu()).field++; \ - put_cpu(); \ -} while (0) +#define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field) #else #define avc_cache_stats_incr(field) do {} while (0) #endif @@ -69,14 +65,8 @@ struct avc_cache { }; struct avc_callback_node { - int (*callback) (u32 event, u32 ssid, u32 tsid, - u16 tclass, u32 perms, - u32 *out_retained); + int (*callback) (u32 event); u32 events; - u32 ssid; - u32 tsid; - u16 tclass; - u32 perms; struct avc_callback_node *next; }; @@ -198,11 +188,9 @@ int avc_get_hash_stats(char *page) for (i = 0; i < AVC_CACHE_SLOTS; i++) { head = &avc_cache.slots[i]; if (!hlist_empty(head)) { - struct hlist_node *next; - slots_used++; chain_len = 0; - hlist_for_each_entry_rcu(node, next, head, list) + hlist_for_each_entry_rcu(node, head, list) chain_len++; if (chain_len > max_chain_len) max_chain_len = chain_len; @@ -251,7 +239,6 @@ static inline int avc_reclaim_node(void) int hvalue, try, ecx; unsigned long flags; struct hlist_head *head; - struct hlist_node *next; spinlock_t *lock; for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) { @@ -263,7 +250,7 @@ static inline int avc_reclaim_node(void) continue; rcu_read_lock(); - hlist_for_each_entry(node, next, head, list) { + hlist_for_each_entry(node, head, list) { avc_node_delete(node); avc_cache_stats_incr(reclaims); ecx++; @@ -284,7 +271,7 @@ static struct avc_node *avc_alloc_node(void) { struct avc_node *node; - node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC); + node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC); if (!node) goto out; @@ -311,11 +298,10 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass) struct avc_node *node, *ret = NULL; int hvalue; struct hlist_head *head; - struct hlist_node *next; hvalue = avc_hash(ssid, tsid, tclass); head = &avc_cache.slots[hvalue]; - hlist_for_each_entry_rcu(node, next, head, list) { + hlist_for_each_entry_rcu(node, head, list) { if (ssid == node->ae.ssid && tclass == node->ae.tclass && tsid == node->ae.tsid) { @@ -347,11 +333,10 @@ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass) node = avc_search_node(ssid, 
tsid, tclass); if (node) - avc_cache_stats_incr(hits); - else - avc_cache_stats_incr(misses); + return node; - return node; + avc_cache_stats_incr(misses); + return NULL; } static int avc_latest_notif_update(int seqno, int is_insert) @@ -405,7 +390,6 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec node = avc_alloc_node(); if (node) { struct hlist_head *head; - struct hlist_node *next; spinlock_t *lock; hvalue = avc_hash(ssid, tsid, tclass); @@ -415,7 +399,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec lock = &avc_cache.slots_lock[hvalue]; spin_lock_irqsave(lock, flag); - hlist_for_each_entry(pos, next, head, list) { + hlist_for_each_entry(pos, head, list) { if (pos->ae.ssid == ssid && pos->ae.tsid == tsid && pos->ae.tclass == tclass) { @@ -441,9 +425,9 @@ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; audit_log_format(ab, "avc: %s ", - ad->selinux_audit_data.denied ? "denied" : "granted"); - avc_dump_av(ab, ad->selinux_audit_data.tclass, - ad->selinux_audit_data.audited); + ad->selinux_audit_data->denied ? "denied" : "granted"); + avc_dump_av(ab, ad->selinux_audit_data->tclass, + ad->selinux_audit_data->audited); audit_log_format(ab, " for "); } @@ -457,105 +441,69 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; audit_log_format(ab, " "); - avc_dump_query(ab, ad->selinux_audit_data.ssid, - ad->selinux_audit_data.tsid, - ad->selinux_audit_data.tclass); + avc_dump_query(ab, ad->selinux_audit_data->ssid, + ad->selinux_audit_data->tsid, + ad->selinux_audit_data->tclass); + if (ad->selinux_audit_data->denied) { + audit_log_format(ab, " permissive=%u", + ad->selinux_audit_data->result ? 0 : 1); + } } -/** - * avc_audit - Audit the granting or denial of permissions. - * @ssid: source security identifier - * @tsid: target security identifier - * @tclass: target security class - * @requested: requested permissions - * @avd: access vector decisions - * @result: result from avc_has_perm_noaudit - * @a: auxiliary audit data - * - * Audit the granting or denial of permissions in accordance - * with the policy. This function is typically called by - * avc_has_perm() after a permission check, but can also be - * called directly by callers who use avc_has_perm_noaudit() - * in order to separate the permission check from the auditing. - * For example, this separation is useful when the permission check must - * be performed under a lock, to allow the lock to be released - * before calling the auditing code. - */ -void avc_audit(u32 ssid, u32 tsid, - u16 tclass, u32 requested, - struct av_decision *avd, int result, struct common_audit_data *a) +/* This is the slow part of avc audit with big stack footprint */ +noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, + u32 requested, u32 audited, u32 denied, int result, + struct common_audit_data *a, + unsigned flags) { struct common_audit_data stack_data; - u32 denied, audited; - denied = requested & ~avd->allowed; - if (denied) { - audited = denied & avd->auditdeny; - /* - * a->selinux_audit_data.auditdeny is TRICKY! Setting a bit in - * this field means that ANY denials should NOT be audited if - * the policy contains an explicit dontaudit rule for that - * permission. Take notice that this is unrelated to the - * actual permissions that were denied. 
As an example lets - * assume: - * - * denied == READ - * avd.auditdeny & ACCESS == 0 (not set means explicit rule) - * selinux_audit_data.auditdeny & ACCESS == 1 - * - * We will NOT audit the denial even though the denied - * permission was READ and the auditdeny checks were for - * ACCESS - */ - if (a && - a->selinux_audit_data.auditdeny && - !(a->selinux_audit_data.auditdeny & avd->auditdeny)) - audited = 0; - } else if (result) - audited = denied = requested; - else - audited = requested & avd->auditallow; - if (!audited) - return; + struct selinux_audit_data sad; + if (!a) { a = &stack_data; - COMMON_AUDIT_DATA_INIT(a, NONE); + a->type = LSM_AUDIT_DATA_NONE; } - a->selinux_audit_data.tclass = tclass; - a->selinux_audit_data.requested = requested; - a->selinux_audit_data.ssid = ssid; - a->selinux_audit_data.tsid = tsid; - a->selinux_audit_data.audited = audited; - a->selinux_audit_data.denied = denied; - a->lsm_pre_audit = avc_audit_pre_callback; - a->lsm_post_audit = avc_audit_post_callback; - common_lsm_audit(a); + + /* + * When in a RCU walk do the audit on the RCU retry. This is because + * the collection of the dname in an inode audit message is not RCU + * safe. Note this may drop some audits when the situation changes + * during retry. However this is logically just as if the operation + * happened a little later. + */ + if ((a->type == LSM_AUDIT_DATA_INODE) && + (flags & MAY_NOT_BLOCK)) + return -ECHILD; + + sad.tclass = tclass; + sad.requested = requested; + sad.ssid = ssid; + sad.tsid = tsid; + sad.audited = audited; + sad.denied = denied; + sad.result = result; + + a->selinux_audit_data = &sad; + + common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback); + return 0; } /** * avc_add_callback - Register a callback for security events. * @callback: callback function * @events: security events - * @ssid: source security identifier or %SECSID_WILD - * @tsid: target security identifier or %SECSID_WILD - * @tclass: target security class - * @perms: permissions * - * Register a callback function for events in the set @events - * related to the SID pair (@ssid, @tsid) - * and the permissions @perms, interpreting - * @perms based on @tclass. Returns %0 on success or - * -%ENOMEM if insufficient memory exists to add the callback. + * Register a callback function for events in the set @events. + * Returns %0 on success or -%ENOMEM if insufficient memory + * exists to add the callback. 
*/ -int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid, - u16 tclass, u32 perms, - u32 *out_retained), - u32 events, u32 ssid, u32 tsid, - u16 tclass, u32 perms) +int __init avc_add_callback(int (*callback)(u32 event), u32 events) { struct avc_callback_node *c; int rc = 0; - c = kmalloc(sizeof(*c), GFP_ATOMIC); + c = kmalloc(sizeof(*c), GFP_KERNEL); if (!c) { rc = -ENOMEM; goto out; @@ -563,9 +511,6 @@ int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid, c->callback = callback; c->events = events; - c->ssid = ssid; - c->tsid = tsid; - c->perms = perms; c->next = avc_callbacks; avc_callbacks = c; out: @@ -596,7 +541,6 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass, unsigned long flag; struct avc_node *pos, *node, *orig = NULL; struct hlist_head *head; - struct hlist_node *next; spinlock_t *lock; node = avc_alloc_node(); @@ -613,7 +557,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass, spin_lock_irqsave(lock, flag); - hlist_for_each_entry(pos, next, head, list) { + hlist_for_each_entry(pos, head, list) { if (ssid == pos->ae.ssid && tsid == pos->ae.tsid && tclass == pos->ae.tclass && @@ -669,7 +613,6 @@ out: static void avc_flush(void) { struct hlist_head *head; - struct hlist_node *next; struct avc_node *node; spinlock_t *lock; unsigned long flag; @@ -685,7 +628,7 @@ static void avc_flush(void) * prevent RCU grace periods from ending. */ rcu_read_lock(); - hlist_for_each_entry(node, next, head, list) + hlist_for_each_entry(node, head, list) avc_node_delete(node); rcu_read_unlock(); spin_unlock_irqrestore(lock, flag); @@ -705,8 +648,7 @@ int avc_ss_reset(u32 seqno) for (c = avc_callbacks; c; c = c->next) { if (c->events & AVC_CALLBACK_RESET) { - tmprc = c->callback(AVC_CALLBACK_RESET, - 0, 0, 0, 0, NULL); + tmprc = c->callback(AVC_CALLBACK_RESET); /* save the first error encountered for the return value and continue processing the callbacks */ if (!rc) @@ -718,6 +660,41 @@ int avc_ss_reset(u32 seqno) return rc; } +/* + * Slow-path helper function for avc_has_perm_noaudit, + * when the avc_node lookup fails. We get called with + * the RCU read lock held, and need to return with it + * still held, but drop if for the security compute. + * + * Don't inline this, since it's the slow-path and just + * results in a bigger stack frame. + */ +static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid, + u16 tclass, struct av_decision *avd) +{ + rcu_read_unlock(); + security_compute_av(ssid, tsid, tclass, avd); + rcu_read_lock(); + return avc_insert(ssid, tsid, tclass, avd); +} + +static noinline int avc_denied(u32 ssid, u32 tsid, + u16 tclass, u32 requested, + unsigned flags, + struct av_decision *avd) +{ + if (flags & AVC_STRICT) + return -EACCES; + + if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE)) + return -EACCES; + + avc_update_node(AVC_CALLBACK_GRANT, requested, ssid, + tsid, tclass, avd->seqno); + return 0; +} + + /** * avc_has_perm_noaudit - Check permissions but perform no auditing. * @ssid: source security identifier @@ -738,13 +715,12 @@ int avc_ss_reset(u32 seqno) * auditing, e.g. in cases where a lock must be held for the check but * should be released for the auditing. 
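Aside (illustrative, not part of this patch): the kerneldoc above still describes the intended split between the permission check and the auditing, and the new avc_compute_av()/avc_denied() helpers keep that fast path small. One detail changes for callers: slow_avc_audit() can now fail (for example with -ECHILD during an RCU walk), so the audit return value has to be propagated, as avc_has_perm() does later in this patch. A hedged sketch of the check-under-lock, audit-after-unlock pattern; my_lock and my_ad are hypothetical:

static int my_check(u32 ssid, u32 tsid, u16 tclass, u32 requested,
		    struct common_audit_data *my_ad, spinlock_t *my_lock)
{
	struct av_decision avd;
	int rc, rc2;

	spin_lock(my_lock);
	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
	spin_unlock(my_lock);

	/* audit outside the lock and honour a failed audit attempt */
	rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, my_ad);
	if (rc2)
		return rc2;
	return rc;
}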
*/ -int avc_has_perm_noaudit(u32 ssid, u32 tsid, +inline int avc_has_perm_noaudit(u32 ssid, u32 tsid, u16 tclass, u32 requested, unsigned flags, - struct av_decision *in_avd) + struct av_decision *avd) { struct avc_node *node; - struct av_decision avd_entry, *avd; int rc = 0; u32 denied; @@ -753,34 +729,16 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, rcu_read_lock(); node = avc_lookup(ssid, tsid, tclass); - if (!node) { - rcu_read_unlock(); - - if (in_avd) - avd = in_avd; - else - avd = &avd_entry; - - security_compute_av(ssid, tsid, tclass, avd); - rcu_read_lock(); - node = avc_insert(ssid, tsid, tclass, avd); + if (unlikely(!node)) { + node = avc_compute_av(ssid, tsid, tclass, avd); } else { - if (in_avd) - memcpy(in_avd, &node->ae.avd, sizeof(*in_avd)); + memcpy(avd, &node->ae.avd, sizeof(*avd)); avd = &node->ae.avd; } denied = requested & ~(avd->allowed); - - if (denied) { - if (flags & AVC_STRICT) - rc = -EACCES; - else if (!selinux_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE)) - avc_update_node(AVC_CALLBACK_GRANT, requested, ssid, - tsid, tclass, avd->seqno); - else - rc = -EACCES; - } + if (unlikely(denied)) + rc = avc_denied(ssid, tsid, tclass, requested, flags, avd); rcu_read_unlock(); return rc; @@ -806,10 +764,13 @@ int avc_has_perm(u32 ssid, u32 tsid, u16 tclass, u32 requested, struct common_audit_data *auditdata) { struct av_decision avd; - int rc; + int rc, rc2; rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd); - avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata); + + rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata); + if (rc2) + return rc2; return rc; } diff --git a/security/selinux/exports.c b/security/selinux/exports.c index 90664385dea..e75dd94e2d2 100644 --- a/security/selinux/exports.c +++ b/security/selinux/exports.c @@ -12,6 +12,7 @@ * as published by the Free Software Foundation. */ #include <linux/module.h> +#include <linux/selinux.h> #include "security.h" diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index 156ef93d6f7..83d06db34d0 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -14,7 +14,7 @@ * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. * <dgoeddel@trustedcs.com> * Copyright (C) 2006, 2007, 2009 Hewlett-Packard Development Company, L.P. - * Paul Moore <paul.moore@hp.com> + * Paul Moore <paul@paul-moore.com> * Copyright (C) 2007 Hitachi Software Engineering Co., Ltd. 
* Yuichi Nakamura <ynakam@hitachisoft.jp> * @@ -24,6 +24,7 @@ */ #include <linux/init.h> +#include <linux/kd.h> #include <linux/kernel.h> #include <linux/tracehook.h> #include <linux/errno.h> @@ -36,29 +37,32 @@ #include <linux/mman.h> #include <linux/slab.h> #include <linux/pagemap.h> +#include <linux/proc_fs.h> #include <linux/swap.h> #include <linux/spinlock.h> #include <linux/syscalls.h> +#include <linux/dcache.h> #include <linux/file.h> #include <linux/fdtable.h> #include <linux/namei.h> #include <linux/mount.h> -#include <linux/proc_fs.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <linux/tty.h> #include <net/icmp.h> #include <net/ip.h> /* for local_port_range[] */ +#include <net/sock.h> #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */ +#include <net/inet_connection_sock.h> #include <net/net_namespace.h> #include <net/netlabel.h> #include <linux/uaccess.h> #include <asm/ioctls.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include <linux/bitops.h> #include <linux/interrupt.h> #include <linux/netdevice.h> /* for network interface checks */ -#include <linux/netlink.h> +#include <net/netlink.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/dccp.h> @@ -70,13 +74,16 @@ #include <net/ipv6.h> #include <linux/hugetlb.h> #include <linux/personality.h> -#include <linux/sysctl.h> #include <linux/audit.h> #include <linux/string.h> #include <linux/selinux.h> #include <linux/mutex.h> #include <linux/posix-timers.h> #include <linux/syslog.h> +#include <linux/user_namespace.h> +#include <linux/export.h> +#include <linux/msg.h> +#include <linux/shm.h> #include "avc.h" #include "objsec.h" @@ -86,14 +93,12 @@ #include "xfrm.h" #include "netlabel.h" #include "audit.h" +#include "avc_ss.h" -#define NUM_SEL_MNT_OPTS 5 - -extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); extern struct security_operations *security_ops; /* SECMARK reference count */ -atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); +static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); #ifdef CONFIG_SECURITY_SELINUX_DEVELOP int selinux_enforcing; @@ -101,7 +106,7 @@ int selinux_enforcing; static int __init enforcing_setup(char *str) { unsigned long enforcing; - if (!strict_strtoul(str, 0, &enforcing)) + if (!kstrtoul(str, 0, &enforcing)) selinux_enforcing = enforcing ? 1 : 0; return 1; } @@ -114,7 +119,7 @@ int selinux_enabled = CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE; static int __init selinux_enabled_setup(char *str) { unsigned long enabled; - if (!strict_strtoul(str, 0, &enabled)) + if (!kstrtoul(str, 0, &enabled)) selinux_enabled = enabled ? 1 : 0; return 1; } @@ -132,12 +137,28 @@ static struct kmem_cache *sel_inode_cache; * This function checks the SECMARK reference counter to see if any SECMARK * targets are currently configured, if the reference counter is greater than * zero SECMARK is considered to be enabled. Returns true (1) if SECMARK is - * enabled, false (0) if SECMARK is disabled. + * enabled, false (0) if SECMARK is disabled. If the always_check_network + * policy capability is enabled, SECMARK is always considered enabled. * */ static int selinux_secmark_enabled(void) { - return (atomic_read(&selinux_secmark_refcount) > 0); + return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount)); +} + +/** + * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled + * + * Description: + * This function checks if NetLabel or labeled IPSEC is enabled. 
Returns true + * (1) if any are enabled or false (0) if neither are enabled. If the + * always_check_network policy capability is enabled, peer labeling + * is always considered enabled. + * + */ +static int selinux_peerlbl_enabled(void) +{ + return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled()); } /* @@ -212,6 +233,14 @@ static int inode_alloc_security(struct inode *inode) return 0; } +static void inode_free_rcu(struct rcu_head *head) +{ + struct inode_security_struct *isec; + + isec = container_of(head, struct inode_security_struct, rcu); + kmem_cache_free(sel_inode_cache, isec); +} + static void inode_free_security(struct inode *inode) { struct inode_security_struct *isec = inode->i_security; @@ -222,8 +251,16 @@ static void inode_free_security(struct inode *inode) list_del_init(&isec->list); spin_unlock(&sbsec->isec_lock); - inode->i_security = NULL; - kmem_cache_free(sel_inode_cache, isec); + /* + * The inode may still be referenced in a path walk and + * a call to selinux_inode_permission() can be made + * after inode_free_security() is called. Ideally, the VFS + * wouldn't do this, but fixing that is a much harder + * job. For now, simply free the i_security via RCU, and + * leave the current inode->i_security pointer intact. + * The inode will be freed after the RCU grace period too. + */ + call_rcu(&isec->rcu, inode_free_rcu); } static int file_alloc_security(struct file *file) @@ -276,19 +313,16 @@ static void superblock_free_security(struct super_block *sb) kfree(sbsec); } -/* The security server must be initialized before - any labeling or access decisions can be provided. */ -extern int ss_initialized; - /* The file system's label must be initialized prior to use. */ -static const char *labeling_behaviors[6] = { +static const char *labeling_behaviors[7] = { "uses xattr", "uses transition SIDs", "uses task SIDs", "uses genfs_contexts", "not configured for labeling", "uses mountpoint labeling", + "uses native labeling", }; static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry); @@ -305,8 +339,11 @@ enum { Opt_defcontext = 3, Opt_rootcontext = 4, Opt_labelsupport = 5, + Opt_nextmntopt = 6, }; +#define NUM_SEL_MNT_OPTS (Opt_nextmntopt - 1) + static const match_table_t tokens = { {Opt_context, CONTEXT_STR "%s"}, {Opt_fscontext, FSCONTEXT_STR "%s"}, @@ -351,6 +388,29 @@ static int may_context_mount_inode_relabel(u32 sid, return rc; } +static int selinux_is_sblabel_mnt(struct super_block *sb) +{ + struct superblock_security_struct *sbsec = sb->s_security; + + if (sbsec->behavior == SECURITY_FS_USE_XATTR || + sbsec->behavior == SECURITY_FS_USE_TRANS || + sbsec->behavior == SECURITY_FS_USE_TASK) + return 1; + + /* Special handling for sysfs. Is genfs but also has setxattr handler*/ + if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0) + return 1; + + /* + * Special handling for rootfs. Is genfs but supports + * setting SELinux context on in-core inodes. 
+ */ + if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0) + return 1; + + return 0; +} + static int sb_finish_set_opts(struct super_block *sb) { struct superblock_security_struct *sbsec = sb->s_security; @@ -384,8 +444,6 @@ static int sb_finish_set_opts(struct super_block *sb) } } - sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP); - if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n", sb->s_id, sb->s_type->name); @@ -394,15 +452,9 @@ static int sb_finish_set_opts(struct super_block *sb) sb->s_id, sb->s_type->name, labeling_behaviors[sbsec->behavior-1]); - if (sbsec->behavior == SECURITY_FS_USE_GENFS || - sbsec->behavior == SECURITY_FS_USE_MNTPOINT || - sbsec->behavior == SECURITY_FS_USE_NONE || - sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) - sbsec->flags &= ~SE_SBLABELSUPP; - - /* Special handling for sysfs. Is genfs but also has setxattr handler*/ - if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0) - sbsec->flags |= SE_SBLABELSUPP; + sbsec->flags |= SE_SBINITIALIZED; + if (selinux_is_sblabel_mnt(sb)) + sbsec->flags |= SBLABEL_MNT; /* Initialize the root inode. */ rc = inode_doinit_with_dentry(root_inode, root); @@ -456,15 +508,18 @@ static int selinux_get_mnt_opts(const struct super_block *sb, if (!ss_initialized) return -EINVAL; + /* make sure we always check enough bits to cover the mask */ + BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS)); + tmp = sbsec->flags & SE_MNTMASK; /* count the number of mount options for this sb */ - for (i = 0; i < 8; i++) { + for (i = 0; i < NUM_SEL_MNT_OPTS; i++) { if (tmp & 0x01) opts->num_mnt_opts++; tmp >>= 1; } /* Check if the Label support flag is set */ - if (sbsec->flags & SE_SBLABELSUPP) + if (sbsec->flags & SBLABEL_MNT) opts->num_mnt_opts++; opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC); @@ -511,9 +566,9 @@ static int selinux_get_mnt_opts(const struct super_block *sb, opts->mnt_opts[i] = context; opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT; } - if (sbsec->flags & SE_SBLABELSUPP) { + if (sbsec->flags & SBLABEL_MNT) { opts->mnt_opts[i] = NULL; - opts->mnt_opts_flags[i++] = SE_SBLABELSUPP; + opts->mnt_opts_flags[i++] = SBLABEL_MNT; } BUG_ON(i != opts->num_mnt_opts); @@ -550,7 +605,9 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag, * labeling information. */ static int selinux_set_mnt_opts(struct super_block *sb, - struct security_mnt_opts *opts) + struct security_mnt_opts *opts, + unsigned long kern_flags, + unsigned long *set_kern_flags) { const struct cred *cred = current_cred(); int rc = 0, i; @@ -578,6 +635,12 @@ static int selinux_set_mnt_opts(struct super_block *sb, "before the security server is initialized\n"); goto out; } + if (kern_flags && !set_kern_flags) { + /* Specifying internal flags without providing a place to + * place the results is not allowed */ + rc = -EINVAL; + goto out; + } /* * Binary mount data FS will come through this function twice. 
Once @@ -602,10 +665,10 @@ static int selinux_set_mnt_opts(struct super_block *sb, for (i = 0; i < num_opts; i++) { u32 sid; - if (flags[i] == SE_SBLABELSUPP) + if (flags[i] == SBLABEL_MNT) continue; rc = security_context_to_sid(mount_options[i], - strlen(mount_options[i]), &sid); + strlen(mount_options[i]), &sid, GFP_KERNEL); if (rc) { printk(KERN_WARNING "SELinux: security_context_to_sid" "(%s) failed for (dev %s, type %s) errno=%d\n", @@ -668,14 +731,19 @@ static int selinux_set_mnt_opts(struct super_block *sb, if (strcmp(sb->s_type->name, "proc") == 0) sbsec->flags |= SE_SBPROC; - /* Determine the labeling behavior to use for this filesystem type. */ - rc = security_fs_use((sbsec->flags & SE_SBPROC) ? "proc" : sb->s_type->name, &sbsec->behavior, &sbsec->sid); - if (rc) { - printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n", - __func__, sb->s_type->name, rc); - goto out; + if (!sbsec->behavior) { + /* + * Determine the labeling behavior to use for this + * filesystem type. + */ + rc = security_fs_use(sb); + if (rc) { + printk(KERN_WARNING + "%s: security_fs_use(%s) returned %d\n", + __func__, sb->s_type->name, rc); + goto out; + } } - /* sets the context of the superblock for the fs being mounted. */ if (fscontext_sid) { rc = may_context_mount_sb_relabel(fscontext_sid, sbsec, cred); @@ -690,6 +758,11 @@ static int selinux_set_mnt_opts(struct super_block *sb, * sets the label used on all file below the mountpoint, and will set * the superblock context if not already set. */ + if (kern_flags & SECURITY_LSM_NATIVE_LABELS && !context_sid) { + sbsec->behavior = SECURITY_FS_USE_NATIVE; + *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS; + } + if (context_sid) { if (!fscontext_sid) { rc = may_context_mount_sb_relabel(context_sid, sbsec, @@ -721,7 +794,8 @@ static int selinux_set_mnt_opts(struct super_block *sb, } if (defcontext_sid) { - if (sbsec->behavior != SECURITY_FS_USE_XATTR) { + if (sbsec->behavior != SECURITY_FS_USE_XATTR && + sbsec->behavior != SECURITY_FS_USE_NATIVE) { rc = -EINVAL; printk(KERN_WARNING "SELinux: defcontext option is " "invalid for this filesystem type\n"); @@ -749,7 +823,37 @@ out_double_mount: goto out; } -static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb, +static int selinux_cmp_sb_context(const struct super_block *oldsb, + const struct super_block *newsb) +{ + struct superblock_security_struct *old = oldsb->s_security; + struct superblock_security_struct *new = newsb->s_security; + char oldflags = old->flags & SE_MNTMASK; + char newflags = new->flags & SE_MNTMASK; + + if (oldflags != newflags) + goto mismatch; + if ((oldflags & FSCONTEXT_MNT) && old->sid != new->sid) + goto mismatch; + if ((oldflags & CONTEXT_MNT) && old->mntpoint_sid != new->mntpoint_sid) + goto mismatch; + if ((oldflags & DEFCONTEXT_MNT) && old->def_sid != new->def_sid) + goto mismatch; + if (oldflags & ROOTCONTEXT_MNT) { + struct inode_security_struct *oldroot = oldsb->s_root->d_inode->i_security; + struct inode_security_struct *newroot = newsb->s_root->d_inode->i_security; + if (oldroot->sid != newroot->sid) + goto mismatch; + } + return 0; +mismatch: + printk(KERN_WARNING "SELinux: mount invalid. 
Same superblock, " + "different security settings for (dev %s, " + "type %s)\n", newsb->s_id, newsb->s_type->name); + return -EBUSY; +} + +static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb, struct super_block *newsb) { const struct superblock_security_struct *oldsbsec = oldsb->s_security; @@ -764,14 +868,14 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb, * mount options. thus we can safely deal with this superblock later */ if (!ss_initialized) - return; + return 0; /* how can we clone if the old one wasn't set up?? */ BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED)); - /* if fs is reusing a sb, just let its options stand... */ + /* if fs is reusing a sb, make sure that the contexts match */ if (newsbsec->flags & SE_SBINITIALIZED) - return; + return selinux_cmp_sb_context(oldsb, newsb); mutex_lock(&newsbsec->lock); @@ -804,6 +908,7 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb, sb_finish_set_opts(newsb); mutex_unlock(&newsbsec->lock); + return 0; } static int selinux_parse_opts_str(char *options, @@ -947,7 +1052,7 @@ static int superblock_doinit(struct super_block *sb, void *data) goto out_err; out: - rc = selinux_set_mnt_opts(sb, &opts); + rc = selinux_set_mnt_opts(sb, &opts, 0, NULL); out_err: security_free_mnt_opts(&opts); @@ -981,12 +1086,13 @@ static void selinux_write_opts(struct seq_file *m, case DEFCONTEXT_MNT: prefix = DEFCONTEXT_STR; break; - case SE_SBLABELSUPP: + case SBLABEL_MNT: seq_putc(m, ','); seq_puts(m, LABELSUPP_STR); continue; default: BUG(); + return; }; /* we need a comma before each option */ seq_putc(m, ','); @@ -1089,7 +1195,7 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc return SECCLASS_NETLINK_ROUTE_SOCKET; case NETLINK_FIREWALL: return SECCLASS_NETLINK_FIREWALL_SOCKET; - case NETLINK_INET_DIAG: + case NETLINK_SOCK_DIAG: return SECCLASS_NETLINK_TCPDIAG_SOCKET; case NETLINK_NFLOG: return SECCLASS_NETLINK_NFLOG_SOCKET; @@ -1120,39 +1226,35 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc } #ifdef CONFIG_PROC_FS -static int selinux_proc_get_sid(struct proc_dir_entry *de, +static int selinux_proc_get_sid(struct dentry *dentry, u16 tclass, u32 *sid) { - int buflen, rc; - char *buffer, *path, *end; + int rc; + char *buffer, *path; buffer = (char *)__get_free_page(GFP_KERNEL); if (!buffer) return -ENOMEM; - buflen = PAGE_SIZE; - end = buffer+buflen; - *--end = '\0'; - buflen--; - path = end-1; - *path = '/'; - while (de && de != de->parent) { - buflen -= de->namelen + 1; - if (buflen < 0) - break; - end -= de->namelen; - memcpy(end, de->name, de->namelen); - *--end = '/'; - path = end; - de = de->parent; + path = dentry_path_raw(dentry, buffer, PAGE_SIZE); + if (IS_ERR(path)) + rc = PTR_ERR(path); + else { + /* each process gets a /proc/PID/ entry. Strip off the + * PID part to get a valid selinux labeling. + * e.g. 
/proc/1/net/rpc/nfs -> /net/rpc/nfs */ + while (path[1] >= '0' && path[1] <= '9') { + path[1] = '/'; + path++; + } + rc = security_genfs_sid("proc", path, tclass, sid); } - rc = security_genfs_sid("proc", path, tclass, sid); free_page((unsigned long)buffer); return rc; } #else -static int selinux_proc_get_sid(struct proc_dir_entry *de, +static int selinux_proc_get_sid(struct dentry *dentry, u16 tclass, u32 *sid) { @@ -1192,6 +1294,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent } switch (sbsec->behavior) { + case SECURITY_FS_USE_NATIVE: + break; case SECURITY_FS_USE_XATTR: if (!inode->i_op->getxattr) { isec->sid = sbsec->def_sid; @@ -1300,10 +1404,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent /* Try to obtain a transition SID. */ isec->sclass = inode_mode_to_security_class(inode->i_mode); - rc = security_transition_sid(isec->task_sid, - sbsec->sid, - isec->sclass, - &sid); + rc = security_transition_sid(isec->task_sid, sbsec->sid, + isec->sclass, NULL, &sid); if (rc) goto out_unlock; isec->sid = sid; @@ -1316,16 +1418,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent isec->sid = sbsec->sid; if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) { - struct proc_inode *proci = PROC_I(inode); - if (proci->pde) { - isec->sclass = inode_mode_to_security_class(inode->i_mode); - rc = selinux_proc_get_sid(proci->pde, - isec->sclass, - &sid); - if (rc) - goto out_unlock; - isec->sid = sid; - } + /* We must have a dentry to determine the label on + * procfs inodes */ + if (opt_dentry) + /* Called from d_instantiate or + * d_splice_alias. */ + dentry = dget(opt_dentry); + else + /* Called from selinux_complete_init, try to + * find a dentry. */ + dentry = d_find_alias(inode); + /* + * This can be hit on boot when a file is accessed + * before the policy is loaded. When we load policy we + * may find inodes that have no dentry on the + * sbsec->isec_head list. No reason to complain as + * these will get fixed up the next time we go through + * inode_doinit() with a dentry, before these inodes + * could be used again by userspace. + */ + if (!dentry) + goto out_unlock; + isec->sclass = inode_mode_to_security_class(inode->i_mode); + rc = selinux_proc_get_sid(dentry, isec->sclass, &sid); + dput(dentry); + if (rc) + goto out_unlock; + isec->sid = sid; } break; } @@ -1421,8 +1540,7 @@ static int current_has_perm(const struct task_struct *tsk, #endif /* Check whether a task is allowed to use a capability. 
*/ -static int task_has_capability(struct task_struct *tsk, - const struct cred *cred, +static int cred_has_capability(const struct cred *cred, int cap, int audit) { struct common_audit_data ad; @@ -1432,8 +1550,7 @@ static int task_has_capability(struct task_struct *tsk, u32 av = CAP_TO_MASK(cap); int rc; - COMMON_AUDIT_DATA_INIT(&ad, CAP); - ad.tsk = tsk; + ad.type = LSM_AUDIT_DATA_CAP; ad.u.cap = cap; switch (CAP_TO_INDEX(cap)) { @@ -1447,11 +1564,15 @@ static int task_has_capability(struct task_struct *tsk, printk(KERN_ERR "SELinux: out of range capability %d\n", cap); BUG(); + return -EINVAL; } rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd); - if (audit == SECURITY_CAP_AUDIT) - avc_audit(sid, sid, sclass, av, &avd, rc, &ad); + if (audit == SECURITY_CAP_AUDIT) { + int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad); + if (rc2) + return rc2; + } return rc; } @@ -1474,7 +1595,6 @@ static int inode_has_perm(const struct cred *cred, struct common_audit_data *adp) { struct inode_security_struct *isec; - struct common_audit_data ad; u32 sid; validate_creds(cred); @@ -1485,12 +1605,6 @@ static int inode_has_perm(const struct cred *cred, sid = cred_sid(cred); isec = inode->i_security; - if (!adp) { - adp = &ad; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.inode = inode; - } - return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp); } @@ -1498,19 +1612,44 @@ static int inode_has_perm(const struct cred *cred, the dentry to help the auditing code to more easily generate the pathname if needed. */ static inline int dentry_has_perm(const struct cred *cred, - struct vfsmount *mnt, struct dentry *dentry, u32 av) { struct inode *inode = dentry->d_inode; struct common_audit_data ad; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.path.mnt = mnt; - ad.u.fs.path.dentry = dentry; + ad.type = LSM_AUDIT_DATA_DENTRY; + ad.u.dentry = dentry; return inode_has_perm(cred, inode, av, &ad); } +/* Same as inode_has_perm, but pass explicit audit data containing + the path to help the auditing code to more easily generate the + pathname if needed. */ +static inline int path_has_perm(const struct cred *cred, + struct path *path, + u32 av) +{ + struct inode *inode = path->dentry->d_inode; + struct common_audit_data ad; + + ad.type = LSM_AUDIT_DATA_PATH; + ad.u.path = *path; + return inode_has_perm(cred, inode, av, &ad); +} + +/* Same as path_has_perm, but uses the inode from the file struct. */ +static inline int file_path_has_perm(const struct cred *cred, + struct file *file, + u32 av) +{ + struct common_audit_data ad; + + ad.type = LSM_AUDIT_DATA_PATH; + ad.u.path = file->f_path; + return inode_has_perm(cred, file_inode(file), av, &ad); +} + /* Check whether a task can use an open file descriptor to access an inode in a given way. 
Check access to the descriptor itself, and then use dentry_has_perm to @@ -1524,13 +1663,13 @@ static int file_has_perm(const struct cred *cred, u32 av) { struct file_security_struct *fsec = file->f_security; - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); struct common_audit_data ad; u32 sid = cred_sid(cred); int rc; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.path = file->f_path; + ad.type = LSM_AUDIT_DATA_PATH; + ad.u.path = file->f_path; if (sid != fsec->sid) { rc = avc_has_perm(sid, fsec->sid, @@ -1568,8 +1707,8 @@ static int may_create(struct inode *dir, sid = tsec->sid; newsid = tsec->create_sid; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.path.dentry = dentry; + ad.type = LSM_AUDIT_DATA_DENTRY; + ad.u.dentry = dentry; rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR, DIR__ADD_NAME | DIR__SEARCH, @@ -1577,8 +1716,9 @@ static int may_create(struct inode *dir, if (rc) return rc; - if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { - rc = security_transition_sid(sid, dsec->sid, tclass, &newsid); + if (!newsid || !(sbsec->flags & SBLABEL_MNT)) { + rc = security_transition_sid(sid, dsec->sid, tclass, + &dentry->d_name, &newsid); if (rc) return rc; } @@ -1620,8 +1760,8 @@ static int may_link(struct inode *dir, dsec = dir->i_security; isec = dentry->d_inode->i_security; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.path.dentry = dentry; + ad.type = LSM_AUDIT_DATA_DENTRY; + ad.u.dentry = dentry; av = DIR__SEARCH; av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME); @@ -1666,9 +1806,9 @@ static inline int may_rename(struct inode *old_dir, old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode); new_dsec = new_dir->i_security; - COMMON_AUDIT_DATA_INIT(&ad, FS); + ad.type = LSM_AUDIT_DATA_DENTRY; - ad.u.fs.path.dentry = old_dentry; + ad.u.dentry = old_dentry; rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR, DIR__REMOVE_NAME | DIR__SEARCH, &ad); if (rc) @@ -1684,7 +1824,7 @@ static inline int may_rename(struct inode *old_dir, return rc; } - ad.u.fs.path.dentry = new_dentry; + ad.u.dentry = new_dentry; av = DIR__ADD_NAME | DIR__SEARCH; if (new_dentry->d_inode) av |= DIR__REMOVE_NAME; @@ -1722,7 +1862,7 @@ static inline u32 file_mask_to_av(int mode, int mask) { u32 av = 0; - if ((mode & S_IFMT) != S_IFDIR) { + if (!S_ISDIR(mode)) { if (mask & MAY_EXEC) av |= FILE__EXECUTE; if (mask & MAY_READ) @@ -1793,7 +1933,7 @@ static int selinux_ptrace_access_check(struct task_struct *child, if (rc) return rc; - if (mode == PTRACE_MODE_READ) { + if (mode & PTRACE_MODE_READ) { u32 sid = current_sid(); u32 csid = task_sid(child); return avc_has_perm(sid, csid, SECCLASS_FILE, FILE__READ, NULL); @@ -1850,92 +1990,16 @@ static int selinux_capset(struct cred *new, const struct cred *old, * the CAP_SETUID and CAP_SETGID capabilities using the capable hook. 
*/ -static int selinux_capable(struct task_struct *tsk, const struct cred *cred, +static int selinux_capable(const struct cred *cred, struct user_namespace *ns, int cap, int audit) { int rc; - rc = cap_capable(tsk, cred, cap, audit); + rc = cap_capable(cred, ns, cap, audit); if (rc) return rc; - return task_has_capability(tsk, cred, cap, audit); -} - -static int selinux_sysctl_get_sid(ctl_table *table, u16 tclass, u32 *sid) -{ - int buflen, rc; - char *buffer, *path, *end; - - rc = -ENOMEM; - buffer = (char *)__get_free_page(GFP_KERNEL); - if (!buffer) - goto out; - - buflen = PAGE_SIZE; - end = buffer+buflen; - *--end = '\0'; - buflen--; - path = end-1; - *path = '/'; - while (table) { - const char *name = table->procname; - size_t namelen = strlen(name); - buflen -= namelen + 1; - if (buflen < 0) - goto out_free; - end -= namelen; - memcpy(end, name, namelen); - *--end = '/'; - path = end; - table = table->parent; - } - buflen -= 4; - if (buflen < 0) - goto out_free; - end -= 4; - memcpy(end, "/sys", 4); - path = end; - rc = security_genfs_sid("proc", path, tclass, sid); -out_free: - free_page((unsigned long)buffer); -out: - return rc; -} - -static int selinux_sysctl(ctl_table *table, int op) -{ - int error = 0; - u32 av; - u32 tsid, sid; - int rc; - - sid = current_sid(); - - rc = selinux_sysctl_get_sid(table, (op == 0001) ? - SECCLASS_DIR : SECCLASS_FILE, &tsid); - if (rc) { - /* Default to the well-defined sysctl SID. */ - tsid = SECINITSID_SYSCTL; - } - - /* The op values are "defined" in sysctl.c, thereby creating - * a bad coupling between this module and sysctl.c */ - if (op == 001) { - error = avc_has_perm(sid, tsid, - SECCLASS_DIR, DIR__SEARCH, NULL); - } else { - av = 0; - if (op & 004) - av |= FILE__READ; - if (op & 002) - av |= FILE__WRITE; - if (av) - error = avc_has_perm(sid, tsid, - SECCLASS_FILE, av, NULL); - } - - return error; + return cred_has_capability(cred, cap, audit); } static int selinux_quotactl(int cmds, int type, int id, struct super_block *sb) @@ -1970,17 +2034,13 @@ static int selinux_quota_on(struct dentry *dentry) { const struct cred *cred = current_cred(); - return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON); + return dentry_has_perm(cred, dentry, FILE__QUOTAON); } -static int selinux_syslog(int type, bool from_file) +static int selinux_syslog(int type) { int rc; - rc = cap_syslog(type, from_file); - if (rc) - return rc; - switch (type) { case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */ @@ -2016,7 +2076,7 @@ static int selinux_vm_enough_memory(struct mm_struct *mm, long pages) { int rc, cap_sys_admin = 0; - rc = selinux_capable(current, current_cred(), CAP_SYS_ADMIN, + rc = selinux_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN, SECURITY_CAP_NOAUDIT); if (rc == 0) cap_sys_admin = 1; @@ -2032,7 +2092,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm) struct task_security_struct *new_tsec; struct inode_security_struct *isec; struct common_audit_data ad; - struct inode *inode = bprm->file->f_path.dentry->d_inode; + struct inode *inode = file_inode(bprm->file); int rc; rc = cap_bprm_set_creds(bprm); @@ -2061,18 +2121,29 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm) new_tsec->sid = old_tsec->exec_sid; /* Reset exec SID on execve. */ new_tsec->exec_sid = 0; + + /* + * Minimize confusion: if no_new_privs or nosuid and a + * transition is explicitly requested, then fail the exec. 
+ */ + if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) + return -EPERM; + if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) + return -EACCES; } else { /* Check for a default transition on this program. */ rc = security_transition_sid(old_tsec->sid, isec->sid, - SECCLASS_PROCESS, &new_tsec->sid); + SECCLASS_PROCESS, NULL, + &new_tsec->sid); if (rc) return rc; } - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.path = bprm->file->f_path; + ad.type = LSM_AUDIT_DATA_PATH; + ad.u.path = bprm->file->f_path; - if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) + if ((bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) || + (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) new_tsec->sid = old_tsec->sid; if (new_tsec->sid == old_tsec->sid) { @@ -2110,7 +2181,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm) u32 ptsid = 0; rcu_read_lock(); - tracer = tracehook_tracer_task(current); + tracer = ptrace_parent(current); if (likely(tracer != NULL)) { sec = __task_cred(tracer)->security; ptsid = sec->sid; @@ -2154,40 +2225,36 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm) return (atsecure || cap_bprm_secureexec(bprm)); } -extern struct vfsmount *selinuxfs_mount; -extern struct dentry *selinux_null; +static int match_file(const void *p, struct file *file, unsigned fd) +{ + return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0; +} /* Derived from fs/exec.c:flush_old_files. */ static inline void flush_unauthorized_files(const struct cred *cred, struct files_struct *files) { - struct common_audit_data ad; struct file *file, *devnull = NULL; struct tty_struct *tty; - struct fdtable *fdt; - long j = -1; int drop_tty = 0; + unsigned n; tty = get_current_tty(); if (tty) { spin_lock(&tty_files_lock); if (!list_empty(&tty->tty_files)) { struct tty_file_private *file_priv; - struct inode *inode; /* Revalidate access to controlling tty. - Use inode_has_perm on the tty inode directly rather - than using file_has_perm, as this particular open - file may belong to another process and we are only - interested in the inode-based check here. */ + Use file_path_has_perm on the tty path directly + rather than using file_has_perm, as this particular + open file may belong to another process and we are + only interested in the inode-based check here. */ file_priv = list_first_entry(&tty->tty_files, struct tty_file_private, list); file = file_priv->file; - inode = file->f_path.dentry->d_inode; - if (inode_has_perm(cred, inode, - FILE__READ | FILE__WRITE, NULL)) { + if (file_path_has_perm(cred, file, FILE__READ | FILE__WRITE)) drop_tty = 1; - } } spin_unlock(&tty_files_lock); tty_kref_put(tty); @@ -2197,62 +2264,19 @@ static inline void flush_unauthorized_files(const struct cred *cred, no_tty(); /* Revalidate access to inherited open files. */ + n = iterate_fd(files, 0, match_file, cred); + if (!n) /* none found? 
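The selinux_bprm_set_creds changes above treat no_new_privs and nosuid mounts differently depending on how the domain transition arises: an explicitly requested transition fails the exec outright (-EPERM or -EACCES), while a policy-computed default transition is silently suppressed by keeping the old SID. A small user-space sketch of that decision, with hypothetical names standing in for the kernel state:

#include <errno.h>
#include <stdio.h>

/* hypothetical stand-ins for the kernel state involved */
struct exec_ctx {
    unsigned old_sid;
    unsigned exec_sid;      /* nonzero if a transition was explicitly requested */
    unsigned default_sid;   /* what policy would compute by default */
    int no_new_privs;
    int nosuid_mount;
};

static int pick_new_sid(const struct exec_ctx *c, unsigned *new_sid)
{
    if (c->exec_sid) {
        /* caller asked for a specific domain: refuse under nnp/nosuid */
        if (c->no_new_privs)
            return -EPERM;
        if (c->nosuid_mount)
            return -EACCES;
        *new_sid = c->exec_sid;
        return 0;
    }
    /* default transition: quietly stay in the old domain under nnp/nosuid */
    if (c->no_new_privs || c->nosuid_mount)
        *new_sid = c->old_sid;
    else
        *new_sid = c->default_sid;
    return 0;
}

int main(void)
{
    struct exec_ctx c = { .old_sid = 1, .default_sid = 2, .no_new_privs = 1 };
    unsigned sid;

    if (!pick_new_sid(&c, &sid))
        printf("new sid: %u\n", sid);   /* prints 1: the default transition is suppressed */
    return 0;
}
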
*/ + return; - COMMON_AUDIT_DATA_INIT(&ad, FS); - - spin_lock(&files->file_lock); - for (;;) { - unsigned long set, i; - int fd; - - j++; - i = j * __NFDBITS; - fdt = files_fdtable(files); - if (i >= fdt->max_fds) - break; - set = fdt->open_fds->fds_bits[j]; - if (!set) - continue; - spin_unlock(&files->file_lock); - for ( ; set ; i++, set >>= 1) { - if (set & 1) { - file = fget(i); - if (!file) - continue; - if (file_has_perm(cred, - file, - file_to_av(file))) { - sys_close(i); - fd = get_unused_fd(); - if (fd != i) { - if (fd >= 0) - put_unused_fd(fd); - fput(file); - continue; - } - if (devnull) { - get_file(devnull); - } else { - devnull = dentry_open( - dget(selinux_null), - mntget(selinuxfs_mount), - O_RDWR, cred); - if (IS_ERR(devnull)) { - devnull = NULL; - put_unused_fd(fd); - fput(file); - continue; - } - } - fd_install(fd, devnull); - } - fput(file); - } - } - spin_lock(&files->file_lock); - - } - spin_unlock(&files->file_lock); + devnull = dentry_open(&selinux_null, O_RDWR, cred); + if (IS_ERR(devnull)) + devnull = NULL; + /* replace all the matching ones with this */ + do { + replace_fd(n - 1, devnull, 0); + } while ((n = iterate_fd(files, n, match_file, cred)) != 0); + if (devnull) + fput(devnull); } /* @@ -2447,6 +2471,92 @@ out: return rc; } +static int selinux_sb_remount(struct super_block *sb, void *data) +{ + int rc, i, *flags; + struct security_mnt_opts opts; + char *secdata, **mount_options; + struct superblock_security_struct *sbsec = sb->s_security; + + if (!(sbsec->flags & SE_SBINITIALIZED)) + return 0; + + if (!data) + return 0; + + if (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA) + return 0; + + security_init_mnt_opts(&opts); + secdata = alloc_secdata(); + if (!secdata) + return -ENOMEM; + rc = selinux_sb_copy_data(data, secdata); + if (rc) + goto out_free_secdata; + + rc = selinux_parse_opts_str(secdata, &opts); + if (rc) + goto out_free_secdata; + + mount_options = opts.mnt_opts; + flags = opts.mnt_opts_flags; + + for (i = 0; i < opts.num_mnt_opts; i++) { + u32 sid; + size_t len; + + if (flags[i] == SBLABEL_MNT) + continue; + len = strlen(mount_options[i]); + rc = security_context_to_sid(mount_options[i], len, &sid, + GFP_KERNEL); + if (rc) { + printk(KERN_WARNING "SELinux: security_context_to_sid" + "(%s) failed for (dev %s, type %s) errno=%d\n", + mount_options[i], sb->s_id, sb->s_type->name, rc); + goto out_free_opts; + } + rc = -EINVAL; + switch (flags[i]) { + case FSCONTEXT_MNT: + if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid)) + goto out_bad_option; + break; + case CONTEXT_MNT: + if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid)) + goto out_bad_option; + break; + case ROOTCONTEXT_MNT: { + struct inode_security_struct *root_isec; + root_isec = sb->s_root->d_inode->i_security; + + if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid)) + goto out_bad_option; + break; + } + case DEFCONTEXT_MNT: + if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid)) + goto out_bad_option; + break; + default: + goto out_free_opts; + } + } + + rc = 0; +out_free_opts: + security_free_mnt_opts(&opts); +out_free_secdata: + free_secdata(secdata); + return rc; +out_bad_option: + printk(KERN_WARNING "SELinux: unable to change security options " + "during remount (dev %s, type=%s)\n", sb->s_id, + sb->s_type->name); + goto out_free_opts; +} + static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data) { const struct cred *cred = current_cred(); @@ -2461,8 +2571,8 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void 
*data) if (flags & MS_KERNMOUNT) return 0; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.path.dentry = sb->s_root; + ad.type = LSM_AUDIT_DATA_DENTRY; + ad.u.dentry = sb->s_root; return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad); } @@ -2471,25 +2581,24 @@ static int selinux_sb_statfs(struct dentry *dentry) const struct cred *cred = current_cred(); struct common_audit_data ad; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.path.dentry = dentry->d_sb->s_root; + ad.type = LSM_AUDIT_DATA_DENTRY; + ad.u.dentry = dentry->d_sb->s_root; return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad); } -static int selinux_mount(char *dev_name, +static int selinux_mount(const char *dev_name, struct path *path, - char *type, + const char *type, unsigned long flags, void *data) { const struct cred *cred = current_cred(); if (flags & MS_REMOUNT) - return superblock_has_perm(cred, path->mnt->mnt_sb, + return superblock_has_perm(cred, path->dentry->d_sb, FILESYSTEM__REMOUNT, NULL); else - return dentry_has_perm(cred, path->mnt, path->dentry, - FILE__MOUNTON); + return path_has_perm(cred, path, FILE__MOUNTON); } static int selinux_umount(struct vfsmount *mnt, int flags) @@ -2512,16 +2621,51 @@ static void selinux_inode_free_security(struct inode *inode) inode_free_security(inode); } +static int selinux_dentry_init_security(struct dentry *dentry, int mode, + struct qstr *name, void **ctx, + u32 *ctxlen) +{ + const struct cred *cred = current_cred(); + struct task_security_struct *tsec; + struct inode_security_struct *dsec; + struct superblock_security_struct *sbsec; + struct inode *dir = dentry->d_parent->d_inode; + u32 newsid; + int rc; + + tsec = cred->security; + dsec = dir->i_security; + sbsec = dir->i_sb->s_security; + + if (tsec->create_sid && sbsec->behavior != SECURITY_FS_USE_MNTPOINT) { + newsid = tsec->create_sid; + } else { + rc = security_transition_sid(tsec->sid, dsec->sid, + inode_mode_to_security_class(mode), + name, + &newsid); + if (rc) { + printk(KERN_WARNING + "%s: security_transition_sid failed, rc=%d\n", + __func__, -rc); + return rc; + } + } + + return security_sid_to_context(newsid, (char **)ctx, ctxlen); +} + static int selinux_inode_init_security(struct inode *inode, struct inode *dir, - char **name, void **value, - size_t *len) + const struct qstr *qstr, + const char **name, + void **value, size_t *len) { const struct task_security_struct *tsec = current_security(); struct inode_security_struct *dsec; struct superblock_security_struct *sbsec; u32 sid, newsid, clen; int rc; - char *namep = NULL, *context; + char *context; dsec = dir->i_security; sbsec = dir->i_sb->s_security; @@ -2529,10 +2673,13 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, sid = tsec->sid; newsid = tsec->create_sid; - if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) { + if ((sbsec->flags & SE_SBINITIALIZED) && + (sbsec->behavior == SECURITY_FS_USE_MNTPOINT)) + newsid = sbsec->mntpoint_sid; + else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) { rc = security_transition_sid(sid, dsec->sid, inode_mode_to_security_class(inode->i_mode), - &newsid); + qstr, &newsid); if (rc) { printk(KERN_WARNING "%s: " "security_transition_sid failed, rc=%d (dev=%s " @@ -2551,22 +2698,16 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, isec->initialized = 1; } - if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP)) + if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; - if (name) { - namep = kstrdup(XATTR_SELINUX_SUFFIX, 
GFP_NOFS); - if (!namep) - return -ENOMEM; - *name = namep; - } + if (name) + *name = XATTR_SELINUX_SUFFIX; if (value && len) { rc = security_sid_to_context_force(newsid, &context, &clen); - if (rc) { - kfree(namep); + if (rc) return rc; - } *value = context; *len = clen; } @@ -2574,7 +2715,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir, return 0; } -static int selinux_inode_create(struct inode *dir, struct dentry *dentry, int mask) +static int selinux_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode) { return may_create(dir, dentry, SECCLASS_FILE); } @@ -2594,7 +2735,7 @@ static int selinux_inode_symlink(struct inode *dir, struct dentry *dentry, const return may_create(dir, dentry, SECCLASS_LNK_FILE); } -static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, int mask) +static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mask) { return may_create(dir, dentry, SECCLASS_DIR); } @@ -2604,7 +2745,7 @@ static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry) return may_link(dir, dentry, MAY_RMDIR); } -static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev) +static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev) { return may_create(dir, dentry, inode_mode_to_security_class(mode)); } @@ -2619,22 +2760,46 @@ static int selinux_inode_readlink(struct dentry *dentry) { const struct cred *cred = current_cred(); - return dentry_has_perm(cred, NULL, dentry, FILE__READ); + return dentry_has_perm(cred, dentry, FILE__READ); } static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *nameidata) { const struct cred *cred = current_cred(); - return dentry_has_perm(cred, NULL, dentry, FILE__READ); + return dentry_has_perm(cred, dentry, FILE__READ); +} + +static noinline int audit_inode_permission(struct inode *inode, + u32 perms, u32 audited, u32 denied, + int result, + unsigned flags) +{ + struct common_audit_data ad; + struct inode_security_struct *isec = inode->i_security; + int rc; + + ad.type = LSM_AUDIT_DATA_INODE; + ad.u.inode = inode; + + rc = slow_avc_audit(current_sid(), isec->sid, isec->sclass, perms, + audited, denied, result, &ad, flags); + if (rc) + return rc; + return 0; } static int selinux_inode_permission(struct inode *inode, int mask) { const struct cred *cred = current_cred(); - struct common_audit_data ad; u32 perms; bool from_access; + unsigned flags = mask & MAY_NOT_BLOCK; + struct inode_security_struct *isec; + u32 sid; + struct av_decision avd; + int rc, rc2; + u32 audited, denied; from_access = mask & MAY_ACCESS; mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); @@ -2643,21 +2808,34 @@ static int selinux_inode_permission(struct inode *inode, int mask) if (!mask) return 0; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.inode = inode; + validate_creds(cred); - if (from_access) - ad.selinux_audit_data.auditdeny |= FILE__AUDIT_ACCESS; + if (unlikely(IS_PRIVATE(inode))) + return 0; perms = file_mask_to_av(inode->i_mode, mask); - return inode_has_perm(cred, inode, perms, &ad); + sid = cred_sid(cred); + isec = inode->i_security; + + rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd); + audited = avc_audit_required(perms, &avd, rc, + from_access ? 
FILE__AUDIT_ACCESS : 0, + &denied); + if (likely(!audited)) + return rc; + + rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags); + if (rc2) + return rc2; + return rc; } static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) { const struct cred *cred = current_cred(); unsigned int ia_valid = iattr->ia_valid; + __u32 av = FILE__WRITE; /* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */ if (ia_valid & ATTR_FORCE) { @@ -2669,16 +2847,23 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID | ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET)) - return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR); + return dentry_has_perm(cred, dentry, FILE__SETATTR); - return dentry_has_perm(cred, NULL, dentry, FILE__WRITE); + if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE)) + av |= FILE__OPEN; + + return dentry_has_perm(cred, dentry, av); } static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { const struct cred *cred = current_cred(); + struct path path; + + path.dentry = dentry; + path.mnt = mnt; - return dentry_has_perm(cred, mnt, dentry, FILE__GETATTR); + return path_has_perm(cred, &path, FILE__GETATTR); } static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name) @@ -2699,7 +2884,7 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name) /* Not an attribute we recognize, so just check the ordinary setattr permission. */ - return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR); + return dentry_has_perm(cred, dentry, FILE__SETATTR); } static int selinux_inode_setxattr(struct dentry *dentry, const char *name, @@ -2716,24 +2901,46 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, return selinux_inode_setotherxattr(dentry, name); sbsec = inode->i_sb->s_security; - if (!(sbsec->flags & SE_SBLABELSUPP)) + if (!(sbsec->flags & SBLABEL_MNT)) return -EOPNOTSUPP; - if (!is_owner_or_cap(inode)) + if (!inode_owner_or_capable(inode)) return -EPERM; - COMMON_AUDIT_DATA_INIT(&ad, FS); - ad.u.fs.path.dentry = dentry; + ad.type = LSM_AUDIT_DATA_DENTRY; + ad.u.dentry = dentry; rc = avc_has_perm(sid, isec->sid, isec->sclass, FILE__RELABELFROM, &ad); if (rc) return rc; - rc = security_context_to_sid(value, size, &newsid); + rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL); if (rc == -EINVAL) { - if (!capable(CAP_MAC_ADMIN)) + if (!capable(CAP_MAC_ADMIN)) { + struct audit_buffer *ab; + size_t audit_size; + const char *str; + + /* We strip a nul only if it is at the end, otherwise the + * context contains a nul and we should audit that */ + if (value) { + str = value; + if (str[size - 1] == '\0') + audit_size = size - 1; + else + audit_size = size; + } else { + str = ""; + audit_size = 0; + } + ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR); + audit_log_format(ab, "op=setxattr invalid_context="); + audit_log_n_untrustedstring(ab, value, audit_size); + audit_log_end(ab); + return rc; + } rc = security_context_to_sid_force(value, size, &newsid); } if (rc) @@ -2778,7 +2985,10 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name, return; } + isec->sclass = inode_mode_to_security_class(inode->i_mode); isec->sid = newsid; + isec->initialized = 1; + return; } @@ -2786,14 +2996,14 @@ static int selinux_inode_getxattr(struct dentry *dentry, const char *name) { const struct cred *cred = current_cred(); - return dentry_has_perm(cred, 
NULL, dentry, FILE__GETATTR); + return dentry_has_perm(cred, dentry, FILE__GETATTR); } static int selinux_inode_listxattr(struct dentry *dentry) { const struct cred *cred = current_cred(); - return dentry_has_perm(cred, NULL, dentry, FILE__GETATTR); + return dentry_has_perm(cred, dentry, FILE__GETATTR); } static int selinux_inode_removexattr(struct dentry *dentry, const char *name) @@ -2830,7 +3040,7 @@ static int selinux_inode_getsecurity(const struct inode *inode, const char *name * and lack of permission just means that we fall back to the * in-core context value, not a denial. */ - error = selinux_capable(current, current_cred(), CAP_MAC_ADMIN, + error = selinux_capable(current_cred(), &init_user_ns, CAP_MAC_ADMIN, SECURITY_CAP_NOAUDIT); if (!error) error = security_sid_to_context_force(isec->sid, &context, @@ -2862,10 +3072,11 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name, if (!value || !size) return -EACCES; - rc = security_context_to_sid((void *)value, size, &newsid); + rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL); if (rc) return rc; + isec->sclass = inode_mode_to_security_class(inode->i_mode); isec->sid = newsid; isec->initialized = 1; return 0; @@ -2890,7 +3101,7 @@ static void selinux_inode_getsecid(const struct inode *inode, u32 *secid) static int selinux_revalidate_file_permission(struct file *file, int mask) { const struct cred *cred = current_cred(); - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); /* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */ if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE)) @@ -2902,7 +3113,7 @@ static int selinux_revalidate_file_permission(struct file *file, int mask) static int selinux_file_permission(struct file *file, int mask) { - struct inode *inode = file->f_path.dentry->d_inode; + struct inode *inode = file_inode(file); struct file_security_struct *fsec = file->f_security; struct inode_security_struct *isec = inode->i_security; u32 sid = current_sid(); @@ -2913,7 +3124,7 @@ static int selinux_file_permission(struct file *file, int mask) if (sid == fsec->sid && fsec->isid == isec->sid && fsec->pseqno == avc_policy_seqno()) - /* No change since dentry_open check. */ + /* No change since file_open check. */ return 0; return selinux_revalidate_file_permission(file, mask); @@ -2933,16 +3144,47 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { const struct cred *cred = current_cred(); - u32 av = 0; + int error = 0; - if (_IOC_DIR(cmd) & _IOC_WRITE) - av |= FILE__WRITE; - if (_IOC_DIR(cmd) & _IOC_READ) - av |= FILE__READ; - if (!av) - av = FILE__IOCTL; + switch (cmd) { + case FIONREAD: + /* fall through */ + case FIBMAP: + /* fall through */ + case FIGETBSZ: + /* fall through */ + case FS_IOC_GETFLAGS: + /* fall through */ + case FS_IOC_GETVERSION: + error = file_has_perm(cred, file, FILE__GETATTR); + break; + + case FS_IOC_SETFLAGS: + /* fall through */ + case FS_IOC_SETVERSION: + error = file_has_perm(cred, file, FILE__SETATTR); + break; - return file_has_perm(cred, file, av); + /* sys_ioctl() checks */ + case FIONBIO: + /* fall through */ + case FIOASYNC: + error = file_has_perm(cred, file, 0); + break; + + case KDSKBENT: + case KDSKBSENT: + error = cred_has_capability(cred, CAP_SYS_TTY_CONFIG, + SECURITY_CAP_AUDIT); + break; + + /* default case assumes that the command will go + * to the file's ioctl() function. 
+ */ + default: + error = file_has_perm(cred, file, FILE__IOCTL); + } + return error; } static int default_noexec; @@ -2982,31 +3224,27 @@ error: return rc; } -static int selinux_file_mmap(struct file *file, unsigned long reqprot, - unsigned long prot, unsigned long flags, - unsigned long addr, unsigned long addr_only) +static int selinux_mmap_addr(unsigned long addr) { - int rc = 0; - u32 sid = current_sid(); + int rc; + + /* do DAC check on address space usage */ + rc = cap_mmap_addr(addr); + if (rc) + return rc; - /* - * notice that we are intentionally putting the SELinux check before - * the secondary cap_file_mmap check. This is such a likely attempt - * at bad behaviour/exploit that we always want to get the AVC, even - * if DAC would have also denied the operation. - */ if (addr < CONFIG_LSM_MMAP_MIN_ADDR) { + u32 sid = current_sid(); rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT, MEMPROTECT__MMAP_ZERO, NULL); - if (rc) - return rc; } - /* do DAC check on address space usage */ - rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); - if (rc || addr_only) - return rc; + return rc; +} +static int selinux_mmap_file(struct file *file, unsigned long reqprot, + unsigned long prot, unsigned long flags) +{ if (selinux_checkreqprot) prot = reqprot; @@ -3065,11 +3303,6 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd, switch (cmd) { case F_SETFL: - if (!file->f_path.dentry || !file->f_path.dentry->d_inode) { - err = -EINVAL; - break; - } - if ((file->f_flags & O_APPEND) && !(arg & O_APPEND)) { err = file_has_perm(cred, file, FILE__WRITE); break; @@ -3080,21 +3313,21 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd, case F_GETFL: case F_GETOWN: case F_GETSIG: + case F_GETOWNER_UIDS: /* Just check FD__USE permission */ err = file_has_perm(cred, file, 0); break; case F_GETLK: case F_SETLK: case F_SETLKW: + case F_OFD_GETLK: + case F_OFD_SETLK: + case F_OFD_SETLKW: #if BITS_PER_LONG == 32 case F_GETLK64: case F_SETLK64: case F_SETLKW64: #endif - if (!file->f_path.dentry || !file->f_path.dentry->d_inode) { - err = -EINVAL; - break; - } err = file_has_perm(cred, file, FILE__LOCK); break; } @@ -3141,15 +3374,13 @@ static int selinux_file_receive(struct file *file) return file_has_perm(cred, file, file_to_av(file)); } -static int selinux_dentry_open(struct file *file, const struct cred *cred) +static int selinux_file_open(struct file *file, const struct cred *cred) { struct file_security_struct *fsec; - struct inode *inode; struct inode_security_struct *isec; - inode = file->f_path.dentry->d_inode; fsec = file->f_security; - isec = inode->i_security; + isec = file_inode(file)->i_security; /* * Save inode label and policy sequence number * at open-time so that selinux_file_permission @@ -3167,7 +3398,7 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred) * new inode label or new policy. * This check is not redundant - do not remove. */ - return inode_has_perm(cred, inode, open_file_to_av(file), NULL); + return file_path_has_perm(cred, file, open_file_to_av(file)); } /* task security operations */ @@ -3199,7 +3430,11 @@ static void selinux_cred_free(struct cred *cred) { struct task_security_struct *tsec = cred->security; - BUG_ON((unsigned long) cred->security < PAGE_SIZE); + /* + * cred->security == NULL if security_cred_alloc_blank() or + * security_prepare_creds() returned an error. 
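The old file_mmap hook is split into mmap_addr and mmap_file; the address-only check now runs the capability/DAC check first and then requires the MEMPROTECT mmap_zero permission only for mappings below the configured minimum address. A user-space sketch of that ordering, with an illustrative threshold and stand-in permission helpers:

#include <errno.h>
#include <stdio.h>

#define LOW_ADDR_LIMIT 65536UL    /* illustrative threshold, not the build-time value */

/* hypothetical stand-ins for the real checks */
static int dac_check(unsigned long addr)  { (void)addr; return 0; }
static int mmap_zero_allowed(void)        { return -EACCES; }

static int mmap_addr_check(unsigned long addr)
{
    int rc = dac_check(addr);         /* capability/DAC check first */

    if (rc)
        return rc;
    if (addr < LOW_ADDR_LIMIT)        /* low mappings additionally need mmap_zero */
        rc = mmap_zero_allowed();
    return rc;
}

int main(void)
{
    printf("map at 0x1000:  %d\n", mmap_addr_check(0x1000));   /* denied in this sketch */
    printf("map at 0x20000: %d\n", mmap_addr_check(0x20000));  /* allowed */
    return 0;
}
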
+ */ + BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE); cred->security = (void *) 0x7UL; kfree(tsec); } @@ -3285,7 +3520,7 @@ static int selinux_kernel_module_request(char *kmod_name) sid = task_sid(current); - COMMON_AUDIT_DATA_INIT(&ad, KMOD); + ad.type = LSM_AUDIT_DATA_KMOD; ad.u.kmod_name = kmod_name; return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM, @@ -3424,8 +3659,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb, if (ihlen < sizeof(_iph)) goto out; - ad->u.net.v4info.saddr = ih->saddr; - ad->u.net.v4info.daddr = ih->daddr; + ad->u.net->v4info.saddr = ih->saddr; + ad->u.net->v4info.daddr = ih->daddr; ret = 0; if (proto) @@ -3443,8 +3678,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb, if (th == NULL) break; - ad->u.net.sport = th->source; - ad->u.net.dport = th->dest; + ad->u.net->sport = th->source; + ad->u.net->dport = th->dest; break; } @@ -3459,8 +3694,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb, if (uh == NULL) break; - ad->u.net.sport = uh->source; - ad->u.net.dport = uh->dest; + ad->u.net->sport = uh->source; + ad->u.net->dport = uh->dest; break; } @@ -3475,8 +3710,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb, if (dh == NULL) break; - ad->u.net.sport = dh->dccph_sport; - ad->u.net.dport = dh->dccph_dport; + ad->u.net->sport = dh->dccph_sport; + ad->u.net->dport = dh->dccph_dport; break; } @@ -3496,19 +3731,20 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb, u8 nexthdr; int ret = -EINVAL, offset; struct ipv6hdr _ipv6h, *ip6; + __be16 frag_off; offset = skb_network_offset(skb); ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); if (ip6 == NULL) goto out; - ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr); - ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr); + ad->u.net->v6info.saddr = ip6->saddr; + ad->u.net->v6info.daddr = ip6->daddr; ret = 0; nexthdr = ip6->nexthdr; offset += sizeof(_ipv6h); - offset = ipv6_skip_exthdr(skb, offset, &nexthdr); + offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); if (offset < 0) goto out; @@ -3523,8 +3759,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb, if (th == NULL) break; - ad->u.net.sport = th->source; - ad->u.net.dport = th->dest; + ad->u.net->sport = th->source; + ad->u.net->dport = th->dest; break; } @@ -3535,8 +3771,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb, if (uh == NULL) break; - ad->u.net.sport = uh->source; - ad->u.net.dport = uh->dest; + ad->u.net->sport = uh->source; + ad->u.net->dport = uh->dest; break; } @@ -3547,8 +3783,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb, if (dh == NULL) break; - ad->u.net.sport = dh->dccph_sport; - ad->u.net.dport = dh->dccph_dport; + ad->u.net->sport = dh->dccph_sport; + ad->u.net->dport = dh->dccph_dport; break; } @@ -3568,13 +3804,13 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad, char *addrp; int ret; - switch (ad->u.net.family) { + switch (ad->u.net->family) { case PF_INET: ret = selinux_parse_skb_ipv4(skb, ad, proto); if (ret) goto parse_error; - addrp = (char *)(src ? &ad->u.net.v4info.saddr : - &ad->u.net.v4info.daddr); + addrp = (char *)(src ? &ad->u.net->v4info.saddr : + &ad->u.net->v4info.daddr); goto okay; #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) @@ -3582,8 +3818,8 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad, ret = selinux_parse_skb_ipv6(skb, ad, proto); if (ret) goto parse_error; - addrp = (char *)(src ? 
&ad->u.net.v6info.saddr : - &ad->u.net.v6info.daddr); + addrp = (char *)(src ? &ad->u.net->v6info.saddr : + &ad->u.net->v6info.daddr); goto okay; #endif /* IPV6 */ default: @@ -3625,8 +3861,12 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid) u32 nlbl_sid; u32 nlbl_type; - selinux_skb_xfrm_sid(skb, &xfrm_sid); - selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); + err = selinux_xfrm_skb_sid(skb, &xfrm_sid); + if (unlikely(err)) + return -EACCES; + err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid); + if (unlikely(err)) + return -EACCES; err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid); if (unlikely(err)) { @@ -3639,24 +3879,57 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid) return 0; } +/** + * selinux_conn_sid - Determine the child socket label for a connection + * @sk_sid: the parent socket's SID + * @skb_sid: the packet's SID + * @conn_sid: the resulting connection SID + * + * If @skb_sid is valid then the user:role:type information from @sk_sid is + * combined with the MLS information from @skb_sid in order to create + * @conn_sid. If @skb_sid is not valid then then @conn_sid is simply a copy + * of @sk_sid. Returns zero on success, negative values on failure. + * + */ +static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid) +{ + int err = 0; + + if (skb_sid != SECSID_NULL) + err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid); + else + *conn_sid = sk_sid; + + return err; +} + /* socket security operations */ -static u32 socket_sockcreate_sid(const struct task_security_struct *tsec) +static int socket_sockcreate_sid(const struct task_security_struct *tsec, + u16 secclass, u32 *socksid) { - return tsec->sockcreate_sid ? 
: tsec->sid; + if (tsec->sockcreate_sid > SECSID_NULL) { + *socksid = tsec->sockcreate_sid; + return 0; + } + + return security_transition_sid(tsec->sid, tsec->sid, secclass, NULL, + socksid); } static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms) { struct sk_security_struct *sksec = sk->sk_security; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; u32 tsid = task_sid(task); if (sksec->sid == SECINITSID_KERNEL) return 0; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.sk = sk; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->sk = sk; return avc_has_perm(tsid, sksec->sid, sksec->sclass, perms, &ad); } @@ -3667,12 +3940,16 @@ static int selinux_socket_create(int family, int type, const struct task_security_struct *tsec = current_security(); u32 newsid; u16 secclass; + int rc; if (kern) return 0; - newsid = socket_sockcreate_sid(tsec); secclass = socket_type_to_security_class(family, type, protocol); + rc = socket_sockcreate_sid(tsec, secclass, &newsid); + if (rc) + return rc; + return avc_has_perm(tsec->sid, newsid, secclass, SOCKET__CREATE, NULL); } @@ -3684,12 +3961,16 @@ static int selinux_socket_post_create(struct socket *sock, int family, struct sk_security_struct *sksec; int err = 0; + isec->sclass = socket_type_to_security_class(family, type, protocol); + if (kern) isec->sid = SECINITSID_KERNEL; - else - isec->sid = socket_sockcreate_sid(tsec); + else { + err = socket_sockcreate_sid(tsec, isec->sclass, &(isec->sid)); + if (err) + return err; + } - isec->sclass = socket_type_to_security_class(family, type, protocol); isec->initialized = 1; if (sock->sk) { @@ -3726,6 +4007,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in char *addrp; struct sk_security_struct *sksec = sk->sk_security; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; struct sockaddr_in *addr4 = NULL; struct sockaddr_in6 *addr6 = NULL; unsigned short snum; @@ -3744,16 +4026,17 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in if (snum) { int low, high; - inet_get_local_port_range(&low, &high); + inet_get_local_port_range(sock_net(sk), &low, &high); if (snum < max(PROT_SOCK, low) || snum > high) { err = sel_netport_sid(sk->sk_protocol, snum, &sid); if (err) goto out; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.sport = htons(snum); - ad.u.net.family = family; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->sport = htons(snum); + ad.u.net->family = family; err = avc_has_perm(sksec->sid, sid, sksec->sclass, SOCKET__NAME_BIND, &ad); @@ -3784,14 +4067,15 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in if (err) goto out; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.sport = htons(snum); - ad.u.net.family = family; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->sport = htons(snum); + ad.u.net->family = family; if (family == PF_INET) - ad.u.net.v4info.saddr = addr4->sin_addr.s_addr; + ad.u.net->v4info.saddr = addr4->sin_addr.s_addr; else - ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr); + ad.u.net->v6info.saddr = addr6->sin6_addr; err = avc_has_perm(sksec->sid, sid, sksec->sclass, node_perm, &ad); @@ -3818,6 +4102,7 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address, if (sksec->sclass == SECCLASS_TCP_SOCKET || sksec->sclass == SECCLASS_DCCP_SOCKET) { struct common_audit_data ad; + struct lsm_network_audit net = {0,}; struct sockaddr_in *addr4 = NULL; struct sockaddr_in6 
*addr6 = NULL; unsigned short snum; @@ -3842,9 +4127,10 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address, perm = (sksec->sclass == SECCLASS_TCP_SOCKET) ? TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.dport = htons(snum); - ad.u.net.family = sk->sk_family; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->dport = htons(snum); + ad.u.net->family = sk->sk_family; err = avc_has_perm(sksec->sid, sid, sksec->sclass, perm, &ad); if (err) goto out; @@ -3925,18 +4211,20 @@ static int selinux_socket_shutdown(struct socket *sock, int how) return sock_has_perm(current, sock->sk, SOCKET__SHUTDOWN); } -static int selinux_socket_unix_stream_connect(struct socket *sock, - struct socket *other, +static int selinux_socket_unix_stream_connect(struct sock *sock, + struct sock *other, struct sock *newsk) { - struct sk_security_struct *sksec_sock = sock->sk->sk_security; - struct sk_security_struct *sksec_other = other->sk->sk_security; + struct sk_security_struct *sksec_sock = sock->sk_security; + struct sk_security_struct *sksec_other = other->sk_security; struct sk_security_struct *sksec_new = newsk->sk_security; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; int err; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.sk = other->sk; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->sk = other; err = avc_has_perm(sksec_sock->sid, sksec_other->sid, sksec_other->sclass, @@ -3963,9 +4251,11 @@ static int selinux_socket_unix_may_send(struct socket *sock, struct sk_security_struct *ssec = sock->sk->sk_security; struct sk_security_struct *osec = other->sk->sk_security; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.sk = other->sk; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->sk = other->sk; return avc_has_perm(ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO, &ad); @@ -3999,14 +4289,15 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb, { int err = 0; struct sk_security_struct *sksec = sk->sk_security; - u32 peer_sid; u32 sk_sid = sksec->sid; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; char *addrp; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.netif = skb->skb_iif; - ad.u.net.family = family; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->netif = skb->skb_iif; + ad.u.net->family = family; err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL); if (err) return err; @@ -4018,20 +4309,10 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb, return err; } - if (selinux_policycap_netpeer) { - err = selinux_skb_peerlbl_sid(skb, family, &peer_sid); - if (err) - return err; - err = avc_has_perm(sk_sid, peer_sid, - SECCLASS_PEER, PEER__RECV, &ad); - if (err) - selinux_netlbl_err(skb, err, 0); - } else { - err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad); - if (err) - return err; - err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad); - } + err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad); + if (err) + return err; + err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad); return err; } @@ -4043,6 +4324,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) u16 family = sk->sk_family; u32 sk_sid = sksec->sid; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; char *addrp; u8 secmark_active; u8 peerlbl_active; @@ -4062,13 +4344,14 @@ static int 
selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) return selinux_sock_rcv_skb_compat(sk, skb, family); secmark_active = selinux_secmark_enabled(); - peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled(); + peerlbl_active = selinux_peerlbl_enabled(); if (!secmark_active && !peerlbl_active) return 0; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.netif = skb->skb_iif; - ad.u.net.family = family; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->netif = skb->skb_iif; + ad.u.net->family = family; err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL); if (err) return err; @@ -4087,8 +4370,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) } err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, PEER__RECV, &ad); - if (err) + if (err) { selinux_netlbl_err(skb, err, 0); + return err; + } } if (secmark_active) { @@ -4225,27 +4510,18 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb, { struct sk_security_struct *sksec = sk->sk_security; int err; - u16 family = sk->sk_family; - u32 newsid; + u16 family = req->rsk_ops->family; + u32 connsid; u32 peersid; - /* handle mapped IPv4 packets arriving via IPv6 sockets */ - if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) - family = PF_INET; - err = selinux_skb_peerlbl_sid(skb, family, &peersid); if (err) return err; - if (peersid == SECSID_NULL) { - req->secid = sksec->sid; - req->peer_secid = SECSID_NULL; - } else { - err = security_sid_mls_copy(sksec->sid, peersid, &newsid); - if (err) - return err; - req->secid = newsid; - req->peer_secid = peersid; - } + err = selinux_conn_sid(sksec->sid, peersid, &connsid); + if (err) + return err; + req->secid = connsid; + req->peer_secid = peersid; return selinux_netlbl_inet_conn_request(req, family); } @@ -4279,6 +4555,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb) selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid); } +static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk) +{ + skb_set_owner_w(skb, sk); +} + static int selinux_secmark_relabel_packet(u32 sid) { const struct task_security_struct *__tsec; @@ -4303,7 +4584,25 @@ static void selinux_secmark_refcount_dec(void) static void selinux_req_classify_flow(const struct request_sock *req, struct flowi *fl) { - fl->secid = req->secid; + fl->flowi_secid = req->secid; +} + +static int selinux_tun_dev_alloc_security(void **security) +{ + struct tun_security_struct *tunsec; + + tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL); + if (!tunsec) + return -ENOMEM; + tunsec->sid = current_sid(); + + *security = tunsec; + return 0; +} + +static void selinux_tun_dev_free_security(void *security) +{ + kfree(security); } static int selinux_tun_dev_create(void) @@ -4321,8 +4620,17 @@ static int selinux_tun_dev_create(void) NULL); } -static void selinux_tun_dev_post_create(struct sock *sk) +static int selinux_tun_dev_attach_queue(void *security) { + struct tun_security_struct *tunsec = security; + + return avc_has_perm(current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET, + TUN_SOCKET__ATTACH_QUEUE, NULL); +} + +static int selinux_tun_dev_attach(struct sock *sk, void *security) +{ + struct tun_security_struct *tunsec = security; struct sk_security_struct *sksec = sk->sk_security; /* we don't currently perform any NetLabel based labeling here and it @@ -4332,20 +4640,19 @@ static void selinux_tun_dev_post_create(struct sock *sk) * cause confusion to the TUN user that had no idea network labeling * protocols were being used */ - /* see 
the comments in selinux_tun_dev_create() about why we don't use - * the sockcreate SID here */ - - sksec->sid = current_sid(); + sksec->sid = tunsec->sid; sksec->sclass = SECCLASS_TUN_SOCKET; + + return 0; } -static int selinux_tun_dev_attach(struct sock *sk) +static int selinux_tun_dev_open(void *security) { - struct sk_security_struct *sksec = sk->sk_security; + struct tun_security_struct *tunsec = security; u32 sid = current_sid(); int err; - err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET, + err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET, TUN_SOCKET__RELABELFROM, NULL); if (err) return err; @@ -4353,8 +4660,7 @@ static int selinux_tun_dev_attach(struct sock *sk) TUN_SOCKET__RELABELTO, NULL); if (err) return err; - - sksec->sid = sid; + tunsec->sid = sid; return 0; } @@ -4366,7 +4672,7 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb) struct nlmsghdr *nlh; struct sk_security_struct *sksec = sk->sk_security; - if (skb->len < NLMSG_SPACE(0)) { + if (skb->len < NLMSG_HDRLEN) { err = -EINVAL; goto out; } @@ -4403,6 +4709,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex, char *addrp; u32 peer_sid; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; u8 secmark_active; u8 netlbl_active; u8 peerlbl_active; @@ -4412,16 +4719,17 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex, secmark_active = selinux_secmark_enabled(); netlbl_active = netlbl_enabled(); - peerlbl_active = netlbl_active || selinux_xfrm_enabled(); + peerlbl_active = selinux_peerlbl_enabled(); if (!secmark_active && !peerlbl_active) return NF_ACCEPT; if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0) return NF_DROP; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.netif = ifindex; - ad.u.net.family = family; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->netif = ifindex; + ad.u.net->family = family; if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0) return NF_DROP; @@ -4450,7 +4758,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex, return NF_ACCEPT; } -static unsigned int selinux_ipv4_forward(unsigned int hooknum, +static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, @@ -4460,7 +4768,7 @@ static unsigned int selinux_ipv4_forward(unsigned int hooknum, } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -static unsigned int selinux_ipv6_forward(unsigned int hooknum, +static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, @@ -4473,6 +4781,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum, static unsigned int selinux_ip_output(struct sk_buff *skb, u16 family) { + struct sock *sk; u32 sid; if (!netlbl_enabled()) @@ -4481,8 +4790,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb, /* we do this in the LOCAL_OUT path and not the POST_ROUTING path * because we want to make sure we apply the necessary labeling * before IPsec is applied so we can leverage AH protection */ - if (skb->sk) { - struct sk_security_struct *sksec = skb->sk->sk_security; + sk = skb->sk; + if (sk) { + struct sk_security_struct *sksec; + + if (sk->sk_state == TCP_LISTEN) + /* if the socket is the listening state then this + * packet is a SYN-ACK packet which means it needs to + * be labeled based on the connection/request_sock and + * not the parent socket. 
unfortunately, we can't + * lookup the request_sock yet as it isn't queued on + * the parent socket until after the SYN-ACK is sent. + * the "solution" is to simply pass the packet as-is + * as any IP option based labeling should be copied + * from the initial connection request (in the IP + * layer). it is far from ideal, but until we get a + * security label in the packet itself this is the + * best we can do. */ + return NF_ACCEPT; + + /* standard practice, label using the parent socket */ + sksec = sk->sk_security; sid = sksec->sid; } else sid = SECINITSID_KERNEL; @@ -4492,7 +4820,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb, return NF_ACCEPT; } -static unsigned int selinux_ipv4_output(unsigned int hooknum, +static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, @@ -4508,6 +4836,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb, struct sock *sk = skb->sk; struct sk_security_struct *sksec; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; char *addrp; u8 proto; @@ -4515,9 +4844,10 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb, return NF_ACCEPT; sksec = sk->sk_security; - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.netif = ifindex; - ad.u.net.family = family; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->netif = ifindex; + ad.u.net->family = family; if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto)) return NF_DROP; @@ -4526,9 +4856,8 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb, SECCLASS_PACKET, PACKET__SEND, &ad)) return NF_DROP_ERR(-ECONNREFUSED); - if (selinux_policycap_netpeer) - if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto)) - return NF_DROP_ERR(-ECONNREFUSED); + if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto)) + return NF_DROP_ERR(-ECONNREFUSED); return NF_ACCEPT; } @@ -4540,6 +4869,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, u32 peer_sid; struct sock *sk; struct common_audit_data ad; + struct lsm_network_audit net = {0,}; char *addrp; u8 secmark_active; u8 peerlbl_active; @@ -4550,57 +4880,92 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, * as fast and as clean as possible. */ if (!selinux_policycap_netpeer) return selinux_ip_postroute_compat(skb, ifindex, family); + + secmark_active = selinux_secmark_enabled(); + peerlbl_active = selinux_peerlbl_enabled(); + if (!secmark_active && !peerlbl_active) + return NF_ACCEPT; + + sk = skb->sk; + #ifdef CONFIG_XFRM /* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec * packet transformation so allow the packet to pass without any checks * since we'll have another chance to perform access control checks * when the packet is on it's final way out. * NOTE: there appear to be some IPv6 multicast cases where skb->dst - * is NULL, in this case go ahead and apply access control. */ - if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL) + * is NULL, in this case go ahead and apply access control. + * NOTE: if this is a local socket (skb->sk != NULL) that is in the + * TCP listening state we cannot wait until the XFRM processing + * is done as we will miss out on the SA label if we do; + * unfortunately, this means more work, but it is only once per + * connection. 
*/ + if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL && + !(sk != NULL && sk->sk_state == TCP_LISTEN)) return NF_ACCEPT; #endif - secmark_active = selinux_secmark_enabled(); - peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled(); - if (!secmark_active && !peerlbl_active) - return NF_ACCEPT; - /* if the packet is being forwarded then get the peer label from the - * packet itself; otherwise check to see if it is from a local - * application or the kernel, if from an application get the peer label - * from the sending socket, otherwise use the kernel's sid */ - sk = skb->sk; if (sk == NULL) { - switch (family) { - case PF_INET: - if (IPCB(skb)->flags & IPSKB_FORWARDED) - secmark_perm = PACKET__FORWARD_OUT; - else - secmark_perm = PACKET__SEND; - break; - case PF_INET6: - if (IP6CB(skb)->flags & IP6SKB_FORWARDED) - secmark_perm = PACKET__FORWARD_OUT; - else - secmark_perm = PACKET__SEND; - break; - default: - return NF_DROP_ERR(-ECONNREFUSED); - } - if (secmark_perm == PACKET__FORWARD_OUT) { + /* Without an associated socket the packet is either coming + * from the kernel or it is being forwarded; check the packet + * to determine which and if the packet is being forwarded + * query the packet directly to determine the security label. */ + if (skb->skb_iif) { + secmark_perm = PACKET__FORWARD_OUT; if (selinux_skb_peerlbl_sid(skb, family, &peer_sid)) return NF_DROP; - } else + } else { + secmark_perm = PACKET__SEND; peer_sid = SECINITSID_KERNEL; + } + } else if (sk->sk_state == TCP_LISTEN) { + /* Locally generated packet but the associated socket is in the + * listening state which means this is a SYN-ACK packet. In + * this particular case the correct security label is assigned + * to the connection/request_sock but unfortunately we can't + * query the request_sock as it isn't queued on the parent + * socket until after the SYN-ACK packet is sent; the only + * viable choice is to regenerate the label like we do in + * selinux_inet_conn_request(). See also selinux_ip_output() + * for similar problems. */ + u32 skb_sid; + struct sk_security_struct *sksec = sk->sk_security; + if (selinux_skb_peerlbl_sid(skb, family, &skb_sid)) + return NF_DROP; + /* At this point, if the returned skb peerlbl is SECSID_NULL + * and the packet has been through at least one XFRM + * transformation then we must be dealing with the "final" + * form of labeled IPsec packet; since we've already applied + * all of our access controls on this packet we can safely + * pass the packet. */ + if (skb_sid == SECSID_NULL) { + switch (family) { + case PF_INET: + if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED) + return NF_ACCEPT; + break; + case PF_INET6: + if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) + return NF_ACCEPT; + default: + return NF_DROP_ERR(-ECONNREFUSED); + } + } + if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid)) + return NF_DROP; + secmark_perm = PACKET__SEND; } else { + /* Locally generated packet, fetch the security label from the + * associated socket. 
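The rewritten selinux_ip_postroute splits outgoing packets into three cases: no associated socket (forwarded vs. kernel-generated), a socket still in the listening state (a SYN-ACK whose label has to be regenerated from the packet), and an ordinary local socket. A condensed user-space sketch of just that case split, with toy SID values and a hypothetical helper that merely prefers the packet label where the real code merges MLS information:

#include <stdio.h>

enum { SID_NULL = 0, SID_KERNEL = 1 };

struct out_pkt {
    int has_sock;
    int sock_listening;
    unsigned sock_sid;
    unsigned skb_peer_sid;   /* label carried by the packet itself */
    int forwarded;           /* packet arrived on an input interface */
};

/* simplified stand-in: the real code combines MLS info from skb into sock_sid */
static unsigned conn_sid(unsigned sock_sid, unsigned skb_sid)
{
    return skb_sid != SID_NULL ? skb_sid : sock_sid;
}

static unsigned peer_sid_for(const struct out_pkt *p, const char **perm)
{
    if (!p->has_sock) {
        if (p->forwarded) {
            *perm = "forward_out";
            return p->skb_peer_sid;     /* label taken from the packet */
        }
        *perm = "send";
        return SID_KERNEL;              /* kernel-generated traffic */
    }
    if (p->sock_listening) {            /* SYN-ACK: rebuild the connection label */
        *perm = "send";
        return conn_sid(p->sock_sid, p->skb_peer_sid);
    }
    *perm = "send";
    return p->sock_sid;                 /* ordinary local socket */
}

int main(void)
{
    struct out_pkt p = { .has_sock = 1, .sock_listening = 1,
                         .sock_sid = 7, .skb_peer_sid = 9 };
    const char *perm;

    printf("peer sid %u, perm %s\n", peer_sid_for(&p, &perm), perm);
    return 0;
}
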
*/ struct sk_security_struct *sksec = sk->sk_security; peer_sid = sksec->sid; secmark_perm = PACKET__SEND; } - COMMON_AUDIT_DATA_INIT(&ad, NET); - ad.u.net.netif = ifindex; - ad.u.net.family = family; + ad.type = LSM_AUDIT_DATA_NET; + ad.u.net = &net; + ad.u.net->netif = ifindex; + ad.u.net->family = family; if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL)) return NF_DROP; @@ -4629,7 +4994,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex, return NF_ACCEPT; } -static unsigned int selinux_ipv4_postroute(unsigned int hooknum, +static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, @@ -4639,7 +5004,7 @@ static unsigned int selinux_ipv4_postroute(unsigned int hooknum, } #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) -static unsigned int selinux_ipv6_postroute(unsigned int hooknum, +static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops, struct sk_buff *skb, const struct net_device *in, const struct net_device *out, @@ -4662,22 +5027,6 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb) return selinux_nlmsg_perm(sk, skb); } -static int selinux_netlink_recv(struct sk_buff *skb, int capability) -{ - int err; - struct common_audit_data ad; - - err = cap_netlink_recv(skb, capability); - if (err) - return err; - - COMMON_AUDIT_DATA_INIT(&ad, CAP); - ad.u.cap = capability; - - return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid, - SECCLASS_CAPABILITY, CAP_TO_MASK(capability), &ad); -} - static int ipc_alloc_security(struct task_struct *task, struct kern_ipc_perm *perm, u16 sclass) @@ -4735,7 +5084,7 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms, isec = ipc_perms->security; - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = ipc_perms->key; return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad); @@ -4765,7 +5114,7 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq) isec = msq->q_perm.security; - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = msq->q_perm.key; rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ, @@ -4790,7 +5139,7 @@ static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg) isec = msq->q_perm.security; - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = msq->q_perm.key; return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ, @@ -4845,12 +5194,12 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg, * message queue this message will be stored in */ rc = security_transition_sid(sid, isec->sid, SECCLASS_MSG, - &msec->sid); + NULL, &msec->sid); if (rc) return rc; } - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = msq->q_perm.key; /* Can this process write to the queue? 
*/ @@ -4881,7 +5230,7 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg, isec = msq->q_perm.security; msec = msg->security; - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = msq->q_perm.key; rc = avc_has_perm(sid, isec->sid, @@ -4906,7 +5255,7 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp) isec = shp->shm_perm.security; - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = shp->shm_perm.key; rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM, @@ -4931,7 +5280,7 @@ static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg) isec = shp->shm_perm.security; - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = shp->shm_perm.key; return avc_has_perm(sid, isec->sid, SECCLASS_SHM, @@ -4998,7 +5347,7 @@ static int selinux_sem_alloc_security(struct sem_array *sma) isec = sma->sem_perm.security; - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = sma->sem_perm.key; rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM, @@ -5023,7 +5372,7 @@ static int selinux_sem_associate(struct sem_array *sma, int semflg) isec = sma->sem_perm.security; - COMMON_AUDIT_DATA_INIT(&ad, IPC); + ad.type = LSM_AUDIT_DATA_IPC; ad.u.ipc_id = sma->sem_perm.key; return avc_has_perm(sid, isec->sid, SECCLASS_SEM, @@ -5201,10 +5550,25 @@ static int selinux_setprocattr(struct task_struct *p, str[size-1] = 0; size--; } - error = security_context_to_sid(value, size, &sid); + error = security_context_to_sid(value, size, &sid, GFP_KERNEL); if (error == -EINVAL && !strcmp(name, "fscreate")) { - if (!capable(CAP_MAC_ADMIN)) + if (!capable(CAP_MAC_ADMIN)) { + struct audit_buffer *ab; + size_t audit_size; + + /* We strip a nul only if it is at the end, otherwise the + * context contains a nul and we should audit that */ + if (str[size - 1] == '\0') + audit_size = size - 1; + else + audit_size = size; + ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR); + audit_log_format(ab, "op=fscreate invalid_context="); + audit_log_n_untrustedstring(ab, value, audit_size); + audit_log_end(ab); + return error; + } error = security_context_to_sid_force(value, size, &sid); } @@ -5256,11 +5620,11 @@ static int selinux_setprocattr(struct task_struct *p, /* Check for ptracing, and update the task SID if ok. Otherwise, leave SID unchanged and fail. 
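In both the setxattr and setprocattr hunks, a rejected context is audited with a trailing NUL stripped only when it is the last byte, so an embedded NUL still shows up in the record. A small sketch of that sizing rule on its own, outside the audit framework:

#include <stdio.h>

/* how many bytes of the untrusted value should be logged */
static size_t audit_len(const char *value, size_t size)
{
    if (!size)
        return 0;
    return value[size - 1] == '\0' ? size - 1 : size;
}

int main(void)
{
    const char with_nul[] = { 'f', 'o', 'o', '\0' };   /* NUL at the end */
    const char embedded[] = { 'f', '\0', 'o' };        /* NUL in the middle */

    printf("%zu\n", audit_len(with_nul, sizeof(with_nul)));   /* 3: trailing NUL dropped */
    printf("%zu\n", audit_len(embedded, sizeof(embedded)));   /* 3: embedded NUL kept */
    return 0;
}
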
*/ ptsid = 0; - task_lock(p); - tracer = tracehook_tracer_task(p); + rcu_read_lock(); + tracer = ptrace_parent(p); if (tracer) ptsid = task_sid(tracer); - task_unlock(p); + rcu_read_unlock(); if (tracer) { error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, @@ -5283,6 +5647,11 @@ abort_change: return error; } +static int selinux_ismaclabel(const char *name) +{ + return (strcmp(name, XATTR_SELINUX_SUFFIX) == 0); +} + static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) { return security_sid_to_context(secid, secdata, seclen); @@ -5290,7 +5659,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid) { - return security_context_to_sid(secdata, seclen, secid); + return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL); } static void selinux_release_secctx(char *secdata, u32 seclen) @@ -5356,7 +5725,7 @@ static void selinux_key_free(struct key *k) static int selinux_key_permission(key_ref_t key_ref, const struct cred *cred, - key_perm_t perm) + unsigned perm) { struct key *key; struct key_security_struct *ksec; @@ -5399,7 +5768,6 @@ static struct security_operations selinux_ops = { .ptrace_traceme = selinux_ptrace_traceme, .capget = selinux_capget, .capset = selinux_capset, - .sysctl = selinux_sysctl, .capable = selinux_capable, .quotactl = selinux_quotactl, .quota_on = selinux_quota_on, @@ -5407,7 +5775,6 @@ static struct security_operations selinux_ops = { .vm_enough_memory = selinux_vm_enough_memory, .netlink_send = selinux_netlink_send, - .netlink_recv = selinux_netlink_recv, .bprm_set_creds = selinux_bprm_set_creds, .bprm_committing_creds = selinux_bprm_committing_creds, @@ -5417,6 +5784,7 @@ static struct security_operations selinux_ops = { .sb_alloc_security = selinux_sb_alloc_security, .sb_free_security = selinux_sb_free_security, .sb_copy_data = selinux_sb_copy_data, + .sb_remount = selinux_sb_remount, .sb_kern_mount = selinux_sb_kern_mount, .sb_show_options = selinux_sb_show_options, .sb_statfs = selinux_sb_statfs, @@ -5426,6 +5794,7 @@ static struct security_operations selinux_ops = { .sb_clone_mnt_opts = selinux_sb_clone_mnt_opts, .sb_parse_opts_str = selinux_parse_opts_str, + .dentry_init_security = selinux_dentry_init_security, .inode_alloc_security = selinux_inode_alloc_security, .inode_free_security = selinux_inode_free_security, @@ -5457,7 +5826,8 @@ static struct security_operations selinux_ops = { .file_alloc_security = selinux_file_alloc_security, .file_free_security = selinux_file_free_security, .file_ioctl = selinux_file_ioctl, - .file_mmap = selinux_file_mmap, + .mmap_file = selinux_mmap_file, + .mmap_addr = selinux_mmap_addr, .file_mprotect = selinux_file_mprotect, .file_lock = selinux_file_lock, .file_fcntl = selinux_file_fcntl, @@ -5465,7 +5835,7 @@ static struct security_operations selinux_ops = { .file_send_sigiotask = selinux_file_send_sigiotask, .file_receive = selinux_file_receive, - .dentry_open = selinux_dentry_open, + .file_open = selinux_file_open, .task_create = selinux_task_create, .cred_alloc_blank = selinux_cred_alloc_blank, @@ -5520,6 +5890,7 @@ static struct security_operations selinux_ops = { .getprocattr = selinux_getprocattr, .setprocattr = selinux_setprocattr, + .ismaclabel = selinux_ismaclabel, .secid_to_secctx = selinux_secid_to_secctx, .secctx_to_secid = selinux_secctx_to_secid, .release_secctx = selinux_release_secctx, @@ -5558,16 +5929,21 @@ static struct security_operations selinux_ops = { .secmark_refcount_inc = 
selinux_secmark_refcount_inc, .secmark_refcount_dec = selinux_secmark_refcount_dec, .req_classify_flow = selinux_req_classify_flow, + .tun_dev_alloc_security = selinux_tun_dev_alloc_security, + .tun_dev_free_security = selinux_tun_dev_free_security, .tun_dev_create = selinux_tun_dev_create, - .tun_dev_post_create = selinux_tun_dev_post_create, + .tun_dev_attach_queue = selinux_tun_dev_attach_queue, .tun_dev_attach = selinux_tun_dev_attach, + .tun_dev_open = selinux_tun_dev_open, + .skb_owned_by = selinux_skb_owned_by, #ifdef CONFIG_SECURITY_NETWORK_XFRM .xfrm_policy_alloc_security = selinux_xfrm_policy_alloc, .xfrm_policy_clone_security = selinux_xfrm_policy_clone, .xfrm_policy_free_security = selinux_xfrm_policy_free, .xfrm_policy_delete_security = selinux_xfrm_policy_delete, - .xfrm_state_alloc_security = selinux_xfrm_state_alloc, + .xfrm_state_alloc = selinux_xfrm_state_alloc, + .xfrm_state_alloc_acquire = selinux_xfrm_state_alloc_acquire, .xfrm_state_free_security = selinux_xfrm_state_free, .xfrm_state_delete_security = selinux_xfrm_state_delete, .xfrm_policy_lookup = selinux_xfrm_policy_lookup, @@ -5649,21 +6025,21 @@ static struct nf_hook_ops selinux_ipv4_ops[] = { { .hook = selinux_ipv4_postroute, .owner = THIS_MODULE, - .pf = PF_INET, + .pf = NFPROTO_IPV4, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP_PRI_SELINUX_LAST, }, { .hook = selinux_ipv4_forward, .owner = THIS_MODULE, - .pf = PF_INET, + .pf = NFPROTO_IPV4, .hooknum = NF_INET_FORWARD, .priority = NF_IP_PRI_SELINUX_FIRST, }, { .hook = selinux_ipv4_output, .owner = THIS_MODULE, - .pf = PF_INET, + .pf = NFPROTO_IPV4, .hooknum = NF_INET_LOCAL_OUT, .priority = NF_IP_PRI_SELINUX_FIRST, } @@ -5675,14 +6051,14 @@ static struct nf_hook_ops selinux_ipv6_ops[] = { { .hook = selinux_ipv6_postroute, .owner = THIS_MODULE, - .pf = PF_INET6, + .pf = NFPROTO_IPV6, .hooknum = NF_INET_POST_ROUTING, .priority = NF_IP6_PRI_SELINUX_LAST, }, { .hook = selinux_ipv6_forward, .owner = THIS_MODULE, - .pf = PF_INET6, + .pf = NFPROTO_IPV6, .hooknum = NF_INET_FORWARD, .priority = NF_IP6_PRI_SELINUX_FIRST, } @@ -5740,8 +6116,6 @@ static int selinux_disabled; int selinux_disable(void) { - extern void exit_sel_fs(void); - if (ss_initialized) { /* Not permitted after initial policy load. */ return -EINVAL; diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h index e94e82f7381..ddf8eec03f2 100644 --- a/security/selinux/include/avc.h +++ b/security/selinux/include/avc.h @@ -15,8 +15,6 @@ #include <linux/audit.h> #include <linux/lsm_audit.h> #include <linux/in6.h> -#include <linux/path.h> -#include <asm/system.h> #include "flask.h" #include "av_permissions.h" #include "security.h" @@ -42,7 +40,6 @@ struct sk_buff; */ struct avc_cache_stats { unsigned int lookups; - unsigned int hits; unsigned int misses; unsigned int allocations; unsigned int reclaims; @@ -50,16 +47,99 @@ struct avc_cache_stats { }; /* + * We only need this data after we have decided to send an audit message. 
+ */ +struct selinux_audit_data { + u32 ssid; + u32 tsid; + u16 tclass; + u32 requested; + u32 audited; + u32 denied; + int result; +}; + +/* * AVC operations */ void __init avc_init(void); -void avc_audit(u32 ssid, u32 tsid, - u16 tclass, u32 requested, - struct av_decision *avd, - int result, - struct common_audit_data *a); +static inline u32 avc_audit_required(u32 requested, + struct av_decision *avd, + int result, + u32 auditdeny, + u32 *deniedp) +{ + u32 denied, audited; + denied = requested & ~avd->allowed; + if (unlikely(denied)) { + audited = denied & avd->auditdeny; + /* + * auditdeny is TRICKY! Setting a bit in + * this field means that ANY denials should NOT be audited if + * the policy contains an explicit dontaudit rule for that + * permission. Take notice that this is unrelated to the + * actual permissions that were denied. As an example lets + * assume: + * + * denied == READ + * avd.auditdeny & ACCESS == 0 (not set means explicit rule) + * auditdeny & ACCESS == 1 + * + * We will NOT audit the denial even though the denied + * permission was READ and the auditdeny checks were for + * ACCESS + */ + if (auditdeny && !(auditdeny & avd->auditdeny)) + audited = 0; + } else if (result) + audited = denied = requested; + else + audited = requested & avd->auditallow; + *deniedp = denied; + return audited; +} + +int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass, + u32 requested, u32 audited, u32 denied, int result, + struct common_audit_data *a, + unsigned flags); + +/** + * avc_audit - Audit the granting or denial of permissions. + * @ssid: source security identifier + * @tsid: target security identifier + * @tclass: target security class + * @requested: requested permissions + * @avd: access vector decisions + * @result: result from avc_has_perm_noaudit + * @a: auxiliary audit data + * @flags: VFS walk flags + * + * Audit the granting or denial of permissions in accordance + * with the policy. This function is typically called by + * avc_has_perm() after a permission check, but can also be + * called directly by callers who use avc_has_perm_noaudit() + * in order to separate the permission check from the auditing. + * For example, this separation is useful when the permission check must + * be performed under a lock, to allow the lock to be released + * before calling the auditing code. + */ +static inline int avc_audit(u32 ssid, u32 tsid, + u16 tclass, u32 requested, + struct av_decision *avd, + int result, + struct common_audit_data *a) +{ + u32 audited, denied; + audited = avc_audit_required(requested, avd, result, 0, &denied); + if (likely(!audited)) + return 0; + return slow_avc_audit(ssid, tsid, tclass, + requested, audited, denied, result, + a, 0); +} #define AVC_STRICT 1 /* Ignore permissive mode. 
*/ int avc_has_perm_noaudit(u32 ssid, u32 tsid, @@ -82,11 +162,7 @@ u32 avc_policy_seqno(void); #define AVC_CALLBACK_AUDITDENY_ENABLE 64 #define AVC_CALLBACK_AUDITDENY_DISABLE 128 -int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid, - u16 tclass, u32 perms, - u32 *out_retained), - u32 events, u32 ssid, u32 tsid, - u16 tclass, u32 perms); +int avc_add_callback(int (*callback)(u32 event), u32 events); /* Exported to selinuxfs */ int avc_get_hash_stats(char *page); diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h index 4677aa519b0..d5c328452df 100644 --- a/security/selinux/include/avc_ss.h +++ b/security/selinux/include/avc_ss.h @@ -18,5 +18,11 @@ struct security_class_mapping { extern struct security_class_mapping secclass_map[]; +/* + * The security server must be initialized before + * any labeling or access decisions can be provided. + */ +extern int ss_initialized; + #endif /* _SELINUX_AVC_SS_H_ */ diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 8858d2b2d4b..be491a74c1e 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -12,6 +12,10 @@ #define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \ "write", "associate", "unix_read", "unix_write" +/* + * Note: The name for any socket class should be suffixed by "socket", + * and doesn't contain more than one substr of "socket". + */ struct security_class_mapping secclass_map[] = { { "security", { "compute_av", "compute_create", "compute_member", @@ -132,8 +136,7 @@ struct security_class_mapping secclass_map[] = { { "appletalk_socket", { COMMON_SOCK_PERMS, NULL } }, { "packet", - { "send", "recv", "relabelto", "flow_in", "flow_out", - "forward_in", "forward_out", NULL } }, + { "send", "recv", "relabelto", "forward_in", "forward_out", NULL } }, { "key", { "view", "read", "write", "search", "link", "setattr", "create", NULL } }, @@ -142,9 +145,11 @@ struct security_class_mapping secclass_map[] = { "node_bind", "name_connect", NULL } }, { "memprotect", { "mmap_zero", NULL } }, { "peer", { "recv", NULL } }, - { "capability2", { "mac_override", "mac_admin", NULL } }, + { "capability2", + { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend", + "audit_read", NULL } }, { "kernel_service", { "use_as_override", "create_files_as", NULL } }, { "tun_socket", - { COMMON_SOCK_PERMS, NULL } }, + { COMMON_SOCK_PERMS, "attach_queue", NULL } }, { NULL } }; diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h index ce23edd128b..43d507242b4 100644 --- a/security/selinux/include/netif.h +++ b/security/selinux/include/netif.h @@ -8,7 +8,7 @@ * * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. 
- * Paul Moore, <paul.moore@hp.com> + * Paul Moore <paul@paul-moore.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h index cf2f628e6e2..8c59b8f150e 100644 --- a/security/selinux/include/netlabel.h +++ b/security/selinux/include/netlabel.h @@ -1,7 +1,7 @@ /* * SELinux interface to the NetLabel subsystem * - * Author : Paul Moore <paul.moore@hp.com> + * Author: Paul Moore <paul@paul-moore.com> * */ diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h index 1b94450d11d..df7a5ed6c69 100644 --- a/security/selinux/include/netnode.h +++ b/security/selinux/include/netnode.h @@ -6,7 +6,7 @@ * needed to reduce the lookup overhead since most of these queries happen on * a per-packet basis. * - * Author: Paul Moore <paul.moore@hp.com> + * Author: Paul Moore <paul@paul-moore.com> * */ diff --git a/security/selinux/include/netport.h b/security/selinux/include/netport.h index 8991752eaf9..4d965b83d73 100644 --- a/security/selinux/include/netport.h +++ b/security/selinux/include/netport.h @@ -5,7 +5,7 @@ * mapping is maintained as part of the normal policy but a fast cache is * needed to reduce the lookup overhead. * - * Author: Paul Moore <paul.moore@hp.com> + * Author: Paul Moore <paul@paul-moore.com> * */ diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index 26c7eee1c30..078e553f52f 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -38,7 +38,10 @@ struct task_security_struct { struct inode_security_struct { struct inode *inode; /* back pointer to inode object */ - struct list_head list; /* list of inode_security_struct */ + union { + struct list_head list; /* list of inode_security_struct */ + struct rcu_head rcu; /* for freeing the inode_security_struct */ + }; u32 task_sid; /* SID of creating task */ u32 sid; /* SID of this object */ u16 sclass; /* security class of this object */ @@ -58,8 +61,8 @@ struct superblock_security_struct { u32 sid; /* SID of file system superblock */ u32 def_sid; /* default SID for labeling */ u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */ - unsigned int behavior; /* labeling behavior */ - unsigned char flags; /* which mount options were specified */ + unsigned short behavior; /* labeling behavior */ + unsigned short flags; /* which mount options were specified */ struct mutex lock; struct list_head isec_head; spinlock_t isec_lock; @@ -110,6 +113,10 @@ struct sk_security_struct { u16 sclass; /* sock security class */ }; +struct tun_security_struct { + u32 sid; /* SID for the tun device sockets */ +}; + struct key_security_struct { u32 sid; /* SID of key */ }; diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index 671273eb111..ce7852cf526 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -8,6 +8,7 @@ #ifndef _SELINUX_SECURITY_H_ #define _SELINUX_SECURITY_H_ +#include <linux/dcache.h> #include <linux/magic.h> #include <linux/types.h> #include "flask.h" @@ -28,26 +29,32 @@ #define POLICYDB_VERSION_POLCAP 22 #define POLICYDB_VERSION_PERMISSIVE 23 #define POLICYDB_VERSION_BOUNDARY 24 +#define POLICYDB_VERSION_FILENAME_TRANS 25 +#define POLICYDB_VERSION_ROLETRANS 26 +#define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS 27 +#define POLICYDB_VERSION_DEFAULT_TYPE 28 +#define 
POLICYDB_VERSION_CONSTRAINT_NAMES 29 /* Range of policy versions we understand*/ #define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE #ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX #define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE #else -#define POLICYDB_VERSION_MAX POLICYDB_VERSION_BOUNDARY +#define POLICYDB_VERSION_MAX POLICYDB_VERSION_CONSTRAINT_NAMES #endif /* Mask for just the mount related flags */ #define SE_MNTMASK 0x0f /* Super block security struct flags for mount options */ +/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */ #define CONTEXT_MNT 0x01 #define FSCONTEXT_MNT 0x02 #define ROOTCONTEXT_MNT 0x04 #define DEFCONTEXT_MNT 0x08 +#define SBLABEL_MNT 0x10 /* Non-mount related flags */ -#define SE_SBINITIALIZED 0x10 -#define SE_SBPROC 0x20 -#define SE_SBLABELSUPP 0x40 +#define SE_SBINITIALIZED 0x0100 +#define SE_SBPROC 0x0200 #define CONTEXT_STR "context=" #define FSCONTEXT_STR "fscontext=" @@ -63,12 +70,15 @@ extern int selinux_enabled; enum { POLICYDB_CAPABILITY_NETPEER, POLICYDB_CAPABILITY_OPENPERM, + POLICYDB_CAPABILITY_REDHAT1, + POLICYDB_CAPABILITY_ALWAYSNETWORK, __POLICYDB_CAPABILITY_MAX }; #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) extern int selinux_policycap_netpeer; extern int selinux_policycap_openperm; +extern int selinux_policycap_alwaysnetwork; /* * type_datum properties @@ -83,7 +93,7 @@ extern int selinux_policycap_openperm; int security_mls_enabled(void); int security_load_policy(void *data, size_t len); -int security_read_policy(void **data, ssize_t *len); +int security_read_policy(void **data, size_t *len); size_t security_policydb_len(void); int security_policycap_supported(unsigned int req_cap); @@ -106,11 +116,11 @@ void security_compute_av(u32 ssid, u32 tsid, void security_compute_av_user(u32 ssid, u32 tsid, u16 tclass, struct av_decision *avd); -int security_transition_sid(u32 ssid, u32 tsid, - u16 tclass, u32 *out_sid); +int security_transition_sid(u32 ssid, u32 tsid, u16 tclass, + const struct qstr *qstr, u32 *out_sid); -int security_transition_sid_user(u32 ssid, u32 tsid, - u16 tclass, u32 *out_sid); +int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass, + const char *objname, u32 *out_sid); int security_member_sid(u32 ssid, u32 tsid, u16 tclass, u32 *out_sid); @@ -124,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext, int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len); int security_context_to_sid(const char *scontext, u32 scontext_len, - u32 *out_sid); + u32 *out_sid, gfp_t gfp); int security_context_to_sid_default(const char *scontext, u32 scontext_len, u32 *out_sid, u32 def_sid, gfp_t gfp_flags); @@ -164,9 +174,10 @@ int security_get_allow_unknown(void); #define SECURITY_FS_USE_GENFS 4 /* use the genfs support */ #define SECURITY_FS_USE_NONE 5 /* no labeling support */ #define SECURITY_FS_USE_MNTPOINT 6 /* use mountpoint labeling */ +#define SECURITY_FS_USE_NATIVE 7 /* use native label support */ +#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */ -int security_fs_use(const char *fstype, unsigned int *behavior, - u32 *sid); +int security_fs_use(struct super_block *sb); int security_genfs_sid(const char *fstype, char *name, u16 sclass, u32 *sid); @@ -213,6 +224,14 @@ struct selinux_kernel_status { extern void selinux_status_update_setenforce(int enforcing); extern void selinux_status_update_policyload(int seqno); +extern void selinux_complete_init(void); +extern int selinux_disable(void); +extern void 
exit_sel_fs(void); +extern struct path selinux_null; +extern struct vfsmount *selinuxfs_mount; +extern void selnl_notify_setenforce(int val); +extern void selnl_notify_policyload(u32 seqno); +extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); #endif /* _SELINUX_SECURITY_H_ */ diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h index 13128f9a3e5..1450f85b946 100644 --- a/security/selinux/include/xfrm.h +++ b/security/selinux/include/xfrm.h @@ -7,30 +7,25 @@ #ifndef _SELINUX_XFRM_H_ #define _SELINUX_XFRM_H_ +#include <net/flow.h> + int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *sec_ctx); + struct xfrm_user_sec_ctx *uctx, + gfp_t gfp); int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp); void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx); int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx); int selinux_xfrm_state_alloc(struct xfrm_state *x, - struct xfrm_user_sec_ctx *sec_ctx, u32 secid); + struct xfrm_user_sec_ctx *uctx); +int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid); void selinux_xfrm_state_free(struct xfrm_state *x); int selinux_xfrm_state_delete(struct xfrm_state *x); int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir); int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, - struct xfrm_policy *xp, struct flowi *fl); - -/* - * Extract the security blob from the sock (it's actually on the socket) - */ -static inline struct inode_security_struct *get_sock_isec(struct sock *sk) -{ - if (!sk->sk_socket) - return NULL; - - return SOCK_INODE(sk->sk_socket)->i_security; -} + struct xfrm_policy *xp, + const struct flowi *fl); #ifdef CONFIG_SECURITY_NETWORK_XFRM extern atomic_t selinux_xfrm_refcount; @@ -40,15 +35,23 @@ static inline int selinux_xfrm_enabled(void) return (atomic_read(&selinux_xfrm_refcount) > 0); } -int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb, - struct common_audit_data *ad); -int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad, u8 proto); +int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad); +int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad, u8 proto); int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall); +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid); static inline void selinux_xfrm_notify_policyload(void) { - atomic_inc(&flow_cache_genid); + struct net *net; + + rtnl_lock(); + for_each_net(net) { + atomic_inc(&net->xfrm.flow_cache_genid); + rt_genid_bump_all(net); + } + rtnl_unlock(); } #else static inline int selinux_xfrm_enabled(void) @@ -56,19 +59,21 @@ static inline int selinux_xfrm_enabled(void) return 0; } -static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad) +static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad) { return 0; } -static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad, u8 proto) +static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad, + u8 proto) { return 0; } -static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) +static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, + int ckall) 
{ *sid = SECSID_NULL; return 0; @@ -77,12 +82,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int static inline void selinux_xfrm_notify_policyload(void) { } -#endif -static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid) +static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) { - int err = selinux_xfrm_decode_session(skb, sid, 0); - BUG_ON(err); + *sid = SECSID_NULL; + return 0; } +#endif #endif /* _SELINUX_XFRM_H_ */ diff --git a/security/selinux/netif.c b/security/selinux/netif.c index d6095d63d83..694e9e43855 100644 --- a/security/selinux/netif.c +++ b/security/selinux/netif.c @@ -8,7 +8,7 @@ * * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. - * Paul Moore <paul.moore@hp.com> + * Paul Moore <paul@paul-moore.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, @@ -104,22 +104,6 @@ static int sel_netif_insert(struct sel_netif *netif) } /** - * sel_netif_free - Frees an interface entry - * @p: the entry's RCU field - * - * Description: - * This function is designed to be used as a callback to the call_rcu() - * function so that memory allocated to a hash table interface entry can be - * released safely. - * - */ -static void sel_netif_free(struct rcu_head *p) -{ - struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head); - kfree(netif); -} - -/** * sel_netif_destroy - Remove an interface record from the table * @netif: the existing interface record * @@ -131,7 +115,7 @@ static void sel_netif_destroy(struct sel_netif *netif) { list_del_rcu(&netif->list); sel_netif_total--; - call_rcu(&netif->rcu_head, sel_netif_free); + kfree_rcu(netif, rcu_head); } /** @@ -268,8 +252,7 @@ static void sel_netif_flush(void) spin_unlock_bh(&sel_netif_lock); } -static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid, - u16 class, u32 perms, u32 *retained) +static int sel_netif_avc_callback(u32 event) { if (event == AVC_CALLBACK_RESET) { sel_netif_flush(); @@ -281,7 +264,7 @@ static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid, static int sel_netif_netdev_notifier_handler(struct notifier_block *this, unsigned long event, void *ptr) { - struct net_device *dev = ptr; + struct net_device *dev = netdev_notifier_info_to_dev(ptr); if (dev_net(dev) != &init_net) return NOTIFY_DONE; @@ -308,8 +291,7 @@ static __init int sel_netif_init(void) register_netdevice_notifier(&sel_netif_netdev_notifier); - err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET, - SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0); + err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET); if (err) panic("avc_add_callback() failed, error %d\n", err); diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c index 1c2fc46544b..0364120d1ec 100644 --- a/security/selinux/netlabel.c +++ b/security/selinux/netlabel.c @@ -4,7 +4,7 @@ * This file provides the necessary glue to tie NetLabel into the SELinux * subsystem. * - * Author: Paul Moore <paul.moore@hp.com> + * Author: Paul Moore <paul@paul-moore.com> * */ @@ -101,6 +101,32 @@ static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk) } /** + * selinux_netlbl_sock_getattr - Get the cached NetLabel secattr + * @sk: the socket + * @sid: the SID + * + * Query the socket's cached secattr and if the SID matches the cached value + * return the cache, otherwise return NULL. 
+ * + */ +static struct netlbl_lsm_secattr *selinux_netlbl_sock_getattr( + const struct sock *sk, + u32 sid) +{ + struct sk_security_struct *sksec = sk->sk_security; + struct netlbl_lsm_secattr *secattr = sksec->nlbl_secattr; + + if (secattr == NULL) + return NULL; + + if ((secattr->flags & NETLBL_SECATTR_SECID) && + (secattr->attr.secid == sid)) + return secattr; + + return NULL; +} + +/** * selinux_netlbl_cache_invalidate - Invalidate the NetLabel cache * * Description: @@ -151,7 +177,7 @@ void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec) * * Description: * Called when the NetLabel state of a sk_security_struct needs to be reset. - * The caller is responsibile for all the NetLabel sk_security_struct locking. + * The caller is responsible for all the NetLabel sk_security_struct locking. * */ void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec) @@ -224,7 +250,7 @@ int selinux_netlbl_skbuff_setsid(struct sk_buff *skb, struct sk_security_struct *sksec = sk->sk_security; if (sksec->nlbl_state != NLBL_REQSKB) return 0; - secattr = sksec->nlbl_secattr; + secattr = selinux_netlbl_sock_getattr(sk, sid); } if (secattr == NULL) { secattr = &secattr_storage; @@ -410,6 +436,9 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock, sksec->nlbl_state == NLBL_CONNLABELED)) { netlbl_secattr_init(&secattr); lock_sock(sk); + /* call the netlabel function directly as we want to see the + * on-the-wire label that is assigned via the socket's options + * and not the cached netlabel/lsm attributes */ rc = netlbl_sock_getattr(sk, &secattr); release_sock(sk); if (rc == 0) @@ -442,8 +471,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr) sksec->nlbl_state != NLBL_CONNLABELED) return 0; - local_bh_disable(); - bh_lock_sock_nested(sk); + lock_sock(sk); /* connected sockets are allowed to disconnect when the address family * is set to AF_UNSPEC, if that is what is happening we want to reset @@ -464,7 +492,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr) sksec->nlbl_state = NLBL_CONNLABELED; socket_connect_return: - bh_unlock_sock(sk); - local_bh_enable(); + release_sock(sk); return rc; } diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c index 36ac257cec9..828fb6a4e94 100644 --- a/security/selinux/netlink.c +++ b/security/selinux/netlink.c @@ -14,10 +14,13 @@ #include <linux/slab.h> #include <linux/stddef.h> #include <linux/kernel.h> +#include <linux/export.h> #include <linux/skbuff.h> -#include <linux/netlink.h> #include <linux/selinux_netlink.h> #include <net/net_namespace.h> +#include <net/netlink.h> + +#include "security.h" static struct sock *selnl; @@ -44,7 +47,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void * { switch (msgtype) { case SELNL_MSG_SETENFORCE: { - struct selnl_msg_setenforce *msg = NLMSG_DATA(nlh); + struct selnl_msg_setenforce *msg = nlmsg_data(nlh); memset(msg, 0, len); msg->val = *((int *)data); @@ -52,7 +55,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void * } case SELNL_MSG_POLICYLOAD: { - struct selnl_msg_policyload *msg = NLMSG_DATA(nlh); + struct selnl_msg_policyload *msg = nlmsg_data(nlh); memset(msg, 0, len); msg->seqno = *((u32 *)data); @@ -73,12 +76,14 @@ static void selnl_notify(int msgtype, void *data) len = selnl_msglen(msgtype); - skb = alloc_skb(NLMSG_SPACE(len), GFP_USER); + skb = nlmsg_new(len, GFP_USER); if (!skb) goto oom; tmp = skb->tail; - nlh = NLMSG_PUT(skb, 0, 0, msgtype, len); + nlh 
= nlmsg_put(skb, 0, 0, msgtype, len, 0); + if (!nlh) + goto out_kfree_skb; selnl_add_payload(nlh, len, msgtype, data); nlh->nlmsg_len = skb->tail - tmp; NETLINK_CB(skb).dst_group = SELNLGRP_AVC; @@ -86,7 +91,7 @@ static void selnl_notify(int msgtype, void *data) out: return; -nlmsg_failure: +out_kfree_skb: kfree_skb(skb); oom: printk(KERN_ERR "SELinux: OOM in %s\n", __func__); @@ -105,11 +110,14 @@ void selnl_notify_policyload(u32 seqno) static int __init selnl_init(void) { - selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, - SELNLGRP_MAX, NULL, NULL, THIS_MODULE); + struct netlink_kernel_cfg cfg = { + .groups = SELNLGRP_MAX, + .flags = NL_CFG_F_NONROOT_RECV, + }; + + selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg); if (selnl == NULL) panic("SELinux: Cannot create netlink socket."); - netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV); return 0; } diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c index 65ebfe954f8..03a72c32afd 100644 --- a/security/selinux/netnode.c +++ b/security/selinux/netnode.c @@ -6,7 +6,7 @@ * needed to reduce the lookup overhead since most of these queries happen on * a per-packet basis. * - * Author: Paul Moore <paul.moore@hp.com> + * Author: Paul Moore <paul@paul-moore.com> * * This code is heavily based on the "netif" concept originally developed by * James Morris <jmorris@redhat.com> @@ -69,22 +69,6 @@ static DEFINE_SPINLOCK(sel_netnode_lock); static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE]; /** - * sel_netnode_free - Frees a node entry - * @p: the entry's RCU field - * - * Description: - * This function is designed to be used as a callback to the call_rcu() - * function so that memory allocated to a hash table node entry can be - * released safely. - * - */ -static void sel_netnode_free(struct rcu_head *p) -{ - struct sel_netnode *node = container_of(p, struct sel_netnode, rcu); - kfree(node); -} - -/** * sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table * @addr: IPv4 address * @@ -141,6 +125,7 @@ static struct sel_netnode *sel_netnode_find(const void *addr, u16 family) break; default: BUG(); + return NULL; } list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list) @@ -181,6 +166,7 @@ static void sel_netnode_insert(struct sel_netnode *node) break; default: BUG(); + return; } /* we need to impose a limit on the growth of the hash table so check @@ -189,10 +175,11 @@ static void sel_netnode_insert(struct sel_netnode *node) if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) { struct sel_netnode *tail; tail = list_entry( - rcu_dereference(sel_netnode_hash[idx].list.prev), + rcu_dereference_protected(sel_netnode_hash[idx].list.prev, + lockdep_is_held(&sel_netnode_lock)), struct sel_netnode, list); list_del_rcu(&tail->list); - call_rcu(&tail->rcu, sel_netnode_free); + kfree_rcu(tail, rcu); } else sel_netnode_hash[idx].size++; } @@ -235,10 +222,11 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid) case PF_INET6: ret = security_node_sid(PF_INET6, addr, sizeof(struct in6_addr), sid); - ipv6_addr_copy(&new->nsec.addr.ipv6, addr); + new->nsec.addr.ipv6 = *(struct in6_addr *)addr; break; default: BUG(); + ret = -EINVAL; } if (ret != 0) goto out; @@ -305,15 +293,14 @@ static void sel_netnode_flush(void) list_for_each_entry_safe(node, node_tmp, &sel_netnode_hash[idx].list, list) { list_del_rcu(&node->list); - call_rcu(&node->rcu, sel_netnode_free); + kfree_rcu(node, rcu); } sel_netnode_hash[idx].size = 0; } spin_unlock_bh(&sel_netnode_lock); } -static 
int sel_netnode_avc_callback(u32 event, u32 ssid, u32 tsid, - u16 class, u32 perms, u32 *retained) +static int sel_netnode_avc_callback(u32 event) { if (event == AVC_CALLBACK_RESET) { sel_netnode_flush(); @@ -335,8 +322,7 @@ static __init int sel_netnode_init(void) sel_netnode_hash[iter].size = 0; } - ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET, - SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0); + ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET); if (ret != 0) panic("avc_add_callback() failed, error %d\n", ret); diff --git a/security/selinux/netport.c b/security/selinux/netport.c index cfe2d72d3fb..d35379781c2 100644 --- a/security/selinux/netport.c +++ b/security/selinux/netport.c @@ -5,7 +5,7 @@ * mapping is maintained as part of the normal policy but a fast cache is * needed to reduce the lookup overhead. * - * Author: Paul Moore <paul.moore@hp.com> + * Author: Paul Moore <paul@paul-moore.com> * * This code is heavily based on the "netif" concept originally developed by * James Morris <jmorris@redhat.com> @@ -68,22 +68,6 @@ static DEFINE_SPINLOCK(sel_netport_lock); static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE]; /** - * sel_netport_free - Frees a port entry - * @p: the entry's RCU field - * - * Description: - * This function is designed to be used as a callback to the call_rcu() - * function so that memory allocated to a hash table port entry can be - * released safely. - * - */ -static void sel_netport_free(struct rcu_head *p) -{ - struct sel_netport *port = container_of(p, struct sel_netport, rcu); - kfree(port); -} - -/** * sel_netport_hashfn - Hashing function for the port table * @pnum: port number * @@ -139,10 +123,12 @@ static void sel_netport_insert(struct sel_netport *port) if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) { struct sel_netport *tail; tail = list_entry( - rcu_dereference(sel_netport_hash[idx].list.prev), + rcu_dereference_protected( + sel_netport_hash[idx].list.prev, + lockdep_is_held(&sel_netport_lock)), struct sel_netport, list); list_del_rcu(&tail->list); - call_rcu(&tail->rcu, sel_netport_free); + kfree_rcu(tail, rcu); } else sel_netport_hash[idx].size++; } @@ -241,15 +227,14 @@ static void sel_netport_flush(void) list_for_each_entry_safe(port, port_tmp, &sel_netport_hash[idx].list, list) { list_del_rcu(&port->list); - call_rcu(&port->rcu, sel_netport_free); + kfree_rcu(port, rcu); } sel_netport_hash[idx].size = 0; } spin_unlock_bh(&sel_netport_lock); } -static int sel_netport_avc_callback(u32 event, u32 ssid, u32 tsid, - u16 class, u32 perms, u32 *retained) +static int sel_netport_avc_callback(u32 event) { if (event == AVC_CALLBACK_RESET) { sel_netport_flush(); @@ -271,8 +256,7 @@ static __init int sel_netport_init(void) sel_netport_hash[iter].size = 0; } - ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET, - SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0); + ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET); if (ret != 0) panic("avc_add_callback() failed, error %d\n", ret); diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c index 75ec0c6ebac..2df7b900e25 100644 --- a/security/selinux/nlmsgtab.c +++ b/security/selinux/nlmsgtab.c @@ -14,13 +14,14 @@ #include <linux/netlink.h> #include <linux/rtnetlink.h> #include <linux/if.h> -#include <linux/netfilter_ipv4/ip_queue.h> #include <linux/inet_diag.h> #include <linux/xfrm.h> #include <linux/audit.h> +#include <linux/sock_diag.h> #include "flask.h" #include "av_permissions.h" +#include 
"security.h" struct nlmsg_perm { u16 nlmsg_type; @@ -65,18 +66,20 @@ static struct nlmsg_perm nlmsg_route_perms[] = { RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, { RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ }, -}; - -static struct nlmsg_perm nlmsg_firewall_perms[] = -{ - { IPQM_MODE, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE }, - { IPQM_VERDICT, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE }, + { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, + { RTM_NEWNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, + { RTM_GETNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_READ }, + { RTM_NEWMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, + { RTM_DELMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE }, + { RTM_GETMDB, NETLINK_ROUTE_SOCKET__NLMSG_READ }, }; static struct nlmsg_perm nlmsg_tcpdiag_perms[] = { { TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, { DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, + { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ }, }; static struct nlmsg_perm nlmsg_xfrm_perms[] = @@ -115,6 +118,8 @@ static struct nlmsg_perm nlmsg_audit_perms[] = { AUDIT_MAKE_EQUIV, NETLINK_AUDIT_SOCKET__NLMSG_WRITE }, { AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ }, { AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT }, + { AUDIT_GET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_READ }, + { AUDIT_SET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE }, }; @@ -142,12 +147,6 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm) sizeof(nlmsg_route_perms)); break; - case SECCLASS_NETLINK_FIREWALL_SOCKET: - case SECCLASS_NETLINK_IP6FW_SOCKET: - err = nlmsg_perm(nlmsg_type, perm, nlmsg_firewall_perms, - sizeof(nlmsg_firewall_perms)); - break; - case SECCLASS_NETLINK_TCPDIAG_SOCKET: err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms, sizeof(nlmsg_tcpdiag_perms)); diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 073fd5b0a53..c71737f6d1c 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -2,7 +2,7 @@ * * Added conditional policy language extensions * - * Updated: Hewlett-Packard <paul.moore@hp.com> + * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support for the policy capability bitmap * @@ -28,6 +28,8 @@ #include <linux/percpu.h> #include <linux/audit.h> #include <linux/uaccess.h> +#include <linux/kobject.h> +#include <linux/ctype.h> /* selinuxfs pseudo filesystem for exporting the security policy API. Based on the proc code and the fs/nfsd/nfsctl.c code. */ @@ -42,7 +44,9 @@ /* Policy capability filenames */ static char *policycap_names[] = { "network_peer_controls", - "open_perms" + "open_perms", + "redhat1", + "always_check_network" }; unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; @@ -50,7 +54,7 @@ unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; static int __init checkreqprot_setup(char *str) { unsigned long checkreqprot; - if (!strict_strtoul(str, 0, &checkreqprot)) + if (!kstrtoul(str, 0, &checkreqprot)) selinux_checkreqprot = checkreqprot ? 1 : 0; return 1; } @@ -73,8 +77,6 @@ static char policy_opened; /* global data for policy capabilities */ static struct dentry *policycap_dir; -extern void selnl_notify_setenforce(int val); - /* Check whether a task is allowed to use a security operation. 
*/ static int task_has_security(struct task_struct *tsk, u32 perms) @@ -141,19 +143,24 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - char *page; + char *page = NULL; ssize_t length; int new_value; + length = -ENOMEM; if (count >= PAGE_SIZE) - return -ENOMEM; - if (*ppos != 0) { - /* No partial writes. */ - return -EINVAL; - } + goto out; + + /* No partial writes. */ + length = -EINVAL; + if (*ppos != 0) + goto out; + + length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) - return -ENOMEM; + goto out; + length = -EFAULT; if (copy_from_user(page, buf, count)) goto out; @@ -169,7 +176,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS, "enforcing=%d old_enforcing=%d auid=%u ses=%u", new_value, selinux_enforcing, - audit_get_loginuid(current), + from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); selinux_enforcing = new_value; if (selinux_enforcing) @@ -197,7 +204,7 @@ static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf, { char tmpbuf[TMPBUFLEN]; ssize_t length; - ino_t ino = filp->f_path.dentry->d_inode->i_ino; + ino_t ino = file_inode(filp)->i_ino; int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ? security_get_reject_unknown() : !security_get_allow_unknown(); @@ -268,20 +275,24 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - char *page; + char *page = NULL; ssize_t length; int new_value; - extern int selinux_disable(void); + length = -ENOMEM; if (count >= PAGE_SIZE) - return -ENOMEM; - if (*ppos != 0) { - /* No partial writes. */ - return -EINVAL; - } + goto out; + + /* No partial writes. 
*/ + length = -EINVAL; + if (*ppos != 0) + goto out; + + length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) - return -ENOMEM; + goto out; + length = -EFAULT; if (copy_from_user(page, buf, count)) goto out; @@ -292,11 +303,11 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf, if (new_value) { length = selinux_disable(); - if (length < 0) + if (length) goto out; audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS, "selinux=0 auid=%u ses=%u", - audit_get_loginuid(current), + from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); } @@ -335,7 +346,7 @@ static int sel_make_classes(void); static int sel_make_policycap(void); /* declaration for sel_make_class_dirs */ -static int sel_make_dir(struct inode *dir, struct dentry *dentry, +static struct dentry *sel_make_dir(struct dentry *dir, const char *name, unsigned long *ino); static ssize_t sel_read_mls(struct file *filp, char __user *buf, @@ -466,7 +477,7 @@ static struct vm_operations_struct sel_mmap_policy_ops = { .page_mkwrite = sel_mmap_policy_fault, }; -int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma) +static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma) { if (vma->vm_flags & VM_SHARED) { /* do not allow mprotect to make mapping writable */ @@ -476,7 +487,7 @@ int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma) return -EACCES; } - vma->vm_flags |= VM_RESERVED; + vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_ops = &sel_mmap_policy_ops; return 0; @@ -487,13 +498,13 @@ static const struct file_operations sel_policy_ops = { .read = sel_read_policy, .mmap = sel_mmap_policy, .release = sel_release_policy, + .llseek = generic_file_llseek, }; static ssize_t sel_write_load(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - int ret; ssize_t length; void *data = NULL; @@ -503,17 +514,19 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf, if (length) goto out; - if (*ppos != 0) { - /* No partial writes. */ - length = -EINVAL; + /* No partial writes. 
*/ + length = -EINVAL; + if (*ppos != 0) goto out; - } - if ((count > 64 * 1024 * 1024) - || (data = vmalloc(count)) == NULL) { - length = -ENOMEM; + length = -EFBIG; + if (count > 64 * 1024 * 1024) + goto out; + + length = -ENOMEM; + data = vmalloc(count); + if (!data) goto out; - } length = -EFAULT; if (copy_from_user(data, buf, count) != 0) @@ -523,28 +536,24 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf, if (length) goto out; - ret = sel_make_bools(); - if (ret) { - length = ret; + length = sel_make_bools(); + if (length) goto out1; - } - ret = sel_make_classes(); - if (ret) { - length = ret; + length = sel_make_classes(); + if (length) goto out1; - } - ret = sel_make_policycap(); - if (ret) - length = ret; - else - length = count; + length = sel_make_policycap(); + if (length) + goto out1; + + length = count; out1: audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD, "policy loaded auid=%u ses=%u", - audit_get_loginuid(current), + from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); out: mutex_unlock(&sel_mutex); @@ -559,26 +568,26 @@ static const struct file_operations sel_load_ops = { static ssize_t sel_write_context(struct file *file, char *buf, size_t size) { - char *canon; + char *canon = NULL; u32 sid, len; ssize_t length; length = task_has_security(current, SECURITY__CHECK_CONTEXT); if (length) - return length; + goto out; - length = security_context_to_sid(buf, size, &sid); - if (length < 0) - return length; + length = security_context_to_sid(buf, size, &sid, GFP_KERNEL); + if (length) + goto out; length = security_sid_to_context(sid, &canon, &len); - if (length < 0) - return length; + if (length) + goto out; + length = -ERANGE; if (len > SIMPLE_TRANSACTION_LIMIT) { printk(KERN_ERR "SELinux: %s: context size (%u) exceeds " "payload max\n", __func__, len); - length = -ERANGE; goto out; } @@ -602,23 +611,28 @@ static ssize_t sel_read_checkreqprot(struct file *filp, char __user *buf, static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - char *page; + char *page = NULL; ssize_t length; unsigned int new_value; length = task_has_security(current, SECURITY__SETCHECKREQPROT); if (length) - return length; + goto out; + length = -ENOMEM; if (count >= PAGE_SIZE) - return -ENOMEM; - if (*ppos != 0) { - /* No partial writes. */ - return -EINVAL; - } + goto out; + + /* No partial writes. 
*/ + length = -EINVAL; + if (*ppos != 0) + goto out; + + length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) - return -ENOMEM; + goto out; + length = -EFAULT; if (copy_from_user(page, buf, count)) goto out; @@ -659,7 +673,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = { static ssize_t selinux_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos) { - ino_t ino = file->f_path.dentry->d_inode->i_ino; + ino_t ino = file_inode(file)->i_ino; char *data; ssize_t rv; @@ -693,7 +707,7 @@ static const struct file_operations transaction_ops = { static ssize_t sel_write_access(struct file *file, char *buf, size_t size) { - char *scon, *tcon; + char *scon = NULL, *tcon = NULL; u32 ssid, tsid; u16 tclass; struct av_decision avd; @@ -701,27 +715,31 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size) length = task_has_security(current, SECURITY__COMPUTE_AV); if (length) - return length; + goto out; length = -ENOMEM; scon = kzalloc(size + 1, GFP_KERNEL); if (!scon) - return length; + goto out; + length = -ENOMEM; tcon = kzalloc(size + 1, GFP_KERNEL); if (!tcon) goto out; length = -EINVAL; if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) - goto out2; + goto out; + + length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, + GFP_KERNEL); + if (length) + goto out; - length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); - if (length < 0) - goto out2; - length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); - if (length < 0) - goto out2; + length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, + GFP_KERNEL); + if (length) + goto out; security_compute_av_user(ssid, tsid, tclass, &avd); @@ -730,133 +748,175 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size) avd.allowed, 0xffffffff, avd.auditallow, avd.auditdeny, avd.seqno, avd.flags); -out2: - kfree(tcon); out: + kfree(tcon); kfree(scon); return length; } static ssize_t sel_write_create(struct file *file, char *buf, size_t size) { - char *scon, *tcon; + char *scon = NULL, *tcon = NULL; + char *namebuf = NULL, *objname = NULL; u32 ssid, tsid, newsid; u16 tclass; ssize_t length; - char *newcon; + char *newcon = NULL; u32 len; + int nargs; length = task_has_security(current, SECURITY__COMPUTE_CREATE); if (length) - return length; + goto out; length = -ENOMEM; scon = kzalloc(size + 1, GFP_KERNEL); if (!scon) - return length; + goto out; + length = -ENOMEM; tcon = kzalloc(size + 1, GFP_KERNEL); if (!tcon) goto out; + length = -ENOMEM; + namebuf = kzalloc(size + 1, GFP_KERNEL); + if (!namebuf) + goto out; + length = -EINVAL; - if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) - goto out2; + nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf); + if (nargs < 3 || nargs > 4) + goto out; + if (nargs == 4) { + /* + * If and when the name of new object to be queried contains + * either whitespace or multibyte characters, they shall be + * encoded based on the percentage-encoding rule. + * If not encoded, the sscanf logic picks up only left-half + * of the supplied name; splitted by a whitespace unexpectedly. 
+ */ + char *r, *w; + int c1, c2; + + r = w = namebuf; + do { + c1 = *r++; + if (c1 == '+') + c1 = ' '; + else if (c1 == '%') { + c1 = hex_to_bin(*r++); + if (c1 < 0) + goto out; + c2 = hex_to_bin(*r++); + if (c2 < 0) + goto out; + c1 = (c1 << 4) | c2; + } + *w++ = c1; + } while (c1 != '\0'); + + objname = namebuf; + } + + length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, + GFP_KERNEL); + if (length) + goto out; - length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); - if (length < 0) - goto out2; - length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); - if (length < 0) - goto out2; + length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, + GFP_KERNEL); + if (length) + goto out; - length = security_transition_sid_user(ssid, tsid, tclass, &newsid); - if (length < 0) - goto out2; + length = security_transition_sid_user(ssid, tsid, tclass, + objname, &newsid); + if (length) + goto out; length = security_sid_to_context(newsid, &newcon, &len); - if (length < 0) - goto out2; + if (length) + goto out; + length = -ERANGE; if (len > SIMPLE_TRANSACTION_LIMIT) { printk(KERN_ERR "SELinux: %s: context size (%u) exceeds " "payload max\n", __func__, len); - length = -ERANGE; - goto out3; + goto out; } memcpy(buf, newcon, len); length = len; -out3: +out: kfree(newcon); -out2: + kfree(namebuf); kfree(tcon); -out: kfree(scon); return length; } static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size) { - char *scon, *tcon; + char *scon = NULL, *tcon = NULL; u32 ssid, tsid, newsid; u16 tclass; ssize_t length; - char *newcon; + char *newcon = NULL; u32 len; length = task_has_security(current, SECURITY__COMPUTE_RELABEL); if (length) - return length; + goto out; length = -ENOMEM; scon = kzalloc(size + 1, GFP_KERNEL); if (!scon) - return length; + goto out; + length = -ENOMEM; tcon = kzalloc(size + 1, GFP_KERNEL); if (!tcon) goto out; length = -EINVAL; if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) - goto out2; + goto out; + + length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, + GFP_KERNEL); + if (length) + goto out; - length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); - if (length < 0) - goto out2; - length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); - if (length < 0) - goto out2; + length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, + GFP_KERNEL); + if (length) + goto out; length = security_change_sid(ssid, tsid, tclass, &newsid); - if (length < 0) - goto out2; + if (length) + goto out; length = security_sid_to_context(newsid, &newcon, &len); - if (length < 0) - goto out2; + if (length) + goto out; - if (len > SIMPLE_TRANSACTION_LIMIT) { - length = -ERANGE; - goto out3; - } + length = -ERANGE; + if (len > SIMPLE_TRANSACTION_LIMIT) + goto out; memcpy(buf, newcon, len); length = len; -out3: +out: kfree(newcon); -out2: kfree(tcon); -out: kfree(scon); return length; } static ssize_t sel_write_user(struct file *file, char *buf, size_t size) { - char *con, *user, *ptr; - u32 sid, *sids; + char *con = NULL, *user = NULL, *ptr; + u32 sid, *sids = NULL; ssize_t length; char *newcon; int i, rc; @@ -864,28 +924,29 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size) length = task_has_security(current, SECURITY__COMPUTE_USER); if (length) - return length; + goto out; length = -ENOMEM; con = kzalloc(size + 1, GFP_KERNEL); if (!con) - return length; + goto out; + length = -ENOMEM; user = kzalloc(size + 1, GFP_KERNEL); if (!user) goto out; length = -EINVAL; if (sscanf(buf, "%s 
%s", con, user) != 2) - goto out2; + goto out; - length = security_context_to_sid(con, strlen(con) + 1, &sid); - if (length < 0) - goto out2; + length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL); + if (length) + goto out; length = security_get_user_sids(sid, user, &sids, &nsids); - if (length < 0) - goto out2; + if (length) + goto out; length = sprintf(buf, "%u", nsids) + 1; ptr = buf + length; @@ -893,82 +954,82 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size) rc = security_sid_to_context(sids[i], &newcon, &len); if (rc) { length = rc; - goto out3; + goto out; } if ((length + len) >= SIMPLE_TRANSACTION_LIMIT) { kfree(newcon); length = -ERANGE; - goto out3; + goto out; } memcpy(ptr, newcon, len); kfree(newcon); ptr += len; length += len; } -out3: +out: kfree(sids); -out2: kfree(user); -out: kfree(con); return length; } static ssize_t sel_write_member(struct file *file, char *buf, size_t size) { - char *scon, *tcon; + char *scon = NULL, *tcon = NULL; u32 ssid, tsid, newsid; u16 tclass; ssize_t length; - char *newcon; + char *newcon = NULL; u32 len; length = task_has_security(current, SECURITY__COMPUTE_MEMBER); if (length) - return length; + goto out; length = -ENOMEM; scon = kzalloc(size + 1, GFP_KERNEL); if (!scon) - return length; + goto out; + length = -ENOMEM; tcon = kzalloc(size + 1, GFP_KERNEL); if (!tcon) goto out; length = -EINVAL; if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3) - goto out2; + goto out; + + length = security_context_to_sid(scon, strlen(scon) + 1, &ssid, + GFP_KERNEL); + if (length) + goto out; - length = security_context_to_sid(scon, strlen(scon) + 1, &ssid); - if (length < 0) - goto out2; - length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid); - if (length < 0) - goto out2; + length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid, + GFP_KERNEL); + if (length) + goto out; length = security_member_sid(ssid, tsid, tclass, &newsid); - if (length < 0) - goto out2; + if (length) + goto out; length = security_sid_to_context(newsid, &newcon, &len); - if (length < 0) - goto out2; + if (length) + goto out; + length = -ERANGE; if (len > SIMPLE_TRANSACTION_LIMIT) { printk(KERN_ERR "SELinux: %s: context size (%u) exceeds " "payload max\n", __func__, len); - length = -ERANGE; - goto out3; + goto out; } memcpy(buf, newcon, len); length = len; -out3: +out: kfree(newcon); -out2: kfree(tcon); -out: kfree(scon); return length; } @@ -978,7 +1039,6 @@ static struct inode *sel_make_inode(struct super_block *sb, int mode) struct inode *ret = new_inode(sb); if (ret) { - ret->i_ino = get_next_ino(); ret->i_mode = mode; ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; } @@ -992,22 +1052,19 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf, ssize_t length; ssize_t ret; int cur_enforcing; - struct inode *inode = filep->f_path.dentry->d_inode; - unsigned index = inode->i_ino & SEL_INO_MASK; + unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK; const char *name = filep->f_path.dentry->d_name.name; mutex_lock(&sel_mutex); - if (index >= bool_num || strcmp(name, bool_pending_names[index])) { - ret = -EINVAL; + ret = -EINVAL; + if (index >= bool_num || strcmp(name, bool_pending_names[index])) goto out; - } + ret = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); - if (!page) { - ret = -ENOMEM; + if (!page) goto out; - } cur_enforcing = security_get_bool_value(index); if (cur_enforcing < 0) { @@ -1019,8 +1076,7 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf, ret = 
simple_read_from_buffer(buf, count, ppos, page, length); out: mutex_unlock(&sel_mutex); - if (page) - free_page((unsigned long)page); + free_page((unsigned long)page); return ret; } @@ -1030,8 +1086,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf, char *page = NULL; ssize_t length; int new_value; - struct inode *inode = filep->f_path.dentry->d_inode; - unsigned index = inode->i_ino & SEL_INO_MASK; + unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK; const char *name = filep->f_path.dentry->d_name.name; mutex_lock(&sel_mutex); @@ -1040,26 +1095,23 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf, if (length) goto out; - if (index >= bool_num || strcmp(name, bool_pending_names[index])) { - length = -EINVAL; + length = -EINVAL; + if (index >= bool_num || strcmp(name, bool_pending_names[index])) goto out; - } - if (count >= PAGE_SIZE) { - length = -ENOMEM; + length = -ENOMEM; + if (count >= PAGE_SIZE) goto out; - } - if (*ppos != 0) { - /* No partial writes. */ - length = -EINVAL; + /* No partial writes. */ + length = -EINVAL; + if (*ppos != 0) goto out; - } + + length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); - if (!page) { - length = -ENOMEM; + if (!page) goto out; - } length = -EFAULT; if (copy_from_user(page, buf, count)) @@ -1077,8 +1129,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf, out: mutex_unlock(&sel_mutex); - if (page) - free_page((unsigned long) page); + free_page((unsigned long) page); return length; } @@ -1102,19 +1153,19 @@ static ssize_t sel_commit_bools_write(struct file *filep, if (length) goto out; - if (count >= PAGE_SIZE) { - length = -ENOMEM; + length = -ENOMEM; + if (count >= PAGE_SIZE) goto out; - } - if (*ppos != 0) { - /* No partial writes. */ + + /* No partial writes. 
*/ + length = -EINVAL; + if (*ppos != 0) goto out; - } + + length = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); - if (!page) { - length = -ENOMEM; + if (!page) goto out; - } length = -EFAULT; if (copy_from_user(page, buf, count)) @@ -1124,15 +1175,16 @@ static ssize_t sel_commit_bools_write(struct file *filep, if (sscanf(page, "%d", &new_value) != 1) goto out; + length = 0; if (new_value && bool_pending_values) - security_set_bools(bool_num, bool_pending_values); + length = security_set_bools(bool_num, bool_pending_values); - length = count; + if (!length) + length = count; out: mutex_unlock(&sel_mutex); - if (page) - free_page((unsigned long) page); + free_page((unsigned long) page); return length; } @@ -1145,31 +1197,35 @@ static void sel_remove_entries(struct dentry *de) { struct list_head *node; - spin_lock(&dcache_lock); + spin_lock(&de->d_lock); node = de->d_subdirs.next; while (node != &de->d_subdirs) { struct dentry *d = list_entry(node, struct dentry, d_u.d_child); + + spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); list_del_init(node); if (d->d_inode) { - d = dget_locked(d); - spin_unlock(&dcache_lock); + dget_dlock(d); + spin_unlock(&de->d_lock); + spin_unlock(&d->d_lock); d_delete(d); simple_unlink(de->d_inode, d); dput(d); - spin_lock(&dcache_lock); - } + spin_lock(&de->d_lock); + } else + spin_unlock(&d->d_lock); node = de->d_subdirs.next; } - spin_unlock(&dcache_lock); + spin_unlock(&de->d_lock); } #define BOOL_DIR_NAME "booleans" static int sel_make_bools(void) { - int i, ret = 0; + int i, ret; ssize_t len; struct dentry *dentry = NULL; struct dentry *dir = bool_dir; @@ -1185,43 +1241,42 @@ static int sel_make_bools(void) kfree(bool_pending_names[i]); kfree(bool_pending_names); kfree(bool_pending_values); + bool_num = 0; bool_pending_names = NULL; bool_pending_values = NULL; sel_remove_entries(dir); + ret = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); if (!page) - return -ENOMEM; + goto out; ret = security_get_bools(&num, &names, &values); - if (ret != 0) + if (ret) goto out; for (i = 0; i < num; i++) { + ret = -ENOMEM; dentry = d_alloc_name(dir, names[i]); - if (!dentry) { - ret = -ENOMEM; - goto err; - } + if (!dentry) + goto out; + + ret = -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR); - if (!inode) { - ret = -ENOMEM; - goto err; - } + if (!inode) + goto out; + ret = -ENAMETOOLONG; len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]); - if (len < 0) { - ret = -EINVAL; - goto err; - } else if (len >= PAGE_SIZE) { - ret = -ENAMETOOLONG; - goto err; - } + if (len >= PAGE_SIZE) + goto out; + isec = (struct inode_security_struct *)inode->i_security; ret = security_genfs_sid("selinuxfs", page, SECCLASS_FILE, &sid); if (ret) - goto err; + goto out; + isec->sid = sid; isec->initialized = 1; inode->i_fop = &sel_bool_ops; @@ -1231,10 +1286,12 @@ static int sel_make_bools(void) bool_num = num; bool_pending_names = names; bool_pending_values = values; + + free_page((unsigned long)page); + return 0; out: free_page((unsigned long)page); - return ret; -err: + if (names) { for (i = 0; i < num; i++) kfree(names[i]); @@ -1242,13 +1299,13 @@ err: } kfree(values); sel_remove_entries(dir); - ret = -ENOMEM; - goto out; + + return ret; } #define NULL_FILE_NAME "null" -struct dentry *selinux_null; +struct path selinux_null; static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf, size_t count, loff_t *ppos) @@ -1265,47 +1322,41 @@ static ssize_t sel_write_avc_cache_threshold(struct file *file, size_t count, 
loff_t *ppos) { - char *page; + char *page = NULL; ssize_t ret; int new_value; - if (count >= PAGE_SIZE) { - ret = -ENOMEM; + ret = task_has_security(current, SECURITY__SETSECPARAM); + if (ret) goto out; - } - if (*ppos != 0) { - /* No partial writes. */ - ret = -EINVAL; + ret = -ENOMEM; + if (count >= PAGE_SIZE) goto out; - } + /* No partial writes. */ + ret = -EINVAL; + if (*ppos != 0) + goto out; + + ret = -ENOMEM; page = (char *)get_zeroed_page(GFP_KERNEL); - if (!page) { - ret = -ENOMEM; + if (!page) goto out; - } - if (copy_from_user(page, buf, count)) { - ret = -EFAULT; - goto out_free; - } + ret = -EFAULT; + if (copy_from_user(page, buf, count)) + goto out; - if (sscanf(page, "%u", &new_value) != 1) { - ret = -EINVAL; + ret = -EINVAL; + if (sscanf(page, "%u", &new_value) != 1) goto out; - } - if (new_value != avc_cache_threshold) { - ret = task_has_security(current, SECURITY__SETSECPARAM); - if (ret) - goto out_free; - avc_cache_threshold = new_value; - } + avc_cache_threshold = new_value; + ret = count; -out_free: - free_page((unsigned long)page); out: + free_page((unsigned long)page); return ret; } @@ -1313,19 +1364,18 @@ static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf, size_t count, loff_t *ppos) { char *page; - ssize_t ret = 0; + ssize_t length; page = (char *)__get_free_page(GFP_KERNEL); - if (!page) { - ret = -ENOMEM; - goto out; - } - ret = avc_get_hash_stats(page); - if (ret >= 0) - ret = simple_read_from_buffer(buf, count, ppos, page, ret); + if (!page) + return -ENOMEM; + + length = avc_get_hash_stats(page); + if (length >= 0) + length = simple_read_from_buffer(buf, count, ppos, page, length); free_page((unsigned long)page); -out: - return ret; + + return length; } static const struct file_operations sel_avc_cache_threshold_ops = { @@ -1375,10 +1425,14 @@ static int sel_avc_stats_seq_show(struct seq_file *seq, void *v) if (v == SEQ_START_TOKEN) seq_printf(seq, "lookups hits misses allocations reclaims " "frees\n"); - else - seq_printf(seq, "%u %u %u %u %u %u\n", st->lookups, - st->hits, st->misses, st->allocations, + else { + unsigned int lookups = st->lookups; + unsigned int misses = st->misses; + unsigned int hits = lookups - misses; + seq_printf(seq, "%u %u %u %u %u %u\n", lookups, + hits, misses, st->allocations, st->reclaims, st->frees); + } return 0; } @@ -1407,7 +1461,7 @@ static const struct file_operations sel_avc_cache_stats_ops = { static int sel_make_avc_files(struct dentry *dir) { - int i, ret = 0; + int i; static struct tree_descr files[] = { { "cache_threshold", &sel_avc_cache_threshold_ops, S_IRUGO|S_IWUSR }, @@ -1422,36 +1476,31 @@ static int sel_make_avc_files(struct dentry *dir) struct dentry *dentry; dentry = d_alloc_name(dir, files[i].name); - if (!dentry) { - ret = -ENOMEM; - goto out; - } + if (!dentry) + return -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode); - if (!inode) { - ret = -ENOMEM; - goto out; - } + if (!inode) + return -ENOMEM; + inode->i_fop = files[i].ops; inode->i_ino = ++sel_last_ino; d_add(dentry, inode); } -out: - return ret; + + return 0; } static ssize_t sel_read_initcon(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - struct inode *inode; char *con; u32 sid, len; ssize_t ret; - inode = file->f_path.dentry->d_inode; - sid = inode->i_ino&SEL_INO_MASK; + sid = file_inode(file)->i_ino&SEL_INO_MASK; ret = security_sid_to_context(sid, &con, &len); - if (ret < 0) + if (ret) return ret; ret = simple_read_from_buffer(buf, count, ppos, con, len); @@ -1466,33 +1515,25 @@ 
static const struct file_operations sel_initcon_ops = { static int sel_make_initcon_files(struct dentry *dir) { - int i, ret = 0; + int i; for (i = 1; i <= SECINITSID_NUM; i++) { struct inode *inode; struct dentry *dentry; dentry = d_alloc_name(dir, security_get_initial_sid_context(i)); - if (!dentry) { - ret = -ENOMEM; - goto out; - } + if (!dentry) + return -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO); - if (!inode) { - ret = -ENOMEM; - goto out; - } + if (!inode) + return -ENOMEM; + inode->i_fop = &sel_initcon_ops; inode->i_ino = i|SEL_INITCON_INO_OFFSET; d_add(dentry, inode); } -out: - return ret; -} -static inline unsigned int sel_div(unsigned long a, unsigned long b) -{ - return a / b - (a % b < 0); + return 0; } static inline unsigned long sel_class_to_ino(u16 class) @@ -1502,7 +1543,7 @@ static inline unsigned long sel_class_to_ino(u16 class) static inline u16 sel_ino_to_class(unsigned long ino) { - return sel_div(ino & SEL_INO_MASK, SEL_VEC_MAX + 1); + return (ino & SEL_INO_MASK) / (SEL_VEC_MAX + 1); } static inline unsigned long sel_perm_to_ino(u16 class, u32 perm) @@ -1518,21 +1559,10 @@ static inline u32 sel_ino_to_perm(unsigned long ino) static ssize_t sel_read_class(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - ssize_t rc, len; - char *page; - unsigned long ino = file->f_path.dentry->d_inode->i_ino; - - page = (char *)__get_free_page(GFP_KERNEL); - if (!page) { - rc = -ENOMEM; - goto out; - } - - len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino)); - rc = simple_read_from_buffer(buf, count, ppos, page, len); - free_page((unsigned long)page); -out: - return rc; + unsigned long ino = file_inode(file)->i_ino; + char res[TMPBUFLEN]; + ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino)); + return simple_read_from_buffer(buf, count, ppos, res, len); } static const struct file_operations sel_class_ops = { @@ -1543,21 +1573,10 @@ static const struct file_operations sel_class_ops = { static ssize_t sel_read_perm(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - ssize_t rc, len; - char *page; - unsigned long ino = file->f_path.dentry->d_inode->i_ino; - - page = (char *)__get_free_page(GFP_KERNEL); - if (!page) { - rc = -ENOMEM; - goto out; - } - - len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino)); - rc = simple_read_from_buffer(buf, count, ppos, page, len); - free_page((unsigned long)page); -out: - return rc; + unsigned long ino = file_inode(file)->i_ino; + char res[TMPBUFLEN]; + ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino)); + return simple_read_from_buffer(buf, count, ppos, res, len); } static const struct file_operations sel_perm_ops = { @@ -1571,7 +1590,7 @@ static ssize_t sel_read_policycap(struct file *file, char __user *buf, int value; char tmpbuf[TMPBUFLEN]; ssize_t length; - unsigned long i_ino = file->f_path.dentry->d_inode->i_ino; + unsigned long i_ino = file_inode(file)->i_ino; value = security_policycap_supported(i_ino & SEL_INO_MASK); length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value); @@ -1587,39 +1606,37 @@ static const struct file_operations sel_policycap_ops = { static int sel_make_perm_files(char *objclass, int classvalue, struct dentry *dir) { - int i, rc = 0, nperms; + int i, rc, nperms; char **perms; rc = security_get_permissions(objclass, &perms, &nperms); if (rc) - goto out; + return rc; for (i = 0; i < nperms; i++) { struct inode *inode; struct dentry *dentry; + rc = -ENOMEM; dentry = d_alloc_name(dir, perms[i]); - if (!dentry) { - rc = -ENOMEM; 
- goto out1; - } + if (!dentry) + goto out; + rc = -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO); - if (!inode) { - rc = -ENOMEM; - goto out1; - } + if (!inode) + goto out; + inode->i_fop = &sel_perm_ops; /* i+1 since perm values are 1-indexed */ inode->i_ino = sel_perm_to_ino(classvalue, i + 1); d_add(dentry, inode); } - -out1: + rc = 0; +out: for (i = 0; i < nperms; i++) kfree(perms[i]); kfree(perms); -out: return rc; } @@ -1631,34 +1648,23 @@ static int sel_make_class_dir_entries(char *classname, int index, int rc; dentry = d_alloc_name(dir, "index"); - if (!dentry) { - rc = -ENOMEM; - goto out; - } + if (!dentry) + return -ENOMEM; inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO); - if (!inode) { - rc = -ENOMEM; - goto out; - } + if (!inode) + return -ENOMEM; inode->i_fop = &sel_class_ops; inode->i_ino = sel_class_to_ino(index); d_add(dentry, inode); - dentry = d_alloc_name(dir, "perms"); - if (!dentry) { - rc = -ENOMEM; - goto out; - } - - rc = sel_make_dir(dir->d_inode, dentry, &last_class_ino); - if (rc) - goto out; + dentry = sel_make_dir(dir, "perms", &last_class_ino); + if (IS_ERR(dentry)) + return PTR_ERR(dentry); rc = sel_make_perm_files(classname, index, dentry); -out: return rc; } @@ -1688,15 +1694,15 @@ static void sel_remove_classes(void) static int sel_make_classes(void) { - int rc = 0, nclasses, i; + int rc, nclasses, i; char **classes; /* delete any existing entries */ sel_remove_classes(); rc = security_get_classes(&classes, &nclasses); - if (rc < 0) - goto out; + if (rc) + return rc; /* +2 since classes are 1-indexed */ last_class_ino = sel_class_to_ino(nclasses + 2); @@ -1704,29 +1710,24 @@ static int sel_make_classes(void) for (i = 0; i < nclasses; i++) { struct dentry *class_name_dir; - class_name_dir = d_alloc_name(class_dir, classes[i]); - if (!class_name_dir) { - rc = -ENOMEM; - goto out1; - } - - rc = sel_make_dir(class_dir->d_inode, class_name_dir, + class_name_dir = sel_make_dir(class_dir, classes[i], &last_class_ino); - if (rc) - goto out1; + if (IS_ERR(class_name_dir)) { + rc = PTR_ERR(class_name_dir); + goto out; + } /* i+1 since class values are 1-indexed */ rc = sel_make_class_dir_entries(classes[i], i + 1, class_name_dir); if (rc) - goto out1; + goto out; } - -out1: + rc = 0; +out: for (i = 0; i < nclasses; i++) kfree(classes[i]); kfree(classes); -out: return rc; } @@ -1760,17 +1761,21 @@ static int sel_make_policycap(void) return 0; } -static int sel_make_dir(struct inode *dir, struct dentry *dentry, +static struct dentry *sel_make_dir(struct dentry *dir, const char *name, unsigned long *ino) { - int ret = 0; + struct dentry *dentry = d_alloc_name(dir, name); struct inode *inode; - inode = sel_make_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO); + if (!dentry) + return ERR_PTR(-ENOMEM); + + inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO); if (!inode) { - ret = -ENOMEM; - goto out; + dput(dentry); + return ERR_PTR(-ENOMEM); } + inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; inode->i_ino = ++(*ino); @@ -1778,16 +1783,16 @@ static int sel_make_dir(struct inode *dir, struct dentry *dentry, inc_nlink(inode); d_add(dentry, inode); /* bump link count on parent directory, too */ - inc_nlink(dir); -out: - return ret; + inc_nlink(dir->d_inode); + + return dentry; } static int sel_fill_super(struct super_block *sb, void *data, int silent) { int ret; struct dentry *dentry; - struct inode *inode, *root_inode; + struct inode *inode; struct inode_security_struct *isec; static struct tree_descr 
selinux_files[] = { @@ -1807,38 +1812,30 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent) [SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO}, [SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO}, [SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO}, - [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUSR}, + [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO}, /* last one */ {""} }; ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files); if (ret) goto err; - root_inode = sb->s_root->d_inode; - - dentry = d_alloc_name(sb->s_root, BOOL_DIR_NAME); - if (!dentry) { - ret = -ENOMEM; + bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino); + if (IS_ERR(bool_dir)) { + ret = PTR_ERR(bool_dir); + bool_dir = NULL; goto err; } - ret = sel_make_dir(root_inode, dentry, &sel_last_ino); - if (ret) - goto err; - - bool_dir = dentry; - + ret = -ENOMEM; dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME); - if (!dentry) { - ret = -ENOMEM; + if (!dentry) goto err; - } + ret = -ENOMEM; inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO); - if (!inode) { - ret = -ENOMEM; + if (!inode) goto err; - } + inode->i_ino = ++sel_last_ino; isec = (struct inode_security_struct *)inode->i_security; isec->sid = SECINITSID_DEVNULL; @@ -1847,66 +1844,46 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent) init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3)); d_add(dentry, inode); - selinux_null = dentry; + selinux_null.dentry = dentry; - dentry = d_alloc_name(sb->s_root, "avc"); - if (!dentry) { - ret = -ENOMEM; + dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); goto err; } - ret = sel_make_dir(root_inode, dentry, &sel_last_ino); - if (ret) - goto err; - ret = sel_make_avc_files(dentry); if (ret) goto err; - dentry = d_alloc_name(sb->s_root, "initial_contexts"); - if (!dentry) { - ret = -ENOMEM; + dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino); + if (IS_ERR(dentry)) { + ret = PTR_ERR(dentry); goto err; } - ret = sel_make_dir(root_inode, dentry, &sel_last_ino); - if (ret) - goto err; - ret = sel_make_initcon_files(dentry); if (ret) goto err; - dentry = d_alloc_name(sb->s_root, "class"); - if (!dentry) { - ret = -ENOMEM; + class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino); + if (IS_ERR(class_dir)) { + ret = PTR_ERR(class_dir); + class_dir = NULL; goto err; } - ret = sel_make_dir(root_inode, dentry, &sel_last_ino); - if (ret) - goto err; - - class_dir = dentry; - - dentry = d_alloc_name(sb->s_root, "policy_capabilities"); - if (!dentry) { - ret = -ENOMEM; + policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino); + if (IS_ERR(policycap_dir)) { + ret = PTR_ERR(policycap_dir); + policycap_dir = NULL; goto err; } - - ret = sel_make_dir(root_inode, dentry, &sel_last_ino); - if (ret) - goto err; - - policycap_dir = dentry; - -out: - return ret; + return 0; err: printk(KERN_ERR "SELinux: %s: failed while creating inodes\n", __func__); - goto out; + return ret; } static struct dentry *sel_mount(struct file_system_type *fs_type, @@ -1922,6 +1899,7 @@ static struct file_system_type sel_fs_type = { }; struct vfsmount *selinuxfs_mount; +static struct kobject *selinuxfs_kobj; static int __init init_sel_fs(void) { @@ -1929,15 +1907,24 @@ static int __init init_sel_fs(void) if (!selinux_enabled) return 0; + + selinuxfs_kobj = kobject_create_and_add("selinux", fs_kobj); + if (!selinuxfs_kobj) + 
return -ENOMEM; + err = register_filesystem(&sel_fs_type); - if (!err) { - selinuxfs_mount = kern_mount(&sel_fs_type); - if (IS_ERR(selinuxfs_mount)) { - printk(KERN_ERR "selinuxfs: could not mount!\n"); - err = PTR_ERR(selinuxfs_mount); - selinuxfs_mount = NULL; - } + if (err) { + kobject_put(selinuxfs_kobj); + return err; } + + selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type); + if (IS_ERR(selinuxfs_mount)) { + printk(KERN_ERR "selinuxfs: could not mount!\n"); + err = PTR_ERR(selinuxfs_mount); + selinuxfs_mount = NULL; + } + return err; } @@ -1946,6 +1933,8 @@ __initcall(init_sel_fs); #ifdef CONFIG_SECURITY_SELINUX_DISABLE void exit_sel_fs(void) { + kobject_put(selinuxfs_kobj); + kern_unmount(selinuxfs_mount); unregister_filesystem(&sel_fs_type); } #endif diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h index dff0c75345c..63ce2f9e441 100644 --- a/security/selinux/ss/avtab.h +++ b/security/selinux/ss/avtab.h @@ -14,7 +14,7 @@ * * Copyright (C) 2003 Tresys Technology, LLC * This program is free software; you can redistribute it and/or modify - * it under the terms of the GNU General Public License as published by + * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2. * * Updated: Yuichi Nakamura <ynakam@hitachisoft.jp> @@ -27,16 +27,16 @@ struct avtab_key { u16 source_type; /* source type */ u16 target_type; /* target type */ u16 target_class; /* target object class */ -#define AVTAB_ALLOWED 1 -#define AVTAB_AUDITALLOW 2 -#define AVTAB_AUDITDENY 4 -#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY) -#define AVTAB_TRANSITION 16 -#define AVTAB_MEMBER 32 -#define AVTAB_CHANGE 64 -#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE) -#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for used in cond_avtab */ -#define AVTAB_ENABLED 0x8000 /* reserved for used in cond_avtab */ +#define AVTAB_ALLOWED 0x0001 +#define AVTAB_AUDITALLOW 0x0002 +#define AVTAB_AUDITDENY 0x0004 +#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY) +#define AVTAB_TRANSITION 0x0010 +#define AVTAB_MEMBER 0x0020 +#define AVTAB_CHANGE 0x0040 +#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE) +#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for used in cond_avtab */ +#define AVTAB_ENABLED 0x8000 /* reserved for used in cond_avtab */ u16 specified; /* what field is specified */ }; @@ -86,7 +86,6 @@ void avtab_cache_destroy(void); #define MAX_AVTAB_HASH_BITS 11 #define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS) -#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1) #endif /* _SS_AVTAB_H_ */ diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c index 655fe1c6cc6..377d148e715 100644 --- a/security/selinux/ss/conditional.c +++ b/security/selinux/ss/conditional.c @@ -175,10 +175,10 @@ void cond_policydb_destroy(struct policydb *p) int cond_init_bool_indexes(struct policydb *p) { kfree(p->bool_val_to_struct); - p->bool_val_to_struct = (struct cond_bool_datum **) + p->bool_val_to_struct = kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL); if (!p->bool_val_to_struct) - return -1; + return -ENOMEM; return 0; } @@ -193,6 +193,7 @@ int cond_index_bool(void *key, void *datum, void *datap) { struct policydb *p; struct cond_bool_datum *booldatum; + struct flex_array *fa; booldatum = datum; p = datap; @@ -200,7 +201,10 @@ int cond_index_bool(void *key, void *datum, void *datap) if (!booldatum->value || booldatum->value > 
p->p_bools.nprim) return -EINVAL; - p->p_bool_val_to_name[booldatum->value - 1] = key; + fa = p->sym_val_to_name[SYM_BOOLS]; + if (flex_array_put_ptr(fa, booldatum->value - 1, key, + GFP_KERNEL | __GFP_ZERO)) + BUG(); p->bool_val_to_struct[booldatum->value - 1] = booldatum; return 0; @@ -551,7 +555,7 @@ static int cond_write_av_list(struct policydb *p, return 0; } -int cond_write_node(struct policydb *p, struct cond_node *node, +static int cond_write_node(struct policydb *p, struct cond_node *node, struct policy_file *fp) { struct cond_expr *cur_expr; diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h index 3f209c63529..4d1f8746650 100644 --- a/security/selinux/ss/conditional.h +++ b/security/selinux/ss/conditional.h @@ -13,6 +13,7 @@ #include "avtab.h" #include "symtab.h" #include "policydb.h" +#include "../include/conditional.h" #define COND_EXPR_MAXDEPTH 10 diff --git a/security/selinux/ss/constraint.h b/security/selinux/ss/constraint.h index 149dda731fd..96fd947c494 100644 --- a/security/selinux/ss/constraint.h +++ b/security/selinux/ss/constraint.h @@ -48,6 +48,7 @@ struct constraint_expr { u32 op; /* operator */ struct ebitmap names; /* names */ + struct type_set *type_names; struct constraint_expr *next; /* next expression */ }; diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h index 45e8fb0515f..212e3479a0d 100644 --- a/security/selinux/ss/context.h +++ b/security/selinux/ss/context.h @@ -74,6 +74,26 @@ out: return rc; } +/* + * Sets both levels in the MLS range of 'dst' to the high level of 'src'. + */ +static inline int mls_context_cpy_high(struct context *dst, struct context *src) +{ + int rc; + + dst->range.level[0].sens = src->range.level[1].sens; + rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat); + if (rc) + goto out; + + dst->range.level[1].sens = src->range.level[1].sens; + rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat); + if (rc) + ebitmap_destroy(&dst->range.level[0].cat); +out: + return rc; +} + static inline int mls_context_cmp(struct context *c1, struct context *c2) { return ((c1->range.level[0].sens == c2->range.level[0].sens) && diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c index d42951fcbe8..820313a04d4 100644 --- a/security/selinux/ss/ebitmap.c +++ b/security/selinux/ss/ebitmap.c @@ -4,7 +4,7 @@ * Author : Stephen Smalley, <sds@epoch.ncsc.mil> */ /* - * Updated: Hewlett-Packard <paul.moore@hp.com> + * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support to import/export the NetLabel category bitmap * @@ -213,7 +213,12 @@ netlbl_import_failure: } #endif /* CONFIG_NETLABEL */ -int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2) +/* + * Check to see if all the bits set in e2 are also set in e1. Optionally, + * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed + * last_e2bit. 
+ */ +int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit) { struct ebitmap_node *n1, *n2; int i; @@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2) n1 = e1->node; n2 = e2->node; + while (n1 && n2 && (n1->startbit <= n2->startbit)) { if (n1->startbit < n2->startbit) { n1 = n1->next; continue; } - for (i = 0; i < EBITMAP_UNIT_NUMS; i++) { + for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; ) + i--; /* Skip trailing NULL map entries */ + if (last_e2bit && (i >= 0)) { + u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE + + __fls(n2->maps[i]); + if (lastsetbit > last_e2bit) + return 0; + } + + while (i >= 0) { if ((n1->maps[i] & n2->maps[i]) != n2->maps[i]) return 0; + i--; } n1 = n1->next; diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h index 1f4e93c2ae8..712c8a7b8e8 100644 --- a/security/selinux/ss/ebitmap.h +++ b/security/selinux/ss/ebitmap.h @@ -16,7 +16,13 @@ #include <net/netlabel.h> -#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \ +#ifdef CONFIG_64BIT +#define EBITMAP_NODE_SIZE 64 +#else +#define EBITMAP_NODE_SIZE 32 +#endif + +#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\ / sizeof(unsigned long)) #define EBITMAP_UNIT_SIZE BITS_PER_LONG #define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE) @@ -36,7 +42,6 @@ struct ebitmap { }; #define ebitmap_length(e) ((e)->highbit) -#define ebitmap_startbit(e) ((e)->node ? (e)->node->startbit : 0) static inline unsigned int ebitmap_start_positive(struct ebitmap *e, struct ebitmap_node **n) @@ -118,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n, int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2); int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src); -int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2); +int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit); int ebitmap_get_bit(struct ebitmap *e, unsigned long bit); int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value); void ebitmap_destroy(struct ebitmap *e); diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c index 933e735bb18..2cc49614984 100644 --- a/security/selinux/ss/hashtab.c +++ b/security/selinux/ss/hashtab.c @@ -6,6 +6,7 @@ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/errno.h> +#include <linux/sched.h> #include "hashtab.h" struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key), @@ -40,6 +41,8 @@ int hashtab_insert(struct hashtab *h, void *key, void *datum) u32 hvalue; struct hashtab_node *prev, *cur, *newnode; + cond_resched(); + if (!h || h->nel == HASHTAB_MAX_NODES) return -EINVAL; diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c index b4eff7a60c5..d307b37ddc2 100644 --- a/security/selinux/ss/mls.c +++ b/security/selinux/ss/mls.c @@ -11,7 +11,7 @@ * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc. 
*/ /* - * Updated: Hewlett-Packard <paul.moore@hp.com> + * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support to import/export the MLS label from NetLabel * @@ -45,7 +45,7 @@ int mls_compute_context_len(struct context *context) len = 1; /* for the beginning ":" */ for (l = 0; l < 2; l++) { int index_sens = context->range.level[l].sens; - len += strlen(policydb.p_sens_val_to_name[index_sens - 1]); + len += strlen(sym_name(&policydb, SYM_LEVELS, index_sens - 1)); /* categories */ head = -2; @@ -55,17 +55,17 @@ int mls_compute_context_len(struct context *context) if (i - prev > 1) { /* one or more negative bits are skipped */ if (head != prev) { - nm = policydb.p_cat_val_to_name[prev]; + nm = sym_name(&policydb, SYM_CATS, prev); len += strlen(nm) + 1; } - nm = policydb.p_cat_val_to_name[i]; + nm = sym_name(&policydb, SYM_CATS, i); len += strlen(nm) + 1; head = i; } prev = i; } if (prev != head) { - nm = policydb.p_cat_val_to_name[prev]; + nm = sym_name(&policydb, SYM_CATS, prev); len += strlen(nm) + 1; } if (l == 0) { @@ -102,8 +102,8 @@ void mls_sid_to_context(struct context *context, scontextp++; for (l = 0; l < 2; l++) { - strcpy(scontextp, - policydb.p_sens_val_to_name[context->range.level[l].sens - 1]); + strcpy(scontextp, sym_name(&policydb, SYM_LEVELS, + context->range.level[l].sens - 1)); scontextp += strlen(scontextp); /* categories */ @@ -118,7 +118,7 @@ void mls_sid_to_context(struct context *context, *scontextp++ = '.'; else *scontextp++ = ','; - nm = policydb.p_cat_val_to_name[prev]; + nm = sym_name(&policydb, SYM_CATS, prev); strcpy(scontextp, nm); scontextp += strlen(nm); } @@ -126,7 +126,7 @@ void mls_sid_to_context(struct context *context, *scontextp++ = ':'; else *scontextp++ = ','; - nm = policydb.p_cat_val_to_name[i]; + nm = sym_name(&policydb, SYM_CATS, i); strcpy(scontextp, nm); scontextp += strlen(nm); head = i; @@ -139,7 +139,7 @@ void mls_sid_to_context(struct context *context, *scontextp++ = '.'; else *scontextp++ = ','; - nm = policydb.p_cat_val_to_name[prev]; + nm = sym_name(&policydb, SYM_CATS, prev); strcpy(scontextp, nm); scontextp += strlen(nm); } @@ -160,29 +160,21 @@ void mls_sid_to_context(struct context *context, int mls_level_isvalid(struct policydb *p, struct mls_level *l) { struct level_datum *levdatum; - struct ebitmap_node *node; - int i; if (!l->sens || l->sens > p->p_levels.nprim) return 0; levdatum = hashtab_search(p->p_levels.table, - p->p_sens_val_to_name[l->sens - 1]); + sym_name(p, SYM_LEVELS, l->sens - 1)); if (!levdatum) return 0; - ebitmap_for_each_positive_bit(&l->cat, node, i) { - if (i > p->p_cats.nprim) - return 0; - if (!ebitmap_get_bit(&levdatum->level->cat, i)) { - /* - * Category may not be associated with - * sensitivity. - */ - return 0; - } - } - - return 1; + /* + * Return 1 iff all the bits set in l->cat are also be set in + * levdatum->level->cat and no bit in l->cat is larger than + * p->p_cats.nprim. 
+ */ + return ebitmap_contains(&levdatum->level->cat, &l->cat, + p->p_cats.nprim); } int mls_range_isvalid(struct policydb *p, struct mls_range *r) @@ -482,7 +474,8 @@ int mls_convert_context(struct policydb *oldp, for (l = 0; l < 2; l++) { levdatum = hashtab_search(newp->p_levels.table, - oldp->p_sens_val_to_name[c->range.level[l].sens - 1]); + sym_name(oldp, SYM_LEVELS, + c->range.level[l].sens - 1)); if (!levdatum) return -EINVAL; @@ -493,12 +486,14 @@ int mls_convert_context(struct policydb *oldp, int rc; catdatum = hashtab_search(newp->p_cats.table, - oldp->p_cat_val_to_name[i]); + sym_name(oldp, SYM_CATS, i)); if (!catdatum) return -EINVAL; rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1); if (rc) return rc; + + cond_resched(); } ebitmap_destroy(&c->range.level[l].cat); c->range.level[l].cat = bitmap; @@ -511,10 +506,13 @@ int mls_compute_sid(struct context *scontext, struct context *tcontext, u16 tclass, u32 specified, - struct context *newcontext) + struct context *newcontext, + bool sock) { struct range_trans rtr; struct mls_range *r; + struct class_datum *cladatum; + int default_range = 0; if (!policydb.mls_enabled) return 0; @@ -528,9 +526,31 @@ int mls_compute_sid(struct context *scontext, r = hashtab_search(policydb.range_tr, &rtr); if (r) return mls_range_set(newcontext, r); + + if (tclass && tclass <= policydb.p_classes.nprim) { + cladatum = policydb.class_val_to_struct[tclass - 1]; + if (cladatum) + default_range = cladatum->default_range; + } + + switch (default_range) { + case DEFAULT_SOURCE_LOW: + return mls_context_cpy_low(newcontext, scontext); + case DEFAULT_SOURCE_HIGH: + return mls_context_cpy_high(newcontext, scontext); + case DEFAULT_SOURCE_LOW_HIGH: + return mls_context_cpy(newcontext, scontext); + case DEFAULT_TARGET_LOW: + return mls_context_cpy_low(newcontext, tcontext); + case DEFAULT_TARGET_HIGH: + return mls_context_cpy_high(newcontext, tcontext); + case DEFAULT_TARGET_LOW_HIGH: + return mls_context_cpy(newcontext, tcontext); + } + /* Fallthrough */ case AVTAB_CHANGE: - if (tclass == policydb.process_class) + if ((tclass == policydb.process_class) || (sock == true)) /* Use the process MLS attributes. */ return mls_context_cpy(newcontext, scontext); else diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h index cd9152632e5..e4369e3e636 100644 --- a/security/selinux/ss/mls.h +++ b/security/selinux/ss/mls.h @@ -11,7 +11,7 @@ * Copyright (C) 2004-2006 Trusted Computer Solutions, Inc. 
*/ /* - * Updated: Hewlett-Packard <paul.moore@hp.com> + * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support to import/export the MLS label from NetLabel * @@ -49,7 +49,8 @@ int mls_compute_sid(struct context *scontext, struct context *tcontext, u16 tclass, u32 specified, - struct context *newcontext); + struct context *newcontext, + bool sock); int mls_setup_user_range(struct context *fromcon, struct user_datum *user, struct context *usercon); diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h index 03bed52a805..e9364877413 100644 --- a/security/selinux/ss/mls_types.h +++ b/security/selinux/ss/mls_types.h @@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2) static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2) { return ((l1->sens >= l2->sens) && - ebitmap_contains(&l1->cat, &l2->cat)); + ebitmap_contains(&l1->cat, &l2->cat, 0)); } #define mls_level_incomp(l1, l2) \ diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 94f630d93a5..9c5cdc2caae 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -13,7 +13,7 @@ * * Added conditional policy language extensions * - * Updated: Hewlett-Packard <paul.moore@hp.com> + * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support for the policy capability bitmap * @@ -123,6 +123,31 @@ static struct policydb_compat_info policydb_compat[] = { .sym_num = SYM_NUM, .ocon_num = OCON_NUM, }, + { + .version = POLICYDB_VERSION_FILENAME_TRANS, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, + { + .version = POLICYDB_VERSION_ROLETRANS, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, + { + .version = POLICYDB_VERSION_NEW_OBJECT_DEFAULTS, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, + { + .version = POLICYDB_VERSION_DEFAULT_TYPE, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, + { + .version = POLICYDB_VERSION_CONSTRAINT_NAMES, + .sym_num = SYM_NUM, + .ocon_num = OCON_NUM, + }, }; static struct policydb_compat_info *policydb_lookup_compat(int version) @@ -148,32 +173,67 @@ static int roles_init(struct policydb *p) int rc; struct role_datum *role; + rc = -ENOMEM; role = kzalloc(sizeof(*role), GFP_KERNEL); - if (!role) { - rc = -ENOMEM; + if (!role) goto out; - } + + rc = -EINVAL; role->value = ++p->p_roles.nprim; - if (role->value != OBJECT_R_VAL) { - rc = -EINVAL; - goto out_free_role; - } + if (role->value != OBJECT_R_VAL) + goto out; + + rc = -ENOMEM; key = kstrdup(OBJECT_R, GFP_KERNEL); - if (!key) { - rc = -ENOMEM; - goto out_free_role; - } + if (!key) + goto out; + rc = hashtab_insert(p->p_roles.table, key, role); if (rc) - goto out_free_key; -out: - return rc; + goto out; -out_free_key: + return 0; +out: kfree(key); -out_free_role: kfree(role); - goto out; + return rc; +} + +static u32 filenametr_hash(struct hashtab *h, const void *k) +{ + const struct filename_trans *ft = k; + unsigned long hash; + unsigned int byte_num; + unsigned char focus; + + hash = ft->stype ^ ft->ttype ^ ft->tclass; + + byte_num = 0; + while ((focus = ft->name[byte_num++])) + hash = partial_name_hash(focus, hash); + return hash & (h->size - 1); +} + +static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2) +{ + const struct filename_trans *ft1 = k1; + const struct filename_trans *ft2 = k2; + int v; + + v = ft1->stype - ft2->stype; + if (v) + return v; + + v = ft1->ttype - ft2->ttype; + if (v) + return v; + + v = ft1->tclass - ft2->tclass; + if (v) + return v; + + return 
strcmp(ft1->name, ft2->name); + } static u32 rangetr_hash(struct hashtab *h, const void *k) @@ -213,35 +273,40 @@ static int policydb_init(struct policydb *p) for (i = 0; i < SYM_NUM; i++) { rc = symtab_init(&p->symtab[i], symtab_sizes[i]); if (rc) - goto out_free_symtab; + goto out; } rc = avtab_init(&p->te_avtab); if (rc) - goto out_free_symtab; + goto out; rc = roles_init(p); if (rc) - goto out_free_symtab; + goto out; rc = cond_policydb_init(p); if (rc) - goto out_free_symtab; + goto out; + + p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp, (1 << 10)); + if (!p->filename_trans) + goto out; p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256); if (!p->range_tr) - goto out_free_symtab; + goto out; + ebitmap_init(&p->filename_trans_ttypes); ebitmap_init(&p->policycaps); ebitmap_init(&p->permissive_map); + return 0; out: - return rc; - -out_free_symtab: + hashtab_destroy(p->filename_trans); + hashtab_destroy(p->range_tr); for (i = 0; i < SYM_NUM; i++) hashtab_destroy(p->symtab[i].table); - goto out; + return rc; } /* @@ -258,12 +323,17 @@ static int common_index(void *key, void *datum, void *datap) { struct policydb *p; struct common_datum *comdatum; + struct flex_array *fa; comdatum = datum; p = datap; if (!comdatum->value || comdatum->value > p->p_commons.nprim) return -EINVAL; - p->p_common_val_to_name[comdatum->value - 1] = key; + + fa = p->sym_val_to_name[SYM_COMMONS]; + if (flex_array_put_ptr(fa, comdatum->value - 1, key, + GFP_KERNEL | __GFP_ZERO)) + BUG(); return 0; } @@ -271,12 +341,16 @@ static int class_index(void *key, void *datum, void *datap) { struct policydb *p; struct class_datum *cladatum; + struct flex_array *fa; cladatum = datum; p = datap; if (!cladatum->value || cladatum->value > p->p_classes.nprim) return -EINVAL; - p->p_class_val_to_name[cladatum->value - 1] = key; + fa = p->sym_val_to_name[SYM_CLASSES]; + if (flex_array_put_ptr(fa, cladatum->value - 1, key, + GFP_KERNEL | __GFP_ZERO)) + BUG(); p->class_val_to_struct[cladatum->value - 1] = cladatum; return 0; } @@ -285,6 +359,7 @@ static int role_index(void *key, void *datum, void *datap) { struct policydb *p; struct role_datum *role; + struct flex_array *fa; role = datum; p = datap; @@ -292,7 +367,11 @@ static int role_index(void *key, void *datum, void *datap) || role->value > p->p_roles.nprim || role->bounds > p->p_roles.nprim) return -EINVAL; - p->p_role_val_to_name[role->value - 1] = key; + + fa = p->sym_val_to_name[SYM_ROLES]; + if (flex_array_put_ptr(fa, role->value - 1, key, + GFP_KERNEL | __GFP_ZERO)) + BUG(); p->role_val_to_struct[role->value - 1] = role; return 0; } @@ -301,6 +380,7 @@ static int type_index(void *key, void *datum, void *datap) { struct policydb *p; struct type_datum *typdatum; + struct flex_array *fa; typdatum = datum; p = datap; @@ -310,8 +390,15 @@ static int type_index(void *key, void *datum, void *datap) || typdatum->value > p->p_types.nprim || typdatum->bounds > p->p_types.nprim) return -EINVAL; - p->p_type_val_to_name[typdatum->value - 1] = key; - p->type_val_to_struct[typdatum->value - 1] = typdatum; + fa = p->sym_val_to_name[SYM_TYPES]; + if (flex_array_put_ptr(fa, typdatum->value - 1, key, + GFP_KERNEL | __GFP_ZERO)) + BUG(); + + fa = p->type_val_to_struct_array; + if (flex_array_put_ptr(fa, typdatum->value - 1, typdatum, + GFP_KERNEL | __GFP_ZERO)) + BUG(); } return 0; @@ -321,6 +408,7 @@ static int user_index(void *key, void *datum, void *datap) { struct policydb *p; struct user_datum *usrdatum; + struct flex_array *fa; usrdatum = datum; p = datap; @@ 
-328,7 +416,11 @@ static int user_index(void *key, void *datum, void *datap) || usrdatum->value > p->p_users.nprim || usrdatum->bounds > p->p_users.nprim) return -EINVAL; - p->p_user_val_to_name[usrdatum->value - 1] = key; + + fa = p->sym_val_to_name[SYM_USERS]; + if (flex_array_put_ptr(fa, usrdatum->value - 1, key, + GFP_KERNEL | __GFP_ZERO)) + BUG(); p->user_val_to_struct[usrdatum->value - 1] = usrdatum; return 0; } @@ -337,6 +429,7 @@ static int sens_index(void *key, void *datum, void *datap) { struct policydb *p; struct level_datum *levdatum; + struct flex_array *fa; levdatum = datum; p = datap; @@ -345,7 +438,10 @@ static int sens_index(void *key, void *datum, void *datap) if (!levdatum->level->sens || levdatum->level->sens > p->p_levels.nprim) return -EINVAL; - p->p_sens_val_to_name[levdatum->level->sens - 1] = key; + fa = p->sym_val_to_name[SYM_LEVELS]; + if (flex_array_put_ptr(fa, levdatum->level->sens - 1, key, + GFP_KERNEL | __GFP_ZERO)) + BUG(); } return 0; @@ -355,6 +451,7 @@ static int cat_index(void *key, void *datum, void *datap) { struct policydb *p; struct cat_datum *catdatum; + struct flex_array *fa; catdatum = datum; p = datap; @@ -362,7 +459,10 @@ static int cat_index(void *key, void *datum, void *datap) if (!catdatum->isalias) { if (!catdatum->value || catdatum->value > p->p_cats.nprim) return -EINVAL; - p->p_cat_val_to_name[catdatum->value - 1] = key; + fa = p->sym_val_to_name[SYM_CATS]; + if (flex_array_put_ptr(fa, catdatum->value - 1, key, + GFP_KERNEL | __GFP_ZERO)) + BUG(); } return 0; @@ -380,74 +480,27 @@ static int (*index_f[SYM_NUM]) (void *key, void *datum, void *datap) = cat_index, }; -/* - * Define the common val_to_name array and the class - * val_to_name and val_to_struct arrays in a policy - * database structure. - * - * Caller must clean up upon failure. 
- */ -static int policydb_index_classes(struct policydb *p) +#ifdef DEBUG_HASHES +static void hash_eval(struct hashtab *h, const char *hash_name) { - int rc; - - p->p_common_val_to_name = - kmalloc(p->p_commons.nprim * sizeof(char *), GFP_KERNEL); - if (!p->p_common_val_to_name) { - rc = -ENOMEM; - goto out; - } - - rc = hashtab_map(p->p_commons.table, common_index, p); - if (rc) - goto out; - - p->class_val_to_struct = - kmalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)), GFP_KERNEL); - if (!p->class_val_to_struct) { - rc = -ENOMEM; - goto out; - } - - p->p_class_val_to_name = - kmalloc(p->p_classes.nprim * sizeof(char *), GFP_KERNEL); - if (!p->p_class_val_to_name) { - rc = -ENOMEM; - goto out; - } + struct hashtab_info info; - rc = hashtab_map(p->p_classes.table, class_index, p); -out: - return rc; + hashtab_stat(h, &info); + printk(KERN_DEBUG "SELinux: %s: %d entries and %d/%d buckets used, " + "longest chain length %d\n", hash_name, h->nel, + info.slots_used, h->size, info.max_chain_len); } -#ifdef DEBUG_HASHES static void symtab_hash_eval(struct symtab *s) { int i; - for (i = 0; i < SYM_NUM; i++) { - struct hashtab *h = s[i].table; - struct hashtab_info info; - - hashtab_stat(h, &info); - printk(KERN_DEBUG "SELinux: %s: %d entries and %d/%d buckets used, " - "longest chain length %d\n", symtab_name[i], h->nel, - info.slots_used, h->size, info.max_chain_len); - } + for (i = 0; i < SYM_NUM; i++) + hash_eval(s[i].table, symtab_name[i]); } -static void rangetr_hash_eval(struct hashtab *h) -{ - struct hashtab_info info; - - hashtab_stat(h, &info); - printk(KERN_DEBUG "SELinux: rangetr: %d entries and %d/%d buckets used, " - "longest chain length %d\n", h->nel, - info.slots_used, h->size, info.max_chain_len); -} #else -static inline void rangetr_hash_eval(struct hashtab *h) +static inline void hash_eval(struct hashtab *h, char *hash_name) { } #endif @@ -458,9 +511,9 @@ static inline void rangetr_hash_eval(struct hashtab *h) * * Caller must clean up on failure. 
*/ -static int policydb_index_others(struct policydb *p) +static int policydb_index(struct policydb *p) { - int i, rc = 0; + int i, rc; printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools", p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim); @@ -477,47 +530,63 @@ static int policydb_index_others(struct policydb *p) symtab_hash_eval(p->symtab); #endif + rc = -ENOMEM; + p->class_val_to_struct = + kmalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)), + GFP_KERNEL); + if (!p->class_val_to_struct) + goto out; + + rc = -ENOMEM; p->role_val_to_struct = kmalloc(p->p_roles.nprim * sizeof(*(p->role_val_to_struct)), GFP_KERNEL); - if (!p->role_val_to_struct) { - rc = -ENOMEM; + if (!p->role_val_to_struct) goto out; - } + rc = -ENOMEM; p->user_val_to_struct = kmalloc(p->p_users.nprim * sizeof(*(p->user_val_to_struct)), GFP_KERNEL); - if (!p->user_val_to_struct) { - rc = -ENOMEM; + if (!p->user_val_to_struct) goto out; - } - p->type_val_to_struct = - kmalloc(p->p_types.nprim * sizeof(*(p->type_val_to_struct)), - GFP_KERNEL); - if (!p->type_val_to_struct) { - rc = -ENOMEM; + /* Yes, I want the sizeof the pointer, not the structure */ + rc = -ENOMEM; + p->type_val_to_struct_array = flex_array_alloc(sizeof(struct type_datum *), + p->p_types.nprim, + GFP_KERNEL | __GFP_ZERO); + if (!p->type_val_to_struct_array) goto out; - } - if (cond_init_bool_indexes(p)) { - rc = -ENOMEM; + rc = flex_array_prealloc(p->type_val_to_struct_array, 0, + p->p_types.nprim, GFP_KERNEL | __GFP_ZERO); + if (rc) goto out; - } - for (i = SYM_ROLES; i < SYM_NUM; i++) { - p->sym_val_to_name[i] = - kmalloc(p->symtab[i].nprim * sizeof(char *), GFP_KERNEL); - if (!p->sym_val_to_name[i]) { - rc = -ENOMEM; + rc = cond_init_bool_indexes(p); + if (rc) + goto out; + + for (i = 0; i < SYM_NUM; i++) { + rc = -ENOMEM; + p->sym_val_to_name[i] = flex_array_alloc(sizeof(char *), + p->symtab[i].nprim, + GFP_KERNEL | __GFP_ZERO); + if (!p->sym_val_to_name[i]) goto out; - } + + rc = flex_array_prealloc(p->sym_val_to_name[i], + 0, p->symtab[i].nprim, + GFP_KERNEL | __GFP_ZERO); + if (rc) + goto out; + rc = hashtab_map(p->symtab[i].table, index_f[i], p); if (rc) goto out; } - + rc = 0; out: return rc; } @@ -540,13 +609,28 @@ static int common_destroy(void *key, void *datum, void *p) struct common_datum *comdatum; kfree(key); - comdatum = datum; - hashtab_map(comdatum->permissions.table, perm_destroy, NULL); - hashtab_destroy(comdatum->permissions.table); + if (datum) { + comdatum = datum; + hashtab_map(comdatum->permissions.table, perm_destroy, NULL); + hashtab_destroy(comdatum->permissions.table); + } kfree(datum); return 0; } +static void constraint_expr_destroy(struct constraint_expr *expr) +{ + if (expr) { + ebitmap_destroy(&expr->names); + if (expr->type_names) { + ebitmap_destroy(&expr->type_names->types); + ebitmap_destroy(&expr->type_names->negset); + kfree(expr->type_names); + } + kfree(expr); + } +} + static int cls_destroy(void *key, void *datum, void *p) { struct class_datum *cladatum; @@ -554,38 +638,37 @@ static int cls_destroy(void *key, void *datum, void *p) struct constraint_expr *e, *etmp; kfree(key); - cladatum = datum; - hashtab_map(cladatum->permissions.table, perm_destroy, NULL); - hashtab_destroy(cladatum->permissions.table); - constraint = cladatum->constraints; - while (constraint) { - e = constraint->expr; - while (e) { - ebitmap_destroy(&e->names); - etmp = e; - e = e->next; - kfree(etmp); + if (datum) { + cladatum = datum; + hashtab_map(cladatum->permissions.table, perm_destroy, 
NULL); + hashtab_destroy(cladatum->permissions.table); + constraint = cladatum->constraints; + while (constraint) { + e = constraint->expr; + while (e) { + etmp = e; + e = e->next; + constraint_expr_destroy(etmp); + } + ctemp = constraint; + constraint = constraint->next; + kfree(ctemp); } - ctemp = constraint; - constraint = constraint->next; - kfree(ctemp); - } - - constraint = cladatum->validatetrans; - while (constraint) { - e = constraint->expr; - while (e) { - ebitmap_destroy(&e->names); - etmp = e; - e = e->next; - kfree(etmp); + + constraint = cladatum->validatetrans; + while (constraint) { + e = constraint->expr; + while (e) { + etmp = e; + e = e->next; + constraint_expr_destroy(etmp); + } + ctemp = constraint; + constraint = constraint->next; + kfree(ctemp); } - ctemp = constraint; - constraint = constraint->next; - kfree(ctemp); + kfree(cladatum->comkey); } - - kfree(cladatum->comkey); kfree(datum); return 0; } @@ -595,9 +678,11 @@ static int role_destroy(void *key, void *datum, void *p) struct role_datum *role; kfree(key); - role = datum; - ebitmap_destroy(&role->dominates); - ebitmap_destroy(&role->types); + if (datum) { + role = datum; + ebitmap_destroy(&role->dominates); + ebitmap_destroy(&role->types); + } kfree(datum); return 0; } @@ -614,11 +699,13 @@ static int user_destroy(void *key, void *datum, void *p) struct user_datum *usrdatum; kfree(key); - usrdatum = datum; - ebitmap_destroy(&usrdatum->roles); - ebitmap_destroy(&usrdatum->range.level[0].cat); - ebitmap_destroy(&usrdatum->range.level[1].cat); - ebitmap_destroy(&usrdatum->dfltlevel.cat); + if (datum) { + usrdatum = datum; + ebitmap_destroy(&usrdatum->roles); + ebitmap_destroy(&usrdatum->range.level[0].cat); + ebitmap_destroy(&usrdatum->range.level[1].cat); + ebitmap_destroy(&usrdatum->dfltlevel.cat); + } kfree(datum); return 0; } @@ -628,9 +715,11 @@ static int sens_destroy(void *key, void *datum, void *p) struct level_datum *levdatum; kfree(key); - levdatum = datum; - ebitmap_destroy(&levdatum->level->cat); - kfree(levdatum->level); + if (datum) { + levdatum = datum; + ebitmap_destroy(&levdatum->level->cat); + kfree(levdatum->level); + } kfree(datum); return 0; } @@ -654,6 +743,16 @@ static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) = cat_destroy, }; +static int filenametr_destroy(void *key, void *datum, void *p) +{ + struct filename_trans *ft = key; + kfree(ft->name); + kfree(key); + kfree(datum); + cond_resched(); + return 0; +} + static int range_tr_destroy(void *key, void *datum, void *p) { struct mls_range *rt = datum; @@ -695,13 +794,16 @@ void policydb_destroy(struct policydb *p) hashtab_destroy(p->symtab[i].table); } - for (i = 0; i < SYM_NUM; i++) - kfree(p->sym_val_to_name[i]); + for (i = 0; i < SYM_NUM; i++) { + if (p->sym_val_to_name[i]) + flex_array_free(p->sym_val_to_name[i]); + } kfree(p->class_val_to_struct); kfree(p->role_val_to_struct); kfree(p->user_val_to_struct); - kfree(p->type_val_to_struct); + if (p->type_val_to_struct_array) + flex_array_free(p->type_val_to_struct_array); avtab_destroy(&p->te_avtab); @@ -748,6 +850,9 @@ void policydb_destroy(struct policydb *p) } kfree(lra); + hashtab_map(p->filename_trans, filenametr_destroy, NULL); + hashtab_destroy(p->filename_trans); + hashtab_map(p->range_tr, range_tr_destroy, NULL); hashtab_destroy(p->range_tr); @@ -762,6 +867,8 @@ void policydb_destroy(struct policydb *p) } flex_array_free(p->type_attr_map_array); } + + ebitmap_destroy(&p->filename_trans_ttypes); ebitmap_destroy(&p->policycaps); 
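As an illustrative aside (not part of the patch itself; the helper names below are invented), the flex_array conversion seen in these hunks follows a simple lifecycle: allocate the table, preallocate every slot, then put/get pointers and free the whole table on teardown, reusing only the flex_array calls that appear in the diff.

#include <linux/flex_array.h>
#include <linux/slab.h>

/* Hypothetical name table mirroring the sym_val_to_name[] conversion above. */
static struct flex_array *example_names;

static int example_names_init(unsigned int nprim)
{
    int rc;

    example_names = flex_array_alloc(sizeof(char *), nprim,
                                     GFP_KERNEL | __GFP_ZERO);
    if (!example_names)
        return -ENOMEM;

    /* Preallocate all elements so later put_ptr/get_ptr calls cannot hit OOM. */
    rc = flex_array_prealloc(example_names, 0, nprim, GFP_KERNEL | __GFP_ZERO);
    if (rc) {
        flex_array_free(example_names);
        example_names = NULL;
    }
    return rc;
}

static int example_names_set(unsigned int idx, char *name)
{
    /* With every slot preallocated this should not fail, hence the BUG() calls above. */
    return flex_array_put_ptr(example_names, idx, name, GFP_KERNEL | __GFP_ZERO);
}

static char *example_names_get(unsigned int idx)
{
    return flex_array_get_ptr(example_names, idx);
}

static void example_names_destroy(void)
{
    if (example_names)
        flex_array_free(example_names);
    example_names = NULL;
}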
ebitmap_destroy(&p->permissive_map); @@ -785,19 +892,21 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s) head = p->ocontexts[OCON_ISID]; for (c = head; c; c = c->next) { + rc = -EINVAL; if (!c->context[0].user) { - printk(KERN_ERR "SELinux: SID %s was never " - "defined.\n", c->u.name); - rc = -EINVAL; + printk(KERN_ERR "SELinux: SID %s was never defined.\n", + c->u.name); goto out; } - if (sidtab_insert(s, c->sid[0], &c->context[0])) { - printk(KERN_ERR "SELinux: unable to load initial " - "SID %s.\n", c->u.name); - rc = -EINVAL; + + rc = sidtab_insert(s, c->sid[0], &c->context[0]); + if (rc) { + printk(KERN_ERR "SELinux: unable to load initial SID %s.\n", + c->u.name); goto out; } } + rc = 0; out: return rc; } @@ -846,8 +955,7 @@ int policydb_context_isvalid(struct policydb *p, struct context *c) * Role must be authorized for the type. */ role = p->role_val_to_struct[c->role - 1]; - if (!ebitmap_get_bit(&role->types, - c->type - 1)) + if (!ebitmap_get_bit(&role->types, c->type - 1)) /* role may not be associated with type */ return 0; @@ -858,8 +966,7 @@ int policydb_context_isvalid(struct policydb *p, struct context *c) if (!usrdatum) return 0; - if (!ebitmap_get_bit(&usrdatum->roles, - c->role - 1)) + if (!ebitmap_get_bit(&usrdatum->roles, c->role - 1)) /* user may not be associated with role */ return 0; } @@ -881,20 +988,22 @@ static int mls_read_range_helper(struct mls_range *r, void *fp) int rc; rc = next_entry(buf, fp, sizeof(u32)); - if (rc < 0) + if (rc) goto out; + rc = -EINVAL; items = le32_to_cpu(buf[0]); if (items > ARRAY_SIZE(buf)) { printk(KERN_ERR "SELinux: mls: range overflow\n"); - rc = -EINVAL; goto out; } + rc = next_entry(buf, fp, sizeof(u32) * items); - if (rc < 0) { + if (rc) { printk(KERN_ERR "SELinux: mls: truncated range\n"); goto out; } + r->level[0].sens = le32_to_cpu(buf[0]); if (items > 1) r->level[1].sens = le32_to_cpu(buf[1]); @@ -903,15 +1012,13 @@ static int mls_read_range_helper(struct mls_range *r, void *fp) rc = ebitmap_read(&r->level[0].cat, fp); if (rc) { - printk(KERN_ERR "SELinux: mls: error reading low " - "categories\n"); + printk(KERN_ERR "SELinux: mls: error reading low categories\n"); goto out; } if (items > 1) { rc = ebitmap_read(&r->level[1].cat, fp); if (rc) { - printk(KERN_ERR "SELinux: mls: error reading high " - "categories\n"); + printk(KERN_ERR "SELinux: mls: error reading high categories\n"); goto bad_high; } } else { @@ -922,12 +1029,11 @@ static int mls_read_range_helper(struct mls_range *r, void *fp) } } - rc = 0; -out: - return rc; + return 0; bad_high: ebitmap_destroy(&r->level[0].cat); - goto out; +out: + return rc; } /* @@ -942,7 +1048,7 @@ static int context_read_and_validate(struct context *c, int rc; rc = next_entry(buf, fp, sizeof buf); - if (rc < 0) { + if (rc) { printk(KERN_ERR "SELinux: context truncated\n"); goto out; } @@ -950,19 +1056,20 @@ static int context_read_and_validate(struct context *c, c->role = le32_to_cpu(buf[1]); c->type = le32_to_cpu(buf[2]); if (p->policyvers >= POLICYDB_VERSION_MLS) { - if (mls_read_range_helper(&c->range, fp)) { - printk(KERN_ERR "SELinux: error reading MLS range of " - "context\n"); - rc = -EINVAL; + rc = mls_read_range_helper(&c->range, fp); + if (rc) { + printk(KERN_ERR "SELinux: error reading MLS range of context\n"); goto out; } } + rc = -EINVAL; if (!policydb_context_isvalid(p, c)) { printk(KERN_ERR "SELinux: invalid security context\n"); context_destroy(c); - rc = -EINVAL; + goto out; } + rc = 0; out: return rc; } @@ -981,37 +1088,36 @@ static int 
perm_read(struct policydb *p, struct hashtab *h, void *fp) __le32 buf[2]; u32 len; + rc = -ENOMEM; perdatum = kzalloc(sizeof(*perdatum), GFP_KERNEL); - if (!perdatum) { - rc = -ENOMEM; - goto out; - } + if (!perdatum) + goto bad; rc = next_entry(buf, fp, sizeof buf); - if (rc < 0) + if (rc) goto bad; len = le32_to_cpu(buf[0]); perdatum->value = le32_to_cpu(buf[1]); + rc = -ENOMEM; key = kmalloc(len + 1, GFP_KERNEL); - if (!key) { - rc = -ENOMEM; + if (!key) goto bad; - } + rc = next_entry(key, fp, len); - if (rc < 0) + if (rc) goto bad; key[len] = '\0'; rc = hashtab_insert(h, key, perdatum); if (rc) goto bad; -out: - return rc; + + return 0; bad: perm_destroy(key, perdatum, NULL); - goto out; + return rc; } static int common_read(struct policydb *p, struct hashtab *h, void *fp) @@ -1022,14 +1128,13 @@ static int common_read(struct policydb *p, struct hashtab *h, void *fp) u32 len, nel; int i, rc; + rc = -ENOMEM; comdatum = kzalloc(sizeof(*comdatum), GFP_KERNEL); - if (!comdatum) { - rc = -ENOMEM; - goto out; - } + if (!comdatum) + goto bad; rc = next_entry(buf, fp, sizeof buf); - if (rc < 0) + if (rc) goto bad; len = le32_to_cpu(buf[0]); @@ -1041,13 +1146,13 @@ static int common_read(struct policydb *p, struct hashtab *h, void *fp) comdatum->permissions.nprim = le32_to_cpu(buf[2]); nel = le32_to_cpu(buf[3]); + rc = -ENOMEM; key = kmalloc(len + 1, GFP_KERNEL); - if (!key) { - rc = -ENOMEM; + if (!key) goto bad; - } + rc = next_entry(key, fp, len); - if (rc < 0) + if (rc) goto bad; key[len] = '\0'; @@ -1060,15 +1165,40 @@ static int common_read(struct policydb *p, struct hashtab *h, void *fp) rc = hashtab_insert(h, key, comdatum); if (rc) goto bad; -out: - return rc; + return 0; bad: common_destroy(key, comdatum, NULL); - goto out; + return rc; } -static int read_cons_helper(struct constraint_node **nodep, int ncons, - int allowxtarget, void *fp) +static void type_set_init(struct type_set *t) +{ + ebitmap_init(&t->types); + ebitmap_init(&t->negset); +} + +static int type_set_read(struct type_set *t, void *fp) +{ + __le32 buf[1]; + int rc; + + if (ebitmap_read(&t->types, fp)) + return -EINVAL; + if (ebitmap_read(&t->negset, fp)) + return -EINVAL; + + rc = next_entry(buf, fp, sizeof(u32)); + if (rc < 0) + return -EINVAL; + t->flags = le32_to_cpu(buf[0]); + + return 0; +} + + +static int read_cons_helper(struct policydb *p, + struct constraint_node **nodep, + int ncons, int allowxtarget, void *fp) { struct constraint_node *c, *lc; struct constraint_expr *e, *le; @@ -1088,7 +1218,7 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons, *nodep = c; rc = next_entry(buf, fp, (sizeof(u32) * 2)); - if (rc < 0) + if (rc) return rc; c->permissions = le32_to_cpu(buf[0]); nexpr = le32_to_cpu(buf[1]); @@ -1105,7 +1235,7 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons, c->expr = e; rc = next_entry(buf, fp, (sizeof(u32) * 3)); - if (rc < 0) + if (rc) return rc; e->expr_type = le32_to_cpu(buf[0]); e->attr = le32_to_cpu(buf[1]); @@ -1133,8 +1263,21 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons, if (depth == (CEXPR_MAXDEPTH - 1)) return -EINVAL; depth++; - if (ebitmap_read(&e->names, fp)) - return -EINVAL; + rc = ebitmap_read(&e->names, fp); + if (rc) + return rc; + if (p->policyvers >= + POLICYDB_VERSION_CONSTRAINT_NAMES) { + e->type_names = kzalloc(sizeof + (*e->type_names), + GFP_KERNEL); + if (!e->type_names) + return -ENOMEM; + type_set_init(e->type_names); + rc = type_set_read(e->type_names, fp); + if (rc) + return rc; + } break; 
default: return -EINVAL; @@ -1157,14 +1300,13 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp) u32 len, len2, ncons, nel; int i, rc; + rc = -ENOMEM; cladatum = kzalloc(sizeof(*cladatum), GFP_KERNEL); - if (!cladatum) { - rc = -ENOMEM; - goto out; - } + if (!cladatum) + goto bad; rc = next_entry(buf, fp, sizeof(u32)*6); - if (rc < 0) + if (rc) goto bad; len = le32_to_cpu(buf[0]); @@ -1179,33 +1321,30 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp) ncons = le32_to_cpu(buf[5]); + rc = -ENOMEM; key = kmalloc(len + 1, GFP_KERNEL); - if (!key) { - rc = -ENOMEM; + if (!key) goto bad; - } + rc = next_entry(key, fp, len); - if (rc < 0) + if (rc) goto bad; key[len] = '\0'; if (len2) { + rc = -ENOMEM; cladatum->comkey = kmalloc(len2 + 1, GFP_KERNEL); - if (!cladatum->comkey) { - rc = -ENOMEM; + if (!cladatum->comkey) goto bad; - } rc = next_entry(cladatum->comkey, fp, len2); - if (rc < 0) + if (rc) goto bad; cladatum->comkey[len2] = '\0'; - cladatum->comdatum = hashtab_search(p->p_commons.table, - cladatum->comkey); + rc = -EINVAL; + cladatum->comdatum = hashtab_search(p->p_commons.table, cladatum->comkey); if (!cladatum->comdatum) { - printk(KERN_ERR "SELinux: unknown common %s\n", - cladatum->comkey); - rc = -EINVAL; + printk(KERN_ERR "SELinux: unknown common %s\n", cladatum->comkey); goto bad; } } @@ -1215,31 +1354,47 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp) goto bad; } - rc = read_cons_helper(&cladatum->constraints, ncons, 0, fp); + rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp); if (rc) goto bad; if (p->policyvers >= POLICYDB_VERSION_VALIDATETRANS) { /* grab the validatetrans rules */ rc = next_entry(buf, fp, sizeof(u32)); - if (rc < 0) + if (rc) goto bad; ncons = le32_to_cpu(buf[0]); - rc = read_cons_helper(&cladatum->validatetrans, ncons, 1, fp); + rc = read_cons_helper(p, &cladatum->validatetrans, + ncons, 1, fp); + if (rc) + goto bad; + } + + if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) { + rc = next_entry(buf, fp, sizeof(u32) * 3); if (rc) goto bad; + + cladatum->default_user = le32_to_cpu(buf[0]); + cladatum->default_role = le32_to_cpu(buf[1]); + cladatum->default_range = le32_to_cpu(buf[2]); + } + + if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) { + rc = next_entry(buf, fp, sizeof(u32) * 1); + if (rc) + goto bad; + cladatum->default_type = le32_to_cpu(buf[0]); } rc = hashtab_insert(h, key, cladatum); if (rc) goto bad; - rc = 0; -out: - return rc; + return 0; bad: cls_destroy(key, cladatum, NULL); - goto out; + return rc; } static int role_read(struct policydb *p, struct hashtab *h, void *fp) @@ -1250,17 +1405,16 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp) __le32 buf[3]; u32 len; + rc = -ENOMEM; role = kzalloc(sizeof(*role), GFP_KERNEL); - if (!role) { - rc = -ENOMEM; - goto out; - } + if (!role) + goto bad; if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) to_read = 3; rc = next_entry(buf, fp, sizeof(buf[0]) * to_read); - if (rc < 0) + if (rc) goto bad; len = le32_to_cpu(buf[0]); @@ -1268,13 +1422,13 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp) if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) role->bounds = le32_to_cpu(buf[2]); + rc = -ENOMEM; key = kmalloc(len + 1, GFP_KERNEL); - if (!key) { - rc = -ENOMEM; + if (!key) goto bad; - } + rc = next_entry(key, fp, len); - if (rc < 0) + if (rc) goto bad; key[len] = '\0'; @@ -1287,10 +1441,10 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp) goto bad; 
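A hedged sketch (the function name is invented, not from the patch): the repeated rewrite in these read helpers assigns the prospective error code before each test so every failure branch collapses to a bare goto, and the length-prefixed string read keeps the same shape throughout.

static int example_read_name(void *fp, char **out)
{
    __le32 buf[1];
    char *key = NULL;
    u32 len;
    int rc;

    rc = next_entry(buf, fp, sizeof(buf));
    if (rc)
        goto bad;
    len = le32_to_cpu(buf[0]);

    /* Set the error code first; the check itself stays a one-liner. */
    rc = -ENOMEM;
    key = kmalloc(len + 1, GFP_KERNEL);
    if (!key)
        goto bad;

    rc = next_entry(key, fp, len);
    if (rc)
        goto bad;
    key[len] = '\0';

    *out = key;
    return 0;
bad:
    kfree(key);
    return rc;
}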
if (strcmp(key, OBJECT_R) == 0) { + rc = -EINVAL; if (role->value != OBJECT_R_VAL) { printk(KERN_ERR "SELinux: Role %s has wrong value %d\n", OBJECT_R, role->value); - rc = -EINVAL; goto bad; } rc = 0; @@ -1300,11 +1454,10 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp) rc = hashtab_insert(h, key, role); if (rc) goto bad; -out: - return rc; + return 0; bad: role_destroy(key, role, NULL); - goto out; + return rc; } static int type_read(struct policydb *p, struct hashtab *h, void *fp) @@ -1315,17 +1468,16 @@ static int type_read(struct policydb *p, struct hashtab *h, void *fp) __le32 buf[4]; u32 len; + rc = -ENOMEM; typdatum = kzalloc(sizeof(*typdatum), GFP_KERNEL); - if (!typdatum) { - rc = -ENOMEM; - return rc; - } + if (!typdatum) + goto bad; if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) to_read = 4; rc = next_entry(buf, fp, sizeof(buf[0]) * to_read); - if (rc < 0) + if (rc) goto bad; len = le32_to_cpu(buf[0]); @@ -1343,24 +1495,22 @@ static int type_read(struct policydb *p, struct hashtab *h, void *fp) typdatum->primary = le32_to_cpu(buf[2]); } + rc = -ENOMEM; key = kmalloc(len + 1, GFP_KERNEL); - if (!key) { - rc = -ENOMEM; + if (!key) goto bad; - } rc = next_entry(key, fp, len); - if (rc < 0) + if (rc) goto bad; key[len] = '\0'; rc = hashtab_insert(h, key, typdatum); if (rc) goto bad; -out: - return rc; + return 0; bad: type_destroy(key, typdatum, NULL); - goto out; + return rc; } @@ -1376,22 +1526,18 @@ static int mls_read_level(struct mls_level *lp, void *fp) memset(lp, 0, sizeof(*lp)); rc = next_entry(buf, fp, sizeof buf); - if (rc < 0) { + if (rc) { printk(KERN_ERR "SELinux: mls: truncated level\n"); - goto bad; + return rc; } lp->sens = le32_to_cpu(buf[0]); - if (ebitmap_read(&lp->cat, fp)) { - printk(KERN_ERR "SELinux: mls: error reading level " - "categories\n"); - goto bad; + rc = ebitmap_read(&lp->cat, fp); + if (rc) { + printk(KERN_ERR "SELinux: mls: error reading level categories\n"); + return rc; } - return 0; - -bad: - return -EINVAL; } static int user_read(struct policydb *p, struct hashtab *h, void *fp) @@ -1402,17 +1548,16 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp) __le32 buf[3]; u32 len; + rc = -ENOMEM; usrdatum = kzalloc(sizeof(*usrdatum), GFP_KERNEL); - if (!usrdatum) { - rc = -ENOMEM; - goto out; - } + if (!usrdatum) + goto bad; if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) to_read = 3; rc = next_entry(buf, fp, sizeof(buf[0]) * to_read); - if (rc < 0) + if (rc) goto bad; len = le32_to_cpu(buf[0]); @@ -1420,13 +1565,12 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp) if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) usrdatum->bounds = le32_to_cpu(buf[2]); + rc = -ENOMEM; key = kmalloc(len + 1, GFP_KERNEL); - if (!key) { - rc = -ENOMEM; + if (!key) goto bad; - } rc = next_entry(key, fp, len); - if (rc < 0) + if (rc) goto bad; key[len] = '\0'; @@ -1446,11 +1590,10 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp) rc = hashtab_insert(h, key, usrdatum); if (rc) goto bad; -out: - return rc; + return 0; bad: user_destroy(key, usrdatum, NULL); - goto out; + return rc; } static int sens_read(struct policydb *p, struct hashtab *h, void *fp) @@ -1461,47 +1604,43 @@ static int sens_read(struct policydb *p, struct hashtab *h, void *fp) __le32 buf[2]; u32 len; + rc = -ENOMEM; levdatum = kzalloc(sizeof(*levdatum), GFP_ATOMIC); - if (!levdatum) { - rc = -ENOMEM; - goto out; - } + if (!levdatum) + goto bad; rc = next_entry(buf, fp, sizeof buf); - if (rc < 0) + if (rc) goto bad; 
len = le32_to_cpu(buf[0]); levdatum->isalias = le32_to_cpu(buf[1]); + rc = -ENOMEM; key = kmalloc(len + 1, GFP_ATOMIC); - if (!key) { - rc = -ENOMEM; + if (!key) goto bad; - } rc = next_entry(key, fp, len); - if (rc < 0) + if (rc) goto bad; key[len] = '\0'; + rc = -ENOMEM; levdatum->level = kmalloc(sizeof(struct mls_level), GFP_ATOMIC); - if (!levdatum->level) { - rc = -ENOMEM; + if (!levdatum->level) goto bad; - } - if (mls_read_level(levdatum->level, fp)) { - rc = -EINVAL; + + rc = mls_read_level(levdatum->level, fp); + if (rc) goto bad; - } rc = hashtab_insert(h, key, levdatum); if (rc) goto bad; -out: - return rc; + return 0; bad: sens_destroy(key, levdatum, NULL); - goto out; + return rc; } static int cat_read(struct policydb *p, struct hashtab *h, void *fp) @@ -1512,39 +1651,35 @@ static int cat_read(struct policydb *p, struct hashtab *h, void *fp) __le32 buf[3]; u32 len; + rc = -ENOMEM; catdatum = kzalloc(sizeof(*catdatum), GFP_ATOMIC); - if (!catdatum) { - rc = -ENOMEM; - goto out; - } + if (!catdatum) + goto bad; rc = next_entry(buf, fp, sizeof buf); - if (rc < 0) + if (rc) goto bad; len = le32_to_cpu(buf[0]); catdatum->value = le32_to_cpu(buf[1]); catdatum->isalias = le32_to_cpu(buf[2]); + rc = -ENOMEM; key = kmalloc(len + 1, GFP_ATOMIC); - if (!key) { - rc = -ENOMEM; + if (!key) goto bad; - } rc = next_entry(key, fp, len); - if (rc < 0) + if (rc) goto bad; key[len] = '\0'; rc = hashtab_insert(h, key, catdatum); if (rc) goto bad; -out: - return rc; - + return 0; bad: cat_destroy(key, catdatum, NULL); - goto out; + return rc; } static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) = @@ -1585,9 +1720,9 @@ static int user_bounds_sanity_check(void *key, void *datum, void *datap) printk(KERN_ERR "SELinux: boundary violated policy: " "user=%s role=%s bounds=%s\n", - p->p_user_val_to_name[user->value - 1], - p->p_role_val_to_name[bit], - p->p_user_val_to_name[upper->value - 1]); + sym_name(p, SYM_USERS, user->value - 1), + sym_name(p, SYM_ROLES, bit), + sym_name(p, SYM_USERS, upper->value - 1)); return -EINVAL; } @@ -1622,9 +1757,9 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap) printk(KERN_ERR "SELinux: boundary violated policy: " "role=%s type=%s bounds=%s\n", - p->p_role_val_to_name[role->value - 1], - p->p_type_val_to_name[bit], - p->p_role_val_to_name[upper->value - 1]); + sym_name(p, SYM_ROLES, role->value - 1), + sym_name(p, SYM_TYPES, bit), + sym_name(p, SYM_ROLES, upper->value - 1)); return -EINVAL; } @@ -1648,12 +1783,15 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap) return -EINVAL; } - upper = p->type_val_to_struct[upper->bounds - 1]; + upper = flex_array_get_ptr(p->type_val_to_struct_array, + upper->bounds - 1); + BUG_ON(!upper); + if (upper->attribute) { printk(KERN_ERR "SELinux: type %s: " "bounded by attribute %s", (char *) key, - p->p_type_val_to_name[upper->value - 1]); + sym_name(p, SYM_TYPES, upper->value - 1)); return -EINVAL; } } @@ -1686,8 +1824,6 @@ static int policydb_bounds_sanity_check(struct policydb *p) return 0; } -extern int ss_initialized; - u16 string_to_security_class(struct policydb *p, const char *name) { struct class_datum *cladatum; @@ -1786,7 +1922,7 @@ static int range_read(struct policydb *p, void *fp) rt = NULL; r = NULL; } - rangetr_hash_eval(p->range_tr); + hash_eval(p->range_tr, "rangetr"); rc = 0; out: kfree(rt); @@ -1794,6 +1930,95 @@ out: return rc; } +static int filename_trans_read(struct policydb *p, void *fp) +{ + struct filename_trans *ft; + struct 
filename_trans_datum *otype; + char *name; + u32 nel, len; + __le32 buf[4]; + int rc, i; + + if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS) + return 0; + + rc = next_entry(buf, fp, sizeof(u32)); + if (rc) + return rc; + nel = le32_to_cpu(buf[0]); + + for (i = 0; i < nel; i++) { + ft = NULL; + otype = NULL; + name = NULL; + + rc = -ENOMEM; + ft = kzalloc(sizeof(*ft), GFP_KERNEL); + if (!ft) + goto out; + + rc = -ENOMEM; + otype = kmalloc(sizeof(*otype), GFP_KERNEL); + if (!otype) + goto out; + + /* length of the path component string */ + rc = next_entry(buf, fp, sizeof(u32)); + if (rc) + goto out; + len = le32_to_cpu(buf[0]); + + rc = -ENOMEM; + name = kmalloc(len + 1, GFP_KERNEL); + if (!name) + goto out; + + ft->name = name; + + /* path component string */ + rc = next_entry(name, fp, len); + if (rc) + goto out; + name[len] = 0; + + rc = next_entry(buf, fp, sizeof(u32) * 4); + if (rc) + goto out; + + ft->stype = le32_to_cpu(buf[0]); + ft->ttype = le32_to_cpu(buf[1]); + ft->tclass = le32_to_cpu(buf[2]); + + otype->otype = le32_to_cpu(buf[3]); + + rc = ebitmap_set_bit(&p->filename_trans_ttypes, ft->ttype, 1); + if (rc) + goto out; + + rc = hashtab_insert(p->filename_trans, ft, otype); + if (rc) { + /* + * Do not return -EEXIST to the caller, or the system + * will not boot. + */ + if (rc != -EEXIST) + goto out; + /* But free memory to avoid memory leak. */ + kfree(ft); + kfree(name); + kfree(otype); + } + } + hash_eval(p->filename_trans, "filenametr"); + return 0; +out: + kfree(ft); + kfree(name); + kfree(otype); + + return rc; +} + static int genfs_read(struct policydb *p, void *fp) { int i, j, rc; @@ -2009,7 +2234,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info, rc = -EINVAL; c->v.behavior = le32_to_cpu(buf[0]); - if (c->v.behavior > SECURITY_FS_USE_NONE) + /* Determined at runtime, not in policy DB. */ + if (c->v.behavior == SECURITY_FS_USE_MNTPOINT) + goto out; + if (c->v.behavior > SECURITY_FS_USE_MAX) goto out; rc = -ENOMEM; @@ -2066,13 +2294,14 @@ int policydb_read(struct policydb *p, void *fp) rc = policydb_init(p); if (rc) - goto out; + return rc; /* Read the magic number and string length. */ rc = next_entry(buf, fp, sizeof(u32) * 2); - if (rc < 0) + if (rc) goto bad; + rc = -EINVAL; if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) { printk(KERN_ERR "SELinux: policydb magic number 0x%x does " "not match expected magic number 0x%x\n", @@ -2080,6 +2309,7 @@ int policydb_read(struct policydb *p, void *fp) goto bad; } + rc = -EINVAL; len = le32_to_cpu(buf[1]); if (len != strlen(POLICYDB_STRING)) { printk(KERN_ERR "SELinux: policydb string length %d does not " @@ -2087,19 +2317,23 @@ int policydb_read(struct policydb *p, void *fp) len, strlen(POLICYDB_STRING)); goto bad; } + + rc = -ENOMEM; policydb_str = kmalloc(len + 1, GFP_KERNEL); if (!policydb_str) { printk(KERN_ERR "SELinux: unable to allocate memory for policydb " "string of length %d\n", len); - rc = -ENOMEM; goto bad; } + rc = next_entry(policydb_str, fp, len); - if (rc < 0) { + if (rc) { printk(KERN_ERR "SELinux: truncated policydb string identifier\n"); kfree(policydb_str); goto bad; } + + rc = -EINVAL; policydb_str[len] = '\0'; if (strcmp(policydb_str, POLICYDB_STRING)) { printk(KERN_ERR "SELinux: policydb string %s does not match " @@ -2113,9 +2347,10 @@ int policydb_read(struct policydb *p, void *fp) /* Read the version and table sizes. 
*/ rc = next_entry(buf, fp, sizeof(u32)*4); - if (rc < 0) + if (rc) goto bad; + rc = -EINVAL; p->policyvers = le32_to_cpu(buf[0]); if (p->policyvers < POLICYDB_VERSION_MIN || p->policyvers > POLICYDB_VERSION_MAX) { @@ -2128,6 +2363,7 @@ int policydb_read(struct policydb *p, void *fp) if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) { p->mls_enabled = 1; + rc = -EINVAL; if (p->policyvers < POLICYDB_VERSION_MLS) { printk(KERN_ERR "SELinux: security policydb version %d " "(MLS) not backwards compatible\n", @@ -2138,14 +2374,19 @@ int policydb_read(struct policydb *p, void *fp) p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN); p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN); - if (p->policyvers >= POLICYDB_VERSION_POLCAP && - ebitmap_read(&p->policycaps, fp) != 0) - goto bad; + if (p->policyvers >= POLICYDB_VERSION_POLCAP) { + rc = ebitmap_read(&p->policycaps, fp); + if (rc) + goto bad; + } - if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE && - ebitmap_read(&p->permissive_map, fp) != 0) - goto bad; + if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) { + rc = ebitmap_read(&p->permissive_map, fp); + if (rc) + goto bad; + } + rc = -EINVAL; info = policydb_lookup_compat(p->policyvers); if (!info) { printk(KERN_ERR "SELinux: unable to find policy compat info " @@ -2153,6 +2394,7 @@ int policydb_read(struct policydb *p, void *fp) goto bad; } + rc = -EINVAL; if (le32_to_cpu(buf[2]) != info->sym_num || le32_to_cpu(buf[3]) != info->ocon_num) { printk(KERN_ERR "SELinux: policydb table sizes (%d,%d) do " @@ -2164,7 +2406,7 @@ int policydb_read(struct policydb *p, void *fp) for (i = 0; i < info->sym_num; i++) { rc = next_entry(buf, fp, sizeof(u32)*2); - if (rc < 0) + if (rc) goto bad; nprim = le32_to_cpu(buf[0]); nel = le32_to_cpu(buf[1]); @@ -2177,6 +2419,11 @@ int policydb_read(struct policydb *p, void *fp) p->symtab[i].nprim = nprim; } + rc = -EINVAL; + p->process_class = string_to_security_class(p, "process"); + if (!p->process_class) + goto bad; + rc = avtab_read(&p->te_avtab, fp, p); if (rc) goto bad; @@ -2188,78 +2435,81 @@ int policydb_read(struct policydb *p, void *fp) } rc = next_entry(buf, fp, sizeof(u32)); - if (rc < 0) + if (rc) goto bad; nel = le32_to_cpu(buf[0]); ltr = NULL; for (i = 0; i < nel; i++) { + rc = -ENOMEM; tr = kzalloc(sizeof(*tr), GFP_KERNEL); - if (!tr) { - rc = -ENOMEM; + if (!tr) goto bad; - } if (ltr) ltr->next = tr; else p->role_tr = tr; rc = next_entry(buf, fp, sizeof(u32)*3); - if (rc < 0) + if (rc) goto bad; + + rc = -EINVAL; tr->role = le32_to_cpu(buf[0]); tr->type = le32_to_cpu(buf[1]); tr->new_role = le32_to_cpu(buf[2]); + if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) { + rc = next_entry(buf, fp, sizeof(u32)); + if (rc) + goto bad; + tr->tclass = le32_to_cpu(buf[0]); + } else + tr->tclass = p->process_class; + if (!policydb_role_isvalid(p, tr->role) || !policydb_type_isvalid(p, tr->type) || - !policydb_role_isvalid(p, tr->new_role)) { - rc = -EINVAL; + !policydb_class_isvalid(p, tr->tclass) || + !policydb_role_isvalid(p, tr->new_role)) goto bad; - } ltr = tr; } rc = next_entry(buf, fp, sizeof(u32)); - if (rc < 0) + if (rc) goto bad; nel = le32_to_cpu(buf[0]); lra = NULL; for (i = 0; i < nel; i++) { + rc = -ENOMEM; ra = kzalloc(sizeof(*ra), GFP_KERNEL); - if (!ra) { - rc = -ENOMEM; + if (!ra) goto bad; - } if (lra) lra->next = ra; else p->role_allow = ra; rc = next_entry(buf, fp, sizeof(u32)*2); - if (rc < 0) + if (rc) goto bad; + + rc = -EINVAL; ra->role = le32_to_cpu(buf[0]); ra->new_role = le32_to_cpu(buf[1]); if 
(!policydb_role_isvalid(p, ra->role) || - !policydb_role_isvalid(p, ra->new_role)) { - rc = -EINVAL; + !policydb_role_isvalid(p, ra->new_role)) goto bad; - } lra = ra; } - rc = policydb_index_classes(p); + rc = filename_trans_read(p, fp); if (rc) goto bad; - rc = policydb_index_others(p); + rc = policydb_index(p); if (rc) goto bad; - p->process_class = string_to_security_class(p, "process"); - if (!p->process_class) - goto bad; - p->process_trans_perms = string_to_av_perm(p, p->process_class, - "transition"); - p->process_trans_perms |= string_to_av_perm(p, p->process_class, - "dyntransition"); + rc = -EINVAL; + p->process_trans_perms = string_to_av_perm(p, p->process_class, "transition"); + p->process_trans_perms |= string_to_av_perm(p, p->process_class, "dyntransition"); if (!p->process_trans_perms) goto bad; @@ -2283,7 +2533,7 @@ int policydb_read(struct policydb *p, void *fp) goto bad; /* preallocate so we don't have to worry about the put ever failing */ - rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim - 1, + rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim, GFP_KERNEL | __GFP_ZERO); if (rc) goto bad; @@ -2312,8 +2562,6 @@ int policydb_read(struct policydb *p, void *fp) out: return rc; bad: - if (!rc) - rc = -EINVAL; policydb_destroy(p); goto out; } @@ -2431,8 +2679,9 @@ static int cat_write(void *vkey, void *datum, void *ptr) return 0; } -static int role_trans_write(struct role_trans *r, void *fp) +static int role_trans_write(struct policydb *p, void *fp) { + struct role_trans *r = p->role_tr; struct role_trans *tr; u32 buf[3]; size_t nel; @@ -2452,6 +2701,12 @@ static int role_trans_write(struct role_trans *r, void *fp) rc = put_entry(buf, sizeof(u32), 3, fp); if (rc) return rc; + if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) { + buf[0] = cpu_to_le32(tr->tclass); + rc = put_entry(buf, sizeof(u32), 1, fp); + if (rc) + return rc; + } } return 0; @@ -2564,6 +2819,24 @@ static int common_write(void *vkey, void *datum, void *ptr) return 0; } +static int type_set_write(struct type_set *t, void *fp) +{ + int rc; + __le32 buf[1]; + + if (ebitmap_write(&t->types, fp)) + return -EINVAL; + if (ebitmap_write(&t->negset, fp)) + return -EINVAL; + + buf[0] = cpu_to_le32(t->flags); + rc = put_entry(buf, sizeof(u32), 1, fp); + if (rc) + return -EINVAL; + + return 0; +} + static int write_cons_helper(struct policydb *p, struct constraint_node *node, void *fp) { @@ -2595,6 +2868,12 @@ static int write_cons_helper(struct policydb *p, struct constraint_node *node, rc = ebitmap_write(&e->names, fp); if (rc) return rc; + if (p->policyvers >= + POLICYDB_VERSION_CONSTRAINT_NAMES) { + rc = type_set_write(e->type_names, fp); + if (rc) + return rc; + } break; default: break; @@ -2673,6 +2952,23 @@ static int class_write(void *vkey, void *datum, void *ptr) if (rc) return rc; + if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) { + buf[0] = cpu_to_le32(cladatum->default_user); + buf[1] = cpu_to_le32(cladatum->default_role); + buf[2] = cpu_to_le32(cladatum->default_range); + + rc = put_entry(buf, sizeof(uint32_t), 3, fp); + if (rc) + return rc; + } + + if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) { + buf[0] = cpu_to_le32(cladatum->default_type); + rc = put_entry(buf, sizeof(uint32_t), 1, fp); + if (rc) + return rc; + } + return 0; } @@ -2959,7 +3255,7 @@ static int genfs_write(struct policydb *p, void *fp) return 0; } -static int range_count(void *key, void *data, void *ptr) +static int hashtab_cnt(void *key, void *data, void *ptr) { int *cnt = ptr; *cnt 
= *cnt + 1; @@ -2997,9 +3293,8 @@ static int range_write_helper(void *key, void *data, void *ptr) static int range_write(struct policydb *p, void *fp) { - size_t nel; __le32 buf[1]; - int rc; + int rc, nel; struct policy_data pd; pd.p = p; @@ -3007,7 +3302,7 @@ static int range_write(struct policydb *p, void *fp) /* count the number of entries in the hashtab */ nel = 0; - rc = hashtab_map(p->range_tr, range_count, &nel); + rc = hashtab_map(p->range_tr, hashtab_cnt, &nel); if (rc) return rc; @@ -3024,6 +3319,63 @@ static int range_write(struct policydb *p, void *fp) return 0; } +static int filename_write_helper(void *key, void *data, void *ptr) +{ + __le32 buf[4]; + struct filename_trans *ft = key; + struct filename_trans_datum *otype = data; + void *fp = ptr; + int rc; + u32 len; + + len = strlen(ft->name); + buf[0] = cpu_to_le32(len); + rc = put_entry(buf, sizeof(u32), 1, fp); + if (rc) + return rc; + + rc = put_entry(ft->name, sizeof(char), len, fp); + if (rc) + return rc; + + buf[0] = cpu_to_le32(ft->stype); + buf[1] = cpu_to_le32(ft->ttype); + buf[2] = cpu_to_le32(ft->tclass); + buf[3] = cpu_to_le32(otype->otype); + + rc = put_entry(buf, sizeof(u32), 4, fp); + if (rc) + return rc; + + return 0; +} + +static int filename_trans_write(struct policydb *p, void *fp) +{ + u32 nel; + __le32 buf[1]; + int rc; + + if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS) + return 0; + + nel = 0; + rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel); + if (rc) + return rc; + + buf[0] = cpu_to_le32(nel); + rc = put_entry(buf, sizeof(u32), 1, fp); + if (rc) + return rc; + + rc = hashtab_map(p->filename_trans, filename_write_helper, fp); + if (rc) + return rc; + + return 0; +} + /* * Write the configuration data in a policy database * structure to a policy database binary representation @@ -3076,7 +3428,7 @@ int policydb_write(struct policydb *p, void *fp) if (!info) { printk(KERN_ERR "SELinux: compatibility lookup failed for policy " "version %d", p->policyvers); - return rc; + return -EINVAL; } buf[0] = cpu_to_le32(p->policyvers); @@ -3126,7 +3478,7 @@ int policydb_write(struct policydb *p, void *fp) if (rc) return rc; - rc = role_trans_write(p->role_tr, fp); + rc = role_trans_write(p, fp); if (rc) return rc; @@ -3134,6 +3486,10 @@ int policydb_write(struct policydb *p, void *fp) if (rc) return rc; + rc = filename_trans_write(p, fp); + if (rc) + return rc; + rc = ocontext_write(p, info, fp); if (rc) return rc; diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h index 95d3d7de361..725d5945a97 100644 --- a/security/selinux/ss/policydb.h +++ b/security/selinux/ss/policydb.h @@ -60,6 +60,20 @@ struct class_datum { struct symtab permissions; /* class-specific permission symbol table */ struct constraint_node *constraints; /* constraints on class permissions */ struct constraint_node *validatetrans; /* special transition rules */ +/* Options how a new object user, role, and type should be decided */ +#define DEFAULT_SOURCE 1 +#define DEFAULT_TARGET 2 + char default_user; + char default_role; + char default_type; +/* Options how a new object range should be decided */ +#define DEFAULT_SOURCE_LOW 1 +#define DEFAULT_SOURCE_HIGH 2 +#define DEFAULT_SOURCE_LOW_HIGH 3 +#define DEFAULT_TARGET_LOW 4 +#define DEFAULT_TARGET_HIGH 5 +#define DEFAULT_TARGET_LOW_HIGH 6 + char default_range; }; /* Role attributes */ @@ -72,11 +86,23 @@ struct role_datum { struct role_trans { u32 role; /* current role */ - u32 type; /* program executable type */ + u32 type; /* program executable type, or new 
object type */ + u32 tclass; /* process class, or new object class */ u32 new_role; /* new role */ struct role_trans *next; }; +struct filename_trans { + u32 stype; /* current process */ + u32 ttype; /* parent dir context */ + u16 tclass; /* class of new object */ + const char *name; /* last path component */ +}; + +struct filename_trans_datum { + u32 otype; /* expected of new object */ +}; + struct role_allow { u32 role; /* current role */ u32 new_role; /* new role */ @@ -128,6 +154,17 @@ struct cond_bool_datum { struct cond_node; /* + * type set preserves data needed to determine constraint info from + * policy source. This is not used by the kernel policy but allows + * utilities such as audit2allow to determine constraint denials. + */ +struct type_set { + struct ebitmap types; + struct ebitmap negset; + u32 flags; +}; + +/* * The configuration data includes security contexts for * initial SIDs, unlabeled file systems, TCP and UDP port numbers, * network interfaces, and nodes. This structure stores the @@ -203,21 +240,13 @@ struct policydb { #define p_cats symtab[SYM_CATS] /* symbol names indexed by (value - 1) */ - char **sym_val_to_name[SYM_NUM]; -#define p_common_val_to_name sym_val_to_name[SYM_COMMONS] -#define p_class_val_to_name sym_val_to_name[SYM_CLASSES] -#define p_role_val_to_name sym_val_to_name[SYM_ROLES] -#define p_type_val_to_name sym_val_to_name[SYM_TYPES] -#define p_user_val_to_name sym_val_to_name[SYM_USERS] -#define p_bool_val_to_name sym_val_to_name[SYM_BOOLS] -#define p_sens_val_to_name sym_val_to_name[SYM_LEVELS] -#define p_cat_val_to_name sym_val_to_name[SYM_CATS] + struct flex_array *sym_val_to_name[SYM_NUM]; /* class, role, and user attributes indexed by (value - 1) */ struct class_datum **class_val_to_struct; struct role_datum **role_val_to_struct; struct user_datum **user_val_to_struct; - struct type_datum **type_val_to_struct; + struct flex_array *type_val_to_struct_array; /* type enforcement access vectors and transitions */ struct avtab te_avtab; @@ -225,6 +254,12 @@ struct policydb { /* role transitions */ struct role_trans *role_tr; + /* file transitions with the last path component */ + /* quickly exclude lookups when parent ttype has no rules */ + struct ebitmap filename_trans_ttypes; + /* actual set of filename_trans rules */ + struct hashtab *filename_trans; + /* bools indexed by (value - 1) */ struct cond_bool_datum **bool_val_to_struct; /* type enforcement conditional access vectors and transitions */ @@ -310,7 +345,7 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes) return 0; } -static inline int put_entry(void *buf, size_t bytes, int num, struct policy_file *fp) +static inline int put_entry(const void *buf, size_t bytes, int num, struct policy_file *fp) { size_t len = bytes * num; @@ -321,6 +356,13 @@ static inline int put_entry(void *buf, size_t bytes, int num, struct policy_file return 0; } +static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr) +{ + struct flex_array *fa = p->sym_val_to_name[sym_num]; + + return flex_array_get_ptr(fa, element_nr); +} + extern u16 string_to_security_class(struct policydb *p, const char *name); extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name); diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 223c1ff6ef2..4bca49414a4 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -13,7 +13,7 @@ * * Added conditional policy language extensions * - * Updated: 
Hewlett-Packard <paul.moore@hp.com> + * Updated: Hewlett-Packard <paul@paul-moore.com> * * Added support for NetLabel * Added support for the policy capability bitmap @@ -70,10 +70,9 @@ #include "ebitmap.h" #include "audit.h" -extern void selnl_notify_policyload(u32 seqno); - int selinux_policycap_netpeer; int selinux_policycap_openperm; +int selinux_policycap_alwaysnetwork; static DEFINE_RWLOCK(policy_rwlock); @@ -201,6 +200,21 @@ static u16 unmap_class(u16 tclass) return tclass; } +/* + * Get kernel value for class from its policy value + */ +static u16 map_class(u16 pol_value) +{ + u16 i; + + for (i = 1; i < current_mapping_size; i++) { + if (current_mapping[i].value == pol_value) + return i; + } + + return SECCLASS_NULL; +} + static void map_decision(u16 tclass, struct av_decision *avd, int allow_unknown) { @@ -464,7 +478,7 @@ static void security_dump_masked_av(struct context *scontext, if (!permissions) return; - tclass_name = policydb.p_class_val_to_name[tclass - 1]; + tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1); tclass_dat = policydb.class_val_to_struct[tclass - 1]; common_dat = tclass_dat->comdatum; @@ -530,12 +544,18 @@ static void type_attribute_bounds_av(struct context *scontext, struct context lo_scontext; struct context lo_tcontext; struct av_decision lo_avd; - struct type_datum *source - = policydb.type_val_to_struct[scontext->type - 1]; - struct type_datum *target - = policydb.type_val_to_struct[tcontext->type - 1]; + struct type_datum *source; + struct type_datum *target; u32 masked = 0; + source = flex_array_get_ptr(policydb.type_val_to_struct_array, + scontext->type - 1); + BUG_ON(!source); + + target = flex_array_get_ptr(policydb.type_val_to_struct_array, + tcontext->type - 1); + BUG_ON(!target); + if (source->bounds) { memset(&lo_avd, 0, sizeof(lo_avd)); @@ -701,16 +721,16 @@ static int security_validtrans_handle_fail(struct context *ocontext, char *o = NULL, *n = NULL, *t = NULL; u32 olen, nlen, tlen; - if (context_struct_to_string(ocontext, &o, &olen) < 0) + if (context_struct_to_string(ocontext, &o, &olen)) goto out; - if (context_struct_to_string(ncontext, &n, &nlen) < 0) + if (context_struct_to_string(ncontext, &n, &nlen)) goto out; - if (context_struct_to_string(tcontext, &t, &tlen) < 0) + if (context_struct_to_string(tcontext, &t, &tlen)) goto out; audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "security_validate_transition: denied for" " oldcontext=%s newcontext=%s taskcontext=%s tclass=%s", - o, n, t, policydb.p_class_val_to_name[tclass-1]); + o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1)); out: kfree(o); kfree(n); @@ -801,10 +821,11 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) struct context *old_context, *new_context; struct type_datum *type; int index; - int rc = -EINVAL; + int rc; read_lock(&policy_rwlock); + rc = -EINVAL; old_context = sidtab_search(&sidtab, old_sid); if (!old_context) { printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n", @@ -812,6 +833,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) goto out; } + rc = -EINVAL; new_context = sidtab_search(&sidtab, new_sid); if (!new_context) { printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n", @@ -819,28 +841,27 @@ int security_bounded_transition(u32 old_sid, u32 new_sid) goto out; } + rc = 0; /* type/domain unchanged */ - if (old_context->type == new_context->type) { - rc = 0; + if (old_context->type == new_context->type) goto out; - } index = new_context->type; while (true) { - type = policydb.type_val_to_struct[index - 1]; + 
type = flex_array_get_ptr(policydb.type_val_to_struct_array, + index - 1); BUG_ON(!type); /* not bounded anymore */ - if (!type->bounds) { - rc = -EPERM; + rc = -EPERM; + if (!type->bounds) break; - } /* @newsid is bounded by @oldsid */ - if (type->bounds == old_context->type) { - rc = 0; + rc = 0; + if (type->bounds == old_context->type) break; - } + index = type->bounds; } @@ -998,16 +1019,18 @@ static int context_struct_to_string(struct context *context, char **scontext, u3 if (context->len) { *scontext_len = context->len; - *scontext = kstrdup(context->str, GFP_ATOMIC); - if (!(*scontext)) - return -ENOMEM; + if (scontext) { + *scontext = kstrdup(context->str, GFP_ATOMIC); + if (!(*scontext)) + return -ENOMEM; + } return 0; } /* Compute the size of the context. */ - *scontext_len += strlen(policydb.p_user_val_to_name[context->user - 1]) + 1; - *scontext_len += strlen(policydb.p_role_val_to_name[context->role - 1]) + 1; - *scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1; + *scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1; + *scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1; + *scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1; *scontext_len += mls_compute_context_len(context); if (!scontext) @@ -1023,12 +1046,12 @@ static int context_struct_to_string(struct context *context, char **scontext, u3 * Copy the user name, role name and type name into the context. */ sprintf(scontextp, "%s:%s:%s", - policydb.p_user_val_to_name[context->user - 1], - policydb.p_role_val_to_name[context->role - 1], - policydb.p_type_val_to_name[context->type - 1]); - scontextp += strlen(policydb.p_user_val_to_name[context->user - 1]) + - 1 + strlen(policydb.p_role_val_to_name[context->role - 1]) + - 1 + strlen(policydb.p_type_val_to_name[context->type - 1]); + sym_name(&policydb, SYM_USERS, context->user - 1), + sym_name(&policydb, SYM_ROLES, context->role - 1), + sym_name(&policydb, SYM_TYPES, context->type - 1)); + scontextp += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + + 1 + strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + + 1 + strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)); mls_sid_to_context(context, &scontextp); @@ -1187,16 +1210,13 @@ static int string_to_context_struct(struct policydb *pol, if (rc) goto out; - if ((p - scontext) < scontext_len) { - rc = -EINVAL; + rc = -EINVAL; + if ((p - scontext) < scontext_len) goto out; - } /* Check the validity of the new context. */ - if (!policydb_context_isvalid(pol, ctx)) { - rc = -EINVAL; + if (!policydb_context_isvalid(pol, ctx)) goto out; - } rc = 0; out: if (rc) @@ -1212,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len, struct context context; int rc = 0; + /* An empty security context is never valid. 
*/ + if (!scontext_len) + return -EINVAL; + if (!ss_initialized) { int i; @@ -1235,27 +1259,26 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len, if (force) { /* Save another copy for storing in uninterpreted form */ + rc = -ENOMEM; str = kstrdup(scontext2, gfp_flags); - if (!str) { - kfree(scontext2); - return -ENOMEM; - } + if (!str) + goto out; } read_lock(&policy_rwlock); - rc = string_to_context_struct(&policydb, &sidtab, - scontext2, scontext_len, - &context, def_sid); + rc = string_to_context_struct(&policydb, &sidtab, scontext2, + scontext_len, &context, def_sid); if (rc == -EINVAL && force) { context.str = str; context.len = scontext_len; str = NULL; } else if (rc) - goto out; + goto out_unlock; rc = sidtab_context_to_sid(&sidtab, &context, sid); context_destroy(&context); -out: +out_unlock: read_unlock(&policy_rwlock); +out: kfree(scontext2); kfree(str); return rc; @@ -1266,16 +1289,18 @@ out: * @scontext: security context * @scontext_len: length in bytes * @sid: security identifier, SID + * @gfp: context for the allocation * * Obtains a SID associated with the security context that * has the string representation specified by @scontext. * Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient * memory is available, or 0 on success. */ -int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid) +int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid, + gfp_t gfp) { return security_context_to_sid_core(scontext, scontext_len, - sid, SECSID_NULL, GFP_KERNEL, 0); + sid, SECSID_NULL, gfp, 0); } /** @@ -1319,18 +1344,18 @@ static int compute_sid_handle_invalid_context( char *s = NULL, *t = NULL, *n = NULL; u32 slen, tlen, nlen; - if (context_struct_to_string(scontext, &s, &slen) < 0) + if (context_struct_to_string(scontext, &s, &slen)) goto out; - if (context_struct_to_string(tcontext, &t, &tlen) < 0) + if (context_struct_to_string(tcontext, &t, &tlen)) goto out; - if (context_struct_to_string(newcontext, &n, &nlen) < 0) + if (context_struct_to_string(newcontext, &n, &nlen)) goto out; audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, "security_compute_sid: invalid context %s" " for scontext=%s" " tcontext=%s" " tclass=%s", - n, s, t, policydb.p_class_val_to_name[tclass-1]); + n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1)); out: kfree(s); kfree(t); @@ -1340,13 +1365,40 @@ out: return -EACCES; } +static void filename_compute_type(struct policydb *p, struct context *newcontext, + u32 stype, u32 ttype, u16 tclass, + const char *objname) +{ + struct filename_trans ft; + struct filename_trans_datum *otype; + + /* + * Most filename trans rules are going to live in specific directories + * like /dev or /var/run. This bitmap will quickly skip rule searches + * if the ttype does not contain any rules. 
+ */ + if (!ebitmap_get_bit(&p->filename_trans_ttypes, ttype)) + return; + + ft.stype = stype; + ft.ttype = ttype; + ft.tclass = tclass; + ft.name = objname; + + otype = hashtab_search(p->filename_trans, &ft); + if (otype) + newcontext->type = otype->otype; +} + static int security_compute_sid(u32 ssid, u32 tsid, u16 orig_tclass, u32 specified, + const char *objname, u32 *out_sid, bool kern) { + struct class_datum *cladatum = NULL; struct context *scontext = NULL, *tcontext = NULL, newcontext; struct role_trans *roletr = NULL; struct avtab_key avkey; @@ -1354,6 +1406,7 @@ static int security_compute_sid(u32 ssid, struct avtab_node *node; u16 tclass; int rc = 0; + bool sock; if (!ss_initialized) { switch (orig_tclass) { @@ -1371,10 +1424,13 @@ static int security_compute_sid(u32 ssid, read_lock(&policy_rwlock); - if (kern) + if (kern) { tclass = unmap_class(orig_tclass); - else + sock = security_is_socket_class(orig_tclass); + } else { tclass = orig_tclass; + sock = security_is_socket_class(map_class(tclass)); + } scontext = sidtab_search(&sidtab, ssid); if (!scontext) { @@ -1391,12 +1447,20 @@ static int security_compute_sid(u32 ssid, goto out_unlock; } + if (tclass && tclass <= policydb.p_classes.nprim) + cladatum = policydb.class_val_to_struct[tclass - 1]; + /* Set the user identity. */ switch (specified) { case AVTAB_TRANSITION: case AVTAB_CHANGE: - /* Use the process user identity. */ - newcontext.user = scontext->user; + if (cladatum && cladatum->default_user == DEFAULT_TARGET) { + newcontext.user = tcontext->user; + } else { + /* notice this gets both DEFAULT_SOURCE and unset */ + /* Use the process user identity. */ + newcontext.user = scontext->user; + } break; case AVTAB_MEMBER: /* Use the related object owner. */ @@ -1404,16 +1468,31 @@ static int security_compute_sid(u32 ssid, break; } - /* Set the role and type to default values. */ - if (tclass == policydb.process_class) { - /* Use the current role and type of process. */ + /* Set the role to default values. */ + if (cladatum && cladatum->default_role == DEFAULT_SOURCE) { newcontext.role = scontext->role; - newcontext.type = scontext->type; + } else if (cladatum && cladatum->default_role == DEFAULT_TARGET) { + newcontext.role = tcontext->role; } else { - /* Use the well-defined object role. */ - newcontext.role = OBJECT_R_VAL; - /* Use the type of the related object. */ + if ((tclass == policydb.process_class) || (sock == true)) + newcontext.role = scontext->role; + else + newcontext.role = OBJECT_R_VAL; + } + + /* Set the type to default values. */ + if (cladatum && cladatum->default_type == DEFAULT_SOURCE) { + newcontext.type = scontext->type; + } else if (cladatum && cladatum->default_type == DEFAULT_TARGET) { newcontext.type = tcontext->type; + } else { + if ((tclass == policydb.process_class) || (sock == true)) { + /* Use the type of process. */ + newcontext.type = scontext->type; + } else { + /* Use the type of the related object. */ + newcontext.type = tcontext->type; + } } /* Look for a type transition/member/change rule. */ @@ -1439,25 +1518,29 @@ static int security_compute_sid(u32 ssid, newcontext.type = avdatum->data; } + /* if we have a objname this is a file trans check so check those rules */ + if (objname) + filename_compute_type(&policydb, &newcontext, scontext->type, + tcontext->type, tclass, objname); + /* Check for class-specific changes. */ - if (tclass == policydb.process_class) { - if (specified & AVTAB_TRANSITION) { - /* Look for a role transition rule. 
*/ - for (roletr = policydb.role_tr; roletr; - roletr = roletr->next) { - if (roletr->role == scontext->role && - roletr->type == tcontext->type) { - /* Use the role transition rule. */ - newcontext.role = roletr->new_role; - break; - } + if (specified & AVTAB_TRANSITION) { + /* Look for a role transition rule. */ + for (roletr = policydb.role_tr; roletr; roletr = roletr->next) { + if ((roletr->role == scontext->role) && + (roletr->type == tcontext->type) && + (roletr->tclass == tclass)) { + /* Use the role transition rule. */ + newcontext.role = roletr->new_role; + break; } } } /* Set the MLS attributes. This is done last because it may allocate memory. */ - rc = mls_compute_sid(scontext, tcontext, tclass, specified, &newcontext); + rc = mls_compute_sid(scontext, tcontext, tclass, specified, + &newcontext, sock); if (rc) goto out_unlock; @@ -1492,22 +1575,18 @@ out: * if insufficient memory is available, or %0 if the new SID was * computed successfully. */ -int security_transition_sid(u32 ssid, - u32 tsid, - u16 tclass, - u32 *out_sid) +int security_transition_sid(u32 ssid, u32 tsid, u16 tclass, + const struct qstr *qstr, u32 *out_sid) { return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, - out_sid, true); + qstr ? qstr->name : NULL, out_sid, true); } -int security_transition_sid_user(u32 ssid, - u32 tsid, - u16 tclass, - u32 *out_sid) +int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass, + const char *objname, u32 *out_sid) { return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION, - out_sid, false); + objname, out_sid, false); } /** @@ -1528,8 +1607,8 @@ int security_member_sid(u32 ssid, u16 tclass, u32 *out_sid) { - return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid, - false); + return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, NULL, + out_sid, false); } /** @@ -1550,8 +1629,8 @@ int security_change_sid(u32 ssid, u16 tclass, u32 *out_sid) { - return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid, - false); + return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, NULL, + out_sid, false); } /* Clone the SID into the new SID table. */ @@ -1569,22 +1648,17 @@ static int clone_sid(u32 sid, static inline int convert_context_handle_invalid_context(struct context *context) { - int rc = 0; + char *s; + u32 len; - if (selinux_enforcing) { - rc = -EINVAL; - } else { - char *s; - u32 len; - - if (!context_struct_to_string(context, &s, &len)) { - printk(KERN_WARNING - "SELinux: Context %s would be invalid if enforcing\n", - s); - kfree(s); - } + if (selinux_enforcing) + return -EINVAL; + + if (!context_struct_to_string(context, &s, &len)) { + printk(KERN_WARNING "SELinux: Context %s would be invalid if enforcing\n", s); + kfree(s); } - return rc; + return 0; } struct convert_context_args { @@ -1621,17 +1695,17 @@ static int convert_context(u32 key, if (c->str) { struct context ctx; + + rc = -ENOMEM; s = kstrdup(c->str, GFP_KERNEL); - if (!s) { - rc = -ENOMEM; + if (!s) goto out; - } + rc = string_to_context_struct(args->newp, NULL, s, c->len, &ctx, SECSID_NULL); kfree(s); if (!rc) { - printk(KERN_INFO - "SELinux: Context %s became valid (mapped).\n", + printk(KERN_INFO "SELinux: Context %s became valid (mapped).\n", c->str); /* Replace string with mapped representation. */ kfree(c->str); @@ -1643,8 +1717,7 @@ static int convert_context(u32 key, goto out; } else { /* Other error condition, e.g. ENOMEM. 
*/ - printk(KERN_ERR - "SELinux: Unable to map context %s, rc = %d.\n", + printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n", c->str, -rc); goto out; } @@ -1654,25 +1727,26 @@ static int convert_context(u32 key, if (rc) goto out; - rc = -EINVAL; - /* Convert the user. */ + rc = -EINVAL; usrdatum = hashtab_search(args->newp->p_users.table, - args->oldp->p_user_val_to_name[c->user - 1]); + sym_name(args->oldp, SYM_USERS, c->user - 1)); if (!usrdatum) goto bad; c->user = usrdatum->value; /* Convert the role. */ + rc = -EINVAL; role = hashtab_search(args->newp->p_roles.table, - args->oldp->p_role_val_to_name[c->role - 1]); + sym_name(args->oldp, SYM_ROLES, c->role - 1)); if (!role) goto bad; c->role = role->value; /* Convert the type. */ + rc = -EINVAL; typdatum = hashtab_search(args->newp->p_types.table, - args->oldp->p_type_val_to_name[c->type - 1]); + sym_name(args->oldp, SYM_TYPES, c->type - 1)); if (!typdatum) goto bad; c->type = typdatum->value; @@ -1700,6 +1774,7 @@ static int convert_context(u32 key, oc = args->newp->ocontexts[OCON_ISID]; while (oc && oc->sid[0] != SECINITSID_UNLABELED) oc = oc->next; + rc = -EINVAL; if (!oc) { printk(KERN_ERR "SELinux: unable to look up" " the initial SIDs list\n"); @@ -1719,19 +1794,20 @@ static int convert_context(u32 key, } context_destroy(&oldc); + rc = 0; out: return rc; bad: /* Map old representation to string and save it. */ - if (context_struct_to_string(&oldc, &s, &len)) - return -ENOMEM; + rc = context_struct_to_string(&oldc, &s, &len); + if (rc) + return rc; context_destroy(&oldc); context_destroy(c); c->str = s; c->len = len; - printk(KERN_INFO - "SELinux: Context %s became invalid (unmapped).\n", + printk(KERN_INFO "SELinux: Context %s became invalid (unmapped).\n", c->str); rc = 0; goto out; @@ -1743,9 +1819,10 @@ static void security_load_policycaps(void) POLICYDB_CAPABILITY_NETPEER); selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_OPENPERM); + selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps, + POLICYDB_CAPABILITY_ALWAYSNETWORK); } -extern void selinux_complete_init(void); static int security_preserve_bools(struct policydb *p); /** @@ -1760,7 +1837,7 @@ static int security_preserve_bools(struct policydb *p); */ int security_load_policy(void *data, size_t len) { - struct policydb oldpolicydb, newpolicydb; + struct policydb *oldpolicydb, *newpolicydb; struct sidtab oldsidtab, newsidtab; struct selinux_mapping *oldmap, *map = NULL; struct convert_context_args args; @@ -1769,12 +1846,19 @@ int security_load_policy(void *data, size_t len) int rc = 0; struct policy_file file = { data, len }, *fp = &file; + oldpolicydb = kzalloc(2 * sizeof(*oldpolicydb), GFP_KERNEL); + if (!oldpolicydb) { + rc = -ENOMEM; + goto out; + } + newpolicydb = oldpolicydb + 1; + if (!ss_initialized) { avtab_cache_init(); rc = policydb_read(&policydb, fp); if (rc) { avtab_cache_destroy(); - return rc; + goto out; } policydb.len = len; @@ -1784,14 +1868,14 @@ int security_load_policy(void *data, size_t len) if (rc) { policydb_destroy(&policydb); avtab_cache_destroy(); - return rc; + goto out; } rc = policydb_load_isids(&policydb, &sidtab); if (rc) { policydb_destroy(&policydb); avtab_cache_destroy(); - return rc; + goto out; } security_load_policycaps(); @@ -1803,36 +1887,36 @@ int security_load_policy(void *data, size_t len) selinux_status_update_policyload(seqno); selinux_netlbl_cache_invalidate(); selinux_xfrm_notify_policyload(); - return 0; + goto out; } #if 0 sidtab_hash_eval(&sidtab, 
"sids"); #endif - rc = policydb_read(&newpolicydb, fp); + rc = policydb_read(newpolicydb, fp); if (rc) - return rc; + goto out; - newpolicydb.len = len; + newpolicydb->len = len; /* If switching between different policy types, log MLS status */ - if (policydb.mls_enabled && !newpolicydb.mls_enabled) + if (policydb.mls_enabled && !newpolicydb->mls_enabled) printk(KERN_INFO "SELinux: Disabling MLS support...\n"); - else if (!policydb.mls_enabled && newpolicydb.mls_enabled) + else if (!policydb.mls_enabled && newpolicydb->mls_enabled) printk(KERN_INFO "SELinux: Enabling MLS support...\n"); - rc = policydb_load_isids(&newpolicydb, &newsidtab); + rc = policydb_load_isids(newpolicydb, &newsidtab); if (rc) { printk(KERN_ERR "SELinux: unable to load the initial SIDs\n"); - policydb_destroy(&newpolicydb); - return rc; + policydb_destroy(newpolicydb); + goto out; } - rc = selinux_set_mapping(&newpolicydb, secclass_map, &map, &map_size); + rc = selinux_set_mapping(newpolicydb, secclass_map, &map, &map_size); if (rc) goto err; - rc = security_preserve_bools(&newpolicydb); + rc = security_preserve_bools(newpolicydb); if (rc) { printk(KERN_ERR "SELinux: unable to preserve booleans\n"); goto err; @@ -1850,7 +1934,7 @@ int security_load_policy(void *data, size_t len) * in the new SID table. */ args.oldp = &policydb; - args.newp = &newpolicydb; + args.newp = newpolicydb; rc = sidtab_map(&newsidtab, convert_context, &args); if (rc) { printk(KERN_ERR "SELinux: unable to convert the internal" @@ -1860,12 +1944,12 @@ int security_load_policy(void *data, size_t len) } /* Save the old policydb and SID table to free later. */ - memcpy(&oldpolicydb, &policydb, sizeof policydb); + memcpy(oldpolicydb, &policydb, sizeof(policydb)); sidtab_set(&oldsidtab, &sidtab); /* Install the new policydb and SID table. */ write_lock_irq(&policy_rwlock); - memcpy(&policydb, &newpolicydb, sizeof policydb); + memcpy(&policydb, newpolicydb, sizeof(policydb)); sidtab_set(&sidtab, &newsidtab); security_load_policycaps(); oldmap = current_mapping; @@ -1875,7 +1959,7 @@ int security_load_policy(void *data, size_t len) write_unlock_irq(&policy_rwlock); /* Free the old policydb and SID table. 
*/ - policydb_destroy(&oldpolicydb); + policydb_destroy(oldpolicydb); sidtab_destroy(&oldsidtab); kfree(oldmap); @@ -1885,14 +1969,17 @@ int security_load_policy(void *data, size_t len) selinux_netlbl_cache_invalidate(); selinux_xfrm_notify_policyload(); - return 0; + rc = 0; + goto out; err: kfree(map); sidtab_destroy(&newsidtab); - policydb_destroy(&newpolicydb); - return rc; + policydb_destroy(newpolicydb); +out: + kfree(oldpolicydb); + return rc; } size_t security_policydb_len(void) @@ -2012,7 +2099,7 @@ int security_node_sid(u16 domain, u32 addrlen, u32 *out_sid) { - int rc = 0; + int rc; struct ocontext *c; read_lock(&policy_rwlock); @@ -2021,10 +2108,9 @@ int security_node_sid(u16 domain, case AF_INET: { u32 addr; - if (addrlen != sizeof(u32)) { - rc = -EINVAL; + rc = -EINVAL; + if (addrlen != sizeof(u32)) goto out; - } addr = *((u32 *)addrp); @@ -2038,10 +2124,9 @@ int security_node_sid(u16 domain, } case AF_INET6: - if (addrlen != sizeof(u64) * 2) { - rc = -EINVAL; + rc = -EINVAL; + if (addrlen != sizeof(u64) * 2) goto out; - } c = policydb.ocontexts[OCON_NODE6]; while (c) { if (match_ipv6_addrmask(addrp, c->u.node6.addr, @@ -2052,6 +2137,7 @@ int security_node_sid(u16 domain, break; default: + rc = 0; *out_sid = SECINITSID_NODE; goto out; } @@ -2069,6 +2155,7 @@ int security_node_sid(u16 domain, *out_sid = SECINITSID_NODE; } + rc = 0; out: read_unlock(&policy_rwlock); return rc; @@ -2113,24 +2200,22 @@ int security_get_user_sids(u32 fromsid, context_init(&usercon); + rc = -EINVAL; fromcon = sidtab_search(&sidtab, fromsid); - if (!fromcon) { - rc = -EINVAL; + if (!fromcon) goto out_unlock; - } + rc = -EINVAL; user = hashtab_search(policydb.p_users.table, username); - if (!user) { - rc = -EINVAL; + if (!user) goto out_unlock; - } + usercon.user = user->value; + rc = -ENOMEM; mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC); - if (!mysids) { - rc = -ENOMEM; + if (!mysids) goto out_unlock; - } ebitmap_for_each_positive_bit(&user->roles, rnode, i) { role = policydb.role_val_to_struct[i]; @@ -2147,12 +2232,11 @@ int security_get_user_sids(u32 fromsid, if (mynel < maxnel) { mysids[mynel++] = sid; } else { + rc = -ENOMEM; maxnel += SIDS_NEL; mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC); - if (!mysids2) { - rc = -ENOMEM; + if (!mysids2) goto out_unlock; - } memcpy(mysids2, mysids, mynel * sizeof(*mysids2)); kfree(mysids); mysids = mysids2; @@ -2160,7 +2244,7 @@ int security_get_user_sids(u32 fromsid, } } } - + rc = 0; out_unlock: read_unlock(&policy_rwlock); if (rc || !mynel) { @@ -2168,17 +2252,18 @@ out_unlock: goto out; } + rc = -ENOMEM; mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL); if (!mysids2) { - rc = -ENOMEM; kfree(mysids); goto out; } for (i = 0, j = 0; i < mynel; i++) { + struct av_decision dummy_avd; rc = avc_has_perm_noaudit(fromsid, mysids[i], SECCLASS_PROCESS, /* kernel value */ PROCESS__TRANSITION, AVC_STRICT, - NULL); + &dummy_avd); if (!rc) mysids2[j++] = mysids[i]; cond_resched(); @@ -2211,7 +2296,7 @@ int security_genfs_sid(const char *fstype, u16 sclass; struct genfs *genfs; struct ocontext *c; - int rc = 0, cmp = 0; + int rc, cmp = 0; while (path[0] == '/' && path[1] == '/') path++; @@ -2219,6 +2304,7 @@ int security_genfs_sid(const char *fstype, read_lock(&policy_rwlock); sclass = unmap_class(orig_sclass); + *sid = SECINITSID_UNLABELED; for (genfs = policydb.genfs; genfs; genfs = genfs->next) { cmp = strcmp(fstype, genfs->fstype); @@ -2226,11 +2312,9 @@ int security_genfs_sid(const char *fstype, break; } - if (!genfs || cmp) { - *sid = 
SECINITSID_UNLABELED; - rc = -ENOENT; + rc = -ENOENT; + if (!genfs || cmp) goto out; - } for (c = genfs->head; c; c = c->next) { len = strlen(c->u.name); @@ -2239,21 +2323,18 @@ int security_genfs_sid(const char *fstype, break; } - if (!c) { - *sid = SECINITSID_UNLABELED; - rc = -ENOENT; + rc = -ENOENT; + if (!c) goto out; - } if (!c->sid[0]) { - rc = sidtab_context_to_sid(&sidtab, - &c->context[0], - &c->sid[0]); + rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); if (rc) goto out; } *sid = c->sid[0]; + rc = 0; out: read_unlock(&policy_rwlock); return rc; @@ -2261,17 +2342,14 @@ out: /** * security_fs_use - Determine how to handle labeling for a filesystem. - * @fstype: filesystem type - * @behavior: labeling behavior - * @sid: SID for filesystem (superblock) + * @sb: superblock in question */ -int security_fs_use( - const char *fstype, - unsigned int *behavior, - u32 *sid) +int security_fs_use(struct super_block *sb) { int rc = 0; struct ocontext *c; + struct superblock_security_struct *sbsec = sb->s_security; + const char *fstype = sb->s_type->name; read_lock(&policy_rwlock); @@ -2283,22 +2361,21 @@ int security_fs_use( } if (c) { - *behavior = c->v.behavior; + sbsec->behavior = c->v.behavior; if (!c->sid[0]) { - rc = sidtab_context_to_sid(&sidtab, - &c->context[0], + rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]); if (rc) goto out; } - *sid = c->sid[0]; + sbsec->sid = c->sid[0]; } else { - rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid); + rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid); if (rc) { - *behavior = SECURITY_FS_USE_NONE; + sbsec->behavior = SECURITY_FS_USE_NONE; rc = 0; } else { - *behavior = SECURITY_FS_USE_GENFS; + sbsec->behavior = SECURITY_FS_USE_GENFS; } } @@ -2309,34 +2386,39 @@ out: int security_get_bools(int *len, char ***names, int **values) { - int i, rc = -ENOMEM; + int i, rc; read_lock(&policy_rwlock); *names = NULL; *values = NULL; + rc = 0; *len = policydb.p_bools.nprim; - if (!*len) { - rc = 0; + if (!*len) goto out; - } - *names = kcalloc(*len, sizeof(char *), GFP_ATOMIC); + rc = -ENOMEM; + *names = kcalloc(*len, sizeof(char *), GFP_ATOMIC); if (!*names) goto err; - *values = kcalloc(*len, sizeof(int), GFP_ATOMIC); + rc = -ENOMEM; + *values = kcalloc(*len, sizeof(int), GFP_ATOMIC); if (!*values) goto err; for (i = 0; i < *len; i++) { size_t name_len; + (*values)[i] = policydb.bool_val_to_struct[i]->state; - name_len = strlen(policydb.p_bool_val_to_name[i]) + 1; - (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC); + name_len = strlen(sym_name(&policydb, SYM_BOOLS, i)) + 1; + + rc = -ENOMEM; + (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC); if (!(*names)[i]) goto err; - strncpy((*names)[i], policydb.p_bool_val_to_name[i], name_len); + + strncpy((*names)[i], sym_name(&policydb, SYM_BOOLS, i), name_len); (*names)[i][name_len - 1] = 0; } rc = 0; @@ -2355,27 +2437,26 @@ err: int security_set_bools(int len, int *values) { - int i, rc = 0; + int i, rc; int lenp, seqno = 0; struct cond_node *cur; write_lock_irq(&policy_rwlock); + rc = -EFAULT; lenp = policydb.p_bools.nprim; - if (len != lenp) { - rc = -EFAULT; + if (len != lenp) goto out; - } for (i = 0; i < len; i++) { if (!!values[i] != policydb.bool_val_to_struct[i]->state) { audit_log(current->audit_context, GFP_ATOMIC, AUDIT_MAC_CONFIG_CHANGE, "bool=%s val=%d old_val=%d auid=%u ses=%u", - policydb.p_bool_val_to_name[i], + sym_name(&policydb, SYM_BOOLS, i), !!values[i], policydb.bool_val_to_struct[i]->state, - 
audit_get_loginuid(current), + from_kuid(&init_user_ns, audit_get_loginuid(current)), audit_get_sessionid(current)); } if (values[i]) @@ -2391,7 +2472,7 @@ int security_set_bools(int len, int *values) } seqno = ++latest_granting; - + rc = 0; out: write_unlock_irq(&policy_rwlock); if (!rc) { @@ -2405,16 +2486,15 @@ out: int security_get_bool_value(int bool) { - int rc = 0; + int rc; int len; read_lock(&policy_rwlock); + rc = -EFAULT; len = policydb.p_bools.nprim; - if (bool >= len) { - rc = -EFAULT; + if (bool >= len) goto out; - } rc = policydb.bool_val_to_struct[bool]->state; out: @@ -2464,8 +2544,9 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) struct context newcon; char *s; u32 len; - int rc = 0; + int rc; + rc = 0; if (!ss_initialized || !policydb.mls_enabled) { *new_sid = sid; goto out; @@ -2474,19 +2555,20 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) context_init(&newcon); read_lock(&policy_rwlock); + + rc = -EINVAL; context1 = sidtab_search(&sidtab, sid); if (!context1) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, sid); - rc = -EINVAL; goto out_unlock; } + rc = -EINVAL; context2 = sidtab_search(&sidtab, mls_sid); if (!context2) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, mls_sid); - rc = -EINVAL; goto out_unlock; } @@ -2500,20 +2582,17 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid) /* Check the validity of the new context. */ if (!policydb_context_isvalid(&policydb, &newcon)) { rc = convert_context_handle_invalid_context(&newcon); - if (rc) - goto bad; + if (rc) { + if (!context_struct_to_string(&newcon, &s, &len)) { + audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, + "security_sid_mls_copy: invalid context %s", s); + kfree(s); + } + goto out_unlock; + } } rc = sidtab_context_to_sid(&sidtab, &newcon, new_sid); - goto out_unlock; - -bad: - if (!context_struct_to_string(&newcon, &s, &len)) { - audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR, - "security_sid_mls_copy: invalid context %s", s); - kfree(s); - } - out_unlock: read_unlock(&policy_rwlock); context_destroy(&newcon); @@ -2549,6 +2628,8 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type, struct context *nlbl_ctx; struct context *xfrm_ctx; + *peer_sid = SECSID_NULL; + /* handle the common (which also happens to be the set of easy) cases * right away, these two if statements catch everything involving a * single or absent peer SID/label */ @@ -2567,40 +2648,37 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type, /* we don't need to check ss_initialized here since the only way both * nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the * security server was initialized and ss_initialized was true */ - if (!policydb.mls_enabled) { - *peer_sid = SECSID_NULL; + if (!policydb.mls_enabled) return 0; - } read_lock(&policy_rwlock); + rc = -EINVAL; nlbl_ctx = sidtab_search(&sidtab, nlbl_sid); if (!nlbl_ctx) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, nlbl_sid); - rc = -EINVAL; - goto out_slowpath; + goto out; } + rc = -EINVAL; xfrm_ctx = sidtab_search(&sidtab, xfrm_sid); if (!xfrm_ctx) { printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n", __func__, xfrm_sid); - rc = -EINVAL; - goto out_slowpath; + goto out; } rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 
0 : -EACCES); + if (rc) + goto out; -out_slowpath: + /* at present NetLabel SIDs/labels really only carry MLS + * information so if the MLS portion of the NetLabel SID + * matches the MLS portion of the labeled XFRM SID/label + * then pass along the XFRM SID as it is the most + * expressive */ + *peer_sid = xfrm_sid; +out: read_unlock(&policy_rwlock); - if (rc == 0) - /* at present NetLabel SIDs/labels really only carry MLS - * information so if the MLS portion of the NetLabel SID - * matches the MLS portion of the labeled XFRM SID/label - * then pass along the XFRM SID as it is the most - * expressive */ - *peer_sid = xfrm_sid; - else - *peer_sid = SECSID_NULL; return rc; } @@ -2619,10 +2697,11 @@ static int get_classes_callback(void *k, void *d, void *args) int security_get_classes(char ***classes, int *nclasses) { - int rc = -ENOMEM; + int rc; read_lock(&policy_rwlock); + rc = -ENOMEM; *nclasses = policydb.p_classes.nprim; *classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC); if (!*classes) @@ -2630,7 +2709,7 @@ int security_get_classes(char ***classes, int *nclasses) rc = hashtab_map(policydb.p_classes.table, get_classes_callback, *classes); - if (rc < 0) { + if (rc) { int i; for (i = 0; i < *nclasses; i++) kfree((*classes)[i]); @@ -2657,19 +2736,20 @@ static int get_permissions_callback(void *k, void *d, void *args) int security_get_permissions(char *class, char ***perms, int *nperms) { - int rc = -ENOMEM, i; + int rc, i; struct class_datum *match; read_lock(&policy_rwlock); + rc = -EINVAL; match = hashtab_search(policydb.p_classes.table, class); if (!match) { printk(KERN_ERR "SELinux: %s: unrecognized class %s\n", __func__, class); - rc = -EINVAL; goto out; } + rc = -ENOMEM; *nperms = match->permissions.nprim; *perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC); if (!*perms) @@ -2678,13 +2758,13 @@ int security_get_permissions(char *class, char ***perms, int *nperms) if (match->comdatum) { rc = hashtab_map(match->comdatum->permissions.table, get_permissions_callback, *perms); - if (rc < 0) + if (rc) goto err; } rc = hashtab_map(match->permissions.table, get_permissions_callback, *perms); - if (rc < 0) + if (rc) goto err; out: @@ -2774,7 +2854,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) case AUDIT_SUBJ_CLR: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: - /* we do not allow a range, indicated by the presense of '-' */ + /* we do not allow a range, indicated by the presence of '-' */ if (strchr(rulestr, '-')) return -EINVAL; break; @@ -2796,36 +2876,39 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule) switch (field) { case AUDIT_SUBJ_USER: case AUDIT_OBJ_USER: + rc = -EINVAL; userdatum = hashtab_search(policydb.p_users.table, rulestr); if (!userdatum) - rc = -EINVAL; - else - tmprule->au_ctxt.user = userdatum->value; + goto out; + tmprule->au_ctxt.user = userdatum->value; break; case AUDIT_SUBJ_ROLE: case AUDIT_OBJ_ROLE: + rc = -EINVAL; roledatum = hashtab_search(policydb.p_roles.table, rulestr); if (!roledatum) - rc = -EINVAL; - else - tmprule->au_ctxt.role = roledatum->value; + goto out; + tmprule->au_ctxt.role = roledatum->value; break; case AUDIT_SUBJ_TYPE: case AUDIT_OBJ_TYPE: + rc = -EINVAL; typedatum = hashtab_search(policydb.p_types.table, rulestr); if (!typedatum) - rc = -EINVAL; - else - tmprule->au_ctxt.type = typedatum->value; + goto out; + tmprule->au_ctxt.type = typedatum->value; break; case AUDIT_SUBJ_SEN: case AUDIT_SUBJ_CLR: case AUDIT_OBJ_LEV_LOW: case AUDIT_OBJ_LEV_HIGH: rc = 
mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC); + if (rc) + goto out; break; } - + rc = 0; +out: read_unlock(&policy_rwlock); if (rc) { @@ -2871,25 +2954,21 @@ int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule, struct selinux_audit_rule *rule = vrule; int match = 0; - if (!rule) { - audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR, - "selinux_audit_rule_match: missing rule\n"); + if (unlikely(!rule)) { + WARN_ONCE(1, "selinux_audit_rule_match: missing rule\n"); return -ENOENT; } read_lock(&policy_rwlock); if (rule->au_seqno < latest_granting) { - audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR, - "selinux_audit_rule_match: stale rule\n"); match = -ESTALE; goto out; } ctxt = sidtab_search(&sidtab, sid); - if (!ctxt) { - audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR, - "selinux_audit_rule_match: unrecognized SID %d\n", + if (unlikely(!ctxt)) { + WARN_ONCE(1, "selinux_audit_rule_match: unrecognized SID %d\n", sid); match = -ENOENT; goto out; @@ -2977,8 +3056,7 @@ out: static int (*aurule_callback)(void) = audit_update_lsm_rules; -static int aurule_avc_callback(u32 event, u32 ssid, u32 tsid, - u16 class, u32 perms, u32 *retained) +static int aurule_avc_callback(u32 event) { int err = 0; @@ -2991,8 +3069,7 @@ static int __init aurule_init(void) { int err; - err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET, - SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0); + err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET); if (err) panic("avc_add_callback() failed, error %d\n", err); @@ -3040,7 +3117,7 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr, * Description: * Convert the given NetLabel security attributes in @secattr into a * SELinux SID. If the @secattr field does not contain a full SELinux - * SID/context then use SECINITSID_NETMSG as the foundation. If possibile the + * SID/context then use SECINITSID_NETMSG as the foundation. If possible the * 'cache' field of @secattr is set and the CACHE flag is set; this is to * allow the @secattr to be used by NetLabel to cache the secattr to SID * conversion for future lookups. 
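
A pattern worth noting across the selinux/ss/services.c hunks above: the error code is assigned before the test (rc = -ENOENT; if (!c) goto out;) so every failure path falls through to a single unlock point. A minimal user-space sketch of that shape, with hypothetical names and a pthread rwlock standing in for policy_rwlock:

#include <errno.h>
#include <pthread.h>
#include <string.h>

static pthread_rwlock_t policy_lock = PTHREAD_RWLOCK_INITIALIZER;

struct entry { const char *name; int value; };
static struct entry table[] = { { "alpha", 1 }, { "beta", 2 } };

/* Set rc to the failure code first, test, and let every path fall
 * through to the single unlock at "out". */
static int lookup_value(const char *name, int *value)
{
	size_t i;
	int rc;

	pthread_rwlock_rdlock(&policy_lock);

	rc = -EINVAL;
	if (name == NULL || value == NULL)
		goto out;

	rc = -ENOENT;
	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		if (strcmp(table[i].name, name) == 0) {
			*value = table[i].value;
			rc = 0;
			break;
		}
	}
out:
	pthread_rwlock_unlock(&policy_lock);
	return rc;
}
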
Returns zero on success, negative values on @@ -3050,7 +3127,7 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr, int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, u32 *sid) { - int rc = -EIDRM; + int rc; struct context *ctx; struct context ctx_new; @@ -3061,16 +3138,15 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, read_lock(&policy_rwlock); - if (secattr->flags & NETLBL_SECATTR_CACHE) { + if (secattr->flags & NETLBL_SECATTR_CACHE) *sid = *(u32 *)secattr->cache->data; - rc = 0; - } else if (secattr->flags & NETLBL_SECATTR_SECID) { + else if (secattr->flags & NETLBL_SECATTR_SECID) *sid = secattr->attr.secid; - rc = 0; - } else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) { + else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) { + rc = -EIDRM; ctx = sidtab_search(&sidtab, SECINITSID_NETMSG); if (ctx == NULL) - goto netlbl_secattr_to_sid_return; + goto out; context_init(&ctx_new); ctx_new.user = ctx->user; @@ -3078,34 +3154,35 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr, ctx_new.type = ctx->type; mls_import_netlbl_lvl(&ctx_new, secattr); if (secattr->flags & NETLBL_SECATTR_MLS_CAT) { - if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat, - secattr->attr.mls.cat) != 0) - goto netlbl_secattr_to_sid_return; + rc = ebitmap_netlbl_import(&ctx_new.range.level[0].cat, + secattr->attr.mls.cat); + if (rc) + goto out; memcpy(&ctx_new.range.level[1].cat, &ctx_new.range.level[0].cat, sizeof(ctx_new.range.level[0].cat)); } - if (mls_context_isvalid(&policydb, &ctx_new) != 1) - goto netlbl_secattr_to_sid_return_cleanup; + rc = -EIDRM; + if (!mls_context_isvalid(&policydb, &ctx_new)) + goto out_free; rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid); - if (rc != 0) - goto netlbl_secattr_to_sid_return_cleanup; + if (rc) + goto out_free; security_netlbl_cache_add(secattr, *sid); ebitmap_destroy(&ctx_new.range.level[0].cat); - } else { + } else *sid = SECSID_NULL; - rc = 0; - } -netlbl_secattr_to_sid_return: read_unlock(&policy_rwlock); - return rc; -netlbl_secattr_to_sid_return_cleanup: + return 0; +out_free: ebitmap_destroy(&ctx_new.range.level[0].cat); - goto netlbl_secattr_to_sid_return; +out: + read_unlock(&policy_rwlock); + return rc; } /** @@ -3127,28 +3204,23 @@ int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr) return 0; read_lock(&policy_rwlock); + + rc = -ENOENT; ctx = sidtab_search(&sidtab, sid); - if (ctx == NULL) { - rc = -ENOENT; - goto netlbl_sid_to_secattr_failure; - } - secattr->domain = kstrdup(policydb.p_type_val_to_name[ctx->type - 1], + if (ctx == NULL) + goto out; + + rc = -ENOMEM; + secattr->domain = kstrdup(sym_name(&policydb, SYM_TYPES, ctx->type - 1), GFP_ATOMIC); - if (secattr->domain == NULL) { - rc = -ENOMEM; - goto netlbl_sid_to_secattr_failure; - } + if (secattr->domain == NULL) + goto out; + secattr->attr.secid = sid; secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID; mls_export_netlbl_lvl(ctx, secattr); rc = mls_export_netlbl_cat(ctx, secattr); - if (rc != 0) - goto netlbl_sid_to_secattr_failure; - read_unlock(&policy_rwlock); - - return 0; - -netlbl_sid_to_secattr_failure: +out: read_unlock(&policy_rwlock); return rc; } @@ -3160,7 +3232,7 @@ netlbl_sid_to_secattr_failure: * @len: length of data in bytes * */ -int security_read_policy(void **data, ssize_t *len) +int security_read_policy(void **data, size_t *len) { int rc; struct policy_file fp; diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index 
e817989764c..5840a35155f 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -147,6 +147,17 @@ out: return rc; } +static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc) +{ + BUG_ON(loc >= SIDTAB_CACHE_LEN); + + while (loc > 0) { + s->cache[loc] = s->cache[loc - 1]; + loc--; + } + s->cache[0] = n; +} + static inline u32 sidtab_search_context(struct sidtab *s, struct context *context) { @@ -156,14 +167,33 @@ static inline u32 sidtab_search_context(struct sidtab *s, for (i = 0; i < SIDTAB_SIZE; i++) { cur = s->htable[i]; while (cur) { - if (context_cmp(&cur->context, context)) + if (context_cmp(&cur->context, context)) { + sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1); return cur->sid; + } cur = cur->next; } } return 0; } +static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context) +{ + int i; + struct sidtab_node *node; + + for (i = 0; i < SIDTAB_CACHE_LEN; i++) { + node = s->cache[i]; + if (unlikely(!node)) + return 0; + if (context_cmp(&node->context, context)) { + sidtab_update_cache(s, node, i); + return node->sid; + } + } + return 0; +} + int sidtab_context_to_sid(struct sidtab *s, struct context *context, u32 *out_sid) @@ -174,7 +204,9 @@ int sidtab_context_to_sid(struct sidtab *s, *out_sid = SECSID_NULL; - sid = sidtab_search_context(s, context); + sid = sidtab_search_cache(s, context); + if (!sid) + sid = sidtab_search_context(s, context); if (!sid) { spin_lock_irqsave(&s->lock, flags); /* Rescan now that we hold the lock. */ @@ -259,12 +291,15 @@ void sidtab_destroy(struct sidtab *s) void sidtab_set(struct sidtab *dst, struct sidtab *src) { unsigned long flags; + int i; spin_lock_irqsave(&src->lock, flags); dst->htable = src->htable; dst->nel = src->nel; dst->next_sid = src->next_sid; dst->shutdown = 0; + for (i = 0; i < SIDTAB_CACHE_LEN; i++) + dst->cache[i] = NULL; spin_unlock_irqrestore(&src->lock, flags); } diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h index 64ea5b1cdea..84dc154d938 100644 --- a/security/selinux/ss/sidtab.h +++ b/security/selinux/ss/sidtab.h @@ -26,6 +26,8 @@ struct sidtab { unsigned int nel; /* number of elements */ unsigned int next_sid; /* next SID to allocate */ unsigned char shutdown; +#define SIDTAB_CACHE_LEN 3 + struct sidtab_node *cache[SIDTAB_CACHE_LEN]; spinlock_t lock; }; diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c index fff78d3b51a..98b042630a9 100644 --- a/security/selinux/xfrm.c +++ b/security/selinux/xfrm.c @@ -46,7 +46,7 @@ #include <net/xfrm.h> #include <net/checksum.h> #include <net/udp.h> -#include <asm/atomic.h> +#include <linux/atomic.h> #include "avc.h" #include "objsec.h" @@ -56,7 +56,7 @@ atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0); /* - * Returns true if an LSM/SELinux context + * Returns true if the context is an LSM/SELinux context. */ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx) { @@ -66,7 +66,7 @@ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx) } /* - * Returns true if the xfrm contains a security blob for SELinux + * Returns true if the xfrm contains a security blob for SELinux. */ static inline int selinux_authorizable_xfrm(struct xfrm_state *x) { @@ -74,48 +74,112 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x) } /* - * LSM hook implementation that authorizes that a flow can use - * a xfrm policy rule. + * Allocates a xfrm_sec_state and populates it using the supplied security + * xfrm_user_sec_ctx context. 
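
The sidtab.c/sidtab.h hunks above put a SIDTAB_CACHE_LEN (3) entry most-recently-used cache in front of the full hash-table scan: cache hits are promoted to slot 0, and hits found by the slow scan enter at the last slot. A user-space sketch of the same idea, using hypothetical node and key types rather than the kernel sidtab API:

#include <stddef.h>
#include <string.h>

#define CACHE_LEN 3

struct node { char key[32]; unsigned int sid; };

/* Shift slots [0, loc) down one place and put the hit at the front,
 * mirroring what sidtab_update_cache() does above. */
static void cache_promote(struct node **cache, struct node *hit, int loc)
{
	while (loc > 0) {
		cache[loc] = cache[loc - 1];
		loc--;
	}
	cache[0] = hit;
}

/* Probe the small MRU cache before falling back to the slow search. */
static struct node *cache_lookup(struct node **cache, const char *key)
{
	int i;

	for (i = 0; i < CACHE_LEN; i++) {
		if (cache[i] == NULL)
			return NULL;	/* cache fills front to back */
		if (strcmp(cache[i]->key, key) == 0) {
			cache_promote(cache, cache[i], i);
			return cache[0];
		}
	}
	return NULL;
}
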
*/ -int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) +static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp, + struct xfrm_user_sec_ctx *uctx, + gfp_t gfp) { int rc; - u32 sel_sid; + const struct task_security_struct *tsec = current_security(); + struct xfrm_sec_ctx *ctx = NULL; + u32 str_len; - /* Context sid is either set to label or ANY_ASSOC */ - if (ctx) { - if (!selinux_authorizable_ctx(ctx)) - return -EINVAL; - - sel_sid = ctx->ctx_sid; - } else - /* - * All flows should be treated as polmatch'ing an - * otherwise applicable "non-labeled" policy. This - * would prevent inadvertent "leaks". - */ - return 0; + if (ctxp == NULL || uctx == NULL || + uctx->ctx_doi != XFRM_SC_DOI_LSM || + uctx->ctx_alg != XFRM_SC_ALG_SELINUX) + return -EINVAL; + + str_len = uctx->ctx_len; + if (str_len >= PAGE_SIZE) + return -ENOMEM; + + ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp); + if (!ctx) + return -ENOMEM; + + ctx->ctx_doi = XFRM_SC_DOI_LSM; + ctx->ctx_alg = XFRM_SC_ALG_SELINUX; + ctx->ctx_len = str_len; + memcpy(ctx->ctx_str, &uctx[1], str_len); + ctx->ctx_str[str_len] = '\0'; + rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp); + if (rc) + goto err; - rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION, - ASSOCIATION__POLMATCH, - NULL); + rc = avc_has_perm(tsec->sid, ctx->ctx_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL); + if (rc) + goto err; - if (rc == -EACCES) - return -ESRCH; + *ctxp = ctx; + atomic_inc(&selinux_xfrm_refcount); + return 0; +err: + kfree(ctx); return rc; } /* + * Free the xfrm_sec_ctx structure. + */ +static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx) +{ + if (!ctx) + return; + + atomic_dec(&selinux_xfrm_refcount); + kfree(ctx); +} + +/* + * Authorize the deletion of a labeled SA or policy rule. + */ +static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx) +{ + const struct task_security_struct *tsec = current_security(); + + if (!ctx) + return 0; + + return avc_has_perm(tsec->sid, ctx->ctx_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, + NULL); +} + +/* + * LSM hook implementation that authorizes that a flow can use a xfrm policy + * rule. + */ +int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir) +{ + int rc; + + /* All flows should be treated as polmatch'ing an otherwise applicable + * "non-labeled" policy. This would prevent inadvertent "leaks". */ + if (!ctx) + return 0; + + /* Context sid is either set to label or ANY_ASSOC */ + if (!selinux_authorizable_ctx(ctx)) + return -EINVAL; + + rc = avc_has_perm(fl_secid, ctx->ctx_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL); + return (rc == -EACCES ? -ESRCH : rc); +} + +/* * LSM hook implementation that authorizes that a state matches * the given policy, flow combo. */ - -int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp, - struct flowi *fl) +int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, + struct xfrm_policy *xp, + const struct flowi *fl) { u32 state_sid; - int rc; if (!xp->security) if (x->security) @@ -135,189 +199,115 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy * state_sid = x->security->ctx_sid; - if (fl->secid != state_sid) + if (fl->flowi_secid != state_sid) return 0; - rc = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION, - ASSOCIATION__SENDTO, - NULL)? 0:1; - - /* - * We don't need a separate SA Vs. policy polmatch check - * since the SA is now of the same label as the flow and - * a flow Vs. 
policy polmatch check had already happened - * in selinux_xfrm_policy_lookup() above. - */ - - return rc; + /* We don't need a separate SA Vs. policy polmatch check since the SA + * is now of the same label as the flow and a flow Vs. policy polmatch + * check had already happened in selinux_xfrm_policy_lookup() above. */ + return (avc_has_perm(fl->flowi_secid, state_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, + NULL) ? 0 : 1); } -/* - * LSM hook implementation that checks and/or returns the xfrm sid for the - * incoming packet. - */ - -int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) +static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb) { - struct sec_path *sp; + struct dst_entry *dst = skb_dst(skb); + struct xfrm_state *x; - *sid = SECSID_NULL; + if (dst == NULL) + return SECSID_NULL; + x = dst->xfrm; + if (x == NULL || !selinux_authorizable_xfrm(x)) + return SECSID_NULL; - if (skb == NULL) - return 0; + return x->security->ctx_sid; +} + +static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb, + u32 *sid, int ckall) +{ + u32 sid_session = SECSID_NULL; + struct sec_path *sp = skb->sp; - sp = skb->sp; if (sp) { - int i, sid_set = 0; + int i; - for (i = sp->len-1; i >= 0; i--) { + for (i = sp->len - 1; i >= 0; i--) { struct xfrm_state *x = sp->xvec[i]; if (selinux_authorizable_xfrm(x)) { struct xfrm_sec_ctx *ctx = x->security; - if (!sid_set) { - *sid = ctx->ctx_sid; - sid_set = 1; - + if (sid_session == SECSID_NULL) { + sid_session = ctx->ctx_sid; if (!ckall) - break; - } else if (*sid != ctx->ctx_sid) + goto out; + } else if (sid_session != ctx->ctx_sid) { + *sid = SECSID_NULL; return -EINVAL; + } } } } +out: + *sid = sid_session; return 0; } /* - * Security blob allocation for xfrm_policy and xfrm_state - * CTX does not have a meaningful value on input + * LSM hook implementation that checks and/or returns the xfrm sid for the + * incoming packet. */ -static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *uctx, u32 sid) +int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall) { - int rc = 0; - const struct task_security_struct *tsec = current_security(); - struct xfrm_sec_ctx *ctx = NULL; - char *ctx_str = NULL; - u32 str_len; - - BUG_ON(uctx && sid); - - if (!uctx) - goto not_from_user; - - if (uctx->ctx_doi != XFRM_SC_ALG_SELINUX) - return -EINVAL; - - str_len = uctx->ctx_len; - if (str_len >= PAGE_SIZE) - return -ENOMEM; - - *ctxp = ctx = kmalloc(sizeof(*ctx) + - str_len + 1, - GFP_KERNEL); - - if (!ctx) - return -ENOMEM; - - ctx->ctx_doi = uctx->ctx_doi; - ctx->ctx_len = str_len; - ctx->ctx_alg = uctx->ctx_alg; - - memcpy(ctx->ctx_str, - uctx+1, - str_len); - ctx->ctx_str[str_len] = 0; - rc = security_context_to_sid(ctx->ctx_str, - str_len, - &ctx->ctx_sid); - - if (rc) - goto out; - - /* - * Does the subject have permission to set security context? 
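
The new selinux_xfrm_alloc_user() shown above folds the old policy and state allocation paths into one routine: check the DOI and algorithm, bound the context length, copy the string with an added NUL terminator, then resolve it to a SID and authorize setcontext. A minimal sketch of that validate, copy and terminate pattern, using hypothetical structures and constants (no SELinux calls):

#include <errno.h>
#include <stdlib.h>
#include <string.h>

#define CTX_MAX 4096		/* stand-in for the PAGE_SIZE bound */
#define DOI_LSM 1
#define ALG_SELINUX 1

/* Hypothetical wire format: fixed header followed by ctx_len bytes. */
struct user_ctx { unsigned int doi, alg, ctx_len; };
struct sec_ctx { unsigned int doi, alg, len; char str[]; };

static int alloc_from_user(struct sec_ctx **out, const struct user_ctx *uctx)
{
	struct sec_ctx *ctx;
	unsigned int len;

	if (out == NULL || uctx == NULL ||
	    uctx->doi != DOI_LSM || uctx->alg != ALG_SELINUX)
		return -EINVAL;

	len = uctx->ctx_len;
	if (len >= CTX_MAX)
		return -ENOMEM;

	ctx = malloc(sizeof(*ctx) + len + 1);
	if (ctx == NULL)
		return -ENOMEM;

	ctx->doi = DOI_LSM;
	ctx->alg = ALG_SELINUX;
	ctx->len = len;
	/* The payload is assumed to sit right after the header, hence &uctx[1]. */
	memcpy(ctx->str, &uctx[1], len);
	ctx->str[len] = '\0';

	*out = ctx;
	return 0;
}
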
- */ - rc = avc_has_perm(tsec->sid, ctx->ctx_sid, - SECCLASS_ASSOCIATION, - ASSOCIATION__SETCONTEXT, NULL); - if (rc) - goto out; - - return rc; - -not_from_user: - rc = security_sid_to_context(sid, &ctx_str, &str_len); - if (rc) - goto out; - - *ctxp = ctx = kmalloc(sizeof(*ctx) + - str_len, - GFP_ATOMIC); - - if (!ctx) { - rc = -ENOMEM; - goto out; + if (skb == NULL) { + *sid = SECSID_NULL; + return 0; } + return selinux_xfrm_skb_sid_ingress(skb, sid, ckall); +} - ctx->ctx_doi = XFRM_SC_DOI_LSM; - ctx->ctx_alg = XFRM_SC_ALG_SELINUX; - ctx->ctx_sid = sid; - ctx->ctx_len = str_len; - memcpy(ctx->ctx_str, - ctx_str, - str_len); +int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid) +{ + int rc; - goto out2; + rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0); + if (rc == 0 && *sid == SECSID_NULL) + *sid = selinux_xfrm_skb_sid_egress(skb); -out: - *ctxp = NULL; - kfree(ctx); -out2: - kfree(ctx_str); return rc; } /* - * LSM hook implementation that allocs and transfers uctx spec to - * xfrm_policy. + * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy. */ int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, - struct xfrm_user_sec_ctx *uctx) + struct xfrm_user_sec_ctx *uctx, + gfp_t gfp) { - int err; - - BUG_ON(!uctx); - - err = selinux_xfrm_sec_ctx_alloc(ctxp, uctx, 0); - if (err == 0) - atomic_inc(&selinux_xfrm_refcount); - - return err; + return selinux_xfrm_alloc_user(ctxp, uctx, gfp); } - /* - * LSM hook implementation that copies security data structure from old to - * new for policy cloning. + * LSM hook implementation that copies security data structure from old to new + * for policy cloning. */ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, struct xfrm_sec_ctx **new_ctxp) { struct xfrm_sec_ctx *new_ctx; - if (old_ctx) { - new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len, - GFP_KERNEL); - if (!new_ctx) - return -ENOMEM; + if (!old_ctx) + return 0; + + new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len, + GFP_ATOMIC); + if (!new_ctx) + return -ENOMEM; + atomic_inc(&selinux_xfrm_refcount); + *new_ctxp = new_ctx; - memcpy(new_ctx, old_ctx, sizeof(*new_ctx)); - memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len); - *new_ctxp = new_ctx; - } return 0; } @@ -326,7 +316,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx, */ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) { - kfree(ctx); + selinux_xfrm_free(ctx); } /* @@ -334,35 +324,58 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx) */ int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx) { - const struct task_security_struct *tsec = current_security(); - int rc = 0; - - if (ctx) { - rc = avc_has_perm(tsec->sid, ctx->ctx_sid, - SECCLASS_ASSOCIATION, - ASSOCIATION__SETCONTEXT, NULL); - if (rc == 0) - atomic_dec(&selinux_xfrm_refcount); - } + return selinux_xfrm_delete(ctx); +} - return rc; +/* + * LSM hook implementation that allocates a xfrm_sec_state, populates it using + * the supplied security context, and assigns it to the xfrm_state. + */ +int selinux_xfrm_state_alloc(struct xfrm_state *x, + struct xfrm_user_sec_ctx *uctx) +{ + return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL); } /* - * LSM hook implementation that allocs and transfers sec_ctx spec to - * xfrm_state. + * LSM hook implementation that allocates a xfrm_sec_state and populates based + * on a secid. 
*/ -int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx, - u32 secid) +int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x, + struct xfrm_sec_ctx *polsec, u32 secid) { - int err; + int rc; + struct xfrm_sec_ctx *ctx; + char *ctx_str = NULL; + int str_len; + + if (!polsec) + return 0; - BUG_ON(!x); + if (secid == 0) + return -EINVAL; + + rc = security_sid_to_context(secid, &ctx_str, &str_len); + if (rc) + return rc; + + ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC); + if (!ctx) { + rc = -ENOMEM; + goto out; + } + + ctx->ctx_doi = XFRM_SC_DOI_LSM; + ctx->ctx_alg = XFRM_SC_ALG_SELINUX; + ctx->ctx_sid = secid; + ctx->ctx_len = str_len; + memcpy(ctx->ctx_str, ctx_str, str_len); - err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid); - if (err == 0) - atomic_inc(&selinux_xfrm_refcount); - return err; + x->security = ctx; + atomic_inc(&selinux_xfrm_refcount); +out: + kfree(ctx_str); + return rc; } /* @@ -370,28 +383,15 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct */ void selinux_xfrm_state_free(struct xfrm_state *x) { - struct xfrm_sec_ctx *ctx = x->security; - kfree(ctx); + selinux_xfrm_free(x->security); } - /* - * LSM hook implementation that authorizes deletion of labeled SAs. - */ +/* + * LSM hook implementation that authorizes deletion of labeled SAs. + */ int selinux_xfrm_state_delete(struct xfrm_state *x) { - const struct task_security_struct *tsec = current_security(); - struct xfrm_sec_ctx *ctx = x->security; - int rc = 0; - - if (ctx) { - rc = avc_has_perm(tsec->sid, ctx->ctx_sid, - SECCLASS_ASSOCIATION, - ASSOCIATION__SETCONTEXT, NULL); - if (rc == 0) - atomic_dec(&selinux_xfrm_refcount); - } - - return rc; + return selinux_xfrm_delete(x->security); } /* @@ -401,14 +401,12 @@ int selinux_xfrm_state_delete(struct xfrm_state *x) * we need to check for unlabelled access since this may not have * gone thru the IPSec process. */ -int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad) +int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad) { - int i, rc = 0; - struct sec_path *sp; - u32 sel_sid = SECINITSID_UNLABELED; - - sp = skb->sp; + int i; + struct sec_path *sp = skb->sp; + u32 peer_sid = SECINITSID_UNLABELED; if (sp) { for (i = 0; i < sp->len; i++) { @@ -416,23 +414,17 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, if (x && selinux_authorizable_xfrm(x)) { struct xfrm_sec_ctx *ctx = x->security; - sel_sid = ctx->ctx_sid; + peer_sid = ctx->ctx_sid; break; } } } - /* - * This check even when there's no association involved is - * intended, according to Trent Jaeger, to make sure a - * process can't engage in non-ipsec communication unless - * explicitly allowed by policy. - */ - - rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION, - ASSOCIATION__RECVFROM, ad); - - return rc; + /* This check even when there's no association involved is intended, + * according to Trent Jaeger, to make sure a process can't engage in + * non-IPsec communication unless explicitly allowed by policy. */ + return avc_has_perm(sk_sid, peer_sid, + SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad); } /* @@ -442,49 +434,38 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb, * If we do have a authorizable security association, then it has already been * checked in the selinux_xfrm_state_pol_flow_match hook above. 
*/ -int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb, - struct common_audit_data *ad, u8 proto) +int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb, + struct common_audit_data *ad, u8 proto) { struct dst_entry *dst; - int rc = 0; - - dst = skb_dst(skb); - - if (dst) { - struct dst_entry *dst_test; - - for (dst_test = dst; dst_test != NULL; - dst_test = dst_test->child) { - struct xfrm_state *x = dst_test->xfrm; - - if (x && selinux_authorizable_xfrm(x)) - goto out; - } - } switch (proto) { case IPPROTO_AH: case IPPROTO_ESP: case IPPROTO_COMP: - /* - * We should have already seen this packet once before - * it underwent xfrm(s). No need to subject it to the - * unlabeled check. - */ - goto out; + /* We should have already seen this packet once before it + * underwent xfrm(s). No need to subject it to the unlabeled + * check. */ + return 0; default: break; } - /* - * This check even when there's no association involved is - * intended, according to Trent Jaeger, to make sure a - * process can't engage in non-ipsec communication unless - * explicitly allowed by policy. - */ + dst = skb_dst(skb); + if (dst) { + struct dst_entry *iter; + + for (iter = dst; iter != NULL; iter = iter->child) { + struct xfrm_state *x = iter->xfrm; - rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION, - ASSOCIATION__SENDTO, ad); -out: - return rc; + if (x && selinux_authorizable_xfrm(x)) + return 0; + } + } + + /* This check even when there's no association involved is intended, + * according to Trent Jaeger, to make sure a process can't engage in + * non-IPsec communication unless explicitly allowed by policy. */ + return avc_has_perm(sk_sid, SECINITSID_UNLABELED, + SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad); } diff --git a/security/smack/Kconfig b/security/smack/Kconfig index 603b0878434..e69de9c642b 100644 --- a/security/smack/Kconfig +++ b/security/smack/Kconfig @@ -1,6 +1,10 @@ config SECURITY_SMACK bool "Simplified Mandatory Access Control Kernel Support" - depends on NETLABEL && SECURITY_NETWORK + depends on NET + depends on INET + depends on SECURITY + select NETLABEL + select SECURITY_NETWORK default n help This selects the Simplified Mandatory Access Control Kernel. diff --git a/security/smack/smack.h b/security/smack/smack.h index 43ae747a5aa..020307ef097 100644 --- a/security/smack/smack.h +++ b/security/smack/smack.h @@ -23,13 +23,52 @@ #include <linux/lsm_audit.h> /* + * Smack labels were limited to 23 characters for a long time. + */ +#define SMK_LABELLEN 24 +#define SMK_LONGLABEL 256 + +/* + * This is the repository for labels seen so that it is + * not necessary to keep allocating tiny chuncks of memory + * and so that they can be shared. + * + * Labels are never modified in place. Anytime a label + * is imported (e.g. xattrset on a file) the list is checked + * for it and it is added if it doesn't exist. The address + * is passed out in either case. Entries are added, but + * never deleted. + * + * Since labels are hanging around anyway it doesn't + * hurt to maintain a secid for those awkward situations + * where kernel components that ought to use LSM independent + * interfaces don't. The secid should go away when all of + * these components have been repaired. + * + * The cipso value associated with the label gets stored here, too. + * + * Keep the access rules for this subject label here so that + * the entire set of rules does not need to be examined every + * time. 
+ */ +struct smack_known { + struct list_head list; + struct hlist_node smk_hashed; + char *smk_known; + u32 smk_secid; + struct netlbl_lsm_secattr smk_netlabel; /* on wire labels */ + struct list_head smk_rules; /* access rules */ + struct mutex smk_rules_lock; /* lock for rules */ +}; + +/* + * Maximum number of bytes for the levels in a CIPSO IP option. * Why 23? CIPSO is constrained to 30, so a 32 byte buffer is * bigger than can be used, and 24 is the next lower multiple * of 8, and there are too many issues if there isn't space set * aside for the terminating null byte. */ -#define SMK_MAXLEN 23 -#define SMK_LABELLEN (SMK_MAXLEN+1) +#define SMK_CIPSOLEN 24 struct superblock_smack { char *smk_root; @@ -37,46 +76,47 @@ struct superblock_smack { char *smk_hat; char *smk_default; int smk_initialized; - spinlock_t smk_sblock; /* for initialization */ }; struct socket_smack { - char *smk_out; /* outbound label */ - char *smk_in; /* inbound label */ - char smk_packet[SMK_LABELLEN]; /* TCP peer label */ + struct smack_known *smk_out; /* outbound label */ + struct smack_known *smk_in; /* inbound label */ + struct smack_known *smk_packet; /* TCP peer label */ }; /* * Inode smack data */ struct inode_smack { - char *smk_inode; /* label of the fso */ - struct mutex smk_lock; /* initialization lock */ - int smk_flags; /* smack inode flags */ + char *smk_inode; /* label of the fso */ + struct smack_known *smk_task; /* label of the task */ + struct smack_known *smk_mmap; /* label of the mmap domain */ + struct mutex smk_lock; /* initialization lock */ + int smk_flags; /* smack inode flags */ +}; + +struct task_smack { + struct smack_known *smk_task; /* label for access control */ + struct smack_known *smk_forked; /* label when forked */ + struct list_head smk_rules; /* per task access rules */ + struct mutex smk_rules_lock; /* lock for the rules */ }; #define SMK_INODE_INSTANT 0x01 /* inode is instantiated */ +#define SMK_INODE_TRANSMUTE 0x02 /* directory is transmuting */ +#define SMK_INODE_CHANGED 0x04 /* smack was transmuted */ /* * A label access rule. */ struct smack_rule { struct list_head list; - char *smk_subject; + struct smack_known *smk_subject; char *smk_object; int smk_access; }; /* - * An entry in the table mapping smack values to - * CIPSO level/category-set values. - */ -struct smack_cipso { - int smk_level; - char smk_catset[SMK_LABELLEN]; -}; - -/* * An entry in the table identifying hosts. */ struct smk_netlbladdr { @@ -87,32 +127,14 @@ struct smk_netlbladdr { }; /* - * This is the repository for labels seen so that it is - * not necessary to keep allocating tiny chuncks of memory - * and so that they can be shared. - * - * Labels are never modified in place. Anytime a label - * is imported (e.g. xattrset on a file) the list is checked - * for it and it is added if it doesn't exist. The address - * is passed out in either case. Entries are added, but - * never deleted. - * - * Since labels are hanging around anyway it doesn't - * hurt to maintain a secid for those awkward situations - * where kernel components that ought to use LSM independent - * interfaces don't. The secid should go away when all of - * these components have been repaired. - * - * If there is a cipso value associated with the label it - * gets stored here, too. This will most likely be rare as - * the cipso direct mapping in used internally. + * An entry in the table identifying ports. 
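
With struct smack_known as defined above, every distinct label string is imported once, kept forever, and carries its secid, NetLabel attribute and per-subject rule list, so the rest of the code can compare labels by pointer. A small user-space sketch of such an interning table (hypothetical names, a plain singly linked list instead of the kernel list and hash):

#include <stdlib.h>
#include <string.h>

/* Hypothetical interned-label entry: one per distinct label string,
 * never freed, so "same label" can be tested by comparing pointers. */
struct label {
	struct label *next;
	char *name;
	unsigned int secid;
};

static struct label *labels;
static unsigned int next_secid = 10;

static struct label *label_intern(const char *name)
{
	struct label *lp;

	for (lp = labels; lp != NULL; lp = lp->next)
		if (strcmp(lp->name, name) == 0)
			return lp;	/* existing entry, stable address */

	lp = malloc(sizeof(*lp));
	if (lp == NULL)
		return NULL;
	lp->name = strdup(name);
	if (lp->name == NULL) {
		free(lp);
		return NULL;
	}
	lp->secid = next_secid++;
	lp->next = labels;
	labels = lp;
	return lp;
}
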
*/ -struct smack_known { +struct smk_port_label { struct list_head list; - char smk_known[SMK_LABELLEN]; - u32 smk_secid; - struct smack_cipso *smk_cipso; - spinlock_t smk_cipsolock; /* for changing cipso map */ + struct sock *smk_sock; /* socket initialized on */ + unsigned short smk_port; /* the port number */ + struct smack_known *smk_in; /* inbound label */ + struct smack_known *smk_out; /* outgoing label */ }; /* @@ -122,6 +144,7 @@ struct smack_known { #define SMK_FSFLOOR "smackfsfloor=" #define SMK_FSHAT "smackfshat=" #define SMK_FSROOT "smackfsroot=" +#define SMK_FSTRANS "smackfstransmute=" #define SMACK_CIPSO_OPTION "-CIPSO" @@ -139,40 +162,56 @@ struct smack_known { #define SMACK_CIPSO_SOCKET 1 /* - * smackfs magic number - * smackfs macic number + * CIPSO defaults. + */ +#define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */ +#define SMACK_CIPSO_DOI_INVALID -1 /* Not a DOI */ +#define SMACK_CIPSO_DIRECT_DEFAULT 250 /* Arbitrary */ +#define SMACK_CIPSO_MAPPED_DEFAULT 251 /* Also arbitrary */ +#define SMACK_CIPSO_MAXLEVEL 255 /* CIPSO 2.2 standard */ +/* + * CIPSO 2.2 standard is 239, but Smack wants to use the + * categories in a structured way that limits the value to + * the bits in 23 bytes, hence the unusual number. */ -#define SMACK_MAGIC 0x43415d53 /* "SMAC" */ +#define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */ /* - * A limit on the number of entries in the lists - * makes some of the list administration easier. + * Ptrace rules */ -#define SMACK_LIST_MAX 10000 +#define SMACK_PTRACE_DEFAULT 0 +#define SMACK_PTRACE_EXACT 1 +#define SMACK_PTRACE_DRACONIAN 2 +#define SMACK_PTRACE_MAX SMACK_PTRACE_DRACONIAN /* - * CIPSO defaults. + * Flags for untraditional access modes. + * It shouldn't be necessary to avoid conflicts with definitions + * in fs.h, but do so anyway. */ -#define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */ -#define SMACK_CIPSO_DOI_INVALID -1 /* Not a DOI */ -#define SMACK_CIPSO_DIRECT_DEFAULT 250 /* Arbitrary */ -#define SMACK_CIPSO_MAXCATVAL 63 /* Bigger gets harder */ -#define SMACK_CIPSO_MAXLEVEL 255 /* CIPSO 2.2 standard */ -#define SMACK_CIPSO_MAXCATNUM 239 /* CIPSO 2.2 standard */ +#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */ +#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... 
*/ /* * Just to make the common cases easier to deal with */ -#define MAY_ANY (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC) #define MAY_ANYREAD (MAY_READ | MAY_EXEC) -#define MAY_ANYWRITE (MAY_WRITE | MAY_APPEND) #define MAY_READWRITE (MAY_READ | MAY_WRITE) #define MAY_NOT 0 /* - * Number of access types used by Smack (rwxa) + * Number of access types used by Smack (rwxatl) */ -#define SMK_NUM_ACCESS_TYPE 4 +#define SMK_NUM_ACCESS_TYPE 6 + +/* SMACK data */ +struct smack_audit_data { + const char *function; + char *subject; + char *object; + char *request; + int result; +}; /* * Smack audit data; is empty if CONFIG_AUDIT not set @@ -181,6 +220,7 @@ struct smack_known { struct smk_audit_info { #ifdef CONFIG_AUDIT struct common_audit_data a; + struct smack_audit_data sad; #endif }; /* @@ -191,22 +231,29 @@ struct inode_smack *new_inode_smack(char *); /* * These functions are in smack_access.c */ -int smk_access(char *, char *, int, struct smk_audit_info *); +int smk_access_entry(char *, char *, struct list_head *); +int smk_access(struct smack_known *, char *, int, struct smk_audit_info *); +int smk_tskacc(struct task_smack *, char *, u32, struct smk_audit_info *); int smk_curacc(char *, u32, struct smk_audit_info *); -int smack_to_cipso(const char *, struct smack_cipso *); -void smack_from_cipso(u32, char *, char *); -char *smack_from_secid(const u32); +struct smack_known *smack_from_secid(const u32); +char *smk_parse_smack(const char *string, int len); +int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int); char *smk_import(const char *, int); struct smack_known *smk_import_entry(const char *, int); +void smk_insert_entry(struct smack_known *skp); +struct smack_known *smk_find_entry(const char *); u32 smack_to_secid(const char *); /* * Shared data. */ extern int smack_cipso_direct; -extern char *smack_net_ambient; -extern char *smack_onlycap; +extern int smack_cipso_mapped; +extern struct smack_known *smack_net_ambient; +extern struct smack_known *smack_onlycap; +extern struct smack_known *smack_syslog_label; extern const char *smack_cipso_option; +extern int smack_ptrace_rule; extern struct smack_known smack_known_floor; extern struct smack_known smack_known_hat; @@ -215,22 +262,22 @@ extern struct smack_known smack_known_invalid; extern struct smack_known smack_known_star; extern struct smack_known smack_known_web; +extern struct mutex smack_known_lock; extern struct list_head smack_known_list; -extern struct list_head smack_rule_list; extern struct list_head smk_netlbladdr_list; extern struct security_operations smack_ops; +#define SMACK_HASH_SLOTS 16 +extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS]; + /* - * Stricly for CIPSO level manipulation. - * Set the category bit number in a smack label sized buffer. + * Is the directory transmuting? */ -static inline void smack_catset_bit(int cat, char *catsetp) +static inline int smk_inode_transmutable(const struct inode *isp) { - if (cat > SMK_LABELLEN * 8) - return; - - catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8); + struct inode_smack *sip = isp->i_security; + return (sip->smk_flags & SMK_INODE_TRANSMUTE) != 0; } /* @@ -243,6 +290,45 @@ static inline char *smk_of_inode(const struct inode *isp) } /* + * Present a pointer to the smack label entry in an task blob. + */ +static inline struct smack_known *smk_of_task(const struct task_smack *tsp) +{ + return tsp->smk_task; +} + +/* + * Present a pointer to the forked smack label entry in an task blob. 
+ */ +static inline struct smack_known *smk_of_forked(const struct task_smack *tsp) +{ + return tsp->smk_forked; +} + +/* + * Present a pointer to the smack label in the current task blob. + */ +static inline struct smack_known *smk_of_current(void) +{ + return smk_of_task(current_security()); +} + +/* + * Is the task privileged and allowed to be privileged + * by the onlycap rule. + */ +static inline int smack_privileged(int cap) +{ + struct smack_known *skp = smk_of_current(); + + if (!capable(cap)) + return 0; + if (smack_onlycap == NULL || smack_onlycap == skp) + return 1; + return 0; +} + +/* * logging functions */ #define SMACK_AUDIT_DENIED 0x1 @@ -263,9 +349,18 @@ void smack_log(char *subject_label, char *object_label, static inline void smk_ad_init(struct smk_audit_info *a, const char *func, char type) { - memset(a, 0, sizeof(*a)); + memset(&a->sad, 0, sizeof(a->sad)); a->a.type = type; - a->a.smack_audit_data.function = func; + a->a.smack_audit_data = &a->sad; + a->a.smack_audit_data->function = func; +} + +static inline void smk_ad_init_net(struct smk_audit_info *a, const char *func, + char type, struct lsm_network_audit *net) +{ + smk_ad_init(a, func, type); + memset(net, 0, sizeof(*net)); + a->a.u.net = net; } static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a, @@ -276,27 +371,22 @@ static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a, static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a, struct dentry *d) { - a->a.u.fs.path.dentry = d; -} -static inline void smk_ad_setfield_u_fs_path_mnt(struct smk_audit_info *a, - struct vfsmount *m) -{ - a->a.u.fs.path.mnt = m; + a->a.u.dentry = d; } static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a, struct inode *i) { - a->a.u.fs.inode = i; + a->a.u.inode = i; } static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a, struct path p) { - a->a.u.fs.path = p; + a->a.u.path = p; } static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a, struct sock *sk) { - a->a.u.net.sk = sk; + a->a.u.net->sk = sk; } #else /* no AUDIT */ diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c index f4fac64c4da..c062e9467b6 100644 --- a/security/smack/smack_access.c +++ b/security/smack/smack_access.c @@ -19,37 +19,31 @@ struct smack_known smack_known_huh = { .smk_known = "?", .smk_secid = 2, - .smk_cipso = NULL, }; struct smack_known smack_known_hat = { .smk_known = "^", .smk_secid = 3, - .smk_cipso = NULL, }; struct smack_known smack_known_star = { .smk_known = "*", .smk_secid = 4, - .smk_cipso = NULL, }; struct smack_known smack_known_floor = { .smk_known = "_", .smk_secid = 5, - .smk_cipso = NULL, }; struct smack_known smack_known_invalid = { .smk_known = "", .smk_secid = 6, - .smk_cipso = NULL, }; struct smack_known smack_known_web = { .smk_known = "@", .smk_secid = 7, - .smk_cipso = NULL, }; LIST_HEAD(smack_known_list); @@ -67,9 +61,58 @@ static u32 smack_next_secid = 10; int log_policy = SMACK_AUDIT_DENIED; /** - * smk_access - determine if a subject has a specific access to an object + * smk_access_entry - look up matching access rule * @subject_label: a pointer to the subject's Smack label * @object_label: a pointer to the object's Smack label + * @rule_list: the list of rules to search + * + * This function looks up the subject/object pair in the + * access rule list and returns the access mode. If no + * entry is found returns -ENOENT. 
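
smack_privileged() above combines two gates: the task must hold the capability, and when an onlycap label is configured the caller must also carry exactly that label. A sketch of that check with stub stand-ins for capable() and the current label (hypothetical names):

#include <stdbool.h>

struct label { const char *name; };

/* Hypothetical globals standing in for smack_onlycap and the current
 * task's label; capable_stub() stands in for capable(). */
static const struct label *onlycap;
static const struct label *current_label;

static bool capable_stub(int cap)
{
	(void)cap;
	return true;		/* assume the capability bit is present */
}

/* Privilege requires the capability and, when an onlycap label is
 * configured, that the caller carries that exact label. */
static bool is_privileged(int cap)
{
	if (!capable_stub(cap))
		return false;
	if (onlycap == NULL || onlycap == current_label)
		return true;
	return false;
}
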
+ * + * NOTE: + * + * Earlier versions of this function allowed for labels that + * were not on the label list. This was done to allow for + * labels to come over the network that had never been seen + * before on this host. Unless the receiving socket has the + * star label this will always result in a failure check. The + * star labeled socket case is now handled in the networking + * hooks so there is no case where the label is not on the + * label list. Checking to see if the address of two labels + * is the same is now a reliable test. + * + * Do the object check first because that is more + * likely to differ. + * + * Allowing write access implies allowing locking. + */ +int smk_access_entry(char *subject_label, char *object_label, + struct list_head *rule_list) +{ + int may = -ENOENT; + struct smack_rule *srp; + + list_for_each_entry_rcu(srp, rule_list, list) { + if (srp->smk_object == object_label && + srp->smk_subject->smk_known == subject_label) { + may = srp->smk_access; + break; + } + } + + /* + * MAY_WRITE implies MAY_LOCK. + */ + if ((may & MAY_WRITE) == MAY_WRITE) + may |= MAY_LOCK; + return may; +} + +/** + * smk_access - determine if a subject has a specific access to an object + * @subject_known: a pointer to the subject's Smack label entry + * @object_label: a pointer to the object's Smack label * @request: the access requested, in "MAY" format * @a : a pointer to the audit data * @@ -77,20 +120,12 @@ int log_policy = SMACK_AUDIT_DENIED; * access rule list and returns 0 if the access is permitted, * non zero otherwise. * - * Even though Smack labels are usually shared on smack_list - * labels that come in off the network can't be imported - * and added to the list for locking reasons. - * - * Therefore, it is necessary to check the contents of the labels, - * not just the pointer values. Of course, in most cases the labels - * will be on the list, so checking the pointers may be a worthwhile - * optimization. + * Smack labels are shared on smack_list */ -int smk_access(char *subject_label, char *object_label, int request, - struct smk_audit_info *a) +int smk_access(struct smack_known *subject_known, char *object_label, + int request, struct smk_audit_info *a) { - u32 may = MAY_NOT; - struct smack_rule *srp; + int may = MAY_NOT; int rc = 0; /* @@ -98,8 +133,7 @@ int smk_access(char *subject_label, char *object_label, int request, * * A star subject can't access any object. */ - if (subject_label == smack_known_star.smk_known || - strcmp(subject_label, smack_known_star.smk_known) == 0) { + if (subject_known == &smack_known_star) { rc = -EACCES; goto out_audit; } @@ -109,107 +143,123 @@ int smk_access(char *subject_label, char *object_label, int request, * An internet subject can access any object. */ if (object_label == smack_known_web.smk_known || - subject_label == smack_known_web.smk_known || - strcmp(object_label, smack_known_web.smk_known) == 0 || - strcmp(subject_label, smack_known_web.smk_known) == 0) + subject_known == &smack_known_web) goto out_audit; /* * A star object can be accessed by any subject. */ - if (object_label == smack_known_star.smk_known || - strcmp(object_label, smack_known_star.smk_known) == 0) + if (object_label == smack_known_star.smk_known) goto out_audit; /* * An object can be accessed in any way by a subject * with the same label. */ - if (subject_label == object_label || - strcmp(subject_label, object_label) == 0) + if (subject_known->smk_known == object_label) goto out_audit; /* * A hat subject can read any object. 
* A floor object can be read by any subject. */ if ((request & MAY_ANYREAD) == request) { - if (object_label == smack_known_floor.smk_known || - strcmp(object_label, smack_known_floor.smk_known) == 0) + if (object_label == smack_known_floor.smk_known) goto out_audit; - if (subject_label == smack_known_hat.smk_known || - strcmp(subject_label, smack_known_hat.smk_known) == 0) + if (subject_known == &smack_known_hat) goto out_audit; } /* * Beyond here an explicit relationship is required. * If the requested access is contained in the available * access (e.g. read is included in readwrite) it's - * good. + * good. A negative response from smk_access_entry() + * indicates there is no entry for this pair. */ rcu_read_lock(); - list_for_each_entry_rcu(srp, &smack_rule_list, list) { - if (srp->smk_subject == subject_label || - strcmp(srp->smk_subject, subject_label) == 0) { - if (srp->smk_object == object_label || - strcmp(srp->smk_object, object_label) == 0) { - may = srp->smk_access; - break; - } - } - } + may = smk_access_entry(subject_known->smk_known, object_label, + &subject_known->smk_rules); rcu_read_unlock(); - /* - * This is a bit map operation. - */ - if ((request & may) == request) + + if (may > 0 && (request & may) == request) goto out_audit; rc = -EACCES; out_audit: #ifdef CONFIG_AUDIT if (a) - smack_log(subject_label, object_label, request, rc, a); + smack_log(subject_known->smk_known, object_label, request, + rc, a); #endif return rc; } /** - * smk_curacc - determine if current has a specific access to an object + * smk_tskacc - determine if a task has a specific access to an object + * @tsp: a pointer to the subject task * @obj_label: a pointer to the object's Smack label * @mode: the access requested, in "MAY" format * @a : common audit data * - * This function checks the current subject label/object label pair + * This function checks the subject task's label/object label pair * in the access rule list and returns 0 if the access is permitted, - * non zero otherwise. It allows that current may have the capability + * non zero otherwise. It allows that the task may have the capability * to override the rules. */ -int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a) +int smk_tskacc(struct task_smack *subject, char *obj_label, + u32 mode, struct smk_audit_info *a) { + struct smack_known *skp = smk_of_task(subject); + int may; int rc; - char *sp = current_security(); - - rc = smk_access(sp, obj_label, mode, NULL); - if (rc == 0) - goto out_audit; /* - * Return if a specific label has been designated as the - * only one that gets privilege and current does not - * have that label. + * Check the global rule list */ - if (smack_onlycap != NULL && smack_onlycap != current->cred->security) - goto out_audit; + rc = smk_access(skp, obj_label, mode, NULL); + if (rc == 0) { + /* + * If there is an entry in the task's rule list + * it can further restrict access. + */ + may = smk_access_entry(skp->smk_known, obj_label, + &subject->smk_rules); + if (may < 0) + goto out_audit; + if ((mode & may) == mode) + goto out_audit; + rc = -EACCES; + } - if (capable(CAP_MAC_OVERRIDE)) - return 0; + /* + * Allow for priviliged to override policy. 
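
smk_tskacc(), continuing below, layers three decisions: the global rule list decides first, a task-local rule may only narrow that result, and a privileged caller can still override a denial. A compact sketch of that ordering with hypothetical lookup helpers:

#include <errno.h>
#include <stdbool.h>

/* Hypothetical rule lookups: return a non-negative access mask, or
 * -ENOENT when the subject/object pair has no entry in that list. */
static int global_rule(const char *subj, const char *obj) { (void)subj; (void)obj; return 0x3; }
static int task_rule(const char *subj, const char *obj)   { (void)subj; (void)obj; return 0x1; }
static bool privileged(void)                               { return false; }

static int task_access(const char *subj, const char *obj, int request)
{
	int rc = 0;
	int may = global_rule(subj, obj);

	/* The global list decides first. */
	if (may < 0 || (request & may) != request)
		rc = -EACCES;

	/* A task-local entry may only narrow what the global list gave. */
	if (rc == 0) {
		may = task_rule(subj, obj);
		if (may >= 0 && (request & may) != request)
			rc = -EACCES;
	}

	/* A privileged caller can still override a denial. */
	if (rc != 0 && privileged())
		rc = 0;
	return rc;
}
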
+ */ + if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE)) + rc = 0; out_audit: #ifdef CONFIG_AUDIT if (a) - smack_log(sp, obj_label, mode, rc, a); + smack_log(skp->smk_known, obj_label, mode, rc, a); #endif return rc; } +/** + * smk_curacc - determine if current has a specific access to an object + * @obj_label: a pointer to the object's Smack label + * @mode: the access requested, in "MAY" format + * @a : common audit data + * + * This function checks the current subject label/object label pair + * in the access rule list and returns 0 if the access is permitted, + * non zero otherwise. It allows that current may have the capability + * to override the rules. + */ +int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a) +{ + struct task_smack *tsp = current_security(); + + return smk_tskacc(tsp, obj_label, mode, a); +} + #ifdef CONFIG_AUDIT /** * smack_str_from_perm : helper to transalate an int to a @@ -221,6 +271,7 @@ out_audit: static inline void smack_str_from_perm(char *string, int access) { int i = 0; + if (access & MAY_READ) string[i++] = 'r'; if (access & MAY_WRITE) @@ -229,6 +280,10 @@ static inline void smack_str_from_perm(char *string, int access) string[i++] = 'x'; if (access & MAY_APPEND) string[i++] = 'a'; + if (access & MAY_TRANSMUTE) + string[i++] = 't'; + if (access & MAY_LOCK) + string[i++] = 'l'; string[i] = '\0'; } /** @@ -241,15 +296,18 @@ static inline void smack_str_from_perm(char *string, int access) static void smack_log_callback(struct audit_buffer *ab, void *a) { struct common_audit_data *ad = a; - struct smack_audit_data *sad = &ad->smack_audit_data; + struct smack_audit_data *sad = ad->smack_audit_data; audit_log_format(ab, "lsm=SMACK fn=%s action=%s", - ad->smack_audit_data.function, + ad->smack_audit_data->function, sad->result ? 
"denied" : "granted"); audit_log_format(ab, " subject="); audit_log_untrustedstring(ab, sad->subject); audit_log_format(ab, " object="); audit_log_untrustedstring(ab, sad->object); - audit_log_format(ab, " requested=%s", sad->request); + if (sad->request[0] == '\0') + audit_log_format(ab, " labels_differ"); + else + audit_log_format(ab, " requested=%s", sad->request); } /** @@ -276,19 +334,19 @@ void smack_log(char *subject_label, char *object_label, int request, if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0) return; - if (a->smack_audit_data.function == NULL) - a->smack_audit_data.function = "unknown"; + sad = a->smack_audit_data; + + if (sad->function == NULL) + sad->function = "unknown"; /* end preparing the audit data */ - sad = &a->smack_audit_data; smack_str_from_perm(request_buffer, request); sad->subject = subject_label; sad->object = object_label; sad->request = request_buffer; sad->result = result; - a->lsm_pre_audit = smack_log_callback; - common_lsm_audit(a); + common_lsm_audit(a, smack_log_callback, NULL); } #else /* #ifdef CONFIG_AUDIT */ void smack_log(char *subject_label, char *object_label, int request, @@ -297,7 +355,127 @@ void smack_log(char *subject_label, char *object_label, int request, } #endif -static DEFINE_MUTEX(smack_known_lock); +DEFINE_MUTEX(smack_known_lock); + +struct hlist_head smack_known_hash[SMACK_HASH_SLOTS]; + +/** + * smk_insert_entry - insert a smack label into a hash map, + * + * this function must be called under smack_known_lock + */ +void smk_insert_entry(struct smack_known *skp) +{ + unsigned int hash; + struct hlist_head *head; + + hash = full_name_hash(skp->smk_known, strlen(skp->smk_known)); + head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)]; + + hlist_add_head_rcu(&skp->smk_hashed, head); + list_add_rcu(&skp->list, &smack_known_list); +} + +/** + * smk_find_entry - find a label on the list, return the list entry + * @string: a text string that might be a Smack label + * + * Returns a pointer to the entry in the label list that + * matches the passed string. + */ +struct smack_known *smk_find_entry(const char *string) +{ + unsigned int hash; + struct hlist_head *head; + struct smack_known *skp; + + hash = full_name_hash(string, strlen(string)); + head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)]; + + hlist_for_each_entry_rcu(skp, head, smk_hashed) + if (strcmp(skp->smk_known, string) == 0) + return skp; + + return NULL; +} + +/** + * smk_parse_smack - parse smack label from a text string + * @string: a text string that might contain a Smack label + * @len: the maximum size, or zero if it is NULL terminated. 
+ * + * Returns a pointer to the clean label, or NULL + */ +char *smk_parse_smack(const char *string, int len) +{ + char *smack; + int i; + + if (len <= 0) + len = strlen(string) + 1; + + /* + * Reserve a leading '-' as an indicator that + * this isn't a label, but an option to interfaces + * including /smack/cipso and /smack/cipso2 + */ + if (string[0] == '-') + return NULL; + + for (i = 0; i < len; i++) + if (string[i] > '~' || string[i] <= ' ' || string[i] == '/' || + string[i] == '"' || string[i] == '\\' || string[i] == '\'') + break; + + if (i == 0 || i >= SMK_LONGLABEL) + return NULL; + + smack = kzalloc(i + 1, GFP_KERNEL); + if (smack != NULL) { + strncpy(smack, string, i + 1); + smack[i] = '\0'; + } + return smack; +} + +/** + * smk_netlbl_mls - convert a catset to netlabel mls categories + * @catset: the Smack categories + * @sap: where to put the netlabel categories + * + * Allocates and fills attr.mls + * Returns 0 on success, error code on failure. + */ +int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap, + int len) +{ + unsigned char *cp; + unsigned char m; + int cat; + int rc; + int byte; + + sap->flags |= NETLBL_SECATTR_MLS_CAT; + sap->attr.mls.lvl = level; + sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); + if (!sap->attr.mls.cat) + return -ENOMEM; + sap->attr.mls.cat->startbit = 0; + + for (cat = 1, cp = catset, byte = 0; byte < len; cp++, byte++) + for (m = 0x80; m != 0; m >>= 1, cat++) { + if ((m & *cp) == 0) + continue; + rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat, + cat, GFP_ATOMIC); + if (rc < 0) { + netlbl_secattr_catmap_free(sap->attr.mls.cat); + return rc; + } + } + + return 0; +} /** * smk_import_entry - import a label, return the list entry @@ -310,53 +488,59 @@ static DEFINE_MUTEX(smack_known_lock); struct smack_known *smk_import_entry(const char *string, int len) { struct smack_known *skp; - char smack[SMK_LABELLEN]; - int found; - int i; - - if (len <= 0 || len > SMK_MAXLEN) - len = SMK_MAXLEN; - - for (i = 0, found = 0; i < SMK_LABELLEN; i++) { - if (found) - smack[i] = '\0'; - else if (i >= len || string[i] > '~' || string[i] <= ' ' || - string[i] == '/' || string[i] == '"' || - string[i] == '\\' || string[i] == '\'') { - smack[i] = '\0'; - found = 1; - } else - smack[i] = string[i]; - } + char *smack; + int slen; + int rc; - if (smack[0] == '\0') + smack = smk_parse_smack(string, len); + if (smack == NULL) return NULL; mutex_lock(&smack_known_lock); - found = 0; - list_for_each_entry_rcu(skp, &smack_known_list, list) { - if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) { - found = 1; - break; - } - } + skp = smk_find_entry(smack); + if (skp != NULL) + goto freeout; - if (found == 0) { - skp = kzalloc(sizeof(struct smack_known), GFP_KERNEL); - if (skp != NULL) { - strncpy(skp->smk_known, smack, SMK_MAXLEN); - skp->smk_secid = smack_next_secid++; - skp->smk_cipso = NULL; - spin_lock_init(&skp->smk_cipsolock); - /* - * Make sure that the entry is actually - * filled before putting it on the list. - */ - list_add_rcu(&skp->list, &smack_known_list); - } - } + skp = kzalloc(sizeof(*skp), GFP_KERNEL); + if (skp == NULL) + goto freeout; + skp->smk_known = smack; + skp->smk_secid = smack_next_secid++; + skp->smk_netlabel.domain = skp->smk_known; + skp->smk_netlabel.flags = + NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL; + /* + * If direct labeling works use it. + * Otherwise use mapped labeling. 
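
smk_insert_entry() and smk_find_entry() in the smack_access.c hunk above spread the known labels over SMACK_HASH_SLOTS (16) buckets keyed by full_name_hash(), so a lookup scans one short chain instead of the whole label list. A user-space sketch with a simple string hash standing in for full_name_hash():

#include <stdlib.h>
#include <string.h>

#define HASH_SLOTS 16		/* like SMACK_HASH_SLOTS, a power of two */

struct entry {
	struct entry *next;	/* bucket chain */
	char *name;
};

static struct entry *buckets[HASH_SLOTS];

/* Cheap string hash standing in for the kernel's full_name_hash(). */
static unsigned int hash_name(const char *s)
{
	unsigned int h = 5381;

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h;
}

static void insert_entry(struct entry *e)
{
	unsigned int slot = hash_name(e->name) & (HASH_SLOTS - 1);

	e->next = buckets[slot];
	buckets[slot] = e;
}

static struct entry *find_entry(const char *name)
{
	struct entry *e = buckets[hash_name(name) & (HASH_SLOTS - 1)];

	for (; e != NULL; e = e->next)
		if (strcmp(e->name, name) == 0)
			return e;
	return NULL;
}
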
+ */ + slen = strlen(smack); + if (slen < SMK_CIPSOLEN) + rc = smk_netlbl_mls(smack_cipso_direct, skp->smk_known, + &skp->smk_netlabel, slen); + else + rc = smk_netlbl_mls(smack_cipso_mapped, (char *)&skp->smk_secid, + &skp->smk_netlabel, sizeof(skp->smk_secid)); + + if (rc >= 0) { + INIT_LIST_HEAD(&skp->smk_rules); + mutex_init(&skp->smk_rules_lock); + /* + * Make sure that the entry is actually + * filled before putting it on the list. + */ + smk_insert_entry(skp); + goto unlockout; + } + /* + * smk_netlbl_mls failed. + */ + kfree(skp); + skp = NULL; +freeout: + kfree(smack); +unlockout: mutex_unlock(&smack_known_lock); return skp; @@ -387,10 +571,10 @@ char *smk_import(const char *string, int len) * smack_from_secid - find the Smack label associated with a secid * @secid: an integer that might be associated with a Smack label * - * Returns a pointer to the appropraite Smack label if there is one, + * Returns a pointer to the appropriate Smack label entry if there is one, * otherwise a pointer to the invalid Smack label. */ -char *smack_from_secid(const u32 secid) +struct smack_known *smack_from_secid(const u32 secid) { struct smack_known *skp; @@ -398,7 +582,7 @@ char *smack_from_secid(const u32 secid) list_for_each_entry_rcu(skp, &smack_known_list, list) { if (skp->smk_secid == secid) { rcu_read_unlock(); - return skp->smk_known; + return skp; } } @@ -407,7 +591,7 @@ char *smack_from_secid(const u32 secid) * of a secid that is not on the list. */ rcu_read_unlock(); - return smack_known_invalid.smk_known; + return &smack_known_invalid; } /** @@ -419,85 +603,9 @@ char *smack_from_secid(const u32 secid) */ u32 smack_to_secid(const char *smack) { - struct smack_known *skp; - - rcu_read_lock(); - list_for_each_entry_rcu(skp, &smack_known_list, list) { - if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) { - rcu_read_unlock(); - return skp->smk_secid; - } - } - rcu_read_unlock(); - return 0; -} - -/** - * smack_from_cipso - find the Smack label associated with a CIPSO option - * @level: Bell & LaPadula level from the network - * @cp: Bell & LaPadula categories from the network - * @result: where to put the Smack value - * - * This is a simple lookup in the label table. - * - * This is an odd duck as far as smack handling goes in that - * it sends back a copy of the smack label rather than a pointer - * to the master list. This is done because it is possible for - * a foreign host to send a smack label that is new to this - * machine and hence not on the list. That would not be an - * issue except that adding an entry to the master list can't - * be done at that point. - */ -void smack_from_cipso(u32 level, char *cp, char *result) -{ - struct smack_known *kp; - char *final = NULL; - - rcu_read_lock(); - list_for_each_entry(kp, &smack_known_list, list) { - if (kp->smk_cipso == NULL) - continue; - - spin_lock_bh(&kp->smk_cipsolock); - - if (kp->smk_cipso->smk_level == level && - memcmp(kp->smk_cipso->smk_catset, cp, SMK_LABELLEN) == 0) - final = kp->smk_known; - - spin_unlock_bh(&kp->smk_cipsolock); - } - rcu_read_unlock(); - if (final == NULL) - final = smack_known_huh.smk_known; - strncpy(result, final, SMK_MAXLEN); - return; -} - -/** - * smack_to_cipso - find the CIPSO option to go with a Smack label - * @smack: a pointer to the smack label in question - * @cp: where to put the result - * - * Returns zero if a value is available, non-zero otherwise. 
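
smk_netlbl_mls() above walks the category byte array one byte at a time, testing bits from 0x80 downward and numbering categories from 1, and records each set bit in the NetLabel category map. The same walk in a self-contained form that simply prints the category numbers (no NetLabel calls):

#include <stdio.h>

/* Walk a category byte array the way smk_netlbl_mls() does: bytes in
 * order, bits tested from 0x80 down, categories numbered from 1. */
static void print_categories(const unsigned char *catset, int len)
{
	const unsigned char *cp;
	unsigned char m;
	int cat, byte;

	for (cat = 1, cp = catset, byte = 0; byte < len; cp++, byte++)
		for (m = 0x80; m != 0; m >>= 1, cat++)
			if (m & *cp)
				printf("category %d\n", cat);
}

int main(void)
{
	/* 0xA0 sets bits 0x80 and 0x20: categories 1 and 3; 0x01 in the
	 * second byte is category 16. */
	unsigned char catset[2] = { 0xA0, 0x01 };

	print_categories(catset, 2);
	return 0;
}
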
- */ -int smack_to_cipso(const char *smack, struct smack_cipso *cp) -{ - struct smack_known *kp; - int found = 0; + struct smack_known *skp = smk_find_entry(smack); - rcu_read_lock(); - list_for_each_entry_rcu(kp, &smack_known_list, list) { - if (kp->smk_known == smack || - strcmp(kp->smk_known, smack) == 0) { - found = 1; - break; - } - } - rcu_read_unlock(); - - if (found == 0 || kp->smk_cipso == NULL) - return -ENOENT; - - memcpy(cp, kp->smk_cipso, sizeof(struct smack_cipso)); - return 0; + if (skp == NULL) + return 0; + return skp->smk_secid; } diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c index bc39f4067af..f2c30801ce4 100644 --- a/security/smack/smack_lsm.c +++ b/security/smack/smack_lsm.c @@ -3,12 +3,15 @@ * * This file contains the smack hook function implementations. * - * Author: + * Authors: * Casey Schaufler <casey@schaufler-ca.com> + * Jarkko Sakkinen <jarkko.sakkinen@intel.com> * * Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com> * Copyright (C) 2009 Hewlett-Packard Development Company, L.P. - * Paul Moore <paul.moore@hp.com> + * Paul Moore <paul@paul-moore.com> + * Copyright (C) 2010 Nokia Corporation + * Copyright (C) 2011 Intel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, @@ -24,17 +27,33 @@ #include <linux/ip.h> #include <linux/tcp.h> #include <linux/udp.h> +#include <linux/dccp.h> #include <linux/slab.h> #include <linux/mutex.h> #include <linux/pipe_fs_i.h> -#include <net/netlabel.h> #include <net/cipso_ipv4.h> +#include <net/ip.h> +#include <net/ipv6.h> #include <linux/audit.h> #include <linux/magic.h> +#include <linux/dcache.h> +#include <linux/personality.h> +#include <linux/msg.h> +#include <linux/shm.h> +#include <linux/binfmts.h> #include "smack.h" #define task_security(task) (task_cred_xxx((task), security)) +#define TRANS_TRUE "TRUE" +#define TRANS_TRUE_SIZE 4 + +#define SMK_CONNECTING 0 +#define SMK_RECEIVING 1 +#define SMK_SENDING 2 + +LIST_HEAD(smk_ipv6_port_list); + /** * smk_fetch - Fetch the smack label from a file. * @ip: a pointer to the inode @@ -43,19 +62,27 @@ * Returns a pointer to the master list entry for the Smack label * or NULL if there was no label to fetch. 
*/ -static char *smk_fetch(struct inode *ip, struct dentry *dp) +static struct smack_known *smk_fetch(const char *name, struct inode *ip, + struct dentry *dp) { int rc; - char in[SMK_LABELLEN]; + char *buffer; + struct smack_known *skp = NULL; if (ip->i_op->getxattr == NULL) return NULL; - rc = ip->i_op->getxattr(dp, XATTR_NAME_SMACK, in, SMK_LABELLEN); - if (rc < 0) + buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL); + if (buffer == NULL) return NULL; - return smk_import(in, rc); + rc = ip->i_op->getxattr(dp, name, buffer, SMK_LONGLABEL); + if (rc > 0) + skp = smk_import_entry(buffer, rc); + + kfree(buffer); + + return skp; } /** @@ -68,7 +95,7 @@ struct inode_smack *new_inode_smack(char *smack) { struct inode_smack *isp; - isp = kzalloc(sizeof(struct inode_smack), GFP_KERNEL); + isp = kzalloc(sizeof(struct inode_smack), GFP_NOFS); if (isp == NULL) return NULL; @@ -79,6 +106,125 @@ struct inode_smack *new_inode_smack(char *smack) return isp; } +/** + * new_task_smack - allocate a task security blob + * @smack: a pointer to the Smack label to use in the blob + * + * Returns the new blob or NULL if there's no memory available + */ +static struct task_smack *new_task_smack(struct smack_known *task, + struct smack_known *forked, gfp_t gfp) +{ + struct task_smack *tsp; + + tsp = kzalloc(sizeof(struct task_smack), gfp); + if (tsp == NULL) + return NULL; + + tsp->smk_task = task; + tsp->smk_forked = forked; + INIT_LIST_HEAD(&tsp->smk_rules); + mutex_init(&tsp->smk_rules_lock); + + return tsp; +} + +/** + * smk_copy_rules - copy a rule set + * @nhead - new rules header pointer + * @ohead - old rules header pointer + * + * Returns 0 on success, -ENOMEM on error + */ +static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead, + gfp_t gfp) +{ + struct smack_rule *nrp; + struct smack_rule *orp; + int rc = 0; + + INIT_LIST_HEAD(nhead); + + list_for_each_entry_rcu(orp, ohead, list) { + nrp = kzalloc(sizeof(struct smack_rule), gfp); + if (nrp == NULL) { + rc = -ENOMEM; + break; + } + *nrp = *orp; + list_add_rcu(&nrp->list, nhead); + } + return rc; +} + +/** + * smk_ptrace_mode - helper function for converting PTRACE_MODE_* into MAY_* + * @mode - input mode in form of PTRACE_MODE_* + * + * Returns a converted MAY_* mode usable by smack rules + */ +static inline unsigned int smk_ptrace_mode(unsigned int mode) +{ + switch (mode) { + case PTRACE_MODE_READ: + return MAY_READ; + case PTRACE_MODE_ATTACH: + return MAY_READWRITE; + } + + return 0; +} + +/** + * smk_ptrace_rule_check - helper for ptrace access + * @tracer: tracer process + * @tracee_label: label of the process that's about to be traced, + * the pointer must originate from smack structures + * @mode: ptrace attachment mode (PTRACE_MODE_*) + * @func: name of the function that called us, used for audit + * + * Returns 0 on access granted, -error on error + */ +static int smk_ptrace_rule_check(struct task_struct *tracer, char *tracee_label, + unsigned int mode, const char *func) +{ + int rc; + struct smk_audit_info ad, *saip = NULL; + struct task_smack *tsp; + struct smack_known *skp; + + if ((mode & PTRACE_MODE_NOAUDIT) == 0) { + smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK); + smk_ad_setfield_u_tsk(&ad, tracer); + saip = &ad; + } + + tsp = task_security(tracer); + skp = smk_of_task(tsp); + + if ((mode & PTRACE_MODE_ATTACH) && + (smack_ptrace_rule == SMACK_PTRACE_EXACT || + smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)) { + if (skp->smk_known == tracee_label) + rc = 0; + else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN) + rc = -EACCES; 
+ else if (capable(CAP_SYS_PTRACE)) + rc = 0; + else + rc = -EACCES; + + if (saip) + smack_log(skp->smk_known, tracee_label, 0, rc, saip); + + return rc; + } + + /* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */ + rc = smk_tskacc(tsp, tracee_label, smk_ptrace_mode(mode), saip); + return rc; +} + /* * LSM hooks. * We he, that is fun! @@ -87,33 +233,24 @@ struct inode_smack *new_inode_smack(char *smack) /** * smack_ptrace_access_check - Smack approval on PTRACE_ATTACH * @ctp: child task pointer - * @mode: ptrace attachment mode + * @mode: ptrace attachment mode (PTRACE_MODE_*) * * Returns 0 if access is OK, an error code otherwise * - * Do the capability checks, and require read and write. + * Do the capability checks. */ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode) { int rc; - struct smk_audit_info ad; - char *sp, *tsp; + struct smack_known *skp; rc = cap_ptrace_access_check(ctp, mode); if (rc != 0) return rc; - sp = current_security(); - tsp = task_security(ctp); - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); - smk_ad_setfield_u_tsk(&ad, ctp); + skp = smk_of_task(task_security(ctp)); - /* we won't log here, because rc can be overriden */ - rc = smk_access(sp, tsp, MAY_READWRITE, NULL); - if (rc != 0 && capable(CAP_MAC_OVERRIDE)) - rc = 0; - - smack_log(sp, tsp, MAY_READWRITE, rc, &ad); + rc = smk_ptrace_rule_check(current, skp->smk_known, mode, __func__); return rc; } @@ -123,29 +260,21 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode) * * Returns 0 if access is OK, an error code otherwise * - * Do the capability checks, and require read and write. + * Do the capability checks, and require PTRACE_MODE_ATTACH. */ static int smack_ptrace_traceme(struct task_struct *ptp) { int rc; - struct smk_audit_info ad; - char *sp, *tsp; + struct smack_known *skp; rc = cap_ptrace_traceme(ptp); if (rc != 0) return rc; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); - smk_ad_setfield_u_tsk(&ad, ptp); - - sp = current_security(); - tsp = task_security(ptp); - /* we won't log here, because rc can be overriden */ - rc = smk_access(tsp, sp, MAY_READWRITE, NULL); - if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE)) - rc = 0; + skp = smk_of_task(current_security()); - smack_log(tsp, sp, MAY_READWRITE, rc, &ad); + rc = smk_ptrace_rule_check(ptp, skp->smk_known, + PTRACE_MODE_ATTACH, __func__); return rc; } @@ -153,23 +282,17 @@ static int smack_ptrace_traceme(struct task_struct *ptp) * smack_syslog - Smack approval on syslog * @type: message type * - * Require that the task has the floor label - * * Returns 0 on success, error code otherwise. */ -static int smack_syslog(int type, bool from_file) +static int smack_syslog(int typefrom_file) { - int rc; - char *sp = current_security(); - - rc = cap_syslog(type, from_file); - if (rc != 0) - return rc; + int rc = 0; + struct smack_known *skp = smk_of_current(); - if (capable(CAP_MAC_OVERRIDE)) + if (smack_privileged(CAP_MAC_OVERRIDE)) return 0; - if (sp != smack_known_floor.smk_known) + if (smack_syslog_label != NULL && smack_syslog_label != skp) rc = -EACCES; return rc; @@ -199,9 +322,9 @@ static int smack_sb_alloc_security(struct super_block *sb) sbsp->smk_default = smack_known_floor.smk_known; sbsp->smk_floor = smack_known_floor.smk_known; sbsp->smk_hat = smack_known_hat.smk_known; - sbsp->smk_initialized = 0; - spin_lock_init(&sbsp->smk_sblock); - + /* + * smk_initialized will be zero from kzalloc. 
+ */ sb->s_security = sbsp; return 0; @@ -245,6 +368,8 @@ static int smack_sb_copy_data(char *orig, char *smackopts) dp = smackopts; else if (strstr(cp, SMK_FSROOT) == cp) dp = smackopts; + else if (strstr(cp, SMK_FSTRANS) == cp) + dp = smackopts; else dp = otheropts; @@ -277,17 +402,17 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data) struct inode *inode = root->d_inode; struct superblock_smack *sp = sb->s_security; struct inode_smack *isp; + struct smack_known *skp; char *op; char *commap; char *nsp; + int transmute = 0; + int specified = 0; - spin_lock(&sp->smk_sblock); - if (sp->smk_initialized != 0) { - spin_unlock(&sp->smk_sblock); + if (sp->smk_initialized) return 0; - } + sp->smk_initialized = 1; - spin_unlock(&sp->smk_sblock); for (op = data; op != NULL; op = commap) { commap = strchr(op, ','); @@ -297,36 +422,71 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data) if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) { op += strlen(SMK_FSHAT); nsp = smk_import(op, 0); - if (nsp != NULL) + if (nsp != NULL) { sp->smk_hat = nsp; + specified = 1; + } } else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) { op += strlen(SMK_FSFLOOR); nsp = smk_import(op, 0); - if (nsp != NULL) + if (nsp != NULL) { sp->smk_floor = nsp; + specified = 1; + } } else if (strncmp(op, SMK_FSDEFAULT, strlen(SMK_FSDEFAULT)) == 0) { op += strlen(SMK_FSDEFAULT); nsp = smk_import(op, 0); - if (nsp != NULL) + if (nsp != NULL) { sp->smk_default = nsp; + specified = 1; + } } else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) { op += strlen(SMK_FSROOT); nsp = smk_import(op, 0); - if (nsp != NULL) + if (nsp != NULL) { sp->smk_root = nsp; + specified = 1; + } + } else if (strncmp(op, SMK_FSTRANS, strlen(SMK_FSTRANS)) == 0) { + op += strlen(SMK_FSTRANS); + nsp = smk_import(op, 0); + if (nsp != NULL) { + sp->smk_root = nsp; + transmute = 1; + specified = 1; + } } } + if (!smack_privileged(CAP_MAC_ADMIN)) { + /* + * Unprivileged mounts don't get to specify Smack values. + */ + if (specified) + return -EPERM; + /* + * Unprivileged mounts get root and default from the caller. + */ + skp = smk_of_current(); + sp->smk_root = skp->smk_known; + sp->smk_default = skp->smk_known; + } /* * Initialize the root inode. */ isp = inode->i_security; - if (isp == NULL) - inode->i_security = new_inode_smack(sp->smk_root); - else + if (isp == NULL) { + isp = new_inode_smack(sp->smk_root); + if (isp == NULL) + return -ENOMEM; + inode->i_security = isp; + } else isp->smk_inode = sp->smk_root; + if (transmute) + isp->smk_flags |= SMK_INODE_TRANSMUTE; + return 0; } @@ -343,55 +503,94 @@ static int smack_sb_statfs(struct dentry *dentry) int rc; struct smk_audit_info ad; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); rc = smk_curacc(sbp->smk_floor, MAY_READ, &ad); return rc; } +/* + * BPRM hooks + */ + /** - * smack_sb_mount - Smack check for mounting - * @dev_name: unused - * @path: mount point - * @type: unused - * @flags: unused - * @data: unused + * smack_bprm_set_creds - set creds for exec + * @bprm: the exec information * - * Returns 0 if current can write the floor of the filesystem - * being mounted on, an error code otherwise. 
+ * Returns 0 if it gets a blob, -EPERM if exec forbidden and -ENOMEM otherwise */ -static int smack_sb_mount(char *dev_name, struct path *path, - char *type, unsigned long flags, void *data) +static int smack_bprm_set_creds(struct linux_binprm *bprm) { - struct superblock_smack *sbp = path->mnt->mnt_sb->s_security; - struct smk_audit_info ad; + struct inode *inode = file_inode(bprm->file); + struct task_smack *bsp = bprm->cred->security; + struct inode_smack *isp; + int rc; + + rc = cap_bprm_set_creds(bprm); + if (rc != 0) + return rc; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); - smk_ad_setfield_u_fs_path(&ad, *path); + if (bprm->cred_prepared) + return 0; - return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad); + isp = inode->i_security; + if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task) + return 0; + + if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) { + struct task_struct *tracer; + rc = 0; + + rcu_read_lock(); + tracer = ptrace_parent(current); + if (likely(tracer != NULL)) + rc = smk_ptrace_rule_check(tracer, + isp->smk_task->smk_known, + PTRACE_MODE_ATTACH, + __func__); + rcu_read_unlock(); + + if (rc != 0) + return rc; + } else if (bprm->unsafe) + return -EPERM; + + bsp->smk_task = isp->smk_task; + bprm->per_clear |= PER_CLEAR_ON_SETID; + + return 0; } /** - * smack_sb_umount - Smack check for unmounting - * @mnt: file system to unmount - * @flags: unused + * smack_bprm_committing_creds - Prepare to install the new credentials + * from bprm. * - * Returns 0 if current can write the floor of the filesystem - * being unmounted, an error code otherwise. + * @bprm: binprm for exec */ -static int smack_sb_umount(struct vfsmount *mnt, int flags) +static void smack_bprm_committing_creds(struct linux_binprm *bprm) { - struct superblock_smack *sbp; - struct smk_audit_info ad; + struct task_smack *bsp = bprm->cred->security; + + if (bsp->smk_task != bsp->smk_forked) + current->pdeath_signal = 0; +} - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); - smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_root); - smk_ad_setfield_u_fs_path_mnt(&ad, mnt); +/** + * smack_bprm_secureexec - Return the decision to use secureexec. + * @bprm: binprm for exec + * + * Returns 0 on success. 
+ */ +static int smack_bprm_secureexec(struct linux_binprm *bprm) +{ + struct task_smack *tsp = current_security(); + int ret = cap_bprm_secureexec(bprm); - sbp = mnt->mnt_sb->s_security; - return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad); + if (!ret && (tsp->smk_task != tsp->smk_forked)) + ret = 1; + + return ret; } /* @@ -406,7 +605,9 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags) */ static int smack_inode_alloc_security(struct inode *inode) { - inode->i_security = new_inode_smack(current_security()); + struct smack_known *skp = smk_of_current(); + + inode->i_security = new_inode_smack(skp->smk_known); if (inode->i_security == NULL) return -ENOMEM; return 0; @@ -428,6 +629,7 @@ static void smack_inode_free_security(struct inode *inode) * smack_inode_init_security - copy out the smack from an inode * @inode: the inode * @dir: unused + * @qstr: unused * @name: where to put the attribute name * @value: where to put the attribute value * @len: where to put the length of the attribute @@ -435,18 +637,36 @@ static void smack_inode_free_security(struct inode *inode) * Returns 0 if it all works out, -ENOMEM if there's no memory */ static int smack_inode_init_security(struct inode *inode, struct inode *dir, - char **name, void **value, size_t *len) + const struct qstr *qstr, const char **name, + void **value, size_t *len) { + struct inode_smack *issp = inode->i_security; + struct smack_known *skp = smk_of_current(); char *isp = smk_of_inode(inode); + char *dsp = smk_of_inode(dir); + int may; - if (name) { - *name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL); - if (*name == NULL) - return -ENOMEM; - } + if (name) + *name = XATTR_SMACK_SUFFIX; if (value) { - *value = kstrdup(isp, GFP_KERNEL); + rcu_read_lock(); + may = smk_access_entry(skp->smk_known, dsp, &skp->smk_rules); + rcu_read_unlock(); + + /* + * If the access rule allows transmutation and + * the directory requests transmutation then + * by all means transmute. + * Mark the inode as changed. 
+ */ + if (may > 0 && ((may & MAY_TRANSMUTE) != 0) && + smk_inode_transmutable(dir)) { + isp = dsp; + issp->smk_flags |= SMK_INODE_CHANGED; + } + + *value = kstrdup(isp, GFP_NOFS); if (*value == NULL) return -ENOMEM; } @@ -472,7 +692,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir, struct smk_audit_info ad; int rc; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry); isp = smk_of_inode(old_dentry->d_inode); @@ -501,7 +721,7 @@ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry) struct smk_audit_info ad; int rc; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); /* @@ -512,7 +732,7 @@ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry) /* * You also need write access to the containing directory */ - smk_ad_setfield_u_fs_path_dentry(&ad, NULL); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE); smk_ad_setfield_u_fs_inode(&ad, dir); rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad); } @@ -532,7 +752,7 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry) struct smk_audit_info ad; int rc; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); /* @@ -543,7 +763,7 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry) /* * You also need write access to the containing directory */ - smk_ad_setfield_u_fs_path_dentry(&ad, NULL); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE); smk_ad_setfield_u_fs_inode(&ad, dir); rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad); } @@ -572,7 +792,7 @@ static int smack_inode_rename(struct inode *old_inode, char *isp; struct smk_audit_info ad; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry); isp = smk_of_inode(old_dentry->d_inode); @@ -598,6 +818,7 @@ static int smack_inode_rename(struct inode *old_inode, static int smack_inode_permission(struct inode *inode, int mask) { struct smk_audit_info ad; + int no_block = mask & MAY_NOT_BLOCK; mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND); /* @@ -605,7 +826,11 @@ static int smack_inode_permission(struct inode *inode, int mask) */ if (mask == 0) return 0; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + + /* May be droppable after audit */ + if (no_block) + return -ECHILD; + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE); smk_ad_setfield_u_fs_inode(&ad, inode); return smk_curacc(smk_of_inode(inode), mask, &ad); } @@ -625,7 +850,7 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr) */ if (iattr->ia_valid & ATTR_FORCE) return 0; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad); @@ -641,10 +866,13 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr) static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) { struct smk_audit_info ad; + struct path path; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); - smk_ad_setfield_u_fs_path_dentry(&ad, dentry); - smk_ad_setfield_u_fs_path_mnt(&ad, mnt); + path.dentry = dentry; + path.mnt = mnt; + + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); + 
smk_ad_setfield_u_fs_path(&ad, path); return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad); } @@ -664,24 +892,44 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { struct smk_audit_info ad; + struct smack_known *skp; + int check_priv = 0; + int check_import = 0; + int check_star = 0; int rc = 0; + /* + * Check label validity here so import won't fail in post_setxattr + */ if (strcmp(name, XATTR_NAME_SMACK) == 0 || strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { - if (!capable(CAP_MAC_ADMIN)) - rc = -EPERM; - /* - * check label validity here so import wont fail on - * post_setxattr - */ - if (size == 0 || size >= SMK_LABELLEN || - smk_import(value, size) == NULL) + check_priv = 1; + check_import = 1; + } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0 || + strcmp(name, XATTR_NAME_SMACKMMAP) == 0) { + check_priv = 1; + check_import = 1; + check_star = 1; + } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) { + check_priv = 1; + if (size != TRANS_TRUE_SIZE || + strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0) rc = -EINVAL; } else rc = cap_inode_setxattr(dentry, name, value, size, flags); - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + if (check_priv && !smack_privileged(CAP_MAC_ADMIN)) + rc = -EPERM; + + if (rc == 0 && check_import) { + skp = smk_import_entry(value, size); + if (skp == NULL || (check_star && + (skp == &smack_known_star || skp == &smack_known_web))) + rc = -EINVAL; + } + + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); if (rc == 0) @@ -704,31 +952,38 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name, static void smack_inode_post_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { - struct inode_smack *isp; - char *nsp; + struct smack_known *skp; + struct inode_smack *isp = dentry->d_inode->i_security; - /* - * Not SMACK - */ - if (strcmp(name, XATTR_NAME_SMACK)) + if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) { + isp->smk_flags |= SMK_INODE_TRANSMUTE; return; + } - isp = dentry->d_inode->i_security; - - /* - * No locking is done here. This is a pointer - * assignment. 
- */ - nsp = smk_import(value, size); - if (nsp != NULL) - isp->smk_inode = nsp; - else - isp->smk_inode = smack_known_invalid.smk_known; + if (strcmp(name, XATTR_NAME_SMACK) == 0) { + skp = smk_import_entry(value, size); + if (skp != NULL) + isp->smk_inode = skp->smk_known; + else + isp->smk_inode = smack_known_invalid.smk_known; + } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) { + skp = smk_import_entry(value, size); + if (skp != NULL) + isp->smk_task = skp; + else + isp->smk_task = &smack_known_invalid; + } else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) { + skp = smk_import_entry(value, size); + if (skp != NULL) + isp->smk_mmap = skp; + else + isp->smk_mmap = &smack_known_invalid; + } return; } -/* +/** * smack_inode_getxattr - Smack check on getxattr * @dentry: the object * @name: unused @@ -739,13 +994,13 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name) { struct smk_audit_info ad; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad); } -/* +/** * smack_inode_removexattr - Smack check on removexattr * @dentry: the object * @name: name of the attribute @@ -756,23 +1011,46 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name) */ static int smack_inode_removexattr(struct dentry *dentry, const char *name) { + struct inode_smack *isp; struct smk_audit_info ad; int rc = 0; if (strcmp(name, XATTR_NAME_SMACK) == 0 || strcmp(name, XATTR_NAME_SMACKIPIN) == 0 || - strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) { - if (!capable(CAP_MAC_ADMIN)) + strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 || + strcmp(name, XATTR_NAME_SMACKEXEC) == 0 || + strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 || + strcmp(name, XATTR_NAME_SMACKMMAP) == 0) { + if (!smack_privileged(CAP_MAC_ADMIN)) rc = -EPERM; } else rc = cap_inode_removexattr(dentry, name); - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + if (rc != 0) + return rc; + + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY); smk_ad_setfield_u_fs_path_dentry(&ad, dentry); - if (rc == 0) - rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad); - return rc; + rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad); + if (rc != 0) + return rc; + + isp = dentry->d_inode->i_security; + /* + * Don't do anything special for these. 
+ * XATTR_NAME_SMACKIPIN + * XATTR_NAME_SMACKIPOUT + * XATTR_NAME_SMACKEXEC + */ + if (strcmp(name, XATTR_NAME_SMACK) == 0) + isp->smk_task = NULL; + else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) + isp->smk_mmap = NULL; + else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) + isp->smk_flags &= ~SMK_INODE_TRANSMUTE; + + return 0; } /** @@ -817,9 +1095,9 @@ static int smack_inode_getsecurity(const struct inode *inode, ssp = sock->sk->sk_security; if (strcmp(name, XATTR_SMACK_IPIN) == 0) - isp = ssp->smk_in; + isp = ssp->smk_in->smk_known; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) - isp = ssp->smk_out; + isp = ssp->smk_out->smk_known; else return -EOPNOTSUPP; @@ -899,7 +1177,9 @@ static int smack_file_permission(struct file *file, int mask) */ static int smack_file_alloc_security(struct file *file) { - file->f_security = current_security(); + struct smack_known *skp = smk_of_current(); + + file->f_security = skp->smk_known; return 0; } @@ -931,7 +1211,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd, int rc = 0; struct smk_audit_info ad; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); if (_IOC_DIR(cmd) & _IOC_WRITE) @@ -948,15 +1228,15 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd, * @file: the object * @cmd: unused * - * Returns 0 if current has write access, error code otherwise + * Returns 0 if current has lock access, error code otherwise */ static int smack_file_lock(struct file *file, unsigned int cmd) { struct smk_audit_info ad; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); - smk_ad_setfield_u_fs_path_dentry(&ad, file->f_path.dentry); - return smk_curacc(file->f_security, MAY_WRITE, &ad); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); + smk_ad_setfield_u_fs_path(&ad, file->f_path); + return smk_curacc(file->f_security, MAY_LOCK, &ad); } /** @@ -965,38 +1245,141 @@ static int smack_file_lock(struct file *file, unsigned int cmd) * @cmd: what action to check * @arg: unused * + * Generally these operations are harmless. + * File locking operations present an obvious mechanism + * for passing information, so they require write access. + * * Returns 0 if current has access, error code otherwise */ static int smack_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { struct smk_audit_info ad; - int rc; + int rc = 0; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS); - smk_ad_setfield_u_fs_path(&ad, file->f_path); switch (cmd) { - case F_DUPFD: - case F_GETFD: - case F_GETFL: case F_GETLK: - case F_GETOWN: - case F_GETSIG: - rc = smk_curacc(file->f_security, MAY_READ, &ad); break; - case F_SETFD: - case F_SETFL: case F_SETLK: case F_SETLKW: + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); + smk_ad_setfield_u_fs_path(&ad, file->f_path); + rc = smk_curacc(file->f_security, MAY_LOCK, &ad); + break; case F_SETOWN: case F_SETSIG: + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); + smk_ad_setfield_u_fs_path(&ad, file->f_path); rc = smk_curacc(file->f_security, MAY_WRITE, &ad); break; default: - rc = smk_curacc(file->f_security, MAY_READWRITE, &ad); + break; + } + + return rc; +} + +/** + * smack_mmap_file : + * Check permissions for a mmap operation. The @file may be NULL, e.g. + * if mapping anonymous memory. + * @file contains the file structure for file to map (may be NULL). + * @reqprot contains the protection requested by the application. + * @prot contains the protection that will be applied by the kernel. 
+ * @flags contains the operational flags. + * Return 0 if permission is granted. + */ +static int smack_mmap_file(struct file *file, + unsigned long reqprot, unsigned long prot, + unsigned long flags) +{ + struct smack_known *skp; + struct smack_known *mkp; + struct smack_rule *srp; + struct task_smack *tsp; + char *osmack; + struct inode_smack *isp; + int may; + int mmay; + int tmay; + int rc; + + if (file == NULL) + return 0; + + isp = file_inode(file)->i_security; + if (isp->smk_mmap == NULL) + return 0; + mkp = isp->smk_mmap; + + tsp = current_security(); + skp = smk_of_current(); + rc = 0; + + rcu_read_lock(); + /* + * For each Smack rule associated with the subject + * label verify that the SMACK64MMAP also has access + * to that rule's object label. + */ + list_for_each_entry_rcu(srp, &skp->smk_rules, list) { + osmack = srp->smk_object; + /* + * Matching labels always allows access. + */ + if (mkp->smk_known == osmack) + continue; + /* + * If there is a matching local rule take + * that into account as well. + */ + may = smk_access_entry(srp->smk_subject->smk_known, osmack, + &tsp->smk_rules); + if (may == -ENOENT) + may = srp->smk_access; + else + may &= srp->smk_access; + /* + * If may is zero the SMACK64MMAP subject can't + * possibly have less access. + */ + if (may == 0) + continue; + + /* + * Fetch the global list entry. + * If there isn't one a SMACK64MMAP subject + * can't have as much access as current. + */ + mmay = smk_access_entry(mkp->smk_known, osmack, + &mkp->smk_rules); + if (mmay == -ENOENT) { + rc = -EACCES; + break; + } + /* + * If there is a local entry it modifies the + * potential access, too. + */ + tmay = smk_access_entry(mkp->smk_known, osmack, + &tsp->smk_rules); + if (tmay != -ENOENT) + mmay &= tmay; + + /* + * If there is any access available to current that is + * not available to a SMACK64MMAP subject + * deny access. + */ + if ((may | mmay) != mmay) { + rc = -EACCES; + break; + } } + rcu_read_unlock(); + return rc; } @@ -1009,7 +1392,9 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd, */ static int smack_file_set_fowner(struct file *file) { - file->f_security = current_security(); + struct smack_known *skp = smk_of_current(); + + file->f_security = skp->smk_known; return 0; } @@ -1027,23 +1412,26 @@ static int smack_file_set_fowner(struct file *file) static int smack_file_send_sigiotask(struct task_struct *tsk, struct fown_struct *fown, int signum) { + struct smack_known *skp; + struct smack_known *tkp = smk_of_task(tsk->cred->security); struct file *file; int rc; - char *tsp = tsk->cred->security; struct smk_audit_info ad; /* * struct fown_struct is never outside the context of a struct file */ file = container_of(fown, struct file, f_owner); + /* we don't log here as rc can be overriden */ - rc = smk_access(file->f_security, tsp, MAY_WRITE, NULL); + skp = smk_find_entry(file->f_security); + rc = smk_access(skp, tkp->smk_known, MAY_WRITE, NULL); if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE)) rc = 0; smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, tsk); - smack_log(file->f_security, tsp, MAY_WRITE, rc, &ad); + smack_log(file->f_security, tkp->smk_known, MAY_WRITE, rc, &ad); return rc; } @@ -1058,7 +1446,7 @@ static int smack_file_receive(struct file *file) int may = 0; struct smk_audit_info ad; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); smk_ad_setfield_u_fs_path(&ad, file->f_path); /* * This code relies on bitmasks. 
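The new smack_mmap_file() hook in the hunk above walks the subject's rule list and, for each object label, compares the access available to the current task (may) with the access available to the SMACK64MMAP label (mmay); the test (may | mmay) != mmay is true exactly when may holds a permission bit that mmay lacks, which is when the map is denied. Below is a minimal userspace sketch of that subset check, using illustrative MAY_* values rather than the kernel's own definitions; it is not part of the patch.

#include <stdio.h>

/* Illustrative permission bits; the kernel's MAY_* values differ. */
#define MAY_READ	0x1
#define MAY_WRITE	0x2
#define MAY_EXEC	0x4

/*
 * Returns 1 when "may" grants something "mmay" does not,
 * the same condition smack_mmap_file() uses to refuse the mapping.
 */
static int exceeds(int may, int mmay)
{
	return (may | mmay) != mmay;
}

int main(void)
{
	/* Current task may read and write, SMACK64MMAP label may only read: deny. */
	printf("%d\n", exceeds(MAY_READ | MAY_WRITE, MAY_READ));	/* prints 1 */

	/* SMACK64MMAP label has at least as much access as the task: allow. */
	printf("%d\n", exceeds(MAY_READ, MAY_READ | MAY_WRITE));	/* prints 0 */

	return 0;
}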
@@ -1071,6 +1459,37 @@ static int smack_file_receive(struct file *file) return smk_curacc(file->f_security, may, &ad); } +/** + * smack_file_open - Smack dentry open processing + * @file: the object + * @cred: task credential + * + * Set the security blob in the file structure. + * Allow the open only if the task has read access. There are + * many read operations (e.g. fstat) that you can do with an + * fd even if you have the file open write-only. + * + * Returns 0 + */ +static int smack_file_open(struct file *file, const struct cred *cred) +{ + struct task_smack *tsp = cred->security; + struct inode_smack *isp = file_inode(file)->i_security; + struct smk_audit_info ad; + int rc; + + if (smack_privileged(CAP_MAC_OVERRIDE)) + return 0; + + smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH); + smk_ad_setfield_u_fs_path(&ad, file->f_path); + rc = smk_access(tsp->smk_task, isp->smk_inode, MAY_READ, &ad); + if (rc == 0) + file->f_security = isp->smk_inode; + + return rc; +} + /* * Task hooks */ @@ -1086,7 +1505,14 @@ static int smack_file_receive(struct file *file) */ static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp) { - cred->security = NULL; + struct task_smack *tsp; + + tsp = new_task_smack(NULL, NULL, gfp); + if (tsp == NULL) + return -ENOMEM; + + cred->security = tsp; + return 0; } @@ -1095,13 +1521,24 @@ static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp) * smack_cred_free - "free" task-level security credentials * @cred: the credentials in question * - * Smack isn't using copies of blobs. Everyone - * points to an immutable list. The blobs never go away. - * There is no leak here. */ static void smack_cred_free(struct cred *cred) { + struct task_smack *tsp = cred->security; + struct smack_rule *rp; + struct list_head *l; + struct list_head *n; + + if (tsp == NULL) + return; cred->security = NULL; + + list_for_each_safe(l, n, &tsp->smk_rules) { + rp = list_entry(l, struct smack_rule, list); + list_del(&rp->list); + kfree(rp); + } + kfree(tsp); } /** @@ -1115,7 +1552,19 @@ static void smack_cred_free(struct cred *cred) static int smack_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { - new->security = old->security; + struct task_smack *old_tsp = old->security; + struct task_smack *new_tsp; + int rc; + + new_tsp = new_task_smack(old_tsp->smk_task, old_tsp->smk_task, gfp); + if (new_tsp == NULL) + return -ENOMEM; + + rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp); + if (rc != 0) + return rc; + + new->security = new_tsp; return 0; } @@ -1128,7 +1577,16 @@ static int smack_cred_prepare(struct cred *new, const struct cred *old, */ static void smack_cred_transfer(struct cred *new, const struct cred *old) { - new->security = old->security; + struct task_smack *old_tsp = old->security; + struct task_smack *new_tsp = new->security; + + new_tsp->smk_task = old_tsp->smk_task; + new_tsp->smk_forked = old_tsp->smk_task; + mutex_init(&new_tsp->smk_rules_lock); + INIT_LIST_HEAD(&new_tsp->smk_rules); + + + /* cbs copy rule list */ } /** @@ -1140,12 +1598,13 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old) */ static int smack_kernel_act_as(struct cred *new, u32 secid) { - char *smack = smack_from_secid(secid); + struct task_smack *new_tsp = new->security; + struct smack_known *skp = smack_from_secid(secid); - if (smack == NULL) + if (skp == NULL) return -EINVAL; - new->security = smack; + new_tsp->smk_task = skp; return 0; } @@ -1161,25 +1620,30 @@ static int smack_kernel_create_files_as(struct cred *new, struct 
inode *inode) { struct inode_smack *isp = inode->i_security; + struct task_smack *tsp = new->security; - new->security = isp->smk_inode; + tsp->smk_forked = smk_find_entry(isp->smk_inode); + tsp->smk_task = tsp->smk_forked; return 0; } /** * smk_curacc_on_task - helper to log task related access * @p: the task object - * @access : the access requested + * @access: the access requested + * @caller: name of the calling function for audit * * Return 0 if access is permitted */ -static int smk_curacc_on_task(struct task_struct *p, int access) +static int smk_curacc_on_task(struct task_struct *p, int access, + const char *caller) { struct smk_audit_info ad; + struct smack_known *skp = smk_of_task(task_security(p)); - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); + smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, p); - return smk_curacc(task_security(p), access, &ad); + return smk_curacc(skp->smk_known, access, &ad); } /** @@ -1191,7 +1655,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access) */ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) { - return smk_curacc_on_task(p, MAY_WRITE); + return smk_curacc_on_task(p, MAY_WRITE, __func__); } /** @@ -1202,7 +1666,7 @@ static int smack_task_setpgid(struct task_struct *p, pid_t pgid) */ static int smack_task_getpgid(struct task_struct *p) { - return smk_curacc_on_task(p, MAY_READ); + return smk_curacc_on_task(p, MAY_READ, __func__); } /** @@ -1213,7 +1677,7 @@ static int smack_task_getpgid(struct task_struct *p) */ static int smack_task_getsid(struct task_struct *p) { - return smk_curacc_on_task(p, MAY_READ); + return smk_curacc_on_task(p, MAY_READ, __func__); } /** @@ -1225,7 +1689,9 @@ static int smack_task_getsid(struct task_struct *p) */ static void smack_task_getsecid(struct task_struct *p, u32 *secid) { - *secid = smack_to_secid(task_security(p)); + struct smack_known *skp = smk_of_task(task_security(p)); + + *secid = skp->smk_secid; } /** @@ -1241,7 +1707,7 @@ static int smack_task_setnice(struct task_struct *p, int nice) rc = cap_task_setnice(p, nice); if (rc == 0) - rc = smk_curacc_on_task(p, MAY_WRITE); + rc = smk_curacc_on_task(p, MAY_WRITE, __func__); return rc; } @@ -1258,7 +1724,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio) rc = cap_task_setioprio(p, ioprio); if (rc == 0) - rc = smk_curacc_on_task(p, MAY_WRITE); + rc = smk_curacc_on_task(p, MAY_WRITE, __func__); return rc; } @@ -1270,7 +1736,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio) */ static int smack_task_getioprio(struct task_struct *p) { - return smk_curacc_on_task(p, MAY_READ); + return smk_curacc_on_task(p, MAY_READ, __func__); } /** @@ -1287,7 +1753,7 @@ static int smack_task_setscheduler(struct task_struct *p) rc = cap_task_setscheduler(p); if (rc == 0) - rc = smk_curacc_on_task(p, MAY_WRITE); + rc = smk_curacc_on_task(p, MAY_WRITE, __func__); return rc; } @@ -1299,7 +1765,7 @@ static int smack_task_setscheduler(struct task_struct *p) */ static int smack_task_getscheduler(struct task_struct *p) { - return smk_curacc_on_task(p, MAY_READ); + return smk_curacc_on_task(p, MAY_READ, __func__); } /** @@ -1310,7 +1776,7 @@ static int smack_task_getscheduler(struct task_struct *p) */ static int smack_task_movememory(struct task_struct *p) { - return smk_curacc_on_task(p, MAY_WRITE); + return smk_curacc_on_task(p, MAY_WRITE, __func__); } /** @@ -1329,6 +1795,8 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info, int sig, u32 secid) { struct 
smk_audit_info ad; + struct smack_known *skp; + struct smack_known *tkp = smk_of_task(task_security(p)); smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); smk_ad_setfield_u_tsk(&ad, p); @@ -1337,53 +1805,33 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info, * can write the receiver. */ if (secid == 0) - return smk_curacc(task_security(p), MAY_WRITE, &ad); + return smk_curacc(tkp->smk_known, MAY_WRITE, &ad); /* * If the secid isn't 0 we're dealing with some USB IO * specific behavior. This is not clean. For one thing * we can't take privilege into account. */ - return smk_access(smack_from_secid(secid), task_security(p), - MAY_WRITE, &ad); + skp = smack_from_secid(secid); + return smk_access(skp, tkp->smk_known, MAY_WRITE, &ad); } /** * smack_task_wait - Smack access check for waiting * @p: task to wait for * - * Returns 0 if current can wait for p, error code otherwise + * Returns 0 */ static int smack_task_wait(struct task_struct *p) { - struct smk_audit_info ad; - char *sp = current_security(); - char *tsp = task_security(p); - int rc; - - /* we don't log here, we can be overriden */ - rc = smk_access(sp, tsp, MAY_WRITE, NULL); - if (rc == 0) - goto out_log; - /* - * Allow the operation to succeed if either task - * has privilege to perform operations that might - * account for the smack labels having gotten to - * be different in the first place. - * - * This breaks the strict subject/object access - * control ideal, taking the object's privilege - * state into account in the decision as well as - * the smack value. + * Allow the operation to succeed. + * Zombies are bad. + * In userless environments (e.g. phones) programs + * get marked with SMACK64EXEC and even if the parent + * and child shouldn't be talking the parent still + * may expect to know when the child exits. 
*/ - if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE)) - rc = 0; - /* we log only if we didn't get overriden */ - out_log: - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK); - smk_ad_setfield_u_tsk(&ad, p); - smack_log(sp, tsp, MAY_WRITE, rc, &ad); - return rc; + return 0; } /** @@ -1396,7 +1844,9 @@ static int smack_task_wait(struct task_struct *p) static void smack_task_to_inode(struct task_struct *p, struct inode *inode) { struct inode_smack *isp = inode->i_security; - isp->smk_inode = task_security(p); + struct smack_known *skp = smk_of_task(task_security(p)); + + isp->smk_inode = skp->smk_known; } /* @@ -1415,16 +1865,16 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode) */ static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags) { - char *csp = current_security(); + struct smack_known *skp = smk_of_current(); struct socket_smack *ssp; ssp = kzalloc(sizeof(struct socket_smack), gfp_flags); if (ssp == NULL) return -ENOMEM; - ssp->smk_in = csp; - ssp->smk_out = csp; - ssp->smk_packet[0] = '\0'; + ssp->smk_in = skp; + ssp->smk_out = skp; + ssp->smk_packet = NULL; sk->sk_security = ssp; @@ -1480,65 +1930,6 @@ static char *smack_host_label(struct sockaddr_in *sip) } /** - * smack_set_catset - convert a capset to netlabel mls categories - * @catset: the Smack categories - * @sap: where to put the netlabel categories - * - * Allocates and fills attr.mls.cat - */ -static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap) -{ - unsigned char *cp; - unsigned char m; - int cat; - int rc; - int byte; - - if (!catset) - return; - - sap->flags |= NETLBL_SECATTR_MLS_CAT; - sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC); - sap->attr.mls.cat->startbit = 0; - - for (cat = 1, cp = catset, byte = 0; byte < SMK_LABELLEN; cp++, byte++) - for (m = 0x80; m != 0; m >>= 1, cat++) { - if ((m & *cp) == 0) - continue; - rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat, - cat, GFP_ATOMIC); - } -} - -/** - * smack_to_secattr - fill a secattr from a smack value - * @smack: the smack value - * @nlsp: where the result goes - * - * Casey says that CIPSO is good enough for now. - * It can be used to effect. - * It can also be abused to effect when necessary. - * Appologies to the TSIG group in general and GW in particular. 
- */ -static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp) -{ - struct smack_cipso cipso; - int rc; - - nlsp->domain = smack; - nlsp->flags = NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL; - - rc = smack_to_cipso(smack, &cipso); - if (rc == 0) { - nlsp->attr.mls.lvl = cipso.smk_level; - smack_set_catset(cipso.smk_catset, nlsp); - } else { - nlsp->attr.mls.lvl = smack_cipso_direct; - smack_set_catset(smack, nlsp); - } -} - -/** * smack_netlabel - Set the secattr on a socket * @sk: the socket * @labeled: socket label scheme @@ -1550,8 +1941,8 @@ static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp) */ static int smack_netlabel(struct sock *sk, int labeled) { + struct smack_known *skp; struct socket_smack *ssp = sk->sk_security; - struct netlbl_lsm_secattr secattr; int rc = 0; /* @@ -1569,10 +1960,8 @@ static int smack_netlabel(struct sock *sk, int labeled) labeled == SMACK_UNLABELED_SOCKET) netlbl_sock_delattr(sk); else { - netlbl_secattr_init(&secattr); - smack_to_secattr(ssp->smk_out, &secattr); - rc = netlbl_sock_setattr(sk, sk->sk_family, &secattr); - netlbl_secattr_destroy(&secattr); + skp = ssp->smk_out; + rc = netlbl_sock_setattr(sk, sk->sk_family, &skp->smk_netlabel); } bh_unlock_sock(sk); @@ -1594,6 +1983,7 @@ static int smack_netlabel(struct sock *sk, int labeled) */ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap) { + struct smack_known *skp; int rc; int sk_lbl; char *hostsp; @@ -1603,14 +1993,17 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap) rcu_read_lock(); hostsp = smack_host_label(sap); if (hostsp != NULL) { - sk_lbl = SMACK_UNLABELED_SOCKET; #ifdef CONFIG_AUDIT - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); - ad.a.u.net.family = sap->sin_family; - ad.a.u.net.dport = sap->sin_port; - ad.a.u.net.v4info.daddr = sap->sin_addr.s_addr; + struct lsm_network_audit net; + + smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); + ad.a.u.net->family = sap->sin_family; + ad.a.u.net->dport = sap->sin_port; + ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr; #endif - rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad); + sk_lbl = SMACK_UNLABELED_SOCKET; + skp = ssp->smk_out; + rc = smk_access(skp, hostsp, MAY_WRITE, &ad); } else { sk_lbl = SMACK_CIPSO_SOCKET; rc = 0; @@ -1623,6 +2016,153 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap) } /** + * smk_ipv6_port_label - Smack port access table management + * @sock: socket + * @address: address + * + * Create or update the port list entry + */ +static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address) +{ + struct sock *sk = sock->sk; + struct sockaddr_in6 *addr6; + struct socket_smack *ssp = sock->sk->sk_security; + struct smk_port_label *spp; + unsigned short port = 0; + + if (address == NULL) { + /* + * This operation is changing the Smack information + * on the bound socket. Take the changes to the port + * as well. + */ + list_for_each_entry(spp, &smk_ipv6_port_list, list) { + if (sk != spp->smk_sock) + continue; + spp->smk_in = ssp->smk_in; + spp->smk_out = ssp->smk_out; + return; + } + /* + * A NULL address is only used for updating existing + * bound entries. If there isn't one, it's OK. + */ + return; + } + + addr6 = (struct sockaddr_in6 *)address; + port = ntohs(addr6->sin6_port); + /* + * This is a special case that is safely ignored. + */ + if (port == 0) + return; + + /* + * Look for an existing port list entry. + * This is an indication that a port is getting reused. 
+ */ + list_for_each_entry(spp, &smk_ipv6_port_list, list) { + if (spp->smk_port != port) + continue; + spp->smk_port = port; + spp->smk_sock = sk; + spp->smk_in = ssp->smk_in; + spp->smk_out = ssp->smk_out; + return; + } + + /* + * A new port entry is required. + */ + spp = kzalloc(sizeof(*spp), GFP_KERNEL); + if (spp == NULL) + return; + + spp->smk_port = port; + spp->smk_sock = sk; + spp->smk_in = ssp->smk_in; + spp->smk_out = ssp->smk_out; + + list_add(&spp->list, &smk_ipv6_port_list); + return; +} + +/** + * smk_ipv6_port_check - check Smack port access + * @sock: socket + * @address: address + * + * Create or update the port list entry + */ +static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address, + int act) +{ + __be16 *bep; + __be32 *be32p; + struct smk_port_label *spp; + struct socket_smack *ssp = sk->sk_security; + struct smack_known *skp; + unsigned short port = 0; + char *object; + struct smk_audit_info ad; +#ifdef CONFIG_AUDIT + struct lsm_network_audit net; +#endif + + if (act == SMK_RECEIVING) { + skp = smack_net_ambient; + object = ssp->smk_in->smk_known; + } else { + skp = ssp->smk_out; + object = smack_net_ambient->smk_known; + } + + /* + * Get the IP address and port from the address. + */ + port = ntohs(address->sin6_port); + bep = (__be16 *)(&address->sin6_addr); + be32p = (__be32 *)(&address->sin6_addr); + + /* + * It's remote, so port lookup does no good. + */ + if (be32p[0] || be32p[1] || be32p[2] || bep[6] || ntohs(bep[7]) != 1) + goto auditout; + + /* + * It's local so the send check has to have passed. + */ + if (act == SMK_RECEIVING) { + skp = &smack_known_web; + goto auditout; + } + + list_for_each_entry(spp, &smk_ipv6_port_list, list) { + if (spp->smk_port != port) + continue; + object = spp->smk_in->smk_known; + if (act == SMK_CONNECTING) + ssp->smk_packet = spp->smk_out; + break; + } + +auditout: + +#ifdef CONFIG_AUDIT + smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); + ad.a.u.net->family = sk->sk_family; + ad.a.u.net->dport = port; + if (act == SMK_RECEIVING) + ad.a.u.net->v6info.saddr = address->sin6_addr; + else + ad.a.u.net->v6info.daddr = address->sin6_addr; +#endif + return smk_access(skp, object, MAY_WRITE, &ad); +} + +/** * smack_inode_setsecurity - set smack xattrs * @inode: the object * @name: attribute name @@ -1637,21 +2177,21 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap) static int smack_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags) { - char *sp; + struct smack_known *skp; struct inode_smack *nsp = inode->i_security; struct socket_smack *ssp; struct socket *sock; int rc = 0; - if (value == NULL || size > SMK_LABELLEN || size == 0) - return -EACCES; + if (value == NULL || size > SMK_LONGLABEL || size == 0) + return -EINVAL; - sp = smk_import(value, size); - if (sp == NULL) + skp = smk_import_entry(value, size); + if (skp == NULL) return -EINVAL; if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) { - nsp->smk_inode = sp; + nsp->smk_inode = skp->smk_known; nsp->smk_flags |= SMK_INODE_INSTANT; return 0; } @@ -1668,16 +2208,22 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name, ssp = sock->sk->sk_security; if (strcmp(name, XATTR_SMACK_IPIN) == 0) - ssp->smk_in = sp; + ssp->smk_in = skp; else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) { - ssp->smk_out = sp; - rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET); - if (rc != 0) - printk(KERN_WARNING "Smack: \"%s\" netlbl error %d.\n", - __func__, -rc); + ssp->smk_out = skp; + 
if (sock->sk->sk_family == PF_INET) { + rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET); + if (rc != 0) + printk(KERN_WARNING + "Smack: \"%s\" netlbl error %d.\n", + __func__, -rc); + } } else return -EOPNOTSUPP; + if (sock->sk->sk_family == PF_INET6) + smk_ipv6_port_label(sock, NULL); + return 0; } @@ -1705,6 +2251,25 @@ static int smack_socket_post_create(struct socket *sock, int family, } /** + * smack_socket_bind - record port binding information. + * @sock: the socket + * @address: the port address + * @addrlen: size of the address + * + * Records the label bound to a port. + * + * Returns 0 + */ +static int smack_socket_bind(struct socket *sock, struct sockaddr *address, + int addrlen) +{ + if (sock->sk != NULL && sock->sk->sk_family == PF_INET6) + smk_ipv6_port_label(sock, address); + + return 0; +} + +/** * smack_socket_connect - connect access check * @sock: the socket * @sap: the other end @@ -1717,12 +2282,25 @@ static int smack_socket_post_create(struct socket *sock, int family, static int smack_socket_connect(struct socket *sock, struct sockaddr *sap, int addrlen) { - if (sock->sk == NULL || sock->sk->sk_family != PF_INET) + int rc = 0; + + if (sock->sk == NULL) return 0; - if (addrlen < sizeof(struct sockaddr_in)) - return -EINVAL; - return smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap); + switch (sock->sk->sk_family) { + case PF_INET: + if (addrlen < sizeof(struct sockaddr_in)) + return -EINVAL; + rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap); + break; + case PF_INET6: + if (addrlen < sizeof(struct sockaddr_in6)) + return -EINVAL; + rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap, + SMK_CONNECTING); + break; + } + return rc; } /** @@ -1753,7 +2331,9 @@ static int smack_flags_to_may(int flags) */ static int smack_msg_msg_alloc_security(struct msg_msg *msg) { - msg->security = current_security(); + struct smack_known *skp = smk_of_current(); + + msg->security = skp->smk_known; return 0; } @@ -1788,8 +2368,9 @@ static char *smack_of_shm(struct shmid_kernel *shp) static int smack_shm_alloc_security(struct shmid_kernel *shp) { struct kern_ipc_perm *isp = &shp->shm_perm; + struct smack_known *skp = smk_of_current(); - isp->security = current_security(); + isp->security = skp->smk_known; return 0; } @@ -1911,8 +2492,9 @@ static char *smack_of_sem(struct sem_array *sma) static int smack_sem_alloc_security(struct sem_array *sma) { struct kern_ipc_perm *isp = &sma->sem_perm; + struct smack_known *skp = smk_of_current(); - isp->security = current_security(); + isp->security = skp->smk_known; return 0; } @@ -2029,8 +2611,9 @@ static int smack_sem_semop(struct sem_array *sma, struct sembuf *sops, static int smack_msg_queue_alloc_security(struct msg_queue *msq) { struct kern_ipc_perm *kisp = &msq->q_perm; + struct smack_known *skp = smk_of_current(); - kisp->security = current_security(); + kisp->security = skp->smk_known; return 0; } @@ -2202,9 +2785,12 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) struct super_block *sbp; struct superblock_smack *sbsp; struct inode_smack *isp; - char *csp = current_security(); - char *fetched; + struct smack_known *skp; + struct smack_known *ckp = smk_of_current(); char *final; + char trattr[TRANS_TRUE_SIZE]; + int transflag = 0; + int rc; struct dentry *dp; if (inode == NULL) @@ -2235,6 +2821,15 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) * of the superblock. 
*/ if (opt_dentry->d_parent == opt_dentry) { + if (sbp->s_magic == CGROUP_SUPER_MAGIC) { + /* + * The cgroup filesystem is never mounted, + * so there's no opportunity to set the mount + * options. + */ + sbsp->smk_root = smack_known_star.smk_known; + sbsp->smk_default = smack_known_star.smk_known; + } isp->smk_inode = sbsp->smk_root; isp->smk_flags |= SMK_INODE_INSTANT; goto unlockandout; @@ -2248,16 +2843,20 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) */ switch (sbp->s_magic) { case SMACK_MAGIC: + case PIPEFS_MAGIC: + case SOCKFS_MAGIC: + case CGROUP_SUPER_MAGIC: /* - * Casey says that it's a little embarassing + * Casey says that it's a little embarrassing * that the smack file system doesn't do * extended attributes. - */ - final = smack_known_star.smk_known; - break; - case PIPEFS_MAGIC: - /* + * * Casey says pipes are easy (?) + * + * Socket access is controlled by the socket + * structures associated with the task involved. + * + * Cgroupfs is special */ final = smack_known_star.smk_known; break; @@ -2267,13 +2866,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) * Programs that change smack have to treat the * pty with respect. */ - final = csp; - break; - case SOCKFS_MAGIC: - /* - * Casey says sockets get the smack of the task. - */ - final = csp; + final = ckp->smk_known; break; case PROC_SUPER_MAGIC: /* @@ -2300,7 +2893,16 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) /* * This isn't an understood special case. * Get the value from the xattr. - * + */ + + /* + * UNIX domain sockets use lower level socket data. + */ + if (S_ISSOCK(inode->i_mode)) { + final = smack_known_star.smk_known; + break; + } + /* * No xattr support means, alas, no SMACK label. * Use the aforeapplied default. * It would be curious if the label of the task @@ -2312,19 +2914,62 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode) * Get the dentry for xattr. */ dp = dget(opt_dentry); - fetched = smk_fetch(inode, dp); - if (fetched != NULL) - final = fetched; + skp = smk_fetch(XATTR_NAME_SMACK, inode, dp); + if (skp != NULL) + final = skp->smk_known; + + /* + * Transmuting directory + */ + if (S_ISDIR(inode->i_mode)) { + /* + * If this is a new directory and the label was + * transmuted when the inode was initialized + * set the transmute attribute on the directory + * and mark the inode. + * + * If there is a transmute attribute on the + * directory mark the inode. + */ + if (isp->smk_flags & SMK_INODE_CHANGED) { + isp->smk_flags &= ~SMK_INODE_CHANGED; + rc = inode->i_op->setxattr(dp, + XATTR_NAME_SMACKTRANSMUTE, + TRANS_TRUE, TRANS_TRUE_SIZE, + 0); + } else { + rc = inode->i_op->getxattr(dp, + XATTR_NAME_SMACKTRANSMUTE, trattr, + TRANS_TRUE_SIZE); + if (rc >= 0 && strncmp(trattr, TRANS_TRUE, + TRANS_TRUE_SIZE) != 0) + rc = -EINVAL; + } + if (rc >= 0) + transflag = SMK_INODE_TRANSMUTE; + } + /* + * Don't let the exec or mmap label be "*" or "@". 
+ */ + skp = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp); + if (skp == &smack_known_star || skp == &smack_known_web) + skp = NULL; + isp->smk_task = skp; + skp = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp); + if (skp == &smack_known_star || skp == &smack_known_web) + skp = NULL; + isp->smk_mmap = skp; + dput(dp); break; } if (final == NULL) - isp->smk_inode = csp; + isp->smk_inode = ckp->smk_known; else isp->smk_inode = final; - isp->smk_flags |= SMK_INODE_INSTANT; + isp->smk_flags |= (SMK_INODE_INSTANT | transflag); unlockandout: mutex_unlock(&isp->smk_lock); @@ -2343,13 +2988,14 @@ unlockandout: */ static int smack_getprocattr(struct task_struct *p, char *name, char **value) { + struct smack_known *skp = smk_of_task(task_security(p)); char *cp; int slen; if (strcmp(name, "current") != 0) return -EINVAL; - cp = kstrdup(task_security(p), GFP_KERNEL); + cp = kstrdup(skp->smk_known, GFP_KERNEL); if (cp == NULL) return -ENOMEM; @@ -2373,8 +3019,9 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value) static int smack_setprocattr(struct task_struct *p, char *name, void *value, size_t size) { + struct task_smack *tsp; struct cred *new; - char *newsmack; + struct smack_known *skp; /* * Changing another process' Smack value is too dangerous @@ -2383,53 +3030,80 @@ static int smack_setprocattr(struct task_struct *p, char *name, if (p != current) return -EPERM; - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; - if (value == NULL || size == 0 || size >= SMK_LABELLEN) + if (value == NULL || size == 0 || size >= SMK_LONGLABEL) return -EINVAL; if (strcmp(name, "current") != 0) return -EINVAL; - newsmack = smk_import(value, size); - if (newsmack == NULL) + skp = smk_import_entry(value, size); + if (skp == NULL) return -EINVAL; /* * No process is ever allowed the web ("@") label. 
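As a point of reference for the check that follows, a minimal user-space sketch of how this setprocattr hook is normally reached; the label used is purely illustrative, and CAP_MAC_ADMIN is required per the smack_privileged() test above.

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Set the calling process's own Smack label (the "current" attribute). */
    static int set_own_label(const char *label)
    {
        int fd = open("/proc/self/attr/current", O_WRONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = write(fd, label, strlen(label));
        close(fd);
        return n < 0 ? -1 : 0;
    }

    /* set_own_label("Example") succeeds for a privileged caller, while
     * set_own_label("@") is rejected with EPERM by the check below. */
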
*/ - if (newsmack == smack_known_web.smk_known) + if (skp == &smack_known_web) return -EPERM; new = prepare_creds(); if (new == NULL) return -ENOMEM; - new->security = newsmack; + + tsp = new->security; + tsp->smk_task = skp; + commit_creds(new); return size; } /** * smack_unix_stream_connect - Smack access on UDS - * @sock: one socket - * @other: the other socket + * @sock: one sock + * @other: the other sock * @newsk: unused * * Return 0 if a subject with the smack of sock could access * an object with the smack of other, otherwise an error code */ -static int smack_unix_stream_connect(struct socket *sock, - struct socket *other, struct sock *newsk) +static int smack_unix_stream_connect(struct sock *sock, + struct sock *other, struct sock *newsk) { - struct inode *sp = SOCK_INODE(sock); - struct inode *op = SOCK_INODE(other); + struct smack_known *skp; + struct smack_known *okp; + struct socket_smack *ssp = sock->sk_security; + struct socket_smack *osp = other->sk_security; + struct socket_smack *nsp = newsk->sk_security; struct smk_audit_info ad; + int rc = 0; +#ifdef CONFIG_AUDIT + struct lsm_network_audit net; +#endif - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); - smk_ad_setfield_u_net_sk(&ad, other->sk); - return smk_access(smk_of_inode(sp), smk_of_inode(op), - MAY_READWRITE, &ad); + if (!smack_privileged(CAP_MAC_OVERRIDE)) { + skp = ssp->smk_out; + okp = osp->smk_out; +#ifdef CONFIG_AUDIT + smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); + smk_ad_setfield_u_net_sk(&ad, other); +#endif + rc = smk_access(skp, okp->smk_known, MAY_WRITE, &ad); + if (rc == 0) + rc = smk_access(okp, okp->smk_known, MAY_WRITE, NULL); + } + + /* + * Cross reference the peer labels for SO_PEERSEC. + */ + if (rc == 0) { + nsp->smk_packet = ssp->smk_out; + ssp->smk_packet = osp->smk_out; + } + + return rc; } /** @@ -2442,13 +3116,23 @@ static int smack_unix_stream_connect(struct socket *sock, */ static int smack_unix_may_send(struct socket *sock, struct socket *other) { - struct inode *sp = SOCK_INODE(sock); - struct inode *op = SOCK_INODE(other); + struct socket_smack *ssp = sock->sk->sk_security; + struct socket_smack *osp = other->sk->sk_security; + struct smack_known *skp; struct smk_audit_info ad; - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); +#ifdef CONFIG_AUDIT + struct lsm_network_audit net; + + smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); smk_ad_setfield_u_net_sk(&ad, other->sk); - return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_WRITE, &ad); +#endif + + if (smack_privileged(CAP_MAC_OVERRIDE)) + return 0; + + skp = ssp->smk_out; + return smk_access(skp, osp->smk_in->smk_known, MAY_WRITE, &ad); } /** @@ -2457,37 +3141,48 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other) * @msg: the message * @size: the size of the message * - * Return 0 if the current subject can write to the destination - * host. This is only a question if the destination is a single - * label host. + * Return 0 if the current subject can write to the destination host. + * For IPv4 this is only a question if the destination is a single label host. + * For IPv6 this is a check against the label of the port. 
*/ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg, int size) { struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name; + struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name; + int rc = 0; /* * Perfectly reasonable for this to be NULL */ - if (sip == NULL || sip->sin_family != AF_INET) + if (sip == NULL) return 0; - return smack_netlabel_send(sock->sk, sip); + switch (sip->sin_family) { + case AF_INET: + rc = smack_netlabel_send(sock->sk, sip); + break; + case AF_INET6: + rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING); + break; + } + return rc; } - /** * smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack * @sap: netlabel secattr - * @sip: where to put the result + * @ssp: socket security information * - * Copies a smack label into sip + * Returns a pointer to a Smack label entry found on the label list. */ -static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip) +static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap, + struct socket_smack *ssp) { - char smack[SMK_LABELLEN]; - char *sp; - int pcat; + struct smack_known *skp; + int found = 0; + int acat; + int kcat; if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) { /* @@ -2495,40 +3190,52 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip) * If there are flags but no level netlabel isn't * behaving the way we expect it to. * - * Get the categories, if any + * Look it up in the label table * Without guidance regarding the smack value * for the packet fall back on the network * ambient value. */ - memset(smack, '\0', SMK_LABELLEN); - if ((sap->flags & NETLBL_SECATTR_MLS_CAT) != 0) - for (pcat = -1;;) { - pcat = netlbl_secattr_catmap_walk( - sap->attr.mls.cat, pcat + 1); - if (pcat < 0) + rcu_read_lock(); + list_for_each_entry(skp, &smack_known_list, list) { + if (sap->attr.mls.lvl != skp->smk_netlabel.attr.mls.lvl) + continue; + /* + * Compare the catsets. Use the netlbl APIs. + */ + if ((sap->flags & NETLBL_SECATTR_MLS_CAT) == 0) { + if ((skp->smk_netlabel.flags & + NETLBL_SECATTR_MLS_CAT) == 0) + found = 1; + break; + } + for (acat = -1, kcat = -1; acat == kcat; ) { + acat = netlbl_secattr_catmap_walk( + sap->attr.mls.cat, acat + 1); + kcat = netlbl_secattr_catmap_walk( + skp->smk_netlabel.attr.mls.cat, + kcat + 1); + if (acat < 0 || kcat < 0) break; - smack_catset_bit(pcat, smack); } - /* - * If it is CIPSO using smack direct mapping - * we are already done. WeeHee. - */ - if (sap->attr.mls.lvl == smack_cipso_direct) { - memcpy(sip, smack, SMK_MAXLEN); - return; + if (acat == kcat) { + found = 1; + break; + } } - /* - * Look it up in the supplied table if it is not - * a direct mapping. - */ - smack_from_cipso(sap->attr.mls.lvl, smack, sip); - return; + rcu_read_unlock(); + + if (found) + return skp; + + if (ssp != NULL && ssp->smk_in == &smack_known_star) + return &smack_known_web; + return &smack_known_star; } if ((sap->flags & NETLBL_SECATTR_SECID) != 0) { /* * Looks like a fallback, which gives us a secid. */ - sp = smack_from_secid(sap->attr.secid); + skp = smack_from_secid(sap->attr.secid); /* * This has got to be a bug because it is * impossible to specify a fallback without @@ -2536,17 +3243,62 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip) * it has a secid, and the only way to get a * secid is from a fallback. 
*/ - BUG_ON(sp == NULL); - strncpy(sip, sp, SMK_MAXLEN); - return; + BUG_ON(skp == NULL); + return skp; } /* * Without guidance regarding the smack value * for the packet fall back on the network * ambient value. */ - strncpy(sip, smack_net_ambient, SMK_MAXLEN); - return; + return smack_net_ambient; +} + +static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip) +{ + u8 nexthdr; + int offset; + int proto = -EINVAL; + struct ipv6hdr _ipv6h; + struct ipv6hdr *ip6; + __be16 frag_off; + struct tcphdr _tcph, *th; + struct udphdr _udph, *uh; + struct dccp_hdr _dccph, *dh; + + sip->sin6_port = 0; + + offset = skb_network_offset(skb); + ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h); + if (ip6 == NULL) + return -EINVAL; + sip->sin6_addr = ip6->saddr; + + nexthdr = ip6->nexthdr; + offset += sizeof(_ipv6h); + offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off); + if (offset < 0) + return -EINVAL; + + proto = nexthdr; + switch (proto) { + case IPPROTO_TCP: + th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph); + if (th != NULL) + sip->sin6_port = th->source; + break; + case IPPROTO_UDP: + uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph); + if (uh != NULL) + sip->sin6_port = uh->source; + break; + case IPPROTO_DCCP: + dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph); + if (dh != NULL) + sip->sin6_port = dh->dccph_sport; + break; + } + return proto; } /** @@ -2560,42 +3312,52 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) { struct netlbl_lsm_secattr secattr; struct socket_smack *ssp = sk->sk_security; - char smack[SMK_LABELLEN]; - char *csp; - int rc; + struct smack_known *skp; + struct sockaddr_in6 sadd; + int rc = 0; struct smk_audit_info ad; - if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) - return 0; - - /* - * Translate what netlabel gave us. - */ - netlbl_secattr_init(&secattr); +#ifdef CONFIG_AUDIT + struct lsm_network_audit net; +#endif + switch (sk->sk_family) { + case PF_INET: + /* + * Translate what netlabel gave us. + */ + netlbl_secattr_init(&secattr); - rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr); - if (rc == 0) { - smack_from_secattr(&secattr, smack); - csp = smack; - } else - csp = smack_net_ambient; + rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr); + if (rc == 0) + skp = smack_from_secattr(&secattr, ssp); + else + skp = smack_net_ambient; - netlbl_secattr_destroy(&secattr); + netlbl_secattr_destroy(&secattr); #ifdef CONFIG_AUDIT - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); - ad.a.u.net.family = sk->sk_family; - ad.a.u.net.netif = skb->skb_iif; - ipv4_skb_to_auditdata(skb, &ad.a, NULL); + smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); + ad.a.u.net->family = sk->sk_family; + ad.a.u.net->netif = skb->skb_iif; + ipv4_skb_to_auditdata(skb, &ad.a, NULL); #endif - /* - * Receiving a packet requires that the other end - * be able to write here. Read access is not required. - * This is the simplist possible security model - * for networking. - */ - rc = smk_access(csp, ssp->smk_in, MAY_WRITE, &ad); - if (rc != 0) - netlbl_skbuff_err(skb, rc, 0); + /* + * Receiving a packet requires that the other end + * be able to write here. Read access is not required. + * This is the simplist possible security model + * for networking. 
+ */ + rc = smk_access(skp, ssp->smk_in->smk_known, MAY_WRITE, &ad); + if (rc != 0) + netlbl_skbuff_err(skb, rc, 0); + break; + case PF_INET6: + rc = smk_skb_to_addr_ipv6(skb, &sadd); + if (rc == IPPROTO_UDP || rc == IPPROTO_TCP) + rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING); + else + rc = 0; + break; + } return rc; } @@ -2613,15 +3375,19 @@ static int smack_socket_getpeersec_stream(struct socket *sock, int __user *optlen, unsigned len) { struct socket_smack *ssp; - int slen; + char *rcp = ""; + int slen = 1; int rc = 0; ssp = sock->sk->sk_security; - slen = strlen(ssp->smk_packet) + 1; + if (ssp->smk_packet != NULL) { + rcp = ssp->smk_packet->smk_known; + slen = strlen(rcp) + 1; + } if (slen > len) rc = -ERANGE; - else if (copy_to_user(optval, ssp->smk_packet, slen) != 0) + else if (copy_to_user(optval, rcp, slen) != 0) rc = -EFAULT; if (put_user(slen, optlen) != 0) @@ -2633,7 +3399,7 @@ static int smack_socket_getpeersec_stream(struct socket *sock, /** * smack_socket_getpeersec_dgram - pull in packet label - * @sock: the socket + * @sock: the peer socket * @skb: packet data * @secid: pointer to where to put the secid of the packet * @@ -2644,41 +3410,41 @@ static int smack_socket_getpeersec_dgram(struct socket *sock, { struct netlbl_lsm_secattr secattr; - struct sock *sk; - char smack[SMK_LABELLEN]; - int family = PF_INET; - u32 s; + struct socket_smack *ssp = NULL; + struct smack_known *skp; + int family = PF_UNSPEC; + u32 s = 0; /* 0 is the invalid secid */ int rc; - /* - * Only works for families with packets. - */ - if (sock != NULL) { - sk = sock->sk; - if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6) - return 0; - family = sk->sk_family; + if (skb != NULL) { + if (skb->protocol == htons(ETH_P_IP)) + family = PF_INET; + else if (skb->protocol == htons(ETH_P_IPV6)) + family = PF_INET6; } - /* - * Translate what netlabel gave us. - */ - netlbl_secattr_init(&secattr); - rc = netlbl_skbuff_getattr(skb, family, &secattr); - if (rc == 0) - smack_from_secattr(&secattr, smack); - netlbl_secattr_destroy(&secattr); - - /* - * Give up if we couldn't get anything - */ - if (rc != 0) - return rc; + if (family == PF_UNSPEC && sock != NULL) + family = sock->sk->sk_family; - s = smack_to_secid(smack); + if (family == PF_UNIX) { + ssp = sock->sk->sk_security; + s = ssp->smk_out->smk_secid; + } else if (family == PF_INET || family == PF_INET6) { + /* + * Translate what netlabel gave us. 
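An illustrative consumer of the smk_packet value copied out by smack_socket_getpeersec_stream() above; a hedged user-space sketch only. SO_PEERSEC is normally provided by the kernel socket headers, and the fallback value below is the asm-generic one.

    #include <stdio.h>
    #include <sys/socket.h>

    #ifndef SO_PEERSEC
    #define SO_PEERSEC 31          /* asm-generic/socket.h value */
    #endif

    /* fd is a connected AF_UNIX stream socket; error handling trimmed. */
    static void print_peer_label(int fd)
    {
        char label[256];
        socklen_t len = sizeof(label);

        if (getsockopt(fd, SOL_SOCKET, SO_PEERSEC, label, &len) == 0)
            printf("peer label: %s\n", label);
    }
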
+ */ + if (sock != NULL && sock->sk != NULL) + ssp = sock->sk->sk_security; + netlbl_secattr_init(&secattr); + rc = netlbl_skbuff_getattr(skb, family, &secattr); + if (rc == 0) { + skp = smack_from_secattr(&secattr, ssp); + s = skp->smk_secid; + } + netlbl_secattr_destroy(&secattr); + } + *secid = s; if (s == 0) return -EINVAL; - - *secid = s; return 0; } @@ -2693,13 +3459,15 @@ static int smack_socket_getpeersec_dgram(struct socket *sock, static void smack_sock_graft(struct sock *sk, struct socket *parent) { struct socket_smack *ssp; + struct smack_known *skp = smk_of_current(); if (sk == NULL || (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)) return; ssp = sk->sk_security; - ssp->smk_in = ssp->smk_out = current_security(); + ssp->smk_in = skp; + ssp->smk_out = skp; /* cssp->smk_packet is already set in smack_inet_csk_clone() */ } @@ -2716,37 +3484,49 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb, struct request_sock *req) { u16 family = sk->sk_family; + struct smack_known *skp; struct socket_smack *ssp = sk->sk_security; struct netlbl_lsm_secattr secattr; struct sockaddr_in addr; struct iphdr *hdr; - char smack[SMK_LABELLEN]; + char *hsp; int rc; struct smk_audit_info ad; +#ifdef CONFIG_AUDIT + struct lsm_network_audit net; +#endif - /* handle mapped IPv4 packets arriving via IPv6 sockets */ - if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP)) - family = PF_INET; + if (family == PF_INET6) { + /* + * Handle mapped IPv4 packets arriving + * via IPv6 sockets. Don't set up netlabel + * processing on IPv6. + */ + if (skb->protocol == htons(ETH_P_IP)) + family = PF_INET; + else + return 0; + } netlbl_secattr_init(&secattr); rc = netlbl_skbuff_getattr(skb, family, &secattr); if (rc == 0) - smack_from_secattr(&secattr, smack); + skp = smack_from_secattr(&secattr, ssp); else - strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN); + skp = &smack_known_huh; netlbl_secattr_destroy(&secattr); #ifdef CONFIG_AUDIT - smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET); - ad.a.u.net.family = family; - ad.a.u.net.netif = skb->skb_iif; + smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net); + ad.a.u.net->family = family; + ad.a.u.net->netif = skb->skb_iif; ipv4_skb_to_auditdata(skb, &ad.a, NULL); #endif /* * Receiving a packet requires that the other end be able to write * here. Read access is not required. */ - rc = smk_access(smack, ssp->smk_in, MAY_WRITE, &ad); + rc = smk_access(skp, ssp->smk_in->smk_known, MAY_WRITE, &ad); if (rc != 0) return rc; @@ -2754,26 +3534,23 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb, * Save the peer's label in the request_sock so we can later setup * smk_packet in the child socket so that SO_PEERCRED can report it. */ - req->peer_secid = smack_to_secid(smack); + req->peer_secid = skp->smk_secid; /* * We need to decide if we want to label the incoming connection here * if we do we only need to label the request_sock and the stack will - * propogate the wire-label to the sock when it is created. + * propagate the wire-label to the sock when it is created. 
*/ hdr = ip_hdr(skb); addr.sin_addr.s_addr = hdr->saddr; rcu_read_lock(); - if (smack_host_label(&addr) == NULL) { - rcu_read_unlock(); - netlbl_secattr_init(&secattr); - smack_to_secattr(smack, &secattr); - rc = netlbl_req_setattr(req, &secattr); - netlbl_secattr_destroy(&secattr); - } else { - rcu_read_unlock(); + hsp = smack_host_label(&addr); + rcu_read_unlock(); + + if (hsp == NULL) + rc = netlbl_req_setattr(req, &skp->smk_netlabel); + else netlbl_req_delattr(req); - } return rc; } @@ -2789,13 +3566,13 @@ static void smack_inet_csk_clone(struct sock *sk, const struct request_sock *req) { struct socket_smack *ssp = sk->sk_security; - char *smack; + struct smack_known *skp; if (req->peer_secid != 0) { - smack = smack_from_secid(req->peer_secid); - strncpy(ssp->smk_packet, smack, SMK_MAXLEN); + skp = smack_from_secid(req->peer_secid); + ssp->smk_packet = skp; } else - ssp->smk_packet[0] = '\0'; + ssp->smk_packet = NULL; } /* @@ -2820,7 +3597,9 @@ static void smack_inet_csk_clone(struct sock *sk, static int smack_key_alloc(struct key *key, const struct cred *cred, unsigned long flags) { - key->security = cred->security; + struct smack_known *skp = smk_of_task(cred->security); + + key->security = skp->smk_known; return 0; } @@ -2845,10 +3624,12 @@ static void smack_key_free(struct key *key) * an error code otherwise */ static int smack_key_permission(key_ref_t key_ref, - const struct cred *cred, key_perm_t perm) + const struct cred *cred, unsigned perm) { struct key *keyp; struct smk_audit_info ad; + struct smack_known *tkp = smk_of_task(cred->security); + int request = 0; keyp = key_ref_to_ptr(key_ref); if (keyp == NULL) @@ -2862,15 +3643,18 @@ static int smack_key_permission(key_ref_t key_ref, /* * This should not occur */ - if (cred->security == NULL) + if (tkp == NULL) return -EACCES; #ifdef CONFIG_AUDIT smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY); ad.a.u.key_struct.key = keyp->serial; ad.a.u.key_struct.key_desc = keyp->description; #endif - return smk_access(cred->security, keyp->security, - MAY_READWRITE, &ad); + if (perm & KEY_NEED_READ) + request = MAY_READ; + if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR)) + request = MAY_WRITE; + return smk_access(tkp, keyp->security, request, &ad); } #endif /* CONFIG_KEYS */ @@ -2952,19 +3736,18 @@ static int smack_audit_rule_known(struct audit_krule *krule) static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule, struct audit_context *actx) { - char *smack; + struct smack_known *skp; char *rule = vrule; - if (!rule) { - audit_log(actx, GFP_KERNEL, AUDIT_SELINUX_ERR, - "Smack: missing rule\n"); + if (unlikely(!rule)) { + WARN_ONCE(1, "Smack: missing rule\n"); return -ENOENT; } if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER) return 0; - smack = smack_from_secid(secid); + skp = smack_from_secid(secid); /* * No need to do string comparisons. If a match occurs, @@ -2972,9 +3755,9 @@ static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule, * label. */ if (op == Audit_equal) - return (rule == smack); + return (rule == skp->smk_known); if (op == Audit_not_equal) - return (rule != smack); + return (rule != skp->smk_known); return 0; } @@ -2993,6 +3776,16 @@ static void smack_audit_rule_free(void *vrule) #endif /* CONFIG_AUDIT */ /** + * smack_ismaclabel - check if xattr @name references a smack MAC label + * @name: Full xattr name to check. 
+ */ +static int smack_ismaclabel(const char *name) +{ + return (strcmp(name, XATTR_SMACK_SUFFIX) == 0); +} + + +/** * smack_secid_to_secctx - return the smack label for a secid * @secid: incoming integer * @secdata: destination @@ -3002,11 +3795,11 @@ static void smack_audit_rule_free(void *vrule) */ static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen) { - char *sp = smack_from_secid(secid); + struct smack_known *skp = smack_from_secid(secid); if (secdata) - *secdata = sp; - *seclen = strlen(sp); + *secdata = skp->smk_known; + *seclen = strlen(skp->smk_known); return 0; } @@ -3068,8 +3861,10 @@ struct security_operations smack_ops = { .sb_copy_data = smack_sb_copy_data, .sb_kern_mount = smack_sb_kern_mount, .sb_statfs = smack_sb_statfs, - .sb_mount = smack_sb_mount, - .sb_umount = smack_sb_umount, + + .bprm_set_creds = smack_bprm_set_creds, + .bprm_committing_creds = smack_bprm_committing_creds, + .bprm_secureexec = smack_bprm_secureexec, .inode_alloc_security = smack_inode_alloc_security, .inode_free_security = smack_inode_free_security, @@ -3096,10 +3891,14 @@ struct security_operations smack_ops = { .file_ioctl = smack_file_ioctl, .file_lock = smack_file_lock, .file_fcntl = smack_file_fcntl, + .mmap_file = smack_mmap_file, + .mmap_addr = cap_mmap_addr, .file_set_fowner = smack_file_set_fowner, .file_send_sigiotask = smack_file_send_sigiotask, .file_receive = smack_file_receive, + .file_open = smack_file_open, + .cred_alloc_blank = smack_cred_alloc_blank, .cred_free = smack_cred_free, .cred_prepare = smack_cred_prepare, @@ -3154,6 +3953,7 @@ struct security_operations smack_ops = { .unix_may_send = smack_unix_may_send, .socket_post_create = smack_socket_post_create, + .socket_bind = smack_socket_bind, .socket_connect = smack_socket_connect, .socket_sendmsg = smack_socket_sendmsg, .socket_sock_rcv_skb = smack_socket_sock_rcv_skb, @@ -3180,6 +3980,7 @@ struct security_operations smack_ops = { .audit_rule_free = smack_audit_rule_free, #endif /* CONFIG_AUDIT */ + .ismaclabel = smack_ismaclabel, .secid_to_secctx = smack_secid_to_secctx, .secctx_to_secid = smack_secctx_to_secid, .release_secctx = smack_release_secctx, @@ -3189,14 +3990,35 @@ struct security_operations smack_ops = { }; -static __init void init_smack_know_list(void) +static __init void init_smack_known_list(void) { - list_add(&smack_known_huh.list, &smack_known_list); - list_add(&smack_known_hat.list, &smack_known_list); - list_add(&smack_known_star.list, &smack_known_list); - list_add(&smack_known_floor.list, &smack_known_list); - list_add(&smack_known_invalid.list, &smack_known_list); - list_add(&smack_known_web.list, &smack_known_list); + /* + * Initialize rule list locks + */ + mutex_init(&smack_known_huh.smk_rules_lock); + mutex_init(&smack_known_hat.smk_rules_lock); + mutex_init(&smack_known_floor.smk_rules_lock); + mutex_init(&smack_known_star.smk_rules_lock); + mutex_init(&smack_known_invalid.smk_rules_lock); + mutex_init(&smack_known_web.smk_rules_lock); + /* + * Initialize rule lists + */ + INIT_LIST_HEAD(&smack_known_huh.smk_rules); + INIT_LIST_HEAD(&smack_known_hat.smk_rules); + INIT_LIST_HEAD(&smack_known_star.smk_rules); + INIT_LIST_HEAD(&smack_known_floor.smk_rules); + INIT_LIST_HEAD(&smack_known_invalid.smk_rules); + INIT_LIST_HEAD(&smack_known_web.smk_rules); + /* + * Create the known labels list + */ + smk_insert_entry(&smack_known_huh); + smk_insert_entry(&smack_known_hat); + smk_insert_entry(&smack_known_star); + smk_insert_entry(&smack_known_floor); + 
smk_insert_entry(&smack_known_invalid); + smk_insert_entry(&smack_known_web); } /** @@ -3207,28 +4029,26 @@ static __init void init_smack_know_list(void) static __init int smack_init(void) { struct cred *cred; + struct task_smack *tsp; if (!security_module_enable(&smack_ops)) return 0; + tsp = new_task_smack(&smack_known_floor, &smack_known_floor, + GFP_KERNEL); + if (tsp == NULL) + return -ENOMEM; + printk(KERN_INFO "Smack: Initializing.\n"); /* * Set the security state for the initial task. */ cred = (struct cred *) current->cred; - cred->security = &smack_known_floor.smk_known; + cred->security = tsp; - /* initialize the smack_know_list */ - init_smack_know_list(); - /* - * Initialize locks - */ - spin_lock_init(&smack_known_huh.smk_cipsolock); - spin_lock_init(&smack_known_hat.smk_cipsolock); - spin_lock_init(&smack_known_star.smk_cipsolock); - spin_lock_init(&smack_known_floor.smk_cipsolock); - spin_lock_init(&smack_known_invalid.smk_cipsolock); + /* initialize the smack_known_list */ + init_smack_known_list(); /* * Register with LSM diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c index dc1fd6239f2..32b24882084 100644 --- a/security/smack/smackfs.c +++ b/security/smack/smackfs.c @@ -22,11 +22,11 @@ #include <linux/mutex.h> #include <linux/slab.h> #include <net/net_namespace.h> -#include <net/netlabel.h> #include <net/cipso_ipv4.h> #include <linux/seq_file.h> #include <linux/ctype.h> #include <linux/audit.h> +#include <linux/magic.h> #include "smack.h" /* @@ -43,14 +43,25 @@ enum smk_inos { SMK_NETLBLADDR = 8, /* single label hosts */ SMK_ONLYCAP = 9, /* the only "capable" label */ SMK_LOGGING = 10, /* logging */ + SMK_LOAD_SELF = 11, /* task specific rules */ + SMK_ACCESSES = 12, /* access policy */ + SMK_MAPPED = 13, /* CIPSO level indicating mapped label */ + SMK_LOAD2 = 14, /* load policy with long labels */ + SMK_LOAD_SELF2 = 15, /* load task specific rules with long labels */ + SMK_ACCESS2 = 16, /* make an access check with long labels */ + SMK_CIPSO2 = 17, /* load long label -> CIPSO mapping */ + SMK_REVOKE_SUBJ = 18, /* set rules with subject label to '-' */ + SMK_CHANGE_RULE = 19, /* change or add rules (long labels) */ + SMK_SYSLOG = 20, /* change syslog label) */ + SMK_PTRACE = 21, /* set ptrace rule */ }; /* * List locks */ -static DEFINE_MUTEX(smack_list_lock); static DEFINE_MUTEX(smack_cipso_lock); static DEFINE_MUTEX(smack_ambient_lock); +static DEFINE_MUTEX(smack_syslog_lock); static DEFINE_MUTEX(smk_netlbladdr_lock); /* @@ -58,7 +69,7 @@ static DEFINE_MUTEX(smk_netlbladdr_lock); * If it isn't somehow marked, use this. * It can be reset via smackfs/ambient */ -char *smack_net_ambient = smack_known_floor.smk_known; +struct smack_known *smack_net_ambient; /* * This is the level in a CIPSO header that indicates a @@ -68,6 +79,13 @@ char *smack_net_ambient = smack_known_floor.smk_known; int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT; /* + * This is the level in a CIPSO header that indicates a + * secid is contained directly in the category set. + * It can be reset via smackfs/mapped + */ +int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT; + +/* * Unless a process is running with this label even * having CAP_MAC_OVERRIDE isn't enough to grant * privilege to violate MAC policy. If no label is @@ -75,7 +93,22 @@ int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT; * everyone. It is expected that the hat (^) label * will be used if any label is used. 
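A minimal sketch of designating such a label from user space through the onlycap interface handled later in this file; /smack is the assumed smackfs mount point, CAP_MAC_ADMIN is required, and error handling is omitted.

    #include <fcntl.h>
    #include <unistd.h>

    /*
     * Writing "^" restricts MAC-override privilege to tasks running with
     * the hat label; writing a newline clears the restriction again, as
     * described in smk_write_onlycap() below.
     */
    static void restrict_privilege_to_hat(void)
    {
        int fd = open("/smack/onlycap", O_WRONLY);

        if (fd >= 0) {
            write(fd, "^", 1);
            close(fd);
        }
    }
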
*/ -char *smack_onlycap; +struct smack_known *smack_onlycap; + +/* + * If this value is set restrict syslog use to the label specified. + * It can be reset via smackfs/syslog + */ +struct smack_known *smack_syslog_label; + +/* + * Ptrace current rule + * SMACK_PTRACE_DEFAULT regular smack ptrace rules (/proc based) + * SMACK_PTRACE_EXACT labels must match, but can be overriden with + * CAP_SYS_PTRACE + * SMACK_PTRACE_DRACONIAN lables must match, CAP_SYS_PTRACE has no effect + */ +int smack_ptrace_rule = SMACK_PTRACE_DEFAULT; /* * Certain IP addresses may be designated as single label hosts. @@ -84,15 +117,29 @@ char *smack_onlycap; */ LIST_HEAD(smk_netlbladdr_list); + +/* + * Rule lists are maintained for each label. + * This master list is just for reading /smack/load and /smack/load2. + */ +struct smack_master_list { + struct list_head list; + struct smack_rule *smk_rule; +}; + LIST_HEAD(smack_rule_list); +struct smack_parsed_rule { + struct smack_known *smk_subject; + char *smk_object; + int smk_access1; + int smk_access2; +}; + static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT; const char *smack_cipso_option = SMACK_CIPSO_OPTION; - -#define SEQ_READ_FINISHED 1 - /* * Values for parsing cipso rules * SMK_DIGITLEN: Length of a digit field in a rule. @@ -109,9 +156,24 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION; * SMK_ACCESSLEN: Maximum length for a rule access field * SMK_LOADLEN: Smack rule length */ -#define SMK_ACCESS "rwxa" -#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1) -#define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN) +#define SMK_OACCESS "rwxa" +#define SMK_ACCESS "rwxatl" +#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1) +#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1) +#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN) +#define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN) + +/* + * Stricly for CIPSO level manipulation. + * Set the category bit number in a smack label sized buffer. + */ +static inline void smack_catset_bit(unsigned int cat, char *catsetp) +{ + if (cat == 0 || cat > (SMK_CIPSOLEN * 8)) + return; + + catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8); +} /** * smk_netlabel_audit_set - fill a netlbl_audit struct @@ -119,51 +181,426 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION; */ static void smk_netlabel_audit_set(struct netlbl_audit *nap) { + struct smack_known *skp = smk_of_current(); + nap->loginuid = audit_get_loginuid(current); nap->sessionid = audit_get_sessionid(current); - nap->secid = smack_to_secid(current_security()); + nap->secid = skp->smk_secid; } /* - * Values for parsing single label host rules + * Value for parsing single label host rules * "1.2.3.4 X" - * "192.168.138.129/32 abcdefghijklmnopqrstuvw" */ #define SMK_NETLBLADDRMIN 9 -#define SMK_NETLBLADDRMAX 42 + +/** + * smk_set_access - add a rule to the rule list or replace an old rule + * @srp: the rule to add or replace + * @rule_list: the list of rules + * @rule_lock: the rule list lock + * @global: if non-zero, indicates a global rule + * + * Looks through the current subject/object/access list for + * the subject/object pair and replaces the access that was + * there. If the pair isn't found add it with the specified + * access. + * + * Returns 0 if nothing goes wrong or -ENOMEM if it fails + * during the allocation of the new pair to add. 
+ */ +static int smk_set_access(struct smack_parsed_rule *srp, + struct list_head *rule_list, + struct mutex *rule_lock, int global) +{ + struct smack_rule *sp; + struct smack_master_list *smlp; + int found = 0; + int rc = 0; + + mutex_lock(rule_lock); + + /* + * Because the object label is less likely to match + * than the subject label check it first + */ + list_for_each_entry_rcu(sp, rule_list, list) { + if (sp->smk_object == srp->smk_object && + sp->smk_subject == srp->smk_subject) { + found = 1; + sp->smk_access |= srp->smk_access1; + sp->smk_access &= ~srp->smk_access2; + break; + } + } + + if (found == 0) { + sp = kzalloc(sizeof(*sp), GFP_KERNEL); + if (sp == NULL) { + rc = -ENOMEM; + goto out; + } + + sp->smk_subject = srp->smk_subject; + sp->smk_object = srp->smk_object; + sp->smk_access = srp->smk_access1 & ~srp->smk_access2; + + list_add_rcu(&sp->list, rule_list); + /* + * If this is a global as opposed to self and a new rule + * it needs to get added for reporting. + */ + if (global) { + smlp = kzalloc(sizeof(*smlp), GFP_KERNEL); + if (smlp != NULL) { + smlp->smk_rule = sp; + list_add_rcu(&smlp->list, &smack_rule_list); + } else + rc = -ENOMEM; + } + } + +out: + mutex_unlock(rule_lock); + return rc; +} + +/** + * smk_perm_from_str - parse smack accesses from a text string + * @string: a text string that contains a Smack accesses code + * + * Returns an integer with respective bits set for specified accesses. + */ +static int smk_perm_from_str(const char *string) +{ + int perm = 0; + const char *cp; + + for (cp = string; ; cp++) + switch (*cp) { + case '-': + break; + case 'r': + case 'R': + perm |= MAY_READ; + break; + case 'w': + case 'W': + perm |= MAY_WRITE; + break; + case 'x': + case 'X': + perm |= MAY_EXEC; + break; + case 'a': + case 'A': + perm |= MAY_APPEND; + break; + case 't': + case 'T': + perm |= MAY_TRANSMUTE; + break; + case 'l': + case 'L': + perm |= MAY_LOCK; + break; + default: + return perm; + } +} + +/** + * smk_fill_rule - Fill Smack rule from strings + * @subject: subject label string + * @object: object label string + * @access1: access string + * @access2: string with permissions to be removed + * @rule: Smack rule + * @import: if non-zero, import labels + * @len: label length limit + * + * Returns 0 on success, -EINVAL on failure and -ENOENT when either subject + * or object is missing. 
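For reference, the access-string parser above maps the mode letters onto the MAY_* bits used throughout Smack; a few worked examples:

    smk_perm_from_str("rwx")  == MAY_READ | MAY_WRITE | MAY_EXEC
    smk_perm_from_str("r-a")  == MAY_READ | MAY_APPEND      /* '-' is skipped */
    smk_perm_from_str("tl")   == MAY_TRANSMUTE | MAY_LOCK
    smk_perm_from_str("")     == 0

Upper-case letters are accepted as well, and parsing stops at the first byte outside the recognised set, so a trailing newline simply terminates the access field.
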
+ */ +static int smk_fill_rule(const char *subject, const char *object, + const char *access1, const char *access2, + struct smack_parsed_rule *rule, int import, + int len) +{ + const char *cp; + struct smack_known *skp; + + if (import) { + rule->smk_subject = smk_import_entry(subject, len); + if (rule->smk_subject == NULL) + return -EINVAL; + + rule->smk_object = smk_import(object, len); + if (rule->smk_object == NULL) + return -EINVAL; + } else { + cp = smk_parse_smack(subject, len); + if (cp == NULL) + return -EINVAL; + skp = smk_find_entry(cp); + kfree(cp); + if (skp == NULL) + return -ENOENT; + rule->smk_subject = skp; + + cp = smk_parse_smack(object, len); + if (cp == NULL) + return -EINVAL; + skp = smk_find_entry(cp); + kfree(cp); + if (skp == NULL) + return -ENOENT; + rule->smk_object = skp->smk_known; + } + + rule->smk_access1 = smk_perm_from_str(access1); + if (access2) + rule->smk_access2 = smk_perm_from_str(access2); + else + rule->smk_access2 = ~rule->smk_access1; + + return 0; +} + +/** + * smk_parse_rule - parse Smack rule from load string + * @data: string to be parsed whose size is SMK_LOADLEN + * @rule: Smack rule + * @import: if non-zero, import labels + * + * Returns 0 on success, -1 on errors. + */ +static int smk_parse_rule(const char *data, struct smack_parsed_rule *rule, + int import) +{ + int rc; + + rc = smk_fill_rule(data, data + SMK_LABELLEN, + data + SMK_LABELLEN + SMK_LABELLEN, NULL, rule, + import, SMK_LABELLEN); + return rc; +} + +/** + * smk_parse_long_rule - parse Smack rule from rule string + * @data: string to be parsed, null terminated + * @rule: Will be filled with Smack parsed rule + * @import: if non-zero, import labels + * @tokens: numer of substrings expected in data + * + * Returns number of processed bytes on success, -1 on failure. + */ +static ssize_t smk_parse_long_rule(char *data, struct smack_parsed_rule *rule, + int import, int tokens) +{ + ssize_t cnt = 0; + char *tok[4]; + int rc; + int i; + + /* + * Parsing the rule in-place, filling all white-spaces with '\0' + */ + for (i = 0; i < tokens; ++i) { + while (isspace(data[cnt])) + data[cnt++] = '\0'; + + if (data[cnt] == '\0') + /* Unexpected end of data */ + return -1; + + tok[i] = data + cnt; + + while (data[cnt] && !isspace(data[cnt])) + ++cnt; + } + while (isspace(data[cnt])) + data[cnt++] = '\0'; + + while (i < 4) + tok[i++] = NULL; + + rc = smk_fill_rule(tok[0], tok[1], tok[2], tok[3], rule, import, 0); + return rc == 0 ? cnt : rc; +} + +#define SMK_FIXED24_FMT 0 /* Fixed 24byte label format */ +#define SMK_LONG_FMT 1 /* Variable long label format */ +#define SMK_CHANGE_FMT 2 /* Rule modification format */ +/** + * smk_write_rules_list - write() for any /smack rule file + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start - must be 0 + * @rule_list: the list of rules to write to + * @rule_lock: lock for the rule list + * @format: /smack/load or /smack/load2 or /smack/change-rule format. + * + * Get one smack access rule from above. 
+ * The format for SMK_LONG_FMT is: + * "subject<whitespace>object<whitespace>access[<whitespace>...]" + * The format for SMK_FIXED24_FMT is exactly: + * "subject object rwxat" + * The format for SMK_CHANGE_FMT is: + * "subject<whitespace>object<whitespace> + * acc_enable<whitespace>acc_disable[<whitespace>...]" + */ +static ssize_t smk_write_rules_list(struct file *file, const char __user *buf, + size_t count, loff_t *ppos, + struct list_head *rule_list, + struct mutex *rule_lock, int format) +{ + struct smack_parsed_rule rule; + char *data; + int rc; + int trunc = 0; + int tokens; + ssize_t cnt = 0; + + /* + * No partial writes. + * Enough data must be present. + */ + if (*ppos != 0) + return -EINVAL; + + if (format == SMK_FIXED24_FMT) { + /* + * Minor hack for backward compatibility + */ + if (count < SMK_OLOADLEN || count > SMK_LOADLEN) + return -EINVAL; + } else { + if (count >= PAGE_SIZE) { + count = PAGE_SIZE - 1; + trunc = 1; + } + } + + data = kmalloc(count + 1, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + if (copy_from_user(data, buf, count) != 0) { + rc = -EFAULT; + goto out; + } + + /* + * In case of parsing only part of user buf, + * avoid having partial rule at the data buffer + */ + if (trunc) { + while (count > 0 && (data[count - 1] != '\n')) + --count; + if (count == 0) { + rc = -EINVAL; + goto out; + } + } + + data[count] = '\0'; + tokens = (format == SMK_CHANGE_FMT ? 4 : 3); + while (cnt < count) { + if (format == SMK_FIXED24_FMT) { + rc = smk_parse_rule(data, &rule, 1); + if (rc != 0) { + rc = -EINVAL; + goto out; + } + cnt = count; + } else { + rc = smk_parse_long_rule(data + cnt, &rule, 1, tokens); + if (rc <= 0) { + rc = -EINVAL; + goto out; + } + cnt += rc; + } + + if (rule_list == NULL) + rc = smk_set_access(&rule, &rule.smk_subject->smk_rules, + &rule.smk_subject->smk_rules_lock, 1); + else + rc = smk_set_access(&rule, rule_list, rule_lock, 0); + + if (rc) + goto out; + } + + rc = cnt; +out: + kfree(data); + return rc; +} /* - * Seq_file read operations for /smack/load + * Core logic for smackfs seq list operations. */ -static void *load_seq_start(struct seq_file *s, loff_t *pos) +static void *smk_seq_start(struct seq_file *s, loff_t *pos, + struct list_head *head) { - if (*pos == SEQ_READ_FINISHED) + struct list_head *list; + + /* + * This is 0 the first time through. 
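A hedged user-space sketch of feeding one rule in the SMK_LONG_FMT form described above to this interface; the labels are illustrative, /smack is the assumed smackfs mount point, and CAP_MAC_ADMIN is required.

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>

    /* Grant subject "ExampleSubject" rwx access to object "ExampleObject". */
    static int load_rule(void)
    {
        static const char rule[] = "ExampleSubject ExampleObject rwx";
        int fd = open("/smack/load2", O_WRONLY);
        ssize_t n;

        if (fd < 0)
            return -1;
        n = write(fd, rule, strlen(rule));
        close(fd);
        return n == (ssize_t)strlen(rule) ? 0 : -1;
    }
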
+ */ + if (s->index == 0) + s->private = head; + + if (s->private == NULL) return NULL; - if (list_empty(&smack_rule_list)) + + list = s->private; + if (list_empty(list)) return NULL; - return smack_rule_list.next; + + if (s->index == 0) + return list->next; + return list; } -static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos) +static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos, + struct list_head *head) { struct list_head *list = v; - if (list_is_last(list, &smack_rule_list)) { - *pos = SEQ_READ_FINISHED; + if (list_is_last(list, head)) { + s->private = NULL; return NULL; } + s->private = list->next; return list->next; } -static int load_seq_show(struct seq_file *s, void *v) +static void smk_seq_stop(struct seq_file *s, void *v) { - struct list_head *list = v; - struct smack_rule *srp = - list_entry(list, struct smack_rule, list); + /* No-op */ +} - seq_printf(s, "%s %s", (char *)srp->smk_subject, - (char *)srp->smk_object); +static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max) +{ + /* + * Don't show any rules with label names too long for + * interface file (/smack/load or /smack/load2) + * because you should expect to be able to write + * anything you read back. + */ + if (strlen(srp->smk_subject->smk_known) >= max || + strlen(srp->smk_object) >= max) + return; + + if (srp->smk_access == 0) + return; + + seq_printf(s, "%s %s", srp->smk_subject->smk_known, srp->smk_object); seq_putc(s, ' '); @@ -175,24 +612,44 @@ static int load_seq_show(struct seq_file *s, void *v) seq_putc(s, 'x'); if (srp->smk_access & MAY_APPEND) seq_putc(s, 'a'); - if (srp->smk_access == 0) - seq_putc(s, '-'); + if (srp->smk_access & MAY_TRANSMUTE) + seq_putc(s, 't'); + if (srp->smk_access & MAY_LOCK) + seq_putc(s, 'l'); seq_putc(s, '\n'); +} - return 0; +/* + * Seq_file read operations for /smack/load + */ + +static void *load2_seq_start(struct seq_file *s, loff_t *pos) +{ + return smk_seq_start(s, pos, &smack_rule_list); } -static void load_seq_stop(struct seq_file *s, void *v) +static void *load2_seq_next(struct seq_file *s, void *v, loff_t *pos) { - /* No-op */ + return smk_seq_next(s, v, pos, &smack_rule_list); +} + +static int load_seq_show(struct seq_file *s, void *v) +{ + struct list_head *list = v; + struct smack_master_list *smlp = + list_entry(list, struct smack_master_list, list); + + smk_rule_show(s, smlp->smk_rule, SMK_LABELLEN); + + return 0; } static const struct seq_operations load_seq_ops = { - .start = load_seq_start, - .next = load_seq_next, + .start = load2_seq_start, + .next = load2_seq_next, .show = load_seq_show, - .stop = load_seq_stop, + .stop = smk_seq_stop, }; /** @@ -208,154 +665,26 @@ static int smk_open_load(struct inode *inode, struct file *file) } /** - * smk_set_access - add a rule to the rule list - * @srp: the new rule to add - * - * Looks through the current subject/object/access list for - * the subject/object pair and replaces the access that was - * there. If the pair isn't found add it with the specified - * access. - * - * Returns 0 if nothing goes wrong or -ENOMEM if it fails - * during the allocation of the new pair to add. 
- */ -static int smk_set_access(struct smack_rule *srp) -{ - struct smack_rule *sp; - int ret = 0; - int found; - mutex_lock(&smack_list_lock); - - found = 0; - list_for_each_entry_rcu(sp, &smack_rule_list, list) { - if (sp->smk_subject == srp->smk_subject && - sp->smk_object == srp->smk_object) { - found = 1; - sp->smk_access = srp->smk_access; - break; - } - } - if (found == 0) - list_add_rcu(&srp->list, &smack_rule_list); - - mutex_unlock(&smack_list_lock); - - return ret; -} - -/** * smk_write_load - write() for /smack/load * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start - must be 0 * - * Get one smack access rule from above. - * The format is exactly: - * char subject[SMK_LABELLEN] - * char object[SMK_LABELLEN] - * char access[SMK_ACCESSLEN] - * - * writes must be SMK_LABELLEN+SMK_LABELLEN+SMK_ACCESSLEN bytes. */ static ssize_t smk_write_load(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - struct smack_rule *rule; - char *data; - int rc = -EINVAL; - /* * Must have privilege. * No partial writes. * Enough data must be present. */ - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; - if (*ppos != 0 || count != SMK_LOADLEN) - return -EINVAL; - - data = kzalloc(count, GFP_KERNEL); - if (data == NULL) - return -ENOMEM; - - if (copy_from_user(data, buf, count) != 0) { - rc = -EFAULT; - goto out; - } - - rule = kzalloc(sizeof(*rule), GFP_KERNEL); - if (rule == NULL) { - rc = -ENOMEM; - goto out; - } - - rule->smk_subject = smk_import(data, 0); - if (rule->smk_subject == NULL) - goto out_free_rule; - - rule->smk_object = smk_import(data + SMK_LABELLEN, 0); - if (rule->smk_object == NULL) - goto out_free_rule; - - rule->smk_access = 0; - - switch (data[SMK_LABELLEN + SMK_LABELLEN]) { - case '-': - break; - case 'r': - case 'R': - rule->smk_access |= MAY_READ; - break; - default: - goto out_free_rule; - } - - switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) { - case '-': - break; - case 'w': - case 'W': - rule->smk_access |= MAY_WRITE; - break; - default: - goto out_free_rule; - } - - switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) { - case '-': - break; - case 'x': - case 'X': - rule->smk_access |= MAY_EXEC; - break; - default: - goto out_free_rule; - } - - switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) { - case '-': - break; - case 'a': - case 'A': - rule->smk_access |= MAY_APPEND; - break; - default: - goto out_free_rule; - } - - rc = smk_set_access(rule); - - if (!rc) - rc = count; - goto out; - -out_free_rule: - kfree(rule); -out: - kfree(data); - return rc; + return smk_write_rules_list(file, buf, count, ppos, NULL, NULL, + SMK_FIXED24_FMT); } static const struct file_operations smk_load_ops = { @@ -425,8 +754,10 @@ static void smk_unlbl_ambient(char *oldambient) printk(KERN_WARNING "%s:%d remove rc = %d\n", __func__, __LINE__, rc); } + if (smack_net_ambient == NULL) + smack_net_ambient = &smack_known_floor; - rc = netlbl_cfg_unlbl_map_add(smack_net_ambient, PF_INET, + rc = netlbl_cfg_unlbl_map_add(smack_net_ambient->smk_known, PF_INET, NULL, NULL, &nai); if (rc != 0) printk(KERN_WARNING "%s:%d add rc = %d\n", @@ -439,28 +770,12 @@ static void smk_unlbl_ambient(char *oldambient) static void *cipso_seq_start(struct seq_file *s, loff_t *pos) { - if (*pos == SEQ_READ_FINISHED) - return NULL; - if (list_empty(&smack_known_list)) - return NULL; - - return smack_known_list.next; + return smk_seq_start(s, pos, &smack_known_list); } static void *cipso_seq_next(struct 
seq_file *s, void *v, loff_t *pos) { - struct list_head *list = v; - - /* - * labels with no associated cipso value wont be printed - * in cipso_seq_show - */ - if (list_is_last(list, &smack_known_list)) { - *pos = SEQ_READ_FINISHED; - return NULL; - } - - return list->next; + return smk_seq_next(s, v, pos, &smack_known_list); } /* @@ -472,43 +787,39 @@ static int cipso_seq_show(struct seq_file *s, void *v) struct list_head *list = v; struct smack_known *skp = list_entry(list, struct smack_known, list); - struct smack_cipso *scp = skp->smk_cipso; - char *cbp; + struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat; char sep = '/'; - int cat = 1; int i; - unsigned char m; - if (scp == NULL) + /* + * Don't show a label that could not have been set using + * /smack/cipso. This is in support of the notion that + * anything read from /smack/cipso ought to be writeable + * to /smack/cipso. + * + * /smack/cipso2 should be used instead. + */ + if (strlen(skp->smk_known) >= SMK_LABELLEN) return 0; - seq_printf(s, "%s %3d", (char *)&skp->smk_known, scp->smk_level); + seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl); - cbp = scp->smk_catset; - for (i = 0; i < SMK_LABELLEN; i++) - for (m = 0x80; m != 0; m >>= 1) { - if (m & cbp[i]) { - seq_printf(s, "%c%d", sep, cat); - sep = ','; - } - cat++; - } + for (i = netlbl_secattr_catmap_walk(cmp, 0); i >= 0; + i = netlbl_secattr_catmap_walk(cmp, i + 1)) { + seq_printf(s, "%c%d", sep, i); + sep = ','; + } seq_putc(s, '\n'); return 0; } -static void cipso_seq_stop(struct seq_file *s, void *v) -{ - /* No-op */ -} - static const struct seq_operations cipso_seq_ops = { .start = cipso_seq_start, - .stop = cipso_seq_stop, .next = cipso_seq_next, .show = cipso_seq_show, + .stop = smk_seq_stop, }; /** @@ -525,23 +836,24 @@ static int smk_open_cipso(struct inode *inode, struct file *file) } /** - * smk_write_cipso - write() for /smack/cipso + * smk_set_cipso - do the work for write() for cipso and cipso2 * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent * @ppos: where to start + * @format: /smack/cipso or /smack/cipso2 * * Accepts only one cipso rule per write call. * Returns number of bytes written or error code, as appropriate */ -static ssize_t smk_write_cipso(struct file *file, const char __user *buf, - size_t count, loff_t *ppos) +static ssize_t smk_set_cipso(struct file *file, const char __user *buf, + size_t count, loff_t *ppos, int format) { struct smack_known *skp; - struct smack_cipso *scp = NULL; - char mapcatset[SMK_LABELLEN]; + struct netlbl_lsm_secattr ncats; + char mapcatset[SMK_CIPSOLEN]; int maplevel; - int cat; + unsigned int cat; int catlen; ssize_t rc = -EINVAL; char *data = NULL; @@ -554,11 +866,12 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf, * No partial writes. * Enough data must be present. 
*/ - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; if (*ppos != 0) return -EINVAL; - if (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX) + if (format == SMK_FIXED24_FMT && + (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX)) return -EINVAL; data = kzalloc(count + 1, GFP_KERNEL); @@ -570,11 +883,6 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf, goto unlockedout; } - /* labels cannot begin with a '-' */ - if (data[0] == '-') { - rc = -EINVAL; - goto unlockedout; - } data[count] = '\0'; rule = data; /* @@ -587,7 +895,11 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf, if (skp == NULL) goto out; - rule += SMK_LABELLEN; + if (format == SMK_FIXED24_FMT) + rule += SMK_LABELLEN; + else + rule += strlen(skp->smk_known) + 1; + ret = sscanf(rule, "%d", &maplevel); if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL) goto out; @@ -597,41 +909,29 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf, if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM) goto out; - if (count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN)) + if (format == SMK_FIXED24_FMT && + count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN)) goto out; memset(mapcatset, 0, sizeof(mapcatset)); for (i = 0; i < catlen; i++) { rule += SMK_DIGITLEN; - ret = sscanf(rule, "%d", &cat); - if (ret != 1 || cat > SMACK_CIPSO_MAXCATVAL) + ret = sscanf(rule, "%u", &cat); + if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM) goto out; smack_catset_bit(cat, mapcatset); } - if (skp->smk_cipso == NULL) { - scp = kzalloc(sizeof(struct smack_cipso), GFP_KERNEL); - if (scp == NULL) { - rc = -ENOMEM; - goto out; - } + rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN); + if (rc >= 0) { + netlbl_secattr_catmap_free(skp->smk_netlabel.attr.mls.cat); + skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat; + skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl; + rc = count; } - spin_lock_bh(&skp->smk_cipsolock); - - if (scp == NULL) - scp = skp->smk_cipso; - else - skp->smk_cipso = scp; - - scp->smk_level = maplevel; - memcpy(scp->smk_catset, mapcatset, sizeof(mapcatset)); - - spin_unlock_bh(&skp->smk_cipsolock); - - rc = count; out: mutex_unlock(&smack_cipso_lock); unlockedout: @@ -639,6 +939,22 @@ unlockedout: return rc; } +/** + * smk_write_cipso - write() for /smack/cipso + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start + * + * Accepts only one cipso rule per write call. 
+ * Returns number of bytes written or error code, as appropriate + */ +static ssize_t smk_write_cipso(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return smk_set_cipso(file, buf, count, ppos, SMK_FIXED24_FMT); +} + static const struct file_operations smk_cipso_ops = { .open = smk_open_cipso, .read = seq_read, @@ -648,28 +964,91 @@ static const struct file_operations smk_cipso_ops = { }; /* + * Seq_file read operations for /smack/cipso2 + */ + +/* + * Print cipso labels in format: + * label level[/cat[,cat]] + */ +static int cipso2_seq_show(struct seq_file *s, void *v) +{ + struct list_head *list = v; + struct smack_known *skp = + list_entry(list, struct smack_known, list); + struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat; + char sep = '/'; + int i; + + seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl); + + for (i = netlbl_secattr_catmap_walk(cmp, 0); i >= 0; + i = netlbl_secattr_catmap_walk(cmp, i + 1)) { + seq_printf(s, "%c%d", sep, i); + sep = ','; + } + + seq_putc(s, '\n'); + + return 0; +} + +static const struct seq_operations cipso2_seq_ops = { + .start = cipso_seq_start, + .next = cipso_seq_next, + .show = cipso2_seq_show, + .stop = smk_seq_stop, +}; + +/** + * smk_open_cipso2 - open() for /smack/cipso2 + * @inode: inode structure representing file + * @file: "cipso2" file pointer + * + * Connect our cipso_seq_* operations with /smack/cipso2 + * file_operations + */ +static int smk_open_cipso2(struct inode *inode, struct file *file) +{ + return seq_open(file, &cipso2_seq_ops); +} + +/** + * smk_write_cipso2 - write() for /smack/cipso2 + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start + * + * Accepts only one cipso rule per write call. 
+ * Returns number of bytes written or error code, as appropriate + */ +static ssize_t smk_write_cipso2(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return smk_set_cipso(file, buf, count, ppos, SMK_LONG_FMT); +} + +static const struct file_operations smk_cipso2_ops = { + .open = smk_open_cipso2, + .read = seq_read, + .llseek = seq_lseek, + .write = smk_write_cipso2, + .release = seq_release, +}; + +/* * Seq_file read operations for /smack/netlabel */ static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos) { - if (*pos == SEQ_READ_FINISHED) - return NULL; - if (list_empty(&smk_netlbladdr_list)) - return NULL; - return smk_netlbladdr_list.next; + return smk_seq_start(s, pos, &smk_netlbladdr_list); } static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos) { - struct list_head *list = v; - - if (list_is_last(list, &smk_netlbladdr_list)) { - *pos = SEQ_READ_FINISHED; - return NULL; - } - - return list->next; + return smk_seq_next(s, v, pos, &smk_netlbladdr_list); } #define BEBITS (sizeof(__be32) * 8) @@ -693,16 +1072,11 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v) return 0; } -static void netlbladdr_seq_stop(struct seq_file *s, void *v) -{ - /* No-op */ -} - static const struct seq_operations netlbladdr_seq_ops = { .start = netlbladdr_seq_start, - .stop = netlbladdr_seq_stop, .next = netlbladdr_seq_next, .show = netlbladdr_seq_show, + .stop = smk_seq_stop, }; /** @@ -775,9 +1149,9 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf, { struct smk_netlbladdr *skp; struct sockaddr_in newname; - char smack[SMK_LABELLEN]; + char *smack; char *sp; - char data[SMK_NETLBLADDRMAX + 1]; + char *data; char *host = (char *)&newname.sin_addr.s_addr; int rc; struct netlbl_audit audit_info; @@ -795,40 +1169,63 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf, * "<addr/mask, as a.b.c.d/e><space><label>" * "<addr, as a.b.c.d><space><label>" */ - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; if (*ppos != 0) return -EINVAL; - if (count < SMK_NETLBLADDRMIN || count > SMK_NETLBLADDRMAX) + if (count < SMK_NETLBLADDRMIN) return -EINVAL; - if (copy_from_user(data, buf, count) != 0) - return -EFAULT; + + data = kzalloc(count + 1, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + if (copy_from_user(data, buf, count) != 0) { + rc = -EFAULT; + goto free_data_out; + } + + smack = kzalloc(count + 1, GFP_KERNEL); + if (smack == NULL) { + rc = -ENOMEM; + goto free_data_out; + } data[count] = '\0'; - rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%d %s", + rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%u %s", &host[0], &host[1], &host[2], &host[3], &m, smack); if (rc != 6) { rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd %s", &host[0], &host[1], &host[2], &host[3], smack); - if (rc != 5) - return -EINVAL; + if (rc != 5) { + rc = -EINVAL; + goto free_out; + } m = BEBITS; } - if (m > BEBITS) - return -EINVAL; + if (m > BEBITS) { + rc = -EINVAL; + goto free_out; + } - /* if smack begins with '-', its an option, don't import it */ + /* + * If smack begins with '-', it is an option, don't import it + */ if (smack[0] != '-') { sp = smk_import(smack, 0); - if (sp == NULL) - return -EINVAL; + if (sp == NULL) { + rc = -EINVAL; + goto free_out; + } } else { /* check known options */ if (strcmp(smack, smack_cipso_option) == 0) sp = (char *)smack_cipso_option; - else - return -EINVAL; + else { + rc = -EINVAL; + goto free_out; + } } for (temp_mask = 0; m > 0; m--) { @@ -869,7 
+1266,7 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf, } } else { /* we delete the unlabeled entry, only if the previous label - * wasnt the special CIPSO option */ + * wasn't the special CIPSO option */ if (skp->smk_label != smack_cipso_option) rc = netlbl_cfg_unlbl_static_del(&init_net, NULL, &skp->smk_host.sin_addr, &skp->smk_mask, @@ -894,6 +1291,11 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf, mutex_unlock(&smk_netlbladdr_lock); +free_out: + kfree(smack); +free_data_out: + kfree(data); + return rc; } @@ -944,7 +1346,7 @@ static ssize_t smk_write_doi(struct file *file, const char __user *buf, char temp[80]; int i; - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; if (count >= sizeof(temp) || count == 0) @@ -1007,10 +1409,11 @@ static ssize_t smk_read_direct(struct file *filp, char __user *buf, static ssize_t smk_write_direct(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { + struct smack_known *skp; char temp[80]; int i; - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; if (count >= sizeof(temp) || count == 0) @@ -1024,7 +1427,20 @@ static ssize_t smk_write_direct(struct file *file, const char __user *buf, if (sscanf(temp, "%d", &i) != 1) return -EINVAL; - smack_cipso_direct = i; + /* + * Don't do anything if the value hasn't actually changed. + * If it is changing reset the level on entries that were + * set up to be direct when they were created. + */ + if (smack_cipso_direct != i) { + mutex_lock(&smack_known_lock); + list_for_each_entry_rcu(skp, &smack_known_list, list) + if (skp->smk_netlabel.attr.mls.lvl == + smack_cipso_direct) + skp->smk_netlabel.attr.mls.lvl = i; + smack_cipso_direct = i; + mutex_unlock(&smack_known_lock); + } return count; } @@ -1036,6 +1452,84 @@ static const struct file_operations smk_direct_ops = { }; /** + * smk_read_mapped - read() for /smack/mapped + * @filp: file pointer, not actually used + * @buf: where to put the result + * @count: maximum to send along + * @ppos: where to start + * + * Returns number of bytes read or error code, as appropriate + */ +static ssize_t smk_read_mapped(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + char temp[80]; + ssize_t rc; + + if (*ppos != 0) + return 0; + + sprintf(temp, "%d", smack_cipso_mapped); + rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); + + return rc; +} + +/** + * smk_write_mapped - write() for /smack/mapped + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start + * + * Returns number of bytes written or error code, as appropriate + */ +static ssize_t smk_write_mapped(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct smack_known *skp; + char temp[80]; + int i; + + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + + if (count >= sizeof(temp) || count == 0) + return -EINVAL; + + if (copy_from_user(temp, buf, count) != 0) + return -EFAULT; + + temp[count] = '\0'; + + if (sscanf(temp, "%d", &i) != 1) + return -EINVAL; + + /* + * Don't do anything if the value hasn't actually changed. + * If it is changing reset the level on entries that were + * set up to be mapped when they were created. 
+ */ + if (smack_cipso_mapped != i) { + mutex_lock(&smack_known_lock); + list_for_each_entry_rcu(skp, &smack_known_list, list) + if (skp->smk_netlabel.attr.mls.lvl == + smack_cipso_mapped) + skp->smk_netlabel.attr.mls.lvl = i; + smack_cipso_mapped = i; + mutex_unlock(&smack_known_lock); + } + + return count; +} + +static const struct file_operations smk_mapped_ops = { + .read = smk_read_mapped, + .write = smk_write_mapped, + .llseek = default_llseek, +}; + +/** * smk_read_ambient - read() for /smack/ambient * @filp: file pointer, not actually used * @buf: where to put the result @@ -1058,11 +1552,12 @@ static ssize_t smk_read_ambient(struct file *filp, char __user *buf, */ mutex_lock(&smack_ambient_lock); - asize = strlen(smack_net_ambient) + 1; + asize = strlen(smack_net_ambient->smk_known) + 1; if (cn >= asize) rc = simple_read_from_buffer(buf, cn, ppos, - smack_net_ambient, asize); + smack_net_ambient->smk_known, + asize); else rc = -EINVAL; @@ -1083,32 +1578,40 @@ static ssize_t smk_read_ambient(struct file *filp, char __user *buf, static ssize_t smk_write_ambient(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - char in[SMK_LABELLEN]; + struct smack_known *skp; char *oldambient; - char *smack; + char *data; + int rc = count; - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; - if (count >= SMK_LABELLEN) - return -EINVAL; + data = kzalloc(count + 1, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; - if (copy_from_user(in, buf, count) != 0) - return -EFAULT; + if (copy_from_user(data, buf, count) != 0) { + rc = -EFAULT; + goto out; + } - smack = smk_import(in, count); - if (smack == NULL) - return -EINVAL; + skp = smk_import_entry(data, count); + if (skp == NULL) { + rc = -EINVAL; + goto out; + } mutex_lock(&smack_ambient_lock); - oldambient = smack_net_ambient; - smack_net_ambient = smack; + oldambient = smack_net_ambient->smk_known; + smack_net_ambient = skp; smk_unlbl_ambient(oldambient); mutex_unlock(&smack_ambient_lock); - return count; +out: + kfree(data); + return rc; } static const struct file_operations smk_ambient_ops = { @@ -1118,7 +1621,7 @@ static const struct file_operations smk_ambient_ops = { }; /** - * smk_read_onlycap - read() for /smack/onlycap + * smk_read_onlycap - read() for smackfs/onlycap * @filp: file pointer, not actually used * @buf: where to put the result * @cn: maximum to send along @@ -1137,7 +1640,7 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf, return 0; if (smack_onlycap != NULL) - smack = smack_onlycap; + smack = smack_onlycap->smk_known; asize = strlen(smack) + 1; @@ -1148,7 +1651,7 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf, } /** - * smk_write_onlycap - write() for /smack/onlycap + * smk_write_onlycap - write() for smackfs/onlycap * @file: file pointer, not actually used * @buf: where to get the data from * @count: bytes sent @@ -1159,10 +1662,11 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf, static ssize_t smk_write_onlycap(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - char in[SMK_LABELLEN]; - char *sp = current->cred->security; + char *data; + struct smack_known *skp = smk_of_task(current->cred->security); + int rc = count; - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; /* @@ -1170,14 +1674,12 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf, * explicitly for clarity. 
The smk_access() implementation * would use smk_access(smack_onlycap, MAY_WRITE) */ - if (smack_onlycap != NULL && smack_onlycap != sp) + if (smack_onlycap != NULL && smack_onlycap != skp) return -EPERM; - if (count >= SMK_LABELLEN) - return -EINVAL; - - if (copy_from_user(in, buf, count) != 0) - return -EFAULT; + data = kzalloc(count, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; /* * Should the null string be passed in unset the onlycap value. @@ -1185,10 +1687,17 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf, * smk_import only expects to return NULL for errors. It * is usually the case that a nullstring or "\n" would be * bad to pass to smk_import but in fact this is useful here. + * + * smk_import will also reject a label beginning with '-', + * so "-usecapabilities" will also work. */ - smack_onlycap = smk_import(in, count); + if (copy_from_user(data, buf, count) != 0) + rc = -EFAULT; + else + smack_onlycap = smk_import_entry(data, count); - return count; + kfree(data); + return rc; } static const struct file_operations smk_onlycap_ops = { @@ -1235,7 +1744,7 @@ static ssize_t smk_write_logging(struct file *file, const char __user *buf, char temp[32]; int i; - if (!capable(CAP_MAC_ADMIN)) + if (!smack_privileged(CAP_MAC_ADMIN)) return -EPERM; if (count >= sizeof(temp) || count == 0) @@ -1261,13 +1770,558 @@ static const struct file_operations smk_logging_ops = { .write = smk_write_logging, .llseek = default_llseek, }; + +/* + * Seq_file read operations for /smack/load-self + */ + +static void *load_self_seq_start(struct seq_file *s, loff_t *pos) +{ + struct task_smack *tsp = current_security(); + + return smk_seq_start(s, pos, &tsp->smk_rules); +} + +static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct task_smack *tsp = current_security(); + + return smk_seq_next(s, v, pos, &tsp->smk_rules); +} + +static int load_self_seq_show(struct seq_file *s, void *v) +{ + struct list_head *list = v; + struct smack_rule *srp = + list_entry(list, struct smack_rule, list); + + smk_rule_show(s, srp, SMK_LABELLEN); + + return 0; +} + +static const struct seq_operations load_self_seq_ops = { + .start = load_self_seq_start, + .next = load_self_seq_next, + .show = load_self_seq_show, + .stop = smk_seq_stop, +}; + + +/** + * smk_open_load_self - open() for /smack/load-self2 + * @inode: inode structure representing file + * @file: "load" file pointer + * + * For reading, use load_seq_* seq_file reading operations. 
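/*
 * Illustrative sketch, not part of the patch: as the comments in
 * smk_write_onlycap() above indicate, once /smack/onlycap holds a label only
 * a task running with that label may rewrite it, and writing an empty string,
 * "\n", or a '-'-prefixed string such as "-usecapabilities" clears the
 * restriction.  The helper name and the /smack mount point are assumptions of
 * this sketch.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int smack_set_onlycap(const char *label)
{
        /* writing "\n" (or an empty string) clears the onlycap label again */
        const char *p = (label && label[0] != '\0') ? label : "\n";
        size_t len = strlen(p);
        int fd = open("/smack/onlycap", O_WRONLY);
        int rc;

        if (fd < 0)
                return -1;
        rc = (write(fd, p, len) == (ssize_t)len) ? 0 : -1;
        close(fd);
        return rc;
}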
+ */ +static int smk_open_load_self(struct inode *inode, struct file *file) +{ + return seq_open(file, &load_self_seq_ops); +} + +/** + * smk_write_load_self - write() for /smack/load-self + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start - must be 0 + * + */ +static ssize_t smk_write_load_self(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct task_smack *tsp = current_security(); + + return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules, + &tsp->smk_rules_lock, SMK_FIXED24_FMT); +} + +static const struct file_operations smk_load_self_ops = { + .open = smk_open_load_self, + .read = seq_read, + .llseek = seq_lseek, + .write = smk_write_load_self, + .release = seq_release, +}; + +/** + * smk_user_access - handle access check transaction + * @file: file pointer + * @buf: data from user space + * @count: bytes sent + * @ppos: where to start - must be 0 + */ +static ssize_t smk_user_access(struct file *file, const char __user *buf, + size_t count, loff_t *ppos, int format) +{ + struct smack_parsed_rule rule; + char *data; + int res; + + data = simple_transaction_get(file, buf, count); + if (IS_ERR(data)) + return PTR_ERR(data); + + if (format == SMK_FIXED24_FMT) { + if (count < SMK_LOADLEN) + return -EINVAL; + res = smk_parse_rule(data, &rule, 0); + } else { + /* + * simple_transaction_get() returns null-terminated data + */ + res = smk_parse_long_rule(data, &rule, 0, 3); + } + + if (res >= 0) + res = smk_access(rule.smk_subject, rule.smk_object, + rule.smk_access1, NULL); + else if (res != -ENOENT) + return -EINVAL; + + data[0] = res == 0 ? '1' : '0'; + data[1] = '\0'; + + simple_transaction_set(file, 2); + + if (format == SMK_FIXED24_FMT) + return SMK_LOADLEN; + return count; +} + +/** + * smk_write_access - handle access check transaction + * @file: file pointer + * @buf: data from user space + * @count: bytes sent + * @ppos: where to start - must be 0 + */ +static ssize_t smk_write_access(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return smk_user_access(file, buf, count, ppos, SMK_FIXED24_FMT); +} + +static const struct file_operations smk_access_ops = { + .write = smk_write_access, + .read = simple_transaction_read, + .release = simple_transaction_release, + .llseek = generic_file_llseek, +}; + + +/* + * Seq_file read operations for /smack/load2 + */ + +static int load2_seq_show(struct seq_file *s, void *v) +{ + struct list_head *list = v; + struct smack_master_list *smlp = + list_entry(list, struct smack_master_list, list); + + smk_rule_show(s, smlp->smk_rule, SMK_LONGLABEL); + + return 0; +} + +static const struct seq_operations load2_seq_ops = { + .start = load2_seq_start, + .next = load2_seq_next, + .show = load2_seq_show, + .stop = smk_seq_stop, +}; + /** - * smk_fill_super - fill the /smackfs superblock + * smk_open_load2 - open() for /smack/load2 + * @inode: inode structure representing file + * @file: "load2" file pointer + * + * For reading, use load2_seq_* seq_file reading operations. 
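/*
 * Illustrative sketch, not part of the patch: smk_user_access() above backs a
 * simple_transaction file, so user space writes "<subject> <object> <access>"
 * and then reads a single '1' (allowed) or '0' (denied) back on the same
 * descriptor.  This sketch targets /smack/access2, which takes the long-label
 * format; the helper name and the /smack mount point are assumptions.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int smack_have_access(const char *subject, const char *object,
                             const char *access)
{
        char query[512];
        char result = '0';
        int len;
        int fd = open("/smack/access2", O_RDWR);

        if (fd < 0)
                return 0;
        len = snprintf(query, sizeof(query), "%s %s %s",
                       subject, object, access);
        if (write(fd, query, len) == len)
                (void)read(fd, &result, 1);     /* '1' = allowed, '0' = denied */
        close(fd);
        return result == '1';
}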
+ */ +static int smk_open_load2(struct inode *inode, struct file *file) +{ + return seq_open(file, &load2_seq_ops); +} + +/** + * smk_write_load2 - write() for /smack/load2 + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start - must be 0 + * + */ +static ssize_t smk_write_load2(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + /* + * Must have privilege. + */ + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + + return smk_write_rules_list(file, buf, count, ppos, NULL, NULL, + SMK_LONG_FMT); +} + +static const struct file_operations smk_load2_ops = { + .open = smk_open_load2, + .read = seq_read, + .llseek = seq_lseek, + .write = smk_write_load2, + .release = seq_release, +}; + +/* + * Seq_file read operations for /smack/load-self2 + */ + +static void *load_self2_seq_start(struct seq_file *s, loff_t *pos) +{ + struct task_smack *tsp = current_security(); + + return smk_seq_start(s, pos, &tsp->smk_rules); +} + +static void *load_self2_seq_next(struct seq_file *s, void *v, loff_t *pos) +{ + struct task_smack *tsp = current_security(); + + return smk_seq_next(s, v, pos, &tsp->smk_rules); +} + +static int load_self2_seq_show(struct seq_file *s, void *v) +{ + struct list_head *list = v; + struct smack_rule *srp = + list_entry(list, struct smack_rule, list); + + smk_rule_show(s, srp, SMK_LONGLABEL); + + return 0; +} + +static const struct seq_operations load_self2_seq_ops = { + .start = load_self2_seq_start, + .next = load_self2_seq_next, + .show = load_self2_seq_show, + .stop = smk_seq_stop, +}; + +/** + * smk_open_load_self2 - open() for /smack/load-self2 + * @inode: inode structure representing file + * @file: "load" file pointer + * + * For reading, use load_seq_* seq_file reading operations. 
+ */ +static int smk_open_load_self2(struct inode *inode, struct file *file) +{ + return seq_open(file, &load_self2_seq_ops); +} + +/** + * smk_write_load_self2 - write() for /smack/load-self2 + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start - must be 0 + * + */ +static ssize_t smk_write_load_self2(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + struct task_smack *tsp = current_security(); + + return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules, + &tsp->smk_rules_lock, SMK_LONG_FMT); +} + +static const struct file_operations smk_load_self2_ops = { + .open = smk_open_load_self2, + .read = seq_read, + .llseek = seq_lseek, + .write = smk_write_load_self2, + .release = seq_release, +}; + +/** + * smk_write_access2 - handle access check transaction + * @file: file pointer + * @buf: data from user space + * @count: bytes sent + * @ppos: where to start - must be 0 + */ +static ssize_t smk_write_access2(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + return smk_user_access(file, buf, count, ppos, SMK_LONG_FMT); +} + +static const struct file_operations smk_access2_ops = { + .write = smk_write_access2, + .read = simple_transaction_read, + .release = simple_transaction_release, + .llseek = generic_file_llseek, +}; + +/** + * smk_write_revoke_subj - write() for /smack/revoke-subject + * @file: file pointer + * @buf: data from user space + * @count: bytes sent + * @ppos: where to start - must be 0 + */ +static ssize_t smk_write_revoke_subj(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + char *data = NULL; + const char *cp = NULL; + struct smack_known *skp; + struct smack_rule *sp; + struct list_head *rule_list; + struct mutex *rule_lock; + int rc = count; + + if (*ppos != 0) + return -EINVAL; + + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + + if (count == 0 || count > SMK_LONGLABEL) + return -EINVAL; + + data = kzalloc(count, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + if (copy_from_user(data, buf, count) != 0) { + rc = -EFAULT; + goto free_out; + } + + cp = smk_parse_smack(data, count); + if (cp == NULL) { + rc = -EINVAL; + goto free_out; + } + + skp = smk_find_entry(cp); + if (skp == NULL) + goto free_out; + + rule_list = &skp->smk_rules; + rule_lock = &skp->smk_rules_lock; + + mutex_lock(rule_lock); + + list_for_each_entry_rcu(sp, rule_list, list) + sp->smk_access = 0; + + mutex_unlock(rule_lock); + +free_out: + kfree(data); + kfree(cp); + return rc; +} + +static const struct file_operations smk_revoke_subj_ops = { + .write = smk_write_revoke_subj, + .read = simple_transaction_read, + .release = simple_transaction_release, + .llseek = generic_file_llseek, +}; + +static struct kset *smackfs_kset; +/** + * smk_init_sysfs - initialize /sys/fs/smackfs + * + */ +static int smk_init_sysfs(void) +{ + smackfs_kset = kset_create_and_add("smackfs", NULL, fs_kobj); + if (!smackfs_kset) + return -ENOMEM; + return 0; +} + +/** + * smk_write_change_rule - write() for /smack/change-rule + * @file: file pointer + * @buf: data from user space + * @count: bytes sent + * @ppos: where to start - must be 0 + */ +static ssize_t smk_write_change_rule(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + /* + * Must have privilege. 
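/*
 * Illustrative sketch, not part of the patch: /smack/load2 accepts long-format
 * rules of the form "<subject> <object> <access>", while /smack/revoke-subject
 * (smk_write_revoke_subj() above) clears the access bits of every loaded rule
 * whose subject matches the written label.  Both writes require
 * CAP_MAC_ADMIN; the paths and helper names below are assumptions of the
 * sketch.
 */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int smack_write_string(const char *path, const char *line)
{
        int fd = open(path, O_WRONLY);
        ssize_t len = strlen(line);
        int rc;

        if (fd < 0)
                return -1;
        rc = (write(fd, line, len) == len) ? 0 : -1;
        close(fd);
        return rc;
}

static void smack_rule_example(void)
{
        /* grant subject label "Pet" read/write access to objects labelled "Bone" */
        smack_write_string("/smack/load2", "Pet Bone rw");
        /* later, zero out every rule that has "Pet" as its subject label */
        smack_write_string("/smack/revoke-subject", "Pet");
}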
+ */ + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + + return smk_write_rules_list(file, buf, count, ppos, NULL, NULL, + SMK_CHANGE_FMT); +} + +static const struct file_operations smk_change_rule_ops = { + .write = smk_write_change_rule, + .read = simple_transaction_read, + .release = simple_transaction_release, + .llseek = generic_file_llseek, +}; + +/** + * smk_read_syslog - read() for smackfs/syslog + * @filp: file pointer, not actually used + * @buf: where to put the result + * @cn: maximum to send along + * @ppos: where to start + * + * Returns number of bytes read or error code, as appropriate + */ +static ssize_t smk_read_syslog(struct file *filp, char __user *buf, + size_t cn, loff_t *ppos) +{ + struct smack_known *skp; + ssize_t rc = -EINVAL; + int asize; + + if (*ppos != 0) + return 0; + + if (smack_syslog_label == NULL) + skp = &smack_known_star; + else + skp = smack_syslog_label; + + asize = strlen(skp->smk_known) + 1; + + if (cn >= asize) + rc = simple_read_from_buffer(buf, cn, ppos, skp->smk_known, + asize); + + return rc; +} + +/** + * smk_write_syslog - write() for smackfs/syslog + * @file: file pointer, not actually used + * @buf: where to get the data from + * @count: bytes sent + * @ppos: where to start + * + * Returns number of bytes written or error code, as appropriate + */ +static ssize_t smk_write_syslog(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + char *data; + struct smack_known *skp; + int rc = count; + + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + + data = kzalloc(count, GFP_KERNEL); + if (data == NULL) + return -ENOMEM; + + if (copy_from_user(data, buf, count) != 0) + rc = -EFAULT; + else { + skp = smk_import_entry(data, count); + if (skp == NULL) + rc = -EINVAL; + else + smack_syslog_label = smk_import_entry(data, count); + } + + kfree(data); + return rc; +} + +static const struct file_operations smk_syslog_ops = { + .read = smk_read_syslog, + .write = smk_write_syslog, + .llseek = default_llseek, +}; + + +/** + * smk_read_ptrace - read() for /smack/ptrace + * @filp: file pointer, not actually used + * @buf: where to put the result + * @count: maximum to send along + * @ppos: where to start + * + * Returns number of bytes read or error code, as appropriate + */ +static ssize_t smk_read_ptrace(struct file *filp, char __user *buf, + size_t count, loff_t *ppos) +{ + char temp[32]; + ssize_t rc; + + if (*ppos != 0) + return 0; + + sprintf(temp, "%d\n", smack_ptrace_rule); + rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp)); + return rc; +} + +/** + * smk_write_ptrace - write() for /smack/ptrace + * @file: file pointer + * @buf: data from user space + * @count: bytes sent + * @ppos: where to start - must be 0 + */ +static ssize_t smk_write_ptrace(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + char temp[32]; + int i; + + if (!smack_privileged(CAP_MAC_ADMIN)) + return -EPERM; + + if (*ppos != 0 || count >= sizeof(temp) || count == 0) + return -EINVAL; + + if (copy_from_user(temp, buf, count) != 0) + return -EFAULT; + + temp[count] = '\0'; + + if (sscanf(temp, "%d", &i) != 1) + return -EINVAL; + if (i < SMACK_PTRACE_DEFAULT || i > SMACK_PTRACE_MAX) + return -EINVAL; + smack_ptrace_rule = i; + + return count; +} + +static const struct file_operations smk_ptrace_ops = { + .write = smk_write_ptrace, + .read = smk_read_ptrace, + .llseek = default_llseek, +}; + +/** + * smk_fill_super - fill the smackfs superblock * @sb: the empty superblock * @data: unused * @silent: 
unused * - * Fill in the well known entries for /smack + * Fill in the well known entries for the smack filesystem * * Returns 0 on success, an error code on failure */ @@ -1277,23 +2331,47 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent) struct inode *root_inode; static struct tree_descr smack_files[] = { - [SMK_LOAD] = - {"load", &smk_load_ops, S_IRUGO|S_IWUSR}, - [SMK_CIPSO] = - {"cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR}, - [SMK_DOI] = - {"doi", &smk_doi_ops, S_IRUGO|S_IWUSR}, - [SMK_DIRECT] = - {"direct", &smk_direct_ops, S_IRUGO|S_IWUSR}, - [SMK_AMBIENT] = - {"ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR}, - [SMK_NETLBLADDR] = - {"netlabel", &smk_netlbladdr_ops, S_IRUGO|S_IWUSR}, - [SMK_ONLYCAP] = - {"onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR}, - [SMK_LOGGING] = - {"logging", &smk_logging_ops, S_IRUGO|S_IWUSR}, - /* last one */ {""} + [SMK_LOAD] = { + "load", &smk_load_ops, S_IRUGO|S_IWUSR}, + [SMK_CIPSO] = { + "cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR}, + [SMK_DOI] = { + "doi", &smk_doi_ops, S_IRUGO|S_IWUSR}, + [SMK_DIRECT] = { + "direct", &smk_direct_ops, S_IRUGO|S_IWUSR}, + [SMK_AMBIENT] = { + "ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR}, + [SMK_NETLBLADDR] = { + "netlabel", &smk_netlbladdr_ops, S_IRUGO|S_IWUSR}, + [SMK_ONLYCAP] = { + "onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR}, + [SMK_LOGGING] = { + "logging", &smk_logging_ops, S_IRUGO|S_IWUSR}, + [SMK_LOAD_SELF] = { + "load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO}, + [SMK_ACCESSES] = { + "access", &smk_access_ops, S_IRUGO|S_IWUGO}, + [SMK_MAPPED] = { + "mapped", &smk_mapped_ops, S_IRUGO|S_IWUSR}, + [SMK_LOAD2] = { + "load2", &smk_load2_ops, S_IRUGO|S_IWUSR}, + [SMK_LOAD_SELF2] = { + "load-self2", &smk_load_self2_ops, S_IRUGO|S_IWUGO}, + [SMK_ACCESS2] = { + "access2", &smk_access2_ops, S_IRUGO|S_IWUGO}, + [SMK_CIPSO2] = { + "cipso2", &smk_cipso2_ops, S_IRUGO|S_IWUSR}, + [SMK_REVOKE_SUBJ] = { + "revoke-subject", &smk_revoke_subj_ops, + S_IRUGO|S_IWUSR}, + [SMK_CHANGE_RULE] = { + "change-rule", &smk_change_rule_ops, S_IRUGO|S_IWUSR}, + [SMK_SYSLOG] = { + "syslog", &smk_syslog_ops, S_IRUGO|S_IWUSR}, + [SMK_PTRACE] = { + "ptrace", &smk_ptrace_ops, S_IRUGO|S_IWUSR}, + /* last one */ + {""} }; rc = simple_fill_super(sb, SMACK_MAGIC, smack_files); @@ -1304,7 +2382,6 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent) } root_inode = sb->s_root->d_inode; - root_inode->i_security = new_inode_smack(smack_known_floor.smk_known); return 0; } @@ -1334,6 +2411,15 @@ static struct file_system_type smk_fs_type = { static struct vfsmount *smackfs_mount; +static int __init smk_preset_netlabel(struct smack_known *skp) +{ + skp->smk_netlabel.domain = skp->smk_known; + skp->smk_netlabel.flags = + NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL; + return smk_netlbl_mls(smack_cipso_direct, skp->smk_known, + &skp->smk_netlabel, strlen(skp->smk_known)); +} + /** * init_smk_fs - get the smackfs superblock * @@ -1350,10 +2436,15 @@ static struct vfsmount *smackfs_mount; static int __init init_smk_fs(void) { int err; + int rc; if (!security_module_enable(&smack_ops)) return 0; + err = smk_init_sysfs(); + if (err) + printk(KERN_ERR "smackfs: sysfs mountpoint problem.\n"); + err = register_filesystem(&smk_fs_type); if (!err) { smackfs_mount = kern_mount(&smk_fs_type); @@ -1367,6 +2458,25 @@ static int __init init_smk_fs(void) smk_cipso_doi(); smk_unlbl_ambient(NULL); + rc = smk_preset_netlabel(&smack_known_floor); + if (err == 0 && rc < 0) + err = rc; + rc = smk_preset_netlabel(&smack_known_hat); + if 
(err == 0 && rc < 0) + err = rc; + rc = smk_preset_netlabel(&smack_known_huh); + if (err == 0 && rc < 0) + err = rc; + rc = smk_preset_netlabel(&smack_known_invalid); + if (err == 0 && rc < 0) + err = rc; + rc = smk_preset_netlabel(&smack_known_star); + if (err == 0 && rc < 0) + err = rc; + rc = smk_preset_netlabel(&smack_known_web); + if (err == 0 && rc < 0) + err = rc; + return err; } diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore new file mode 100644 index 00000000000..5caf1a6f590 --- /dev/null +++ b/security/tomoyo/.gitignore @@ -0,0 +1,2 @@ +builtin-policy.h +policy/ diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig index c8f38579323..8eb779b9d77 100644 --- a/security/tomoyo/Kconfig +++ b/security/tomoyo/Kconfig @@ -1,11 +1,74 @@ config SECURITY_TOMOYO bool "TOMOYO Linux Support" depends on SECURITY + depends on NET select SECURITYFS select SECURITY_PATH + select SECURITY_NETWORK default n help This selects TOMOYO Linux, pathname-based access control. Required userspace tools and further information may be found at <http://tomoyo.sourceforge.jp/>. If you are unsure how to answer this question, answer N. + +config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY + int "Default maximal count for learning mode" + default 2048 + range 0 2147483647 + depends on SECURITY_TOMOYO + help + This is the default value for maximal ACL entries + that are automatically appended into policy at "learning mode". + Some programs access thousands of objects, so running + such programs in "learning mode" dulls the system response + and consumes much memory. + This is the safeguard for such programs. + +config SECURITY_TOMOYO_MAX_AUDIT_LOG + int "Default maximal count for audit log" + default 1024 + range 0 2147483647 + depends on SECURITY_TOMOYO + help + This is the default value for maximal entries for + audit logs that the kernel can hold on memory. + You can read the log via /sys/kernel/security/tomoyo/audit. + If you don't need audit logs, you may set this value to 0. + +config SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + bool "Activate without calling userspace policy loader." + default n + depends on SECURITY_TOMOYO + ---help--- + Say Y here if you want to activate access control as soon as built-in + policy was loaded. This option will be useful for systems where + operations which can lead to the hijacking of the boot sequence are + needed before loading the policy. For example, you can activate + immediately after loading the fixed part of policy which will allow + only operations needed for mounting a partition which contains the + variant part of policy and verifying (e.g. running GPG check) and + loading the variant part of policy. Since you can start using + enforcing mode from the beginning, you can reduce the possibility of + hijacking the boot sequence. + +config SECURITY_TOMOYO_POLICY_LOADER + string "Location of userspace policy loader" + default "/sbin/tomoyo-init" + depends on SECURITY_TOMOYO + depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + ---help--- + This is the default pathname of policy loader which is called before + activation. You can override this setting via TOMOYO_loader= kernel + command line option. + +config SECURITY_TOMOYO_ACTIVATION_TRIGGER + string "Trigger for calling userspace policy loader" + default "/sbin/init" + depends on SECURITY_TOMOYO + depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + ---help--- + This is the default pathname of activation trigger. + You can override this setting via TOMOYO_trigger= kernel command line + option. 
For example, if you pass init=/bin/systemd option, you may + want to also pass TOMOYO_trigger=/bin/systemd option. diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile index 91640e96bd0..56a0c7be409 100644 --- a/security/tomoyo/Makefile +++ b/security/tomoyo/Makefile @@ -1 +1,48 @@ -obj-y = common.o domain.o file.o gc.o group.o load_policy.o memory.o mount.o realpath.o securityfs_if.o tomoyo.o util.o +obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o + +$(obj)/policy/profile.conf: + @mkdir -p $(obj)/policy/ + @echo Creating an empty policy/profile.conf + @touch $@ + +$(obj)/policy/exception_policy.conf: + @mkdir -p $(obj)/policy/ + @echo Creating a default policy/exception_policy.conf + @echo initialize_domain /sbin/modprobe from any >> $@ + @echo initialize_domain /sbin/hotplug from any >> $@ + +$(obj)/policy/domain_policy.conf: + @mkdir -p $(obj)/policy/ + @echo Creating an empty policy/domain_policy.conf + @touch $@ + +$(obj)/policy/manager.conf: + @mkdir -p $(obj)/policy/ + @echo Creating an empty policy/manager.conf + @touch $@ + +$(obj)/policy/stat.conf: + @mkdir -p $(obj)/policy/ + @echo Creating an empty policy/stat.conf + @touch $@ + +$(obj)/builtin-policy.h: $(obj)/policy/profile.conf $(obj)/policy/exception_policy.conf $(obj)/policy/domain_policy.conf $(obj)/policy/manager.conf $(obj)/policy/stat.conf + @echo Generating built-in policy for TOMOYO 2.5.x. + @echo "static char tomoyo_builtin_profile[] __initdata =" > $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/profile.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @echo "static char tomoyo_builtin_exception_policy[] __initdata =" >> $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/exception_policy.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @echo "static char tomoyo_builtin_domain_policy[] __initdata =" >> $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/domain_policy.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @echo "static char tomoyo_builtin_manager[] __initdata =" >> $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/manager.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @echo "static char tomoyo_builtin_stat[] __initdata =" >> $@.tmp + @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/stat.conf >> $@.tmp + @echo "\"\";" >> $@.tmp + @mv $@.tmp $@ + +$(obj)/common.o: $(obj)/builtin-policy.h diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c new file mode 100644 index 00000000000..c1b00375c9a --- /dev/null +++ b/security/tomoyo/audit.c @@ -0,0 +1,470 @@ +/* + * security/tomoyo/audit.c + * + * Copyright (C) 2005-2011 NTT DATA CORPORATION + */ + +#include "common.h" +#include <linux/slab.h> + +/** + * tomoyo_print_bprm - Print "struct linux_binprm" for auditing. + * + * @bprm: Pointer to "struct linux_binprm". + * @dump: Pointer to "struct tomoyo_page_dump". + * + * Returns the contents of @bprm on success, NULL otherwise. + * + * This function uses kzalloc(), so caller must kfree() if this function + * didn't return NULL. 
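/*
 * Illustrative reconstruction, not part of the patch: the sed rules in the
 * TOMOYO Makefile above convert each policy/*.conf line into a quoted C
 * string literal.  For the default exception_policy.conf that the Makefile
 * itself generates, the corresponding fragment of builtin-policy.h would come
 * out roughly as:
 */
static char tomoyo_builtin_exception_policy[] __initdata =
"initialize_domain /sbin/modprobe from any\n"
"initialize_domain /sbin/hotplug from any\n"
"";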
+ */ +static char *tomoyo_print_bprm(struct linux_binprm *bprm, + struct tomoyo_page_dump *dump) +{ + static const int tomoyo_buffer_len = 4096 * 2; + char *buffer = kzalloc(tomoyo_buffer_len, GFP_NOFS); + char *cp; + char *last_start; + int len; + unsigned long pos = bprm->p; + int offset = pos % PAGE_SIZE; + int argv_count = bprm->argc; + int envp_count = bprm->envc; + bool truncated = false; + if (!buffer) + return NULL; + len = snprintf(buffer, tomoyo_buffer_len - 1, "argv[]={ "); + cp = buffer + len; + if (!argv_count) { + memmove(cp, "} envp[]={ ", 11); + cp += 11; + } + last_start = cp; + while (argv_count || envp_count) { + if (!tomoyo_dump_page(bprm, pos, dump)) + goto out; + pos += PAGE_SIZE - offset; + /* Read. */ + while (offset < PAGE_SIZE) { + const char *kaddr = dump->data; + const unsigned char c = kaddr[offset++]; + if (cp == last_start) + *cp++ = '"'; + if (cp >= buffer + tomoyo_buffer_len - 32) { + /* Reserve some room for "..." string. */ + truncated = true; + } else if (c == '\\') { + *cp++ = '\\'; + *cp++ = '\\'; + } else if (c > ' ' && c < 127) { + *cp++ = c; + } else if (!c) { + *cp++ = '"'; + *cp++ = ' '; + last_start = cp; + } else { + *cp++ = '\\'; + *cp++ = (c >> 6) + '0'; + *cp++ = ((c >> 3) & 7) + '0'; + *cp++ = (c & 7) + '0'; + } + if (c) + continue; + if (argv_count) { + if (--argv_count == 0) { + if (truncated) { + cp = last_start; + memmove(cp, "... ", 4); + cp += 4; + } + memmove(cp, "} envp[]={ ", 11); + cp += 11; + last_start = cp; + truncated = false; + } + } else if (envp_count) { + if (--envp_count == 0) { + if (truncated) { + cp = last_start; + memmove(cp, "... ", 4); + cp += 4; + } + } + } + if (!argv_count && !envp_count) + break; + } + offset = 0; + } + *cp++ = '}'; + *cp = '\0'; + return buffer; +out: + snprintf(buffer, tomoyo_buffer_len - 1, + "argv[]={ ... } envp[]= { ... }"); + return buffer; +} + +/** + * tomoyo_filetype - Get string representation of file type. + * + * @mode: Mode value for stat(). + * + * Returns file type string. + */ +static inline const char *tomoyo_filetype(const umode_t mode) +{ + switch (mode & S_IFMT) { + case S_IFREG: + case 0: + return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FILE]; + case S_IFDIR: + return tomoyo_condition_keyword[TOMOYO_TYPE_IS_DIRECTORY]; + case S_IFLNK: + return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SYMLINK]; + case S_IFIFO: + return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FIFO]; + case S_IFSOCK: + return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SOCKET]; + case S_IFBLK: + return tomoyo_condition_keyword[TOMOYO_TYPE_IS_BLOCK_DEV]; + case S_IFCHR: + return tomoyo_condition_keyword[TOMOYO_TYPE_IS_CHAR_DEV]; + } + return "unknown"; /* This should not happen. */ +} + +/** + * tomoyo_print_header - Get header line of audit log. + * + * @r: Pointer to "struct tomoyo_request_info". + * + * Returns string representation. + * + * This function uses kmalloc(), so caller must kfree() if this function + * didn't return NULL. 
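/*
 * Illustrative value, not part of the patch: for argv = {"ls", "-l"} and
 * envp = {"PATH=/bin"}, the escaping loop in tomoyo_print_bprm() above builds
 * the following audit fragment (non-printable bytes would instead be emitted
 * as \ooo octal escapes, and over-long argument lists are truncated with
 * "... "):
 */
static const char tomoyo_example_bprm_line[] =
        "argv[]={ \"ls\" \"-l\" } envp[]={ \"PATH=/bin\" }";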
+ */ +static char *tomoyo_print_header(struct tomoyo_request_info *r) +{ + struct tomoyo_time stamp; + const pid_t gpid = task_pid_nr(current); + struct tomoyo_obj_info *obj = r->obj; + static const int tomoyo_buffer_len = 4096; + char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); + int pos; + u8 i; + if (!buffer) + return NULL; + { + struct timeval tv; + do_gettimeofday(&tv); + tomoyo_convert_time(tv.tv_sec, &stamp); + } + pos = snprintf(buffer, tomoyo_buffer_len - 1, + "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s " + "granted=%s (global-pid=%u) task={ pid=%u ppid=%u " + "uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u " + "fsuid=%u fsgid=%u }", stamp.year, stamp.month, + stamp.day, stamp.hour, stamp.min, stamp.sec, r->profile, + tomoyo_mode[r->mode], tomoyo_yesno(r->granted), gpid, + tomoyo_sys_getpid(), tomoyo_sys_getppid(), + from_kuid(&init_user_ns, current_uid()), + from_kgid(&init_user_ns, current_gid()), + from_kuid(&init_user_ns, current_euid()), + from_kgid(&init_user_ns, current_egid()), + from_kuid(&init_user_ns, current_suid()), + from_kgid(&init_user_ns, current_sgid()), + from_kuid(&init_user_ns, current_fsuid()), + from_kgid(&init_user_ns, current_fsgid())); + if (!obj) + goto no_obj_info; + if (!obj->validate_done) { + tomoyo_get_attributes(obj); + obj->validate_done = true; + } + for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) { + struct tomoyo_mini_stat *stat; + unsigned int dev; + umode_t mode; + if (!obj->stat_valid[i]) + continue; + stat = &obj->stat[i]; + dev = stat->dev; + mode = stat->mode; + if (i & 1) { + pos += snprintf(buffer + pos, + tomoyo_buffer_len - 1 - pos, + " path%u.parent={ uid=%u gid=%u " + "ino=%lu perm=0%o }", (i >> 1) + 1, + from_kuid(&init_user_ns, stat->uid), + from_kgid(&init_user_ns, stat->gid), + (unsigned long)stat->ino, + stat->mode & S_IALLUGO); + continue; + } + pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos, + " path%u={ uid=%u gid=%u ino=%lu major=%u" + " minor=%u perm=0%o type=%s", (i >> 1) + 1, + from_kuid(&init_user_ns, stat->uid), + from_kgid(&init_user_ns, stat->gid), + (unsigned long)stat->ino, + MAJOR(dev), MINOR(dev), + mode & S_IALLUGO, tomoyo_filetype(mode)); + if (S_ISCHR(mode) || S_ISBLK(mode)) { + dev = stat->rdev; + pos += snprintf(buffer + pos, + tomoyo_buffer_len - 1 - pos, + " dev_major=%u dev_minor=%u", + MAJOR(dev), MINOR(dev)); + } + pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos, + " }"); + } +no_obj_info: + if (pos < tomoyo_buffer_len - 1) + return buffer; + kfree(buffer); + return NULL; +} + +/** + * tomoyo_init_log - Allocate buffer for audit logs. + * + * @r: Pointer to "struct tomoyo_request_info". + * @len: Buffer size needed for @fmt and @args. + * @fmt: The printf()'s format string. + * @args: va_list structure for @fmt. + * + * Returns pointer to allocated memory. + * + * This function uses kzalloc(), so caller must kfree() if this function + * didn't return NULL. + */ +char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args) +{ + char *buf = NULL; + char *bprm_info = NULL; + const char *header = NULL; + char *realpath = NULL; + const char *symlink = NULL; + int pos; + const char *domainname = r->domain->domainname->name; + header = tomoyo_print_header(r); + if (!header) + return NULL; + /* +10 is for '\n' etc. and '\0'. 
*/ + len += strlen(domainname) + strlen(header) + 10; + if (r->ee) { + struct file *file = r->ee->bprm->file; + realpath = tomoyo_realpath_from_path(&file->f_path); + bprm_info = tomoyo_print_bprm(r->ee->bprm, &r->ee->dump); + if (!realpath || !bprm_info) + goto out; + /* +80 is for " exec={ realpath=\"%s\" argc=%d envc=%d %s }" */ + len += strlen(realpath) + 80 + strlen(bprm_info); + } else if (r->obj && r->obj->symlink_target) { + symlink = r->obj->symlink_target->name; + /* +18 is for " symlink.target=\"%s\"" */ + len += 18 + strlen(symlink); + } + len = tomoyo_round2(len); + buf = kzalloc(len, GFP_NOFS); + if (!buf) + goto out; + len--; + pos = snprintf(buf, len, "%s", header); + if (realpath) { + struct linux_binprm *bprm = r->ee->bprm; + pos += snprintf(buf + pos, len - pos, + " exec={ realpath=\"%s\" argc=%d envc=%d %s }", + realpath, bprm->argc, bprm->envc, bprm_info); + } else if (symlink) + pos += snprintf(buf + pos, len - pos, " symlink.target=\"%s\"", + symlink); + pos += snprintf(buf + pos, len - pos, "\n%s\n", domainname); + vsnprintf(buf + pos, len - pos, fmt, args); +out: + kfree(realpath); + kfree(bprm_info); + kfree(header); + return buf; +} + +/* Wait queue for /sys/kernel/security/tomoyo/audit. */ +static DECLARE_WAIT_QUEUE_HEAD(tomoyo_log_wait); + +/* Structure for audit log. */ +struct tomoyo_log { + struct list_head list; + char *log; + int size; +}; + +/* The list for "struct tomoyo_log". */ +static LIST_HEAD(tomoyo_log); + +/* Lock for "struct list_head tomoyo_log". */ +static DEFINE_SPINLOCK(tomoyo_log_lock); + +/* Length of "stuct list_head tomoyo_log". */ +static unsigned int tomoyo_log_count; + +/** + * tomoyo_get_audit - Get audit mode. + * + * @ns: Pointer to "struct tomoyo_policy_namespace". + * @profile: Profile number. + * @index: Index number of functionality. + * @is_granted: True if granted log, false otherwise. + * + * Returns true if this request should be audited, false otherwise. + */ +static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns, + const u8 profile, const u8 index, + const struct tomoyo_acl_info *matched_acl, + const bool is_granted) +{ + u8 mode; + const u8 category = tomoyo_index2category[index] + + TOMOYO_MAX_MAC_INDEX; + struct tomoyo_profile *p; + if (!tomoyo_policy_loaded) + return false; + p = tomoyo_profile(ns, profile); + if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG]) + return false; + if (is_granted && matched_acl && matched_acl->cond && + matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO) + return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES; + mode = p->config[index]; + if (mode == TOMOYO_CONFIG_USE_DEFAULT) + mode = p->config[category]; + if (mode == TOMOYO_CONFIG_USE_DEFAULT) + mode = p->default_config; + if (is_granted) + return mode & TOMOYO_CONFIG_WANT_GRANT_LOG; + return mode & TOMOYO_CONFIG_WANT_REJECT_LOG; +} + +/** + * tomoyo_write_log2 - Write an audit log. + * + * @r: Pointer to "struct tomoyo_request_info". + * @len: Buffer size needed for @fmt and @args. + * @fmt: The printf()'s format string. + * @args: va_list structure for @fmt. + * + * Returns nothing. 
+ */ +void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args) +{ + char *buf; + struct tomoyo_log *entry; + bool quota_exceeded = false; + if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type, + r->matched_acl, r->granted)) + goto out; + buf = tomoyo_init_log(r, len, fmt, args); + if (!buf) + goto out; + entry = kzalloc(sizeof(*entry), GFP_NOFS); + if (!entry) { + kfree(buf); + goto out; + } + entry->log = buf; + len = tomoyo_round2(strlen(buf) + 1); + /* + * The entry->size is used for memory quota checks. + * Don't go beyond strlen(entry->log). + */ + entry->size = len + tomoyo_round2(sizeof(*entry)); + spin_lock(&tomoyo_log_lock); + if (tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT] && + tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] + entry->size >= + tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT]) { + quota_exceeded = true; + } else { + tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] += entry->size; + list_add_tail(&entry->list, &tomoyo_log); + tomoyo_log_count++; + } + spin_unlock(&tomoyo_log_lock); + if (quota_exceeded) { + kfree(buf); + kfree(entry); + goto out; + } + wake_up(&tomoyo_log_wait); +out: + return; +} + +/** + * tomoyo_write_log - Write an audit log. + * + * @r: Pointer to "struct tomoyo_request_info". + * @fmt: The printf()'s format string, followed by parameters. + * + * Returns nothing. + */ +void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...) +{ + va_list args; + int len; + va_start(args, fmt); + len = vsnprintf((char *) &len, 1, fmt, args) + 1; + va_end(args); + va_start(args, fmt); + tomoyo_write_log2(r, len, fmt, args); + va_end(args); +} + +/** + * tomoyo_read_log - Read an audit log. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ +void tomoyo_read_log(struct tomoyo_io_buffer *head) +{ + struct tomoyo_log *ptr = NULL; + if (head->r.w_pos) + return; + kfree(head->read_buf); + head->read_buf = NULL; + spin_lock(&tomoyo_log_lock); + if (!list_empty(&tomoyo_log)) { + ptr = list_entry(tomoyo_log.next, typeof(*ptr), list); + list_del(&ptr->list); + tomoyo_log_count--; + tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] -= ptr->size; + } + spin_unlock(&tomoyo_log_lock); + if (ptr) { + head->read_buf = ptr->log; + head->r.w[head->r.w_pos++] = head->read_buf; + kfree(ptr); + } +} + +/** + * tomoyo_poll_log - Wait for an audit log. + * + * @file: Pointer to "struct file". + * @wait: Pointer to "poll_table". Maybe NULL. + * + * Returns POLLIN | POLLRDNORM when ready to read an audit log. + */ +unsigned int tomoyo_poll_log(struct file *file, poll_table *wait) +{ + if (tomoyo_log_count) + return POLLIN | POLLRDNORM; + poll_wait(file, &tomoyo_log_wait, wait); + if (tomoyo_log_count) + return POLLIN | POLLRDNORM; + return 0; +} diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c index 7556315c197..283862aebdc 100644 --- a/security/tomoyo/common.c +++ b/security/tomoyo/common.c @@ -1,9 +1,7 @@ /* * security/tomoyo/common.c * - * Common functions for TOMOYO. 
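/*
 * Illustrative sketch, not part of the patch: tomoyo_poll_log() and
 * tomoyo_read_log() above let a logging daemon sleep until a record is queued
 * and then fetch one record per read of /sys/kernel/security/tomoyo/audit,
 * the path named in the Kconfig help text earlier in this patch.  A minimal
 * reader might look like this; it is a sketch, not the reference tooling.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        char buf[8192];
        int fd = open("/sys/kernel/security/tomoyo/audit", O_RDONLY);

        if (fd < 0)
                return 1;
        for (;;) {
                struct pollfd pfd = { .fd = fd, .events = POLLIN };
                ssize_t len;

                poll(&pfd, 1, -1);              /* sleep until a record is queued */
                len = read(fd, buf, sizeof(buf) - 1);
                if (len <= 0)
                        continue;
                buf[len] = '\0';
                fputs(buf, stdout);             /* one audit record per read */
        }
}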
- * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/uaccess.h> @@ -11,54 +9,163 @@ #include <linux/security.h> #include "common.h" -static struct tomoyo_profile tomoyo_default_profile = { - .learning = &tomoyo_default_profile.preference, - .permissive = &tomoyo_default_profile.preference, - .enforcing = &tomoyo_default_profile.preference, - .preference.enforcing_verbose = true, - .preference.learning_max_entry = 2048, - .preference.learning_verbose = false, - .preference.permissive_verbose = true +/* String table for operation mode. */ +const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = { + [TOMOYO_CONFIG_DISABLED] = "disabled", + [TOMOYO_CONFIG_LEARNING] = "learning", + [TOMOYO_CONFIG_PERMISSIVE] = "permissive", + [TOMOYO_CONFIG_ENFORCING] = "enforcing" }; -/* Profile version. Currently only 20090903 is defined. */ -static unsigned int tomoyo_profile_version; +/* String table for /sys/kernel/security/tomoyo/profile */ +const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX + + TOMOYO_MAX_MAC_CATEGORY_INDEX] = { + /* CONFIG::file group */ + [TOMOYO_MAC_FILE_EXECUTE] = "execute", + [TOMOYO_MAC_FILE_OPEN] = "open", + [TOMOYO_MAC_FILE_CREATE] = "create", + [TOMOYO_MAC_FILE_UNLINK] = "unlink", + [TOMOYO_MAC_FILE_GETATTR] = "getattr", + [TOMOYO_MAC_FILE_MKDIR] = "mkdir", + [TOMOYO_MAC_FILE_RMDIR] = "rmdir", + [TOMOYO_MAC_FILE_MKFIFO] = "mkfifo", + [TOMOYO_MAC_FILE_MKSOCK] = "mksock", + [TOMOYO_MAC_FILE_TRUNCATE] = "truncate", + [TOMOYO_MAC_FILE_SYMLINK] = "symlink", + [TOMOYO_MAC_FILE_MKBLOCK] = "mkblock", + [TOMOYO_MAC_FILE_MKCHAR] = "mkchar", + [TOMOYO_MAC_FILE_LINK] = "link", + [TOMOYO_MAC_FILE_RENAME] = "rename", + [TOMOYO_MAC_FILE_CHMOD] = "chmod", + [TOMOYO_MAC_FILE_CHOWN] = "chown", + [TOMOYO_MAC_FILE_CHGRP] = "chgrp", + [TOMOYO_MAC_FILE_IOCTL] = "ioctl", + [TOMOYO_MAC_FILE_CHROOT] = "chroot", + [TOMOYO_MAC_FILE_MOUNT] = "mount", + [TOMOYO_MAC_FILE_UMOUNT] = "unmount", + [TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root", + /* CONFIG::network group */ + [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = "inet_stream_bind", + [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = "inet_stream_listen", + [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = "inet_stream_connect", + [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = "inet_dgram_bind", + [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = "inet_dgram_send", + [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = "inet_raw_bind", + [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = "inet_raw_send", + [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = "unix_stream_bind", + [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = "unix_stream_listen", + [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = "unix_stream_connect", + [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = "unix_dgram_bind", + [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = "unix_dgram_send", + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = "unix_seqpacket_bind", + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = "unix_seqpacket_listen", + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect", + /* CONFIG::misc group */ + [TOMOYO_MAC_ENVIRON] = "env", + /* CONFIG group */ + [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file", + [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network", + [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc", +}; -/* Profile table. Memory is allocated as needed. */ -static struct tomoyo_profile *tomoyo_profile_ptr[TOMOYO_MAX_PROFILES]; +/* String table for conditions. 
*/ +const char * const tomoyo_condition_keyword[TOMOYO_MAX_CONDITION_KEYWORD] = { + [TOMOYO_TASK_UID] = "task.uid", + [TOMOYO_TASK_EUID] = "task.euid", + [TOMOYO_TASK_SUID] = "task.suid", + [TOMOYO_TASK_FSUID] = "task.fsuid", + [TOMOYO_TASK_GID] = "task.gid", + [TOMOYO_TASK_EGID] = "task.egid", + [TOMOYO_TASK_SGID] = "task.sgid", + [TOMOYO_TASK_FSGID] = "task.fsgid", + [TOMOYO_TASK_PID] = "task.pid", + [TOMOYO_TASK_PPID] = "task.ppid", + [TOMOYO_EXEC_ARGC] = "exec.argc", + [TOMOYO_EXEC_ENVC] = "exec.envc", + [TOMOYO_TYPE_IS_SOCKET] = "socket", + [TOMOYO_TYPE_IS_SYMLINK] = "symlink", + [TOMOYO_TYPE_IS_FILE] = "file", + [TOMOYO_TYPE_IS_BLOCK_DEV] = "block", + [TOMOYO_TYPE_IS_DIRECTORY] = "directory", + [TOMOYO_TYPE_IS_CHAR_DEV] = "char", + [TOMOYO_TYPE_IS_FIFO] = "fifo", + [TOMOYO_MODE_SETUID] = "setuid", + [TOMOYO_MODE_SETGID] = "setgid", + [TOMOYO_MODE_STICKY] = "sticky", + [TOMOYO_MODE_OWNER_READ] = "owner_read", + [TOMOYO_MODE_OWNER_WRITE] = "owner_write", + [TOMOYO_MODE_OWNER_EXECUTE] = "owner_execute", + [TOMOYO_MODE_GROUP_READ] = "group_read", + [TOMOYO_MODE_GROUP_WRITE] = "group_write", + [TOMOYO_MODE_GROUP_EXECUTE] = "group_execute", + [TOMOYO_MODE_OTHERS_READ] = "others_read", + [TOMOYO_MODE_OTHERS_WRITE] = "others_write", + [TOMOYO_MODE_OTHERS_EXECUTE] = "others_execute", + [TOMOYO_EXEC_REALPATH] = "exec.realpath", + [TOMOYO_SYMLINK_TARGET] = "symlink.target", + [TOMOYO_PATH1_UID] = "path1.uid", + [TOMOYO_PATH1_GID] = "path1.gid", + [TOMOYO_PATH1_INO] = "path1.ino", + [TOMOYO_PATH1_MAJOR] = "path1.major", + [TOMOYO_PATH1_MINOR] = "path1.minor", + [TOMOYO_PATH1_PERM] = "path1.perm", + [TOMOYO_PATH1_TYPE] = "path1.type", + [TOMOYO_PATH1_DEV_MAJOR] = "path1.dev_major", + [TOMOYO_PATH1_DEV_MINOR] = "path1.dev_minor", + [TOMOYO_PATH2_UID] = "path2.uid", + [TOMOYO_PATH2_GID] = "path2.gid", + [TOMOYO_PATH2_INO] = "path2.ino", + [TOMOYO_PATH2_MAJOR] = "path2.major", + [TOMOYO_PATH2_MINOR] = "path2.minor", + [TOMOYO_PATH2_PERM] = "path2.perm", + [TOMOYO_PATH2_TYPE] = "path2.type", + [TOMOYO_PATH2_DEV_MAJOR] = "path2.dev_major", + [TOMOYO_PATH2_DEV_MINOR] = "path2.dev_minor", + [TOMOYO_PATH1_PARENT_UID] = "path1.parent.uid", + [TOMOYO_PATH1_PARENT_GID] = "path1.parent.gid", + [TOMOYO_PATH1_PARENT_INO] = "path1.parent.ino", + [TOMOYO_PATH1_PARENT_PERM] = "path1.parent.perm", + [TOMOYO_PATH2_PARENT_UID] = "path2.parent.uid", + [TOMOYO_PATH2_PARENT_GID] = "path2.parent.gid", + [TOMOYO_PATH2_PARENT_INO] = "path2.parent.ino", + [TOMOYO_PATH2_PARENT_PERM] = "path2.parent.perm", +}; -/* String table for functionality that takes 4 modes. */ -static const char *tomoyo_mode[4] = { - "disabled", "learning", "permissive", "enforcing" +/* String table for PREFERENCE keyword. 
*/ +static const char * const tomoyo_pref_keywords[TOMOYO_MAX_PREF] = { + [TOMOYO_PREF_MAX_AUDIT_LOG] = "max_audit_log", + [TOMOYO_PREF_MAX_LEARNING_ENTRY] = "max_learning_entry", }; -/* String table for /sys/kernel/security/tomoyo/profile */ -static const char *tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX - + TOMOYO_MAX_MAC_CATEGORY_INDEX] = { - [TOMOYO_MAC_FILE_EXECUTE] = "file::execute", - [TOMOYO_MAC_FILE_OPEN] = "file::open", - [TOMOYO_MAC_FILE_CREATE] = "file::create", - [TOMOYO_MAC_FILE_UNLINK] = "file::unlink", - [TOMOYO_MAC_FILE_MKDIR] = "file::mkdir", - [TOMOYO_MAC_FILE_RMDIR] = "file::rmdir", - [TOMOYO_MAC_FILE_MKFIFO] = "file::mkfifo", - [TOMOYO_MAC_FILE_MKSOCK] = "file::mksock", - [TOMOYO_MAC_FILE_TRUNCATE] = "file::truncate", - [TOMOYO_MAC_FILE_SYMLINK] = "file::symlink", - [TOMOYO_MAC_FILE_REWRITE] = "file::rewrite", - [TOMOYO_MAC_FILE_MKBLOCK] = "file::mkblock", - [TOMOYO_MAC_FILE_MKCHAR] = "file::mkchar", - [TOMOYO_MAC_FILE_LINK] = "file::link", - [TOMOYO_MAC_FILE_RENAME] = "file::rename", - [TOMOYO_MAC_FILE_CHMOD] = "file::chmod", - [TOMOYO_MAC_FILE_CHOWN] = "file::chown", - [TOMOYO_MAC_FILE_CHGRP] = "file::chgrp", - [TOMOYO_MAC_FILE_IOCTL] = "file::ioctl", - [TOMOYO_MAC_FILE_CHROOT] = "file::chroot", - [TOMOYO_MAC_FILE_MOUNT] = "file::mount", - [TOMOYO_MAC_FILE_UMOUNT] = "file::umount", - [TOMOYO_MAC_FILE_PIVOT_ROOT] = "file::pivot_root", - [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file", +/* String table for path operation. */ +const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = { + [TOMOYO_TYPE_EXECUTE] = "execute", + [TOMOYO_TYPE_READ] = "read", + [TOMOYO_TYPE_WRITE] = "write", + [TOMOYO_TYPE_APPEND] = "append", + [TOMOYO_TYPE_UNLINK] = "unlink", + [TOMOYO_TYPE_GETATTR] = "getattr", + [TOMOYO_TYPE_RMDIR] = "rmdir", + [TOMOYO_TYPE_TRUNCATE] = "truncate", + [TOMOYO_TYPE_SYMLINK] = "symlink", + [TOMOYO_TYPE_CHROOT] = "chroot", + [TOMOYO_TYPE_UMOUNT] = "unmount", +}; + +/* String table for socket's operation. */ +const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = { + [TOMOYO_NETWORK_BIND] = "bind", + [TOMOYO_NETWORK_LISTEN] = "listen", + [TOMOYO_NETWORK_CONNECT] = "connect", + [TOMOYO_NETWORK_SEND] = "send", +}; + +/* String table for categories. */ +static const char * const tomoyo_category_keywords +[TOMOYO_MAX_MAC_CATEGORY_INDEX] = { + [TOMOYO_MAC_CATEGORY_FILE] = "file", + [TOMOYO_MAC_CATEGORY_NETWORK] = "network", + [TOMOYO_MAC_CATEGORY_MISC] = "misc", }; /* Permit policy management by non-root user? */ @@ -71,11 +178,20 @@ static bool tomoyo_manage_by_non_root; * * @value: Bool value. */ -static const char *tomoyo_yesno(const unsigned int value) +const char *tomoyo_yesno(const unsigned int value) { return value ? "yes" : "no"; } +/** + * tomoyo_addprintf - strncat()-like-snprintf(). + * + * @buffer: Buffer to write to. Must be '\0'-terminated. + * @len: Size of @buffer. + * @fmt: The printf()'s format string, followed by parameters. + * + * Returns nothing. + */ static void tomoyo_addprintf(char *buffer, int len, const char *fmt, ...) 
{ va_list args; @@ -96,7 +212,7 @@ static bool tomoyo_flush(struct tomoyo_io_buffer *head) { while (head->r.w_pos) { const char *w = head->r.w[0]; - int len = strlen(w); + size_t len = strlen(w); if (len) { if (len > head->read_user_buf_avail) len = head->read_user_buf_avail; @@ -108,11 +224,10 @@ static bool tomoyo_flush(struct tomoyo_io_buffer *head) head->read_user_buf += len; w += len; } - if (*w) { - head->r.w[0] = w; + head->r.w[0] = w; + if (*w) return false; - } - /* Add '\0' for query. */ + /* Add '\0' for audit logs and query. */ if (head->poll) { if (!head->read_user_buf_avail || copy_to_user(head->read_user_buf, "", 1)) @@ -147,17 +262,21 @@ static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string) WARN_ON(1); } +static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, + ...) __printf(2, 3); + /** * tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure. * * @head: Pointer to "struct tomoyo_io_buffer". * @fmt: The printf()'s format string, followed by parameters. */ -void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) +static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, + ...) { va_list args; - int len; - int pos = head->r.avail; + size_t len; + size_t pos = head->r.avail; int size = head->readbuf_size - pos; if (size <= 0) return; @@ -172,11 +291,25 @@ void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) tomoyo_set_string(head, head->read_buf + pos); } +/** + * tomoyo_set_space - Put a space to "struct tomoyo_io_buffer" structure. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ static void tomoyo_set_space(struct tomoyo_io_buffer *head) { tomoyo_set_string(head, " "); } +/** + * tomoyo_set_lf - Put a line feed to "struct tomoyo_io_buffer" structure. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ static bool tomoyo_set_lf(struct tomoyo_io_buffer *head) { tomoyo_set_string(head, "\n"); @@ -184,6 +317,62 @@ static bool tomoyo_set_lf(struct tomoyo_io_buffer *head) } /** + * tomoyo_set_slash - Put a shash to "struct tomoyo_io_buffer" structure. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ +static void tomoyo_set_slash(struct tomoyo_io_buffer *head) +{ + tomoyo_set_string(head, "/"); +} + +/* List of namespaces. */ +LIST_HEAD(tomoyo_namespace_list); +/* True if namespace other than tomoyo_kernel_namespace is defined. */ +static bool tomoyo_namespace_enabled; + +/** + * tomoyo_init_policy_namespace - Initialize namespace. + * + * @ns: Pointer to "struct tomoyo_policy_namespace". + * + * Returns nothing. + */ +void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns) +{ + unsigned int idx; + for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++) + INIT_LIST_HEAD(&ns->acl_group[idx]); + for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++) + INIT_LIST_HEAD(&ns->group_list[idx]); + for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++) + INIT_LIST_HEAD(&ns->policy_list[idx]); + ns->profile_version = 20110903; + tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list); + list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list); +} + +/** + * tomoyo_print_namespace - Print namespace header. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. 
+ */ +static void tomoyo_print_namespace(struct tomoyo_io_buffer *head) +{ + if (!tomoyo_namespace_enabled) + return; + tomoyo_set_string(head, + container_of(head->r.ns, + struct tomoyo_policy_namespace, + namespace_list)->name); + tomoyo_set_space(head); +} + +/** * tomoyo_print_name_union - Print a tomoyo_name_union. * * @head: Pointer to "struct tomoyo_io_buffer". @@ -193,7 +382,7 @@ static void tomoyo_print_name_union(struct tomoyo_io_buffer *head, const struct tomoyo_name_union *ptr) { tomoyo_set_space(head); - if (ptr->is_group) { + if (ptr->group) { tomoyo_set_string(head, "@"); tomoyo_set_string(head, ptr->group->group_name->name); } else { @@ -202,24 +391,46 @@ static void tomoyo_print_name_union(struct tomoyo_io_buffer *head, } /** - * tomoyo_print_number_union - Print a tomoyo_number_union. + * tomoyo_print_name_union_quoted - Print a tomoyo_name_union with a quote. * - * @head: Pointer to "struct tomoyo_io_buffer". - * @ptr: Pointer to "struct tomoyo_number_union". + * @head: Pointer to "struct tomoyo_io_buffer". + * @ptr: Pointer to "struct tomoyo_name_union". + * + * Returns nothing. */ -static void tomoyo_print_number_union(struct tomoyo_io_buffer *head, - const struct tomoyo_number_union *ptr) +static void tomoyo_print_name_union_quoted(struct tomoyo_io_buffer *head, + const struct tomoyo_name_union *ptr) { - tomoyo_set_space(head); - if (ptr->is_group) { + if (ptr->group) { + tomoyo_set_string(head, "@"); + tomoyo_set_string(head, ptr->group->group_name->name); + } else { + tomoyo_set_string(head, "\""); + tomoyo_set_string(head, ptr->filename->name); + tomoyo_set_string(head, "\""); + } +} + +/** + * tomoyo_print_number_union_nospace - Print a tomoyo_number_union without a space. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * @ptr: Pointer to "struct tomoyo_number_union". + * + * Returns nothing. + */ +static void tomoyo_print_number_union_nospace +(struct tomoyo_io_buffer *head, const struct tomoyo_number_union *ptr) +{ + if (ptr->group) { tomoyo_set_string(head, "@"); tomoyo_set_string(head, ptr->group->group_name->name); } else { int i; unsigned long min = ptr->values[0]; const unsigned long max = ptr->values[1]; - u8 min_type = ptr->min_type; - const u8 max_type = ptr->max_type; + u8 min_type = ptr->value_type[0]; + const u8 max_type = ptr->value_type[1]; char buffer[128]; buffer[0] = '\0'; for (i = 0; i < 2; i++) { @@ -233,8 +444,8 @@ static void tomoyo_print_number_union(struct tomoyo_io_buffer *head, "0%lo", min); break; default: - tomoyo_addprintf(buffer, sizeof(buffer), - "%lu", min); + tomoyo_addprintf(buffer, sizeof(buffer), "%lu", + min); break; } if (min == max && min_type == max_type) @@ -248,35 +459,55 @@ static void tomoyo_print_number_union(struct tomoyo_io_buffer *head, } /** + * tomoyo_print_number_union - Print a tomoyo_number_union. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * @ptr: Pointer to "struct tomoyo_number_union". + * + * Returns nothing. + */ +static void tomoyo_print_number_union(struct tomoyo_io_buffer *head, + const struct tomoyo_number_union *ptr) +{ + tomoyo_set_space(head); + tomoyo_print_number_union_nospace(head, ptr); +} + +/** * tomoyo_assign_profile - Create a new profile. * + * @ns: Pointer to "struct tomoyo_policy_namespace". * @profile: Profile number to create. * * Returns pointer to "struct tomoyo_profile" on success, NULL otherwise. 
*/ -static struct tomoyo_profile *tomoyo_assign_profile(const unsigned int profile) +static struct tomoyo_profile *tomoyo_assign_profile +(struct tomoyo_policy_namespace *ns, const unsigned int profile) { struct tomoyo_profile *ptr; struct tomoyo_profile *entry; if (profile >= TOMOYO_MAX_PROFILES) return NULL; - ptr = tomoyo_profile_ptr[profile]; + ptr = ns->profile_ptr[profile]; if (ptr) return ptr; entry = kzalloc(sizeof(*entry), GFP_NOFS); if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - ptr = tomoyo_profile_ptr[profile]; + ptr = ns->profile_ptr[profile]; if (!ptr && tomoyo_memory_ok(entry)) { ptr = entry; - ptr->learning = &tomoyo_default_profile.preference; - ptr->permissive = &tomoyo_default_profile.preference; - ptr->enforcing = &tomoyo_default_profile.preference; - ptr->default_config = TOMOYO_CONFIG_DISABLED; + ptr->default_config = TOMOYO_CONFIG_DISABLED | + TOMOYO_CONFIG_WANT_GRANT_LOG | + TOMOYO_CONFIG_WANT_REJECT_LOG; memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT, sizeof(ptr->config)); + ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] = + CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG; + ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] = + CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY; mb(); /* Avoid out-of-order execution. */ - tomoyo_profile_ptr[profile] = ptr; + ns->profile_ptr[profile] = ptr; entry = NULL; } mutex_unlock(&tomoyo_policy_lock); @@ -288,19 +519,29 @@ static struct tomoyo_profile *tomoyo_assign_profile(const unsigned int profile) /** * tomoyo_profile - Find a profile. * + * @ns: Pointer to "struct tomoyo_policy_namespace". * @profile: Profile number to find. * * Returns pointer to "struct tomoyo_profile". */ -struct tomoyo_profile *tomoyo_profile(const u8 profile) +struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns, + const u8 profile) { - struct tomoyo_profile *ptr = tomoyo_profile_ptr[profile]; - if (!tomoyo_policy_loaded) - return &tomoyo_default_profile; - BUG_ON(!ptr); + static struct tomoyo_profile tomoyo_null_profile; + struct tomoyo_profile *ptr = ns->profile_ptr[profile]; + if (!ptr) + ptr = &tomoyo_null_profile; return ptr; } +/** + * tomoyo_find_yesno - Find values for specified keyword. + * + * @string: String to check. + * @find: Name of keyword. + * + * Returns 1 if "@find=yes" was found, 0 if "@find=no" was found, -1 otherwise. + */ static s8 tomoyo_find_yesno(const char *string, const char *find) { const char *cp = strstr(string, find); @@ -314,18 +555,15 @@ static s8 tomoyo_find_yesno(const char *string, const char *find) return -1; } -static void tomoyo_set_bool(bool *b, const char *string, const char *find) -{ - switch (tomoyo_find_yesno(string, find)) { - case 1: - *b = true; - break; - case 0: - *b = false; - break; - } -} - +/** + * tomoyo_set_uint - Set value for specified preference. + * + * @i: Pointer to "unsigned int". + * @string: String to check. + * @find: Name of keyword. + * + * Returns nothing. 
+ */ static void tomoyo_set_uint(unsigned int *i, const char *string, const char *find) { @@ -334,51 +572,16 @@ static void tomoyo_set_uint(unsigned int *i, const char *string, sscanf(cp + strlen(find), "=%u", i); } -static void tomoyo_set_pref(const char *name, const char *value, - const bool use_default, - struct tomoyo_profile *profile) -{ - struct tomoyo_preference **pref; - bool *verbose; - if (!strcmp(name, "enforcing")) { - if (use_default) { - pref = &profile->enforcing; - goto set_default; - } - profile->enforcing = &profile->preference; - verbose = &profile->preference.enforcing_verbose; - goto set_verbose; - } - if (!strcmp(name, "permissive")) { - if (use_default) { - pref = &profile->permissive; - goto set_default; - } - profile->permissive = &profile->preference; - verbose = &profile->preference.permissive_verbose; - goto set_verbose; - } - if (!strcmp(name, "learning")) { - if (use_default) { - pref = &profile->learning; - goto set_default; - } - profile->learning = &profile->preference; - tomoyo_set_uint(&profile->preference.learning_max_entry, value, - "max_entry"); - verbose = &profile->preference.learning_verbose; - goto set_verbose; - } - return; - set_default: - *pref = &tomoyo_default_profile.preference; - return; - set_verbose: - tomoyo_set_bool(verbose, value, "verbose"); -} - +/** + * tomoyo_set_mode - Set mode for specified profile. + * + * @name: Name of functionality. + * @value: Mode for @name. + * @profile: Pointer to "struct tomoyo_profile". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_set_mode(char *name, const char *value, - const bool use_default, struct tomoyo_profile *profile) { u8 i; @@ -390,7 +593,17 @@ static int tomoyo_set_mode(char *name, const char *value, config = 0; for (i = 0; i < TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX; i++) { - if (strcmp(name, tomoyo_mac_keywords[i])) + int len = 0; + if (i < TOMOYO_MAX_MAC_INDEX) { + const u8 c = tomoyo_index2category[i]; + const char *category = + tomoyo_category_keywords[c]; + len = strlen(category); + if (strncmp(name, category, len) || + name[len++] != ':' || name[len++] != ':') + continue; + } + if (strcmp(name + len, tomoyo_mac_keywords[i])) continue; config = profile->config[i]; break; @@ -400,7 +613,7 @@ static int tomoyo_set_mode(char *name, const char *value, } else { return -EINVAL; } - if (use_default) { + if (strstr(value, "use_default")) { config = TOMOYO_CONFIG_USE_DEFAULT; } else { u8 mode; @@ -411,6 +624,24 @@ static int tomoyo_set_mode(char *name, const char *value, * 'config' from 'TOMOYO_CONFIG_USE_DEAFULT'. 
*/ config = (config & ~7) | mode; + if (config != TOMOYO_CONFIG_USE_DEFAULT) { + switch (tomoyo_find_yesno(value, "grant_log")) { + case 1: + config |= TOMOYO_CONFIG_WANT_GRANT_LOG; + break; + case 0: + config &= ~TOMOYO_CONFIG_WANT_GRANT_LOG; + break; + } + switch (tomoyo_find_yesno(value, "reject_log")) { + case 1: + config |= TOMOYO_CONFIG_WANT_REJECT_LOG; + break; + case 0: + config &= ~TOMOYO_CONFIG_WANT_REJECT_LOG; + break; + } + } } if (i < TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX) profile->config[i] = config; @@ -430,128 +661,121 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head) { char *data = head->write_buf; unsigned int i; - bool use_default = false; char *cp; struct tomoyo_profile *profile; - if (sscanf(data, "PROFILE_VERSION=%u", &tomoyo_profile_version) == 1) + if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version) + == 1) return 0; i = simple_strtoul(data, &cp, 10); - if (data == cp) { - profile = &tomoyo_default_profile; - } else { - if (*cp != '-') - return -EINVAL; - data = cp + 1; - profile = tomoyo_assign_profile(i); - if (!profile) - return -EINVAL; - } + if (*cp != '-') + return -EINVAL; + data = cp + 1; + profile = tomoyo_assign_profile(head->w.ns, i); + if (!profile) + return -EINVAL; cp = strchr(data, '='); if (!cp) return -EINVAL; *cp++ = '\0'; - if (profile != &tomoyo_default_profile) - use_default = strstr(cp, "use_default") != NULL; - if (tomoyo_str_starts(&data, "PREFERENCE::")) { - tomoyo_set_pref(data, cp, use_default, profile); - return 0; - } - if (profile == &tomoyo_default_profile) - return -EINVAL; if (!strcmp(data, "COMMENT")) { - const struct tomoyo_path_info *old_comment = profile->comment; - profile->comment = tomoyo_get_name(cp); + static DEFINE_SPINLOCK(lock); + const struct tomoyo_path_info *new_comment + = tomoyo_get_name(cp); + const struct tomoyo_path_info *old_comment; + if (!new_comment) + return -ENOMEM; + spin_lock(&lock); + old_comment = profile->comment; + profile->comment = new_comment; + spin_unlock(&lock); tomoyo_put_name(old_comment); return 0; } - return tomoyo_set_mode(data, cp, use_default, profile); -} - -static void tomoyo_print_preference(struct tomoyo_io_buffer *head, - const int idx) -{ - struct tomoyo_preference *pref = &tomoyo_default_profile.preference; - const struct tomoyo_profile *profile = idx >= 0 ? 
- tomoyo_profile_ptr[idx] : NULL; - char buffer[16] = ""; - if (profile) { - buffer[sizeof(buffer) - 1] = '\0'; - snprintf(buffer, sizeof(buffer) - 1, "%u-", idx); - } - if (profile) { - pref = profile->learning; - if (pref == &tomoyo_default_profile.preference) - goto skip1; - } - tomoyo_io_printf(head, "%sPREFERENCE::%s={ " - "verbose=%s max_entry=%u }\n", - buffer, "learning", - tomoyo_yesno(pref->learning_verbose), - pref->learning_max_entry); - skip1: - if (profile) { - pref = profile->permissive; - if (pref == &tomoyo_default_profile.preference) - goto skip2; - } - tomoyo_io_printf(head, "%sPREFERENCE::%s={ verbose=%s }\n", - buffer, "permissive", - tomoyo_yesno(pref->permissive_verbose)); - skip2: - if (profile) { - pref = profile->enforcing; - if (pref == &tomoyo_default_profile.preference) - return; + if (!strcmp(data, "PREFERENCE")) { + for (i = 0; i < TOMOYO_MAX_PREF; i++) + tomoyo_set_uint(&profile->pref[i], cp, + tomoyo_pref_keywords[i]); + return 0; } - tomoyo_io_printf(head, "%sPREFERENCE::%s={ verbose=%s }\n", - buffer, "enforcing", - tomoyo_yesno(pref->enforcing_verbose)); + return tomoyo_set_mode(data, cp, profile); } +/** + * tomoyo_print_config - Print mode for specified functionality. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * @config: Mode for that functionality. + * + * Returns nothing. + * + * Caller prints functionality's name. + */ static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config) { - tomoyo_io_printf(head, "={ mode=%s }\n", tomoyo_mode[config & 3]); + tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n", + tomoyo_mode[config & 3], + tomoyo_yesno(config & TOMOYO_CONFIG_WANT_GRANT_LOG), + tomoyo_yesno(config & TOMOYO_CONFIG_WANT_REJECT_LOG)); } /** * tomoyo_read_profile - Read profile table. * * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. */ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) { u8 index; + struct tomoyo_policy_namespace *ns = + container_of(head->r.ns, typeof(*ns), namespace_list); const struct tomoyo_profile *profile; + if (head->r.eof) + return; next: index = head->r.index; - profile = tomoyo_profile_ptr[index]; + profile = ns->profile_ptr[index]; switch (head->r.step) { case 0: - tomoyo_io_printf(head, "PROFILE_VERSION=%s\n", "20090903"); - tomoyo_print_preference(head, -1); + tomoyo_print_namespace(head); + tomoyo_io_printf(head, "PROFILE_VERSION=%u\n", + ns->profile_version); head->r.step++; break; case 1: for ( ; head->r.index < TOMOYO_MAX_PROFILES; head->r.index++) - if (tomoyo_profile_ptr[head->r.index]) + if (ns->profile_ptr[head->r.index]) break; - if (head->r.index == TOMOYO_MAX_PROFILES) + if (head->r.index == TOMOYO_MAX_PROFILES) { + head->r.eof = true; return; + } head->r.step++; break; case 2: { + u8 i; const struct tomoyo_path_info *comment = profile->comment; + tomoyo_print_namespace(head); tomoyo_io_printf(head, "%u-COMMENT=", index); tomoyo_set_string(head, comment ? 
comment->name : ""); tomoyo_set_lf(head); + tomoyo_print_namespace(head); + tomoyo_io_printf(head, "%u-PREFERENCE={ ", index); + for (i = 0; i < TOMOYO_MAX_PREF; i++) + tomoyo_io_printf(head, "%s=%u ", + tomoyo_pref_keywords[i], + profile->pref[i]); + tomoyo_set_string(head, "}\n"); head->r.step++; } break; case 3: { + tomoyo_print_namespace(head); tomoyo_io_printf(head, "%u-%s", index, "CONFIG"); tomoyo_print_config(head, profile->default_config); head->r.bit = 0; @@ -565,15 +789,22 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) const u8 config = profile->config[i]; if (config == TOMOYO_CONFIG_USE_DEFAULT) continue; - tomoyo_io_printf(head, "%u-%s%s", index, "CONFIG::", - tomoyo_mac_keywords[i]); + tomoyo_print_namespace(head); + if (i < TOMOYO_MAX_MAC_INDEX) + tomoyo_io_printf(head, "%u-CONFIG::%s::%s", + index, + tomoyo_category_keywords + [tomoyo_index2category[i]], + tomoyo_mac_keywords[i]); + else + tomoyo_io_printf(head, "%u-CONFIG::%s", index, + tomoyo_mac_keywords[i]); tomoyo_print_config(head, config); head->r.bit++; break; } if (head->r.bit == TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX) { - tomoyo_print_preference(head, index); head->r.index++; head->r.step = 1; } @@ -583,6 +814,14 @@ static void tomoyo_read_profile(struct tomoyo_io_buffer *head) goto next; } +/** + * tomoyo_same_manager - Check for duplicated "struct tomoyo_manager" entry. + * + * @a: Pointer to "struct tomoyo_acl_head". + * @b: Pointer to "struct tomoyo_acl_head". + * + * Returns true if @a == @b, false otherwise. + */ static bool tomoyo_same_manager(const struct tomoyo_acl_head *a, const struct tomoyo_acl_head *b) { @@ -604,23 +843,22 @@ static int tomoyo_update_manager_entry(const char *manager, const bool is_delete) { struct tomoyo_manager e = { }; - int error; - - if (tomoyo_domain_def(manager)) { - if (!tomoyo_correct_domain(manager)) - return -EINVAL; - e.is_domain = true; - } else { - if (!tomoyo_correct_path(manager)) - return -EINVAL; - } + struct tomoyo_acl_param param = { + /* .ns = &tomoyo_kernel_namespace, */ + .is_delete = is_delete, + .list = &tomoyo_kernel_namespace. + policy_list[TOMOYO_ID_MANAGER], + }; + int error = is_delete ? -ENOENT : -ENOMEM; + if (!tomoyo_correct_domain(manager) && + !tomoyo_correct_word(manager)) + return -EINVAL; e.manager = tomoyo_get_name(manager); - if (!e.manager) - return -ENOMEM; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list[TOMOYO_ID_MANAGER], - tomoyo_same_manager); - tomoyo_put_name(e.manager); + if (e.manager) { + error = tomoyo_update_policy(&e.head, sizeof(e), ¶m, + tomoyo_same_manager); + tomoyo_put_name(e.manager); + } return error; } @@ -636,13 +874,12 @@ static int tomoyo_update_manager_entry(const char *manager, static int tomoyo_write_manager(struct tomoyo_io_buffer *head) { char *data = head->write_buf; - bool is_delete = tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE); if (!strcmp(data, "manage_by_non_root")) { - tomoyo_manage_by_non_root = !is_delete; + tomoyo_manage_by_non_root = !head->w.is_delete; return 0; } - return tomoyo_update_manager_entry(data, is_delete); + return tomoyo_update_manager_entry(data, head->w.is_delete); } /** @@ -656,8 +893,8 @@ static void tomoyo_read_manager(struct tomoyo_io_buffer *head) { if (head->r.eof) return; - list_for_each_cookie(head->r.acl, - &tomoyo_policy_list[TOMOYO_ID_MANAGER]) { + list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace. 
+ policy_list[TOMOYO_ID_MANAGER]) { struct tomoyo_manager *ptr = list_entry(head->r.acl, typeof(*ptr), head.list); if (ptr->head.is_deleted) @@ -688,25 +925,18 @@ static bool tomoyo_manager(void) if (!tomoyo_policy_loaded) return true; - if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid)) + if (!tomoyo_manage_by_non_root && + (!uid_eq(task->cred->uid, GLOBAL_ROOT_UID) || + !uid_eq(task->cred->euid, GLOBAL_ROOT_UID))) return false; - list_for_each_entry_rcu(ptr, &tomoyo_policy_list[TOMOYO_ID_MANAGER], - head.list) { - if (!ptr->head.is_deleted && ptr->is_domain - && !tomoyo_pathcmp(domainname, ptr->manager)) { - found = true; - break; - } - } - if (found) - return true; exe = tomoyo_get_exe(); if (!exe) return false; - list_for_each_entry_rcu(ptr, &tomoyo_policy_list[TOMOYO_ID_MANAGER], - head.list) { - if (!ptr->head.is_deleted && !ptr->is_domain - && !strcmp(exe, ptr->manager->name)) { + list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace. + policy_list[TOMOYO_ID_MANAGER], head.list) { + if (!ptr->head.is_deleted && + (!tomoyo_pathcmp(domainname, ptr->manager) || + !strcmp(exe, ptr->manager->name))) { found = true; break; } @@ -724,8 +954,11 @@ static bool tomoyo_manager(void) return found; } +static struct tomoyo_domain_info *tomoyo_find_domain_by_qid +(unsigned int serial); + /** - * tomoyo_select_one - Parse select command. + * tomoyo_select_domain - Parse select command. * * @head: Pointer to "struct tomoyo_io_buffer". * @data: String to parse. @@ -734,35 +967,34 @@ static bool tomoyo_manager(void) * * Caller holds tomoyo_read_lock(). */ -static bool tomoyo_select_one(struct tomoyo_io_buffer *head, const char *data) +static bool tomoyo_select_domain(struct tomoyo_io_buffer *head, + const char *data) { unsigned int pid; struct tomoyo_domain_info *domain = NULL; bool global_pid = false; - - if (!strcmp(data, "allow_execute")) { - head->r.print_execute_only = true; - return true; - } + if (strncmp(data, "select ", 7)) + return false; + data += 7; if (sscanf(data, "pid=%u", &pid) == 1 || (global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) { struct task_struct *p; rcu_read_lock(); - read_lock(&tasklist_lock); if (global_pid) p = find_task_by_pid_ns(pid, &init_pid_ns); else p = find_task_by_vpid(pid); if (p) domain = tomoyo_real_domain(p); - read_unlock(&tasklist_lock); rcu_read_unlock(); } else if (!strncmp(data, "domain=", 7)) { if (tomoyo_domain_def(data + 7)) domain = tomoyo_find_domain(data + 7); + } else if (sscanf(data, "Q=%u", &pid) == 1) { + domain = tomoyo_find_domain_by_qid(pid); } else return false; - head->write_var1 = domain; + head->w.domain = domain; /* Accessing read_buf is safe because head->io_sem is held. */ if (!head->read_buf) return true; /* Do nothing if open(O_WRONLY). */ @@ -779,11 +1011,53 @@ static bool tomoyo_select_one(struct tomoyo_io_buffer *head, const char *data) } /** + * tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b, false otherwise. + */ +static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a, + const struct tomoyo_acl_info *b) +{ + const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head); + const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head); + return p1->domainname == p2->domainname; +} + +/** + * tomoyo_write_task - Update task related list. + * + * @param: Pointer to "struct tomoyo_acl_param". 
+ * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +static int tomoyo_write_task(struct tomoyo_acl_param *param) +{ + int error = -EINVAL; + if (tomoyo_str_starts(¶m->data, "manual_domain_transition ")) { + struct tomoyo_task_acl e = { + .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL, + .domainname = tomoyo_get_domainname(param), + }; + if (e.domainname) + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_task_acl, + NULL); + tomoyo_put_name(e.domainname); + } + return error; +} + +/** * tomoyo_delete_domain - Delete a domain. * * @domainname: The name of domain. * - * Returns 0. + * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ @@ -795,7 +1069,7 @@ static int tomoyo_delete_domain(char *domainname) name.name = domainname; tomoyo_fill_path_info(&name); if (mutex_lock_interruptible(&tomoyo_policy_lock)) - return 0; + return -EINTR; /* Is there an active domain? */ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { /* Never delete tomoyo_kernel_domain */ @@ -814,20 +1088,52 @@ static int tomoyo_delete_domain(char *domainname) /** * tomoyo_write_domain2 - Write domain policy. * - * @head: Pointer to "struct tomoyo_io_buffer". + * @ns: Pointer to "struct tomoyo_policy_namespace". + * @list: Pointer to "struct list_head". + * @data: Policy to be interpreted. + * @is_delete: True if it is a delete request. * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_write_domain2(char *data, struct tomoyo_domain_info *domain, +static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns, + struct list_head *list, char *data, const bool is_delete) { - if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_ALLOW_MOUNT)) - return tomoyo_write_mount(data, domain, is_delete); - return tomoyo_write_file(data, domain, is_delete); + struct tomoyo_acl_param param = { + .ns = ns, + .list = list, + .data = data, + .is_delete = is_delete, + }; + static const struct { + const char *keyword; + int (*write) (struct tomoyo_acl_param *); + } tomoyo_callback[5] = { + { "file ", tomoyo_write_file }, + { "network inet ", tomoyo_write_inet_network }, + { "network unix ", tomoyo_write_unix_network }, + { "misc ", tomoyo_write_misc }, + { "task ", tomoyo_write_task }, + }; + u8 i; + + for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) { + if (!tomoyo_str_starts(¶m.data, + tomoyo_callback[i].keyword)) + continue; + return tomoyo_callback[i].write(¶m); + } + return -EINVAL; } +/* String table for domain flags. */ +const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS] = { + [TOMOYO_DIF_QUOTA_WARNED] = "quota_exceeded\n", + [TOMOYO_DIF_TRANSITION_FAILED] = "transition_failed\n", +}; + /** * tomoyo_write_domain - Write domain policy. 
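/*
 * A minimal userspace sketch (not part of this patch): the rewritten
 * tomoyo_write_domain2() no longer special-cases "allow_mount"; it
 * walks a keyword table ("file ", "network inet ", "network unix ",
 * "misc ", "task ") and hands the remainder of the line to the
 * matching writer.  The miniature dispatcher below shows the same
 * pattern; the handler names are illustrative only.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

static int write_file(const char *data) { printf("file acl: %s\n", data); return 0; }
static int write_task(const char *data) { printf("task acl: %s\n", data); return 0; }

static int write_domain_line(const char *line)
{
	static const struct {
		const char *keyword;
		int (*write)(const char *);
	} table[] = {
		{ "file ", write_file },
		{ "task ", write_task },
	};
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++) {
		const size_t len = strlen(table[i].keyword);

		if (!strncmp(line, table[i].keyword, len))
			return table[i].write(line + len);
	}
	return -EINVAL;	/* unknown keyword, as in the kernel code */
}

int main(void)
{
	write_domain_line("file read /etc/fstab");
	write_domain_line("task manual_domain_transition <kernel> /usr/sbin/sshd");
	return 0;
}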
* @@ -840,69 +1146,207 @@ static int tomoyo_write_domain2(char *data, struct tomoyo_domain_info *domain, static int tomoyo_write_domain(struct tomoyo_io_buffer *head) { char *data = head->write_buf; - struct tomoyo_domain_info *domain = head->write_var1; - bool is_delete = false; - bool is_select = false; + struct tomoyo_policy_namespace *ns; + struct tomoyo_domain_info *domain = head->w.domain; + const bool is_delete = head->w.is_delete; + bool is_select = !is_delete && tomoyo_str_starts(&data, "select "); unsigned int profile; - - if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE)) - is_delete = true; - else if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_SELECT)) - is_select = true; - if (is_select && tomoyo_select_one(head, data)) - return 0; - /* Don't allow updating policies by non manager programs. */ - if (!tomoyo_manager()) - return -EPERM; - if (tomoyo_domain_def(data)) { + if (*data == '<') { + int ret = 0; domain = NULL; if (is_delete) - tomoyo_delete_domain(data); + ret = tomoyo_delete_domain(data); else if (is_select) domain = tomoyo_find_domain(data); else - domain = tomoyo_assign_domain(data, 0); - head->write_var1 = domain; - return 0; + domain = tomoyo_assign_domain(data, false); + head->w.domain = domain; + return ret; } if (!domain) return -EINVAL; - - if (sscanf(data, TOMOYO_KEYWORD_USE_PROFILE "%u", &profile) == 1 + ns = domain->ns; + if (sscanf(data, "use_profile %u", &profile) == 1 && profile < TOMOYO_MAX_PROFILES) { - if (tomoyo_profile_ptr[profile] || !tomoyo_policy_loaded) + if (!tomoyo_policy_loaded || ns->profile_ptr[profile]) domain->profile = (u8) profile; return 0; } - if (!strcmp(data, TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ)) { - domain->ignore_global_allow_read = !is_delete; + if (sscanf(data, "use_group %u\n", &profile) == 1 + && profile < TOMOYO_MAX_ACL_GROUPS) { + if (!is_delete) + domain->group = (u8) profile; return 0; } - if (!strcmp(data, TOMOYO_KEYWORD_QUOTA_EXCEEDED)) { - domain->quota_warned = !is_delete; - return 0; - } - if (!strcmp(data, TOMOYO_KEYWORD_TRANSITION_FAILED)) { - domain->transition_failed = !is_delete; + for (profile = 0; profile < TOMOYO_MAX_DOMAIN_INFO_FLAGS; profile++) { + const char *cp = tomoyo_dif[profile]; + if (strncmp(data, cp, strlen(cp) - 1)) + continue; + domain->flags[profile] = !is_delete; return 0; } - return tomoyo_write_domain2(data, domain, is_delete); + return tomoyo_write_domain2(ns, &domain->acl_info_list, data, + is_delete); } /** - * tomoyo_fns - Find next set bit. + * tomoyo_print_condition - Print condition part. * - * @perm: 8 bits value. - * @bit: First bit to find. + * @head: Pointer to "struct tomoyo_io_buffer". + * @cond: Pointer to "struct tomoyo_condition". * - * Returns next on-bit on success, 8 otherwise. + * Returns true on success, false otherwise. 
*/ -static u8 tomoyo_fns(const u8 perm, u8 bit) +static bool tomoyo_print_condition(struct tomoyo_io_buffer *head, + const struct tomoyo_condition *cond) { - for ( ; bit < 8; bit++) - if (perm & (1 << bit)) + switch (head->r.cond_step) { + case 0: + head->r.cond_index = 0; + head->r.cond_step++; + if (cond->transit) { + tomoyo_set_space(head); + tomoyo_set_string(head, cond->transit->name); + } + /* fall through */ + case 1: + { + const u16 condc = cond->condc; + const struct tomoyo_condition_element *condp = + (typeof(condp)) (cond + 1); + const struct tomoyo_number_union *numbers_p = + (typeof(numbers_p)) (condp + condc); + const struct tomoyo_name_union *names_p = + (typeof(names_p)) + (numbers_p + cond->numbers_count); + const struct tomoyo_argv *argv = + (typeof(argv)) (names_p + cond->names_count); + const struct tomoyo_envp *envp = + (typeof(envp)) (argv + cond->argc); + u16 skip; + for (skip = 0; skip < head->r.cond_index; skip++) { + const u8 left = condp->left; + const u8 right = condp->right; + condp++; + switch (left) { + case TOMOYO_ARGV_ENTRY: + argv++; + continue; + case TOMOYO_ENVP_ENTRY: + envp++; + continue; + case TOMOYO_NUMBER_UNION: + numbers_p++; + break; + } + switch (right) { + case TOMOYO_NAME_UNION: + names_p++; + break; + case TOMOYO_NUMBER_UNION: + numbers_p++; + break; + } + } + while (head->r.cond_index < condc) { + const u8 match = condp->equals; + const u8 left = condp->left; + const u8 right = condp->right; + if (!tomoyo_flush(head)) + return false; + condp++; + head->r.cond_index++; + tomoyo_set_space(head); + switch (left) { + case TOMOYO_ARGV_ENTRY: + tomoyo_io_printf(head, + "exec.argv[%lu]%s=\"", + argv->index, argv-> + is_not ? "!" : ""); + tomoyo_set_string(head, + argv->value->name); + tomoyo_set_string(head, "\""); + argv++; + continue; + case TOMOYO_ENVP_ENTRY: + tomoyo_set_string(head, + "exec.envp[\""); + tomoyo_set_string(head, + envp->name->name); + tomoyo_io_printf(head, "\"]%s=", envp-> + is_not ? "!" : ""); + if (envp->value) { + tomoyo_set_string(head, "\""); + tomoyo_set_string(head, envp-> + value->name); + tomoyo_set_string(head, "\""); + } else { + tomoyo_set_string(head, + "NULL"); + } + envp++; + continue; + case TOMOYO_NUMBER_UNION: + tomoyo_print_number_union_nospace + (head, numbers_p++); + break; + default: + tomoyo_set_string(head, + tomoyo_condition_keyword[left]); + break; + } + tomoyo_set_string(head, match ? "=" : "!="); + switch (right) { + case TOMOYO_NAME_UNION: + tomoyo_print_name_union_quoted + (head, names_p++); + break; + case TOMOYO_NUMBER_UNION: + tomoyo_print_number_union_nospace + (head, numbers_p++); + break; + default: + tomoyo_set_string(head, + tomoyo_condition_keyword[right]); + break; + } + } + } + head->r.cond_step++; + /* fall through */ + case 2: + if (!tomoyo_flush(head)) break; - return bit; + head->r.cond_step++; + /* fall through */ + case 3: + if (cond->grant_log != TOMOYO_GRANTLOG_AUTO) + tomoyo_io_printf(head, " grant_log=%s", + tomoyo_yesno(cond->grant_log == + TOMOYO_GRANTLOG_YES)); + tomoyo_set_lf(head); + return true; + } + return false; +} + +/** + * tomoyo_set_group - Print "acl_group " header keyword and category name. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * @category: Category name. + * + * Returns nothing. 
+ */ +static void tomoyo_set_group(struct tomoyo_io_buffer *head, + const char *category) +{ + if (head->type == TOMOYO_EXCEPTIONPOLICY) { + tomoyo_print_namespace(head); + tomoyo_io_printf(head, "acl_group %u ", + head->r.acl_group_index); + } + tomoyo_set_string(head, category); } /** @@ -917,99 +1361,204 @@ static bool tomoyo_print_entry(struct tomoyo_io_buffer *head, struct tomoyo_acl_info *acl) { const u8 acl_type = acl->type; + bool first = true; u8 bit; + if (head->r.print_cond_part) + goto print_cond_part; if (acl->is_deleted) return true; - next: - bit = head->r.bit; if (!tomoyo_flush(head)) return false; else if (acl_type == TOMOYO_TYPE_PATH_ACL) { struct tomoyo_path_acl *ptr = container_of(acl, typeof(*ptr), head); const u16 perm = ptr->perm; - for ( ; bit < TOMOYO_MAX_PATH_OPERATION; bit++) { + for (bit = 0; bit < TOMOYO_MAX_PATH_OPERATION; bit++) { if (!(perm & (1 << bit))) continue; - if (head->r.print_execute_only && + if (head->r.print_transition_related_only && bit != TOMOYO_TYPE_EXECUTE) continue; - /* Print "read/write" instead of "read" and "write". */ - if ((bit == TOMOYO_TYPE_READ || - bit == TOMOYO_TYPE_WRITE) - && (perm & (1 << TOMOYO_TYPE_READ_WRITE))) - continue; - break; + if (first) { + tomoyo_set_group(head, "file "); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_path_keyword[bit]); } - if (bit >= TOMOYO_MAX_PATH_OPERATION) - goto done; - tomoyo_io_printf(head, "allow_%s", tomoyo_path_keyword[bit]); + if (first) + return true; tomoyo_print_name_union(head, &ptr->name); - } else if (head->r.print_execute_only) { + } else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) { + struct tomoyo_task_acl *ptr = + container_of(acl, typeof(*ptr), head); + tomoyo_set_group(head, "task "); + tomoyo_set_string(head, "manual_domain_transition "); + tomoyo_set_string(head, ptr->domainname->name); + } else if (head->r.print_transition_related_only) { return true; } else if (acl_type == TOMOYO_TYPE_PATH2_ACL) { struct tomoyo_path2_acl *ptr = container_of(acl, typeof(*ptr), head); - bit = tomoyo_fns(ptr->perm, bit); - if (bit >= TOMOYO_MAX_PATH2_OPERATION) - goto done; - tomoyo_io_printf(head, "allow_%s", tomoyo_path2_keyword[bit]); + const u8 perm = ptr->perm; + for (bit = 0; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) { + if (!(perm & (1 << bit))) + continue; + if (first) { + tomoyo_set_group(head, "file "); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_mac_keywords + [tomoyo_pp2mac[bit]]); + } + if (first) + return true; tomoyo_print_name_union(head, &ptr->name1); tomoyo_print_name_union(head, &ptr->name2); } else if (acl_type == TOMOYO_TYPE_PATH_NUMBER_ACL) { struct tomoyo_path_number_acl *ptr = container_of(acl, typeof(*ptr), head); - bit = tomoyo_fns(ptr->perm, bit); - if (bit >= TOMOYO_MAX_PATH_NUMBER_OPERATION) - goto done; - tomoyo_io_printf(head, "allow_%s", - tomoyo_path_number_keyword[bit]); + const u8 perm = ptr->perm; + for (bit = 0; bit < TOMOYO_MAX_PATH_NUMBER_OPERATION; bit++) { + if (!(perm & (1 << bit))) + continue; + if (first) { + tomoyo_set_group(head, "file "); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_mac_keywords + [tomoyo_pn2mac[bit]]); + } + if (first) + return true; tomoyo_print_name_union(head, &ptr->name); tomoyo_print_number_union(head, &ptr->number); } else if (acl_type == TOMOYO_TYPE_MKDEV_ACL) { struct tomoyo_mkdev_acl *ptr = container_of(acl, typeof(*ptr), head); - bit = tomoyo_fns(ptr->perm, bit); - if (bit >= 
TOMOYO_MAX_MKDEV_OPERATION) - goto done; - tomoyo_io_printf(head, "allow_%s", tomoyo_mkdev_keyword[bit]); + const u8 perm = ptr->perm; + for (bit = 0; bit < TOMOYO_MAX_MKDEV_OPERATION; bit++) { + if (!(perm & (1 << bit))) + continue; + if (first) { + tomoyo_set_group(head, "file "); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_mac_keywords + [tomoyo_pnnn2mac[bit]]); + } + if (first) + return true; tomoyo_print_name_union(head, &ptr->name); tomoyo_print_number_union(head, &ptr->mode); tomoyo_print_number_union(head, &ptr->major); tomoyo_print_number_union(head, &ptr->minor); + } else if (acl_type == TOMOYO_TYPE_INET_ACL) { + struct tomoyo_inet_acl *ptr = + container_of(acl, typeof(*ptr), head); + const u8 perm = ptr->perm; + + for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) { + if (!(perm & (1 << bit))) + continue; + if (first) { + tomoyo_set_group(head, "network inet "); + tomoyo_set_string(head, tomoyo_proto_keyword + [ptr->protocol]); + tomoyo_set_space(head); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_socket_keyword[bit]); + } + if (first) + return true; + tomoyo_set_space(head); + if (ptr->address.group) { + tomoyo_set_string(head, "@"); + tomoyo_set_string(head, ptr->address.group->group_name + ->name); + } else { + char buf[128]; + tomoyo_print_ip(buf, sizeof(buf), &ptr->address); + tomoyo_io_printf(head, "%s", buf); + } + tomoyo_print_number_union(head, &ptr->port); + } else if (acl_type == TOMOYO_TYPE_UNIX_ACL) { + struct tomoyo_unix_acl *ptr = + container_of(acl, typeof(*ptr), head); + const u8 perm = ptr->perm; + + for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) { + if (!(perm & (1 << bit))) + continue; + if (first) { + tomoyo_set_group(head, "network unix "); + tomoyo_set_string(head, tomoyo_proto_keyword + [ptr->protocol]); + tomoyo_set_space(head); + first = false; + } else { + tomoyo_set_slash(head); + } + tomoyo_set_string(head, tomoyo_socket_keyword[bit]); + } + if (first) + return true; + tomoyo_print_name_union(head, &ptr->name); } else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) { struct tomoyo_mount_acl *ptr = container_of(acl, typeof(*ptr), head); - tomoyo_io_printf(head, "allow_mount"); + tomoyo_set_group(head, "file mount"); tomoyo_print_name_union(head, &ptr->dev_name); tomoyo_print_name_union(head, &ptr->dir_name); tomoyo_print_name_union(head, &ptr->fs_type); tomoyo_print_number_union(head, &ptr->flags); + } else if (acl_type == TOMOYO_TYPE_ENV_ACL) { + struct tomoyo_env_acl *ptr = + container_of(acl, typeof(*ptr), head); + + tomoyo_set_group(head, "misc env "); + tomoyo_set_string(head, ptr->env->name); + } + if (acl->cond) { + head->r.print_cond_part = true; + head->r.cond_step = 0; + if (!tomoyo_flush(head)) + return false; +print_cond_part: + if (!tomoyo_print_condition(head, acl->cond)) + return false; + head->r.print_cond_part = false; + } else { + tomoyo_set_lf(head); } - head->r.bit = bit + 1; - tomoyo_io_printf(head, "\n"); - if (acl_type != TOMOYO_TYPE_MOUNT_ACL) - goto next; - done: - head->r.bit = 0; return true; } /** * tomoyo_read_domain2 - Read domain policy. * - * @head: Pointer to "struct tomoyo_io_buffer". - * @domain: Pointer to "struct tomoyo_domain_info". + * @head: Pointer to "struct tomoyo_io_buffer". + * @list: Pointer to "struct list_head". * * Caller holds tomoyo_read_lock(). * * Returns true on success, false otherwise. 
*/ static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head, - struct tomoyo_domain_info *domain) + struct list_head *list) { - list_for_each_cookie(head->r.acl, &domain->acl_info_list) { + list_for_each_cookie(head->r.acl, list) { struct tomoyo_acl_info *ptr = list_entry(head->r.acl, typeof(*ptr), list); if (!tomoyo_print_entry(head, ptr)) @@ -1034,6 +1583,7 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) struct tomoyo_domain_info *domain = list_entry(head->r.domain, typeof(*domain), list); switch (head->r.step) { + u8 i; case 0: if (domain->is_deleted && !head->r.print_this_domain_only) @@ -1041,22 +1591,18 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) /* Print domainname and flags. */ tomoyo_set_string(head, domain->domainname->name); tomoyo_set_lf(head); - tomoyo_io_printf(head, - TOMOYO_KEYWORD_USE_PROFILE "%u\n", + tomoyo_io_printf(head, "use_profile %u\n", domain->profile); - if (domain->quota_warned) - tomoyo_set_string(head, "quota_exceeded\n"); - if (domain->transition_failed) - tomoyo_set_string(head, "transition_failed\n"); - if (domain->ignore_global_allow_read) - tomoyo_set_string(head, - TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ - "\n"); + tomoyo_io_printf(head, "use_group %u\n", + domain->group); + for (i = 0; i < TOMOYO_MAX_DOMAIN_INFO_FLAGS; i++) + if (domain->flags[i]) + tomoyo_set_string(head, tomoyo_dif[i]); head->r.step++; tomoyo_set_lf(head); /* fall through */ case 1: - if (!tomoyo_read_domain2(head, domain)) + if (!tomoyo_read_domain2(head, &domain->acl_info_list)) return; head->r.step++; if (!tomoyo_set_lf(head)) @@ -1073,73 +1619,6 @@ static void tomoyo_read_domain(struct tomoyo_io_buffer *head) } /** - * tomoyo_write_domain_profile - Assign profile for specified domain. - * - * @head: Pointer to "struct tomoyo_io_buffer". - * - * Returns 0 on success, -EINVAL otherwise. - * - * This is equivalent to doing - * - * ( echo "select " $domainname; echo "use_profile " $profile ) | - * /usr/sbin/tomoyo-loadpolicy -d - * - * Caller holds tomoyo_read_lock(). - */ -static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head) -{ - char *data = head->write_buf; - char *cp = strchr(data, ' '); - struct tomoyo_domain_info *domain; - unsigned long profile; - - if (!cp) - return -EINVAL; - *cp = '\0'; - domain = tomoyo_find_domain(cp + 1); - if (strict_strtoul(data, 10, &profile)) - return -EINVAL; - if (domain && profile < TOMOYO_MAX_PROFILES - && (tomoyo_profile_ptr[profile] || !tomoyo_policy_loaded)) - domain->profile = (u8) profile; - return 0; -} - -/** - * tomoyo_read_domain_profile - Read only domainname and profile. - * - * @head: Pointer to "struct tomoyo_io_buffer". - * - * Returns list of profile number and domainname pairs. - * - * This is equivalent to doing - * - * grep -A 1 '^<kernel>' /sys/kernel/security/tomoyo/domain_policy | - * awk ' { if ( domainname == "" ) { if ( $1 == "<kernel>" ) - * domainname = $0; } else if ( $1 == "use_profile" ) { - * print $2 " " domainname; domainname = ""; } } ; ' - * - * Caller holds tomoyo_read_lock(). 
- */ -static void tomoyo_read_domain_profile(struct tomoyo_io_buffer *head) -{ - if (head->r.eof) - return; - list_for_each_cookie(head->r.domain, &tomoyo_domain_list) { - struct tomoyo_domain_info *domain = - list_entry(head->r.domain, typeof(*domain), list); - if (domain->is_deleted) - continue; - if (!tomoyo_flush(head)) - return; - tomoyo_io_printf(head, "%u ", domain->profile); - tomoyo_set_string(head, domain->domainname->name); - tomoyo_set_lf(head); - } - head->r.eof = true; -} - -/** * tomoyo_write_pid: Specify PID to obtain domainname. * * @head: Pointer to "struct tomoyo_io_buffer". @@ -1182,14 +1661,12 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head) global_pid = true; pid = (unsigned int) simple_strtoul(buf, NULL, 10); rcu_read_lock(); - read_lock(&tasklist_lock); if (global_pid) p = find_task_by_pid_ns(pid, &init_pid_ns); else p = find_task_by_vpid(pid); if (p) domain = tomoyo_real_domain(p); - read_unlock(&tasklist_lock); rcu_read_unlock(); if (!domain) return; @@ -1197,18 +1674,21 @@ static void tomoyo_read_pid(struct tomoyo_io_buffer *head) tomoyo_set_string(head, domain->domainname->name); } +/* String table for domain transition control keywords. */ static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = { - [TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] - = TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN, - [TOMOYO_TRANSITION_CONTROL_INITIALIZE] - = TOMOYO_KEYWORD_INITIALIZE_DOMAIN, - [TOMOYO_TRANSITION_CONTROL_NO_KEEP] = TOMOYO_KEYWORD_NO_KEEP_DOMAIN, - [TOMOYO_TRANSITION_CONTROL_KEEP] = TOMOYO_KEYWORD_KEEP_DOMAIN + [TOMOYO_TRANSITION_CONTROL_NO_RESET] = "no_reset_domain ", + [TOMOYO_TRANSITION_CONTROL_RESET] = "reset_domain ", + [TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] = "no_initialize_domain ", + [TOMOYO_TRANSITION_CONTROL_INITIALIZE] = "initialize_domain ", + [TOMOYO_TRANSITION_CONTROL_NO_KEEP] = "no_keep_domain ", + [TOMOYO_TRANSITION_CONTROL_KEEP] = "keep_domain ", }; +/* String table for grouping keywords. 
*/ static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = { - [TOMOYO_PATH_GROUP] = TOMOYO_KEYWORD_PATH_GROUP, - [TOMOYO_NUMBER_GROUP] = TOMOYO_KEYWORD_NUMBER_GROUP + [TOMOYO_PATH_GROUP] = "path_group ", + [TOMOYO_NUMBER_GROUP] = "number_group ", + [TOMOYO_ADDRESS_GROUP] = "address_group ", }; /** @@ -1222,34 +1702,35 @@ static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = { */ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) { - char *data = head->write_buf; - bool is_delete = tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE); - u8 i; - static const struct { - const char *keyword; - int (*write) (char *, const bool); - } tomoyo_callback[4] = { - { TOMOYO_KEYWORD_AGGREGATOR, tomoyo_write_aggregator }, - { TOMOYO_KEYWORD_FILE_PATTERN, tomoyo_write_pattern }, - { TOMOYO_KEYWORD_DENY_REWRITE, tomoyo_write_no_rewrite }, - { TOMOYO_KEYWORD_ALLOW_READ, tomoyo_write_globally_readable }, + const bool is_delete = head->w.is_delete; + struct tomoyo_acl_param param = { + .ns = head->w.ns, + .is_delete = is_delete, + .data = head->write_buf, }; - + u8 i; + if (tomoyo_str_starts(¶m.data, "aggregator ")) + return tomoyo_write_aggregator(¶m); for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++) - if (tomoyo_str_starts(&data, tomoyo_transition_type[i])) - return tomoyo_write_transition_control(data, is_delete, - i); - for (i = 0; i < 4; i++) - if (tomoyo_str_starts(&data, tomoyo_callback[i].keyword)) - return tomoyo_callback[i].write(data, is_delete); + if (tomoyo_str_starts(¶m.data, tomoyo_transition_type[i])) + return tomoyo_write_transition_control(¶m, i); for (i = 0; i < TOMOYO_MAX_GROUP; i++) - if (tomoyo_str_starts(&data, tomoyo_group_name[i])) - return tomoyo_write_group(data, is_delete, i); + if (tomoyo_str_starts(¶m.data, tomoyo_group_name[i])) + return tomoyo_write_group(¶m, i); + if (tomoyo_str_starts(¶m.data, "acl_group ")) { + unsigned int group; + char *data; + group = simple_strtoul(param.data, &data, 10); + if (group < TOMOYO_MAX_ACL_GROUPS && *data++ == ' ') + return tomoyo_write_domain2 + (head->w.ns, &head->w.ns->acl_group[group], + data, is_delete); + } return -EINVAL; } /** - * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group" list. + * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list. * * @head: Pointer to "struct tomoyo_io_buffer". * @idx: Index number. 
@@ -1260,9 +1741,12 @@ static int tomoyo_write_exception(struct tomoyo_io_buffer *head) */ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) { - list_for_each_cookie(head->r.group, &tomoyo_group_list[idx]) { + struct tomoyo_policy_namespace *ns = + container_of(head->r.ns, typeof(*ns), namespace_list); + struct list_head *list = &ns->group_list[idx]; + list_for_each_cookie(head->r.group, list) { struct tomoyo_group *group = - list_entry(head->r.group, typeof(*group), list); + list_entry(head->r.group, typeof(*group), head.list); list_for_each_cookie(head->r.acl, &group->member_list) { struct tomoyo_acl_head *ptr = list_entry(head->r.acl, typeof(*ptr), list); @@ -1270,6 +1754,7 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) continue; if (!tomoyo_flush(head)) return false; + tomoyo_print_namespace(head); tomoyo_set_string(head, tomoyo_group_name[idx]); tomoyo_set_string(head, group->group_name->name); if (idx == TOMOYO_PATH_GROUP) { @@ -1282,6 +1767,15 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) (ptr, struct tomoyo_number_group, head)->number); + } else if (idx == TOMOYO_ADDRESS_GROUP) { + char buffer[128]; + + struct tomoyo_address_group *member = + container_of(ptr, typeof(*member), + head); + tomoyo_print_ip(buffer, sizeof(buffer), + &member->address); + tomoyo_io_printf(head, " %s", buffer); } tomoyo_set_lf(head); } @@ -1303,7 +1797,10 @@ static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx) */ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) { - list_for_each_cookie(head->r.acl, &tomoyo_policy_list[idx]) { + struct tomoyo_policy_namespace *ns = + container_of(head->r.ns, typeof(*ns), namespace_list); + struct list_head *list = &ns->policy_list[idx]; + list_for_each_cookie(head->r.acl, list) { struct tomoyo_acl_head *acl = container_of(head->r.acl, typeof(*acl), list); if (acl->is_deleted) @@ -1315,35 +1812,23 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) { struct tomoyo_transition_control *ptr = container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - tomoyo_transition_type + tomoyo_print_namespace(head); + tomoyo_set_string(head, tomoyo_transition_type [ptr->type]); - if (ptr->program) - tomoyo_set_string(head, - ptr->program->name); - if (ptr->program && ptr->domainname) - tomoyo_set_string(head, " from "); - if (ptr->domainname) - tomoyo_set_string(head, - ptr->domainname-> - name); - } - break; - case TOMOYO_ID_GLOBALLY_READABLE: - { - struct tomoyo_readable_file *ptr = - container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - TOMOYO_KEYWORD_ALLOW_READ); - tomoyo_set_string(head, ptr->filename->name); + tomoyo_set_string(head, ptr->program ? + ptr->program->name : "any"); + tomoyo_set_string(head, " from "); + tomoyo_set_string(head, ptr->domainname ? 
+ ptr->domainname->name : + "any"); } break; case TOMOYO_ID_AGGREGATOR: { struct tomoyo_aggregator *ptr = container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - TOMOYO_KEYWORD_AGGREGATOR); + tomoyo_print_namespace(head); + tomoyo_set_string(head, "aggregator "); tomoyo_set_string(head, ptr->original_name->name); tomoyo_set_space(head); @@ -1351,24 +1836,6 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) ptr->aggregated_name->name); } break; - case TOMOYO_ID_PATTERN: - { - struct tomoyo_no_pattern *ptr = - container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - TOMOYO_KEYWORD_FILE_PATTERN); - tomoyo_set_string(head, ptr->pattern->name); - } - break; - case TOMOYO_ID_NO_REWRITE: - { - struct tomoyo_no_rewrite *ptr = - container_of(acl, typeof(*ptr), head); - tomoyo_set_string(head, - TOMOYO_KEYWORD_DENY_REWRITE); - tomoyo_set_string(head, ptr->pattern->name); - } - break; default: continue; } @@ -1387,6 +1854,8 @@ static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx) */ static void tomoyo_read_exception(struct tomoyo_io_buffer *head) { + struct tomoyo_policy_namespace *ns = + container_of(head->r.ns, typeof(*ns), namespace_list); if (head->r.eof) return; while (head->r.step < TOMOYO_MAX_POLICY && @@ -1399,95 +1868,41 @@ static void tomoyo_read_exception(struct tomoyo_io_buffer *head) head->r.step++; if (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP) return; + while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP + + TOMOYO_MAX_ACL_GROUPS) { + head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY + - TOMOYO_MAX_GROUP; + if (!tomoyo_read_domain2(head, &ns->acl_group + [head->r.acl_group_index])) + return; + head->r.step++; + } head->r.eof = true; } -/** - * tomoyo_print_header - Get header line of audit log. - * - * @r: Pointer to "struct tomoyo_request_info". - * - * Returns string representation. - * - * This function uses kmalloc(), so caller must kfree() if this function - * didn't return NULL. - */ -static char *tomoyo_print_header(struct tomoyo_request_info *r) -{ - struct timeval tv; - const pid_t gpid = task_pid_nr(current); - static const int tomoyo_buffer_len = 4096; - char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS); - pid_t ppid; - if (!buffer) - return NULL; - do_gettimeofday(&tv); - rcu_read_lock(); - ppid = task_tgid_vnr(current->real_parent); - rcu_read_unlock(); - snprintf(buffer, tomoyo_buffer_len - 1, - "#timestamp=%lu profile=%u mode=%s (global-pid=%u)" - " task={ pid=%u ppid=%u uid=%u gid=%u euid=%u" - " egid=%u suid=%u sgid=%u fsuid=%u fsgid=%u }", - tv.tv_sec, r->profile, tomoyo_mode[r->mode], gpid, - task_tgid_vnr(current), ppid, - current_uid(), current_gid(), current_euid(), - current_egid(), current_suid(), current_sgid(), - current_fsuid(), current_fsgid()); - return buffer; -} - -/** - * tomoyo_init_audit_log - Allocate buffer for audit logs. - * - * @len: Required size. - * @r: Pointer to "struct tomoyo_request_info". - * - * Returns pointer to allocated memory. - * - * The @len is updated to add the header lines' size on success. - * - * This function uses kzalloc(), so caller must kfree() if this function - * didn't return NULL. 
- */ -static char *tomoyo_init_audit_log(int *len, struct tomoyo_request_info *r) -{ - char *buf = NULL; - const char *header; - const char *domainname; - if (!r->domain) - r->domain = tomoyo_domain(); - domainname = r->domain->domainname->name; - header = tomoyo_print_header(r); - if (!header) - return NULL; - *len += strlen(domainname) + strlen(header) + 10; - buf = kzalloc(*len, GFP_NOFS); - if (buf) - snprintf(buf, (*len) - 1, "%s\n%s\n", header, domainname); - kfree(header); - return buf; -} - -/* Wait queue for tomoyo_query_list. */ +/* Wait queue for kernel -> userspace notification. */ static DECLARE_WAIT_QUEUE_HEAD(tomoyo_query_wait); - -/* Lock for manipulating tomoyo_query_list. */ -static DEFINE_SPINLOCK(tomoyo_query_list_lock); +/* Wait queue for userspace -> kernel notification. */ +static DECLARE_WAIT_QUEUE_HEAD(tomoyo_answer_wait); /* Structure for query. */ struct tomoyo_query { struct list_head list; + struct tomoyo_domain_info *domain; char *query; - int query_len; + size_t query_len; unsigned int serial; - int timer; - int answer; + u8 timer; + u8 answer; + u8 retry; }; /* The list for "struct tomoyo_query". */ static LIST_HEAD(tomoyo_query_list); +/* Lock for manipulating tomoyo_query_list. */ +static DEFINE_SPINLOCK(tomoyo_query_list_lock); + /* * Number of "struct file" referring /sys/kernel/security/tomoyo/query * interface. @@ -1495,10 +1910,82 @@ static LIST_HEAD(tomoyo_query_list); static atomic_t tomoyo_query_observers = ATOMIC_INIT(0); /** + * tomoyo_truncate - Truncate a line. + * + * @str: String to truncate. + * + * Returns length of truncated @str. + */ +static int tomoyo_truncate(char *str) +{ + char *start = str; + while (*(unsigned char *) str > (unsigned char) ' ') + str++; + *str = '\0'; + return strlen(start) + 1; +} + +/** + * tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode. + * + * @domain: Pointer to "struct tomoyo_domain_info". + * @header: Lines containing ACL. + * + * Returns nothing. + */ +static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header) +{ + char *buffer; + char *realpath = NULL; + char *argv0 = NULL; + char *symlink = NULL; + char *cp = strchr(header, '\n'); + int len; + if (!cp) + return; + cp = strchr(cp + 1, '\n'); + if (!cp) + return; + *cp++ = '\0'; + len = strlen(cp) + 1; + /* strstr() will return NULL if ordering is wrong. */ + if (*cp == 'f') { + argv0 = strstr(header, " argv[]={ \""); + if (argv0) { + argv0 += 10; + len += tomoyo_truncate(argv0) + 14; + } + realpath = strstr(header, " exec={ realpath=\""); + if (realpath) { + realpath += 8; + len += tomoyo_truncate(realpath) + 6; + } + symlink = strstr(header, " symlink.target=\""); + if (symlink) + len += tomoyo_truncate(symlink + 1) + 1; + } + buffer = kmalloc(len, GFP_NOFS); + if (!buffer) + return; + snprintf(buffer, len - 1, "%s", cp); + if (realpath) + tomoyo_addprintf(buffer, len, " exec.%s", realpath); + if (argv0) + tomoyo_addprintf(buffer, len, " exec.argv[0]=%s", argv0); + if (symlink) + tomoyo_addprintf(buffer, len, "%s", symlink); + tomoyo_normalize_line(buffer); + if (!tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer, + false)) + tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES); + kfree(buffer); +} + +/** * tomoyo_supervisor - Ask for the supervisor's decision. * - * @r: Pointer to "struct tomoyo_request_info". - * @fmt: The printf()'s format string, followed by parameters. + * @r: Pointer to "struct tomoyo_request_info". + * @fmt: The printf()'s format string, followed by parameters. 
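/*
 * A minimal userspace sketch (not part of this patch): learning mode
 * (tomoyo_add_entry) extracts tokens such as exec.realpath and
 * exec.argv[0] from the audit header by cutting each one at the first
 * byte that is not above ' ', exactly like tomoyo_truncate() in the
 * hunk above.  Re-implemented here for illustration; the function and
 * variable names are not the kernel's.
 */
#include <stdio.h>
#include <string.h>

static int truncate_token(char *str)
{
	char *start = str;

	while (*(unsigned char *)str > (unsigned char)' ')
		str++;
	*str = '\0';
	return strlen(start) + 1;	/* length including the new NUL */
}

int main(void)
{
	char buf[] = "/usr/sbin/sshd\" } argv[]={ \"sshd\" }";
	const int len = truncate_token(buf);

	printf("%s (%d bytes)\n", buf, len);	/* -> /usr/sbin/sshd" (16 bytes) */
	return 0;
}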
* * Returns 0 if the supervisor decided to permit the access request which * violated the policy in enforcing mode, TOMOYO_RETRY_REQUEST if the @@ -1508,88 +1995,80 @@ static atomic_t tomoyo_query_observers = ATOMIC_INIT(0); int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) { va_list args; - int error = -EPERM; - int pos; + int error; int len; static unsigned int tomoyo_serial; - struct tomoyo_query *entry = NULL; + struct tomoyo_query entry = { }; bool quota_exceeded = false; - char *header; + va_start(args, fmt); + len = vsnprintf((char *) &len, 1, fmt, args) + 1; + va_end(args); + /* Write /sys/kernel/security/tomoyo/audit. */ + va_start(args, fmt); + tomoyo_write_log2(r, len, fmt, args); + va_end(args); + /* Nothing more to do if granted. */ + if (r->granted) + return 0; + if (r->mode) + tomoyo_update_stat(r->mode); switch (r->mode) { - char *buffer; + case TOMOYO_CONFIG_ENFORCING: + error = -EPERM; + if (atomic_read(&tomoyo_query_observers)) + break; + goto out; case TOMOYO_CONFIG_LEARNING: - if (!tomoyo_domain_quota_is_ok(r)) - return 0; - va_start(args, fmt); - len = vsnprintf((char *) &pos, sizeof(pos) - 1, fmt, args) + 4; - va_end(args); - buffer = kmalloc(len, GFP_NOFS); - if (!buffer) - return 0; - va_start(args, fmt); - vsnprintf(buffer, len - 1, fmt, args); - va_end(args); - tomoyo_normalize_line(buffer); - tomoyo_write_domain2(buffer, r->domain, false); - kfree(buffer); + error = 0; + /* Check max_learning_entry parameter. */ + if (tomoyo_domain_quota_is_ok(r)) + break; /* fall through */ - case TOMOYO_CONFIG_PERMISSIVE: + default: return 0; } - if (!r->domain) - r->domain = tomoyo_domain(); - if (!atomic_read(&tomoyo_query_observers)) - return -EPERM; + /* Get message. */ va_start(args, fmt); - len = vsnprintf((char *) &pos, sizeof(pos) - 1, fmt, args) + 32; + entry.query = tomoyo_init_log(r, len, fmt, args); va_end(args); - header = tomoyo_init_audit_log(&len, r); - if (!header) + if (!entry.query) goto out; - entry = kzalloc(sizeof(*entry), GFP_NOFS); - if (!entry) - goto out; - entry->query = kzalloc(len, GFP_NOFS); - if (!entry->query) + entry.query_len = strlen(entry.query) + 1; + if (!error) { + tomoyo_add_entry(r->domain, entry.query); goto out; - len = ksize(entry->query); + } + len = tomoyo_round2(entry.query_len); + entry.domain = r->domain; spin_lock(&tomoyo_query_list_lock); - if (tomoyo_quota_for_query && tomoyo_query_memory_size + len + - sizeof(*entry) >= tomoyo_quota_for_query) { + if (tomoyo_memory_quota[TOMOYO_MEMORY_QUERY] && + tomoyo_memory_used[TOMOYO_MEMORY_QUERY] + len + >= tomoyo_memory_quota[TOMOYO_MEMORY_QUERY]) { quota_exceeded = true; } else { - tomoyo_query_memory_size += len + sizeof(*entry); - entry->serial = tomoyo_serial++; + entry.serial = tomoyo_serial++; + entry.retry = r->retry; + tomoyo_memory_used[TOMOYO_MEMORY_QUERY] += len; + list_add_tail(&entry.list, &tomoyo_query_list); } spin_unlock(&tomoyo_query_list_lock); if (quota_exceeded) goto out; - pos = snprintf(entry->query, len - 1, "Q%u-%hu\n%s", - entry->serial, r->retry, header); - kfree(header); - header = NULL; - va_start(args, fmt); - vsnprintf(entry->query + pos, len - 1 - pos, fmt, args); - entry->query_len = strlen(entry->query) + 1; - va_end(args); - spin_lock(&tomoyo_query_list_lock); - list_add_tail(&entry->list, &tomoyo_query_list); - spin_unlock(&tomoyo_query_list_lock); /* Give 10 seconds for supervisor's opinion. 
*/ - for (entry->timer = 0; - atomic_read(&tomoyo_query_observers) && entry->timer < 100; - entry->timer++) { - wake_up(&tomoyo_query_wait); - set_current_state(TASK_INTERRUPTIBLE); - schedule_timeout(HZ / 10); - if (entry->answer) + while (entry.timer < 10) { + wake_up_all(&tomoyo_query_wait); + if (wait_event_interruptible_timeout + (tomoyo_answer_wait, entry.answer || + !atomic_read(&tomoyo_query_observers), HZ)) break; + else + entry.timer++; } spin_lock(&tomoyo_query_list_lock); - list_del(&entry->list); - tomoyo_query_memory_size -= len + sizeof(*entry); + list_del(&entry.list); + tomoyo_memory_used[TOMOYO_MEMORY_QUERY] -= len; spin_unlock(&tomoyo_query_list_lock); - switch (entry->answer) { + switch (entry.answer) { case 3: /* Asked to retry by administrator. */ error = TOMOYO_RETRY_REQUEST; r->retry++; @@ -1598,22 +2077,39 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) /* Granted by administrator. */ error = 0; break; - case 0: - /* Timed out. */ - break; default: - /* Rejected by administrator. */ + /* Timed out or rejected by administrator. */ break; } - out: - if (entry) - kfree(entry->query); - kfree(entry); - kfree(header); +out: + kfree(entry.query); return error; } /** + * tomoyo_find_domain_by_qid - Get domain by query id. + * + * @serial: Query ID assigned by tomoyo_supervisor(). + * + * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise. + */ +static struct tomoyo_domain_info *tomoyo_find_domain_by_qid +(unsigned int serial) +{ + struct tomoyo_query *ptr; + struct tomoyo_domain_info *domain = NULL; + spin_lock(&tomoyo_query_list_lock); + list_for_each_entry(ptr, &tomoyo_query_list, list) { + if (ptr->serial != serial) + continue; + domain = ptr->domain; + break; + } + spin_unlock(&tomoyo_query_list_lock); + return domain; +} + +/** * tomoyo_poll_query - poll() for /sys/kernel/security/tomoyo/query. * * @file: Pointer to "struct file". @@ -1623,28 +2119,13 @@ int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) * * Waits for access requests which violated policy in enforcing mode. 
*/ -static int tomoyo_poll_query(struct file *file, poll_table *wait) +static unsigned int tomoyo_poll_query(struct file *file, poll_table *wait) { - struct list_head *tmp; - bool found = false; - u8 i; - for (i = 0; i < 2; i++) { - spin_lock(&tomoyo_query_list_lock); - list_for_each(tmp, &tomoyo_query_list) { - struct tomoyo_query *ptr = - list_entry(tmp, typeof(*ptr), list); - if (ptr->answer) - continue; - found = true; - break; - } - spin_unlock(&tomoyo_query_list_lock); - if (found) - return POLLIN | POLLRDNORM; - if (i) - break; - poll_wait(file, &tomoyo_query_wait, wait); - } + if (!list_empty(&tomoyo_query_list)) + return POLLIN | POLLRDNORM; + poll_wait(file, &tomoyo_query_wait, wait); + if (!list_empty(&tomoyo_query_list)) + return POLLIN | POLLRDNORM; return 0; } @@ -1656,8 +2137,8 @@ static int tomoyo_poll_query(struct file *file, poll_table *wait) static void tomoyo_read_query(struct tomoyo_io_buffer *head) { struct list_head *tmp; - int pos = 0; - int len = 0; + unsigned int pos = 0; + size_t len = 0; char *buf; if (head->r.w_pos) return; @@ -1668,8 +2149,6 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head) spin_lock(&tomoyo_query_list_lock); list_for_each(tmp, &tomoyo_query_list) { struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list); - if (ptr->answer) - continue; if (pos++ != head->r.query_index) continue; len = ptr->query_len; @@ -1680,15 +2159,13 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head) head->r.query_index = 0; return; } - buf = kzalloc(len, GFP_NOFS); + buf = kzalloc(len + 32, GFP_NOFS); if (!buf) return; pos = 0; spin_lock(&tomoyo_query_list_lock); list_for_each(tmp, &tomoyo_query_list) { struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list); - if (ptr->answer) - continue; if (pos++ != head->r.query_index) continue; /* @@ -1696,7 +2173,8 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head) * can change, but I don't care. */ if (len == ptr->query_len) - memmove(buf, ptr->query, len); + snprintf(buf, len + 31, "Q%u-%hu\n%s", ptr->serial, + ptr->retry, ptr->query); break; } spin_unlock(&tomoyo_query_list_lock); @@ -1735,8 +2213,10 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head) struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list); if (ptr->serial != serial) continue; - if (!ptr->answer) - ptr->answer = answer; + ptr->answer = answer; + /* Remove from tomoyo_query_list. */ + if (ptr->answer) + list_del_init(&ptr->list); break; } spin_unlock(&tomoyo_query_list_lock); @@ -1753,29 +2233,107 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head) static void tomoyo_read_version(struct tomoyo_io_buffer *head) { if (!head->r.eof) { - tomoyo_io_printf(head, "2.3.0"); + tomoyo_io_printf(head, "2.5.0"); head->r.eof = true; } } +/* String table for /sys/kernel/security/tomoyo/stat interface. */ +static const char * const tomoyo_policy_headers[TOMOYO_MAX_POLICY_STAT] = { + [TOMOYO_STAT_POLICY_UPDATES] = "update:", + [TOMOYO_STAT_POLICY_LEARNING] = "violation in learning mode:", + [TOMOYO_STAT_POLICY_PERMISSIVE] = "violation in permissive mode:", + [TOMOYO_STAT_POLICY_ENFORCING] = "violation in enforcing mode:", +}; + +/* String table for /sys/kernel/security/tomoyo/stat interface. */ +static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = { + [TOMOYO_MEMORY_POLICY] = "policy:", + [TOMOYO_MEMORY_AUDIT] = "audit log:", + [TOMOYO_MEMORY_QUERY] = "query message:", +}; + +/* Timestamp counter for last updated. 
*/ +static unsigned int tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT]; +/* Counter for number of updates. */ +static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT]; + +/** + * tomoyo_update_stat - Update statistic counters. + * + * @index: Index for policy type. + * + * Returns nothing. + */ +void tomoyo_update_stat(const u8 index) +{ + struct timeval tv; + do_gettimeofday(&tv); + /* + * I don't use atomic operations because race condition is not fatal. + */ + tomoyo_stat_updated[index]++; + tomoyo_stat_modified[index] = tv.tv_sec; +} + /** - * tomoyo_read_self_domain - Get the current process's domainname. + * tomoyo_read_stat - Read statistic data. * * @head: Pointer to "struct tomoyo_io_buffer". * - * Returns the current process's domainname. + * Returns nothing. */ -static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head) +static void tomoyo_read_stat(struct tomoyo_io_buffer *head) { - if (!head->r.eof) { - /* - * tomoyo_domain()->domainname != NULL - * because every process belongs to a domain and - * the domain's name cannot be NULL. - */ - tomoyo_io_printf(head, "%s", tomoyo_domain()->domainname->name); - head->r.eof = true; + u8 i; + unsigned int total = 0; + if (head->r.eof) + return; + for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) { + tomoyo_io_printf(head, "Policy %-30s %10u", + tomoyo_policy_headers[i], + tomoyo_stat_updated[i]); + if (tomoyo_stat_modified[i]) { + struct tomoyo_time stamp; + tomoyo_convert_time(tomoyo_stat_modified[i], &stamp); + tomoyo_io_printf(head, " (Last: %04u/%02u/%02u " + "%02u:%02u:%02u)", + stamp.year, stamp.month, stamp.day, + stamp.hour, stamp.min, stamp.sec); + } + tomoyo_set_lf(head); + } + for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) { + unsigned int used = tomoyo_memory_used[i]; + total += used; + tomoyo_io_printf(head, "Memory used by %-22s %10u", + tomoyo_memory_headers[i], used); + used = tomoyo_memory_quota[i]; + if (used) + tomoyo_io_printf(head, " (Quota: %10u)", used); + tomoyo_set_lf(head); } + tomoyo_io_printf(head, "Total memory used: %10u\n", + total); + head->r.eof = true; +} + +/** + * tomoyo_write_stat - Set memory quota. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns 0. + */ +static int tomoyo_write_stat(struct tomoyo_io_buffer *head) +{ + char *data = head->write_buf; + u8 i; + if (tomoyo_str_starts(&data, "Memory used by ")) + for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) + if (tomoyo_str_starts(&data, tomoyo_memory_headers[i])) + sscanf(data, "%u", &tomoyo_memory_quota[i]); + return 0; } /** @@ -1784,9 +2342,7 @@ static void tomoyo_read_self_domain(struct tomoyo_io_buffer *head) * @type: Type of interface. * @file: Pointer to "struct file". * - * Associates policy handler and returns 0 on success, -ENOMEM otherwise. - * - * Caller acquires tomoyo_read_lock(). + * Returns 0 on success, negative value otherwise. 
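The new stat interface replaces the old meminfo file: tomoyo_read_stat() prints the per-category policy counters plus the memory-usage lines, and tomoyo_write_stat() accepts the same "Memory used by <category>:" headers back in order to set a quota. A small sketch of both directions follows (run it as a registered policy manager, since tomoyo_write_control() rejects quota updates from other writers with -EPERM); the 4 MB figure is only an example.

#include <stdio.h>

int main(void)
{
        char line[256];
        FILE *in = fopen("/sys/kernel/security/tomoyo/stat", "r");
        FILE *out;
        if (!in)
                return 1;
        /* Lines look like "Memory used by policy: 123456" or
         * "Policy update: 42 (Last: 2011/09/03 12:34:56)". */
        while (fgets(line, sizeof(line), in))
                fputs(line, stdout);
        fclose(in);
        /* Set a quota: tomoyo_write_stat() matches the "Memory used by "
         * prefix, then the category header, then reads the byte count. */
        out = fopen("/sys/kernel/security/tomoyo/stat", "w");
        if (!out)
                return 1;
        fputs("Memory used by policy: 4194304\n", out);
        fclose(out);
        return 0;
}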
*/ int tomoyo_open_control(const u8 type, struct file *file) { @@ -1807,14 +2363,10 @@ int tomoyo_open_control(const u8 type, struct file *file) head->write = tomoyo_write_exception; head->read = tomoyo_read_exception; break; - case TOMOYO_SELFDOMAIN: - /* /sys/kernel/security/tomoyo/self_domain */ - head->read = tomoyo_read_self_domain; - break; - case TOMOYO_DOMAIN_STATUS: - /* /sys/kernel/security/tomoyo/.domain_status */ - head->write = tomoyo_write_domain_profile; - head->read = tomoyo_read_domain_profile; + case TOMOYO_AUDIT: + /* /sys/kernel/security/tomoyo/audit */ + head->poll = tomoyo_poll_log; + head->read = tomoyo_read_log; break; case TOMOYO_PROCESS_STATUS: /* /sys/kernel/security/tomoyo/.process_status */ @@ -1826,11 +2378,11 @@ int tomoyo_open_control(const u8 type, struct file *file) head->read = tomoyo_read_version; head->readbuf_size = 128; break; - case TOMOYO_MEMINFO: - /* /sys/kernel/security/tomoyo/meminfo */ - head->write = tomoyo_write_memory_quota; - head->read = tomoyo_read_memory_counter; - head->readbuf_size = 512; + case TOMOYO_STAT: + /* /sys/kernel/security/tomoyo/stat */ + head->write = tomoyo_write_stat; + head->read = tomoyo_read_stat; + head->readbuf_size = 1024; break; case TOMOYO_PROFILE: /* /sys/kernel/security/tomoyo/profile */ @@ -1880,26 +2432,16 @@ int tomoyo_open_control(const u8 type, struct file *file) return -ENOMEM; } } - if (type != TOMOYO_QUERY) - head->reader_idx = tomoyo_read_lock(); - file->private_data = head; - /* - * Call the handler now if the file is - * /sys/kernel/security/tomoyo/self_domain - * so that the user can use - * cat < /sys/kernel/security/tomoyo/self_domain" - * to know the current process's domainname. - */ - if (type == TOMOYO_SELFDOMAIN) - tomoyo_read_control(file, NULL, 0); /* * If the file is /sys/kernel/security/tomoyo/query , increment the * observer counter. * The obserber counter is used by tomoyo_supervisor() to see if * there is some process monitoring /sys/kernel/security/tomoyo/query. */ - else if (type == TOMOYO_QUERY) + if (type == TOMOYO_QUERY) atomic_inc(&tomoyo_query_observers); + file->private_data = head; + tomoyo_notify_gc(head, true); return 0; } @@ -1907,35 +2449,72 @@ int tomoyo_open_control(const u8 type, struct file *file) * tomoyo_poll_control - poll() for /sys/kernel/security/tomoyo/ interface. * * @file: Pointer to "struct file". - * @wait: Pointer to "poll_table". + * @wait: Pointer to "poll_table". Maybe NULL. * - * Waits for read readiness. - * /sys/kernel/security/tomoyo/query is handled by /usr/sbin/tomoyo-queryd . + * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write, + * POLLOUT | POLLWRNORM otherwise. */ -int tomoyo_poll_control(struct file *file, poll_table *wait) +unsigned int tomoyo_poll_control(struct file *file, poll_table *wait) { struct tomoyo_io_buffer *head = file->private_data; - if (!head->poll) - return -ENOSYS; - return head->poll(file, wait); + if (head->poll) + return head->poll(file, wait) | POLLOUT | POLLWRNORM; + return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM; +} + +/** + * tomoyo_set_namespace_cursor - Set namespace to read. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns nothing. + */ +static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head) +{ + struct list_head *ns; + if (head->type != TOMOYO_EXCEPTIONPOLICY && + head->type != TOMOYO_PROFILE) + return; + /* + * If this is the first read, or reading previous namespace finished + * and has more namespaces to read, update the namespace cursor. 
+ */ + ns = head->r.ns; + if (!ns || (head->r.eof && ns->next != &tomoyo_namespace_list)) { + /* Clearing is OK because tomoyo_flush() returned true. */ + memset(&head->r, 0, sizeof(head->r)); + head->r.ns = ns ? ns->next : tomoyo_namespace_list.next; + } +} + +/** + * tomoyo_has_more_namespace - Check for unread namespaces. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * + * Returns true if we have more entries to print, false otherwise. + */ +static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head) +{ + return (head->type == TOMOYO_EXCEPTIONPOLICY || + head->type == TOMOYO_PROFILE) && head->r.eof && + head->r.ns->next != &tomoyo_namespace_list; } /** * tomoyo_read_control - read() for /sys/kernel/security/tomoyo/ interface. * - * @file: Pointer to "struct file". + * @head: Pointer to "struct tomoyo_io_buffer". * @buffer: Poiner to buffer to write to. * @buffer_len: Size of @buffer. * * Returns bytes read on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). */ -int tomoyo_read_control(struct file *file, char __user *buffer, - const int buffer_len) +ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, + const int buffer_len) { int len; - struct tomoyo_io_buffer *head = file->private_data; + int idx; if (!head->read) return -ENOSYS; @@ -1943,64 +2522,157 @@ int tomoyo_read_control(struct file *file, char __user *buffer, return -EINTR; head->read_user_buf = buffer; head->read_user_buf_avail = buffer_len; + idx = tomoyo_read_lock(); if (tomoyo_flush(head)) /* Call the policy handler. */ - head->read(head); - tomoyo_flush(head); + do { + tomoyo_set_namespace_cursor(head); + head->read(head); + } while (tomoyo_flush(head) && + tomoyo_has_more_namespace(head)); + tomoyo_read_unlock(idx); len = head->read_user_buf - buffer; mutex_unlock(&head->io_sem); return len; } /** + * tomoyo_parse_policy - Parse a policy line. + * + * @head: Poiter to "struct tomoyo_io_buffer". + * @line: Line to parse. + * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line) +{ + /* Delete request? */ + head->w.is_delete = !strncmp(line, "delete ", 7); + if (head->w.is_delete) + memmove(line, line + 7, strlen(line + 7) + 1); + /* Selecting namespace to update. */ + if (head->type == TOMOYO_EXCEPTIONPOLICY || + head->type == TOMOYO_PROFILE) { + if (*line == '<') { + char *cp = strchr(line, ' '); + if (cp) { + *cp++ = '\0'; + head->w.ns = tomoyo_assign_namespace(line); + memmove(line, cp, strlen(cp) + 1); + } else + head->w.ns = NULL; + } else + head->w.ns = &tomoyo_kernel_namespace; + /* Don't allow updating if namespace is invalid. */ + if (!head->w.ns) + return -ENOENT; + } + /* Do the update. */ + return head->write(head); +} + +/** * tomoyo_write_control - write() for /sys/kernel/security/tomoyo/ interface. * - * @file: Pointer to "struct file". + * @head: Pointer to "struct tomoyo_io_buffer". * @buffer: Pointer to buffer to read from. * @buffer_len: Size of @buffer. * * Returns @buffer_len on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). 
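tomoyo_parse_policy() above concentrates the per-line prefix handling: an optional "delete " marker, and (for the exception_policy and profile interfaces) an optional leading "<namespace>" token that selects which policy namespace the rest of the line updates via tomoyo_assign_namespace(), defaulting to the <kernel> namespace. The userspace sketch below simply re-states that splitting for illustration; the sample directive texts are placeholders, not a statement of TOMOYO 2.5 policy syntax.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static void parse_line(char *line)
{
        bool is_delete = !strncmp(line, "delete ", 7);
        const char *ns = "<kernel>";            /* Default namespace. */
        if (is_delete)
                line += 7;
        if (*line == '<') {                     /* "<namespace> rest-of-line" */
                char *cp = strchr(line, ' ');
                if (cp) {
                        *cp++ = '\0';
                        ns = line;
                        line = cp;
                }
        }
        printf("ns=%-10s delete=%d body=\"%s\"\n", ns, is_delete, line);
}

int main(void)
{
        char a[] = "delete <kernel> aggregator /usr/bin/cat /usr/bin/catall";
        char b[] = "file read /etc/fstab";
        parse_line(a);
        parse_line(b);
        return 0;
}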
*/ -int tomoyo_write_control(struct file *file, const char __user *buffer, - const int buffer_len) +ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, + const char __user *buffer, const int buffer_len) { - struct tomoyo_io_buffer *head = file->private_data; int error = buffer_len; - int avail_len = buffer_len; + size_t avail_len = buffer_len; char *cp0 = head->write_buf; - + int idx; if (!head->write) return -ENOSYS; if (!access_ok(VERIFY_READ, buffer, buffer_len)) return -EFAULT; - /* Don't allow updating policies by non manager programs. */ - if (head->write != tomoyo_write_pid && - head->write != tomoyo_write_domain && !tomoyo_manager()) - return -EPERM; if (mutex_lock_interruptible(&head->io_sem)) return -EINTR; + head->read_user_buf_avail = 0; + idx = tomoyo_read_lock(); /* Read a line and dispatch it to the policy handler. */ while (avail_len > 0) { char c; - if (head->write_avail >= head->writebuf_size - 1) { - error = -ENOMEM; - break; - } else if (get_user(c, buffer)) { + if (head->w.avail >= head->writebuf_size - 1) { + const int len = head->writebuf_size * 2; + char *cp = kzalloc(len, GFP_NOFS); + if (!cp) { + error = -ENOMEM; + break; + } + memmove(cp, cp0, head->w.avail); + kfree(cp0); + head->write_buf = cp; + cp0 = cp; + head->writebuf_size = len; + } + if (get_user(c, buffer)) { error = -EFAULT; break; } buffer++; avail_len--; - cp0[head->write_avail++] = c; + cp0[head->w.avail++] = c; if (c != '\n') continue; - cp0[head->write_avail - 1] = '\0'; - head->write_avail = 0; + cp0[head->w.avail - 1] = '\0'; + head->w.avail = 0; tomoyo_normalize_line(cp0); - head->write(head); + if (!strcmp(cp0, "reset")) { + head->w.ns = &tomoyo_kernel_namespace; + head->w.domain = NULL; + memset(&head->r, 0, sizeof(head->r)); + continue; + } + /* Don't allow updating policies by non manager programs. */ + switch (head->type) { + case TOMOYO_PROCESS_STATUS: + /* This does not write anything. */ + break; + case TOMOYO_DOMAINPOLICY: + if (tomoyo_select_domain(head, cp0)) + continue; + /* fall through */ + case TOMOYO_EXCEPTIONPOLICY: + if (!strcmp(cp0, "select transition_only")) { + head->r.print_transition_related_only = true; + continue; + } + /* fall through */ + default: + if (!tomoyo_manager()) { + error = -EPERM; + goto out; + } + } + switch (tomoyo_parse_policy(head, cp0)) { + case -EPERM: + error = -EPERM; + goto out; + case 0: + switch (head->type) { + case TOMOYO_DOMAINPOLICY: + case TOMOYO_EXCEPTIONPOLICY: + case TOMOYO_STAT: + case TOMOYO_PROFILE: + case TOMOYO_MANAGER: + tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES); + break; + default: + break; + } + break; + } } +out: + tomoyo_read_unlock(idx); mutex_unlock(&head->io_sem); return error; } @@ -2008,36 +2680,18 @@ int tomoyo_write_control(struct file *file, const char __user *buffer, /** * tomoyo_close_control - close() for /sys/kernel/security/tomoyo/ interface. * - * @file: Pointer to "struct file". - * - * Releases memory and returns 0. - * - * Caller looses tomoyo_read_lock(). + * @head: Pointer to "struct tomoyo_io_buffer". */ -int tomoyo_close_control(struct file *file) +void tomoyo_close_control(struct tomoyo_io_buffer *head) { - struct tomoyo_io_buffer *head = file->private_data; - const bool is_write = !!head->write_buf; - /* * If the file is /sys/kernel/security/tomoyo/query , decrement the * observer counter. */ - if (head->type == TOMOYO_QUERY) - atomic_dec(&tomoyo_query_observers); - else - tomoyo_read_unlock(head->reader_idx); - /* Release memory used for policy I/O. 
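One behavioural change buried in tomoyo_write_control() above: instead of failing with -ENOMEM when a single line outgrows the write buffer, the buffer is now doubled and the bytes collected so far are carried over. Restated as a standalone userspace helper (names are illustrative):

#include <stdlib.h>
#include <string.h>

/* Double the line buffer, preserving the 'used' bytes collected so far.
 * Returns the new buffer (old one is freed) or NULL on allocation failure. */
static char *grow_buffer(char *buf, size_t *size, size_t used)
{
        size_t new_size = *size * 2;
        char *nbuf = calloc(1, new_size);

        if (!nbuf)
                return NULL;
        memcpy(nbuf, buf, used);
        free(buf);
        *size = new_size;
        return nbuf;
}

In the kernel version the copy uses kzalloc()/memmove() under GFP_NOFS, and a failed allocation still aborts the write with -ENOMEM as before.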
*/ - kfree(head->read_buf); - head->read_buf = NULL; - kfree(head->write_buf); - head->write_buf = NULL; - kfree(head); - head = NULL; - file->private_data = NULL; - if (is_write) - tomoyo_run_gc(); - return 0; + if (head->type == TOMOYO_QUERY && + atomic_dec_and_test(&tomoyo_query_observers)) + wake_up_all(&tomoyo_answer_wait); + tomoyo_notify_gc(head, false); } /** @@ -2048,27 +2702,90 @@ void tomoyo_check_profile(void) struct tomoyo_domain_info *domain; const int idx = tomoyo_read_lock(); tomoyo_policy_loaded = true; - /* Check all profiles currently assigned to domains are defined. */ + printk(KERN_INFO "TOMOYO: 2.5.0\n"); list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { const u8 profile = domain->profile; - if (tomoyo_profile_ptr[profile]) + const struct tomoyo_policy_namespace *ns = domain->ns; + if (ns->profile_version != 20110903) + printk(KERN_ERR + "Profile version %u is not supported.\n", + ns->profile_version); + else if (!ns->profile_ptr[profile]) + printk(KERN_ERR + "Profile %u (used by '%s') is not defined.\n", + profile, domain->domainname->name); + else continue; - printk(KERN_ERR "You need to define profile %u before using it.\n", - profile); - printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ " + printk(KERN_ERR + "Userland tools for TOMOYO 2.5 must be installed and " + "policy must be initialized.\n"); + printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.5/ " "for more information.\n"); - panic("Profile %u (used by '%s') not defined.\n", - profile, domain->domainname->name); + panic("STOP!"); } tomoyo_read_unlock(idx); - if (tomoyo_profile_version != 20090903) { - printk(KERN_ERR "You need to install userland programs for " - "TOMOYO 2.3 and initialize policy configuration.\n"); - printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.3/ " - "for more information.\n"); - panic("Profile version %u is not supported.\n", - tomoyo_profile_version); - } - printk(KERN_INFO "TOMOYO: 2.3.0\n"); printk(KERN_INFO "Mandatory Access Control activated.\n"); } + +/** + * tomoyo_load_builtin_policy - Load built-in policy. + * + * Returns nothing. + */ +void __init tomoyo_load_builtin_policy(void) +{ + /* + * This include file is manually created and contains built-in policy + * named "tomoyo_builtin_profile", "tomoyo_builtin_exception_policy", + * "tomoyo_builtin_domain_policy", "tomoyo_builtin_manager", + * "tomoyo_builtin_stat" in the form of "static char [] __initdata". 
+ */ +#include "builtin-policy.h" + u8 i; + const int idx = tomoyo_read_lock(); + for (i = 0; i < 5; i++) { + struct tomoyo_io_buffer head = { }; + char *start = ""; + switch (i) { + case 0: + start = tomoyo_builtin_profile; + head.type = TOMOYO_PROFILE; + head.write = tomoyo_write_profile; + break; + case 1: + start = tomoyo_builtin_exception_policy; + head.type = TOMOYO_EXCEPTIONPOLICY; + head.write = tomoyo_write_exception; + break; + case 2: + start = tomoyo_builtin_domain_policy; + head.type = TOMOYO_DOMAINPOLICY; + head.write = tomoyo_write_domain; + break; + case 3: + start = tomoyo_builtin_manager; + head.type = TOMOYO_MANAGER; + head.write = tomoyo_write_manager; + break; + case 4: + start = tomoyo_builtin_stat; + head.type = TOMOYO_STAT; + head.write = tomoyo_write_stat; + break; + } + while (1) { + char *end = strchr(start, '\n'); + if (!end) + break; + *end = '\0'; + tomoyo_normalize_line(start); + head.write_buf = start; + tomoyo_parse_policy(&head, start); + start = end + 1; + } + } + tomoyo_read_unlock(idx); +#ifdef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + tomoyo_check_profile(); +#endif +} diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h index 7c66bd89878..b897d486201 100644 --- a/security/tomoyo/common.h +++ b/security/tomoyo/common.h @@ -3,7 +3,7 @@ * * Header file for TOMOYO. * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #ifndef _SECURITY_TOMOYO_COMMON_H @@ -21,7 +21,18 @@ #include <linux/list.h> #include <linux/cred.h> #include <linux/poll.h> -struct linux_binprm; +#include <linux/binfmts.h> +#include <linux/highmem.h> +#include <linux/net.h> +#include <linux/inet.h> +#include <linux/in.h> +#include <linux/in6.h> +#include <linux/un.h> +#include <net/sock.h> +#include <net/af_unix.h> +#include <net/ip.h> +#include <net/ipv6.h> +#include <net/udp.h> /********** Constants definitions. **********/ @@ -33,71 +44,175 @@ struct linux_binprm; #define TOMOYO_HASH_BITS 8 #define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS) +/* + * TOMOYO checks only SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET. + * Therefore, we don't need SOCK_MAX. + */ +#define TOMOYO_SOCK_MAX 6 + #define TOMOYO_EXEC_TMPSIZE 4096 +/* Garbage collector is trying to kfree() this element. */ +#define TOMOYO_GC_IN_PROGRESS -1 + /* Profile number is an integer between 0 and 255. */ #define TOMOYO_MAX_PROFILES 256 +/* Group number is an integer between 0 and 255. */ +#define TOMOYO_MAX_ACL_GROUPS 256 + +/* Index numbers for "struct tomoyo_condition". 
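tomoyo_load_builtin_policy() walks each built-in blob by repeatedly locating the next '\n', NUL-terminating the line in place and feeding it to tomoyo_parse_policy(); note that a trailing fragment without a final newline is silently ignored. The same walk in a runnable userspace form (the sample blob content is arbitrary):

#include <stdio.h>
#include <string.h>

static void for_each_line(char *start, void (*handle)(char *line))
{
        for (;;) {
                char *end = strchr(start, '\n');
                if (!end)
                        break;          /* Text after the last '\n' is skipped. */
                *end = '\0';
                handle(start);
                start = end + 1;
        }
}

static void show(char *line)
{
        printf("line: \"%s\"\n", line);
}

int main(void)
{
        char blob[] = "first directive\nsecond directive\nignored tail";
        for_each_line(blob, show);
        return 0;
}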
*/ +enum tomoyo_conditions_index { + TOMOYO_TASK_UID, /* current_uid() */ + TOMOYO_TASK_EUID, /* current_euid() */ + TOMOYO_TASK_SUID, /* current_suid() */ + TOMOYO_TASK_FSUID, /* current_fsuid() */ + TOMOYO_TASK_GID, /* current_gid() */ + TOMOYO_TASK_EGID, /* current_egid() */ + TOMOYO_TASK_SGID, /* current_sgid() */ + TOMOYO_TASK_FSGID, /* current_fsgid() */ + TOMOYO_TASK_PID, /* sys_getpid() */ + TOMOYO_TASK_PPID, /* sys_getppid() */ + TOMOYO_EXEC_ARGC, /* "struct linux_binprm *"->argc */ + TOMOYO_EXEC_ENVC, /* "struct linux_binprm *"->envc */ + TOMOYO_TYPE_IS_SOCKET, /* S_IFSOCK */ + TOMOYO_TYPE_IS_SYMLINK, /* S_IFLNK */ + TOMOYO_TYPE_IS_FILE, /* S_IFREG */ + TOMOYO_TYPE_IS_BLOCK_DEV, /* S_IFBLK */ + TOMOYO_TYPE_IS_DIRECTORY, /* S_IFDIR */ + TOMOYO_TYPE_IS_CHAR_DEV, /* S_IFCHR */ + TOMOYO_TYPE_IS_FIFO, /* S_IFIFO */ + TOMOYO_MODE_SETUID, /* S_ISUID */ + TOMOYO_MODE_SETGID, /* S_ISGID */ + TOMOYO_MODE_STICKY, /* S_ISVTX */ + TOMOYO_MODE_OWNER_READ, /* S_IRUSR */ + TOMOYO_MODE_OWNER_WRITE, /* S_IWUSR */ + TOMOYO_MODE_OWNER_EXECUTE, /* S_IXUSR */ + TOMOYO_MODE_GROUP_READ, /* S_IRGRP */ + TOMOYO_MODE_GROUP_WRITE, /* S_IWGRP */ + TOMOYO_MODE_GROUP_EXECUTE, /* S_IXGRP */ + TOMOYO_MODE_OTHERS_READ, /* S_IROTH */ + TOMOYO_MODE_OTHERS_WRITE, /* S_IWOTH */ + TOMOYO_MODE_OTHERS_EXECUTE, /* S_IXOTH */ + TOMOYO_EXEC_REALPATH, + TOMOYO_SYMLINK_TARGET, + TOMOYO_PATH1_UID, + TOMOYO_PATH1_GID, + TOMOYO_PATH1_INO, + TOMOYO_PATH1_MAJOR, + TOMOYO_PATH1_MINOR, + TOMOYO_PATH1_PERM, + TOMOYO_PATH1_TYPE, + TOMOYO_PATH1_DEV_MAJOR, + TOMOYO_PATH1_DEV_MINOR, + TOMOYO_PATH2_UID, + TOMOYO_PATH2_GID, + TOMOYO_PATH2_INO, + TOMOYO_PATH2_MAJOR, + TOMOYO_PATH2_MINOR, + TOMOYO_PATH2_PERM, + TOMOYO_PATH2_TYPE, + TOMOYO_PATH2_DEV_MAJOR, + TOMOYO_PATH2_DEV_MINOR, + TOMOYO_PATH1_PARENT_UID, + TOMOYO_PATH1_PARENT_GID, + TOMOYO_PATH1_PARENT_INO, + TOMOYO_PATH1_PARENT_PERM, + TOMOYO_PATH2_PARENT_UID, + TOMOYO_PATH2_PARENT_GID, + TOMOYO_PATH2_PARENT_INO, + TOMOYO_PATH2_PARENT_PERM, + TOMOYO_MAX_CONDITION_KEYWORD, + TOMOYO_NUMBER_UNION, + TOMOYO_NAME_UNION, + TOMOYO_ARGV_ENTRY, + TOMOYO_ENVP_ENTRY, +}; + + +/* Index numbers for stat(). */ +enum tomoyo_path_stat_index { + /* Do not change this order. */ + TOMOYO_PATH1, + TOMOYO_PATH1_PARENT, + TOMOYO_PATH2, + TOMOYO_PATH2_PARENT, + TOMOYO_MAX_PATH_STAT +}; + +/* Index numbers for operation mode. */ enum tomoyo_mode_index { TOMOYO_CONFIG_DISABLED, TOMOYO_CONFIG_LEARNING, TOMOYO_CONFIG_PERMISSIVE, TOMOYO_CONFIG_ENFORCING, - TOMOYO_CONFIG_USE_DEFAULT = 255 + TOMOYO_CONFIG_MAX_MODE, + TOMOYO_CONFIG_WANT_REJECT_LOG = 64, + TOMOYO_CONFIG_WANT_GRANT_LOG = 128, + TOMOYO_CONFIG_USE_DEFAULT = 255, }; +/* Index numbers for entry type. */ enum tomoyo_policy_id { TOMOYO_ID_GROUP, + TOMOYO_ID_ADDRESS_GROUP, TOMOYO_ID_PATH_GROUP, TOMOYO_ID_NUMBER_GROUP, TOMOYO_ID_TRANSITION_CONTROL, TOMOYO_ID_AGGREGATOR, - TOMOYO_ID_GLOBALLY_READABLE, - TOMOYO_ID_PATTERN, - TOMOYO_ID_NO_REWRITE, TOMOYO_ID_MANAGER, + TOMOYO_ID_CONDITION, TOMOYO_ID_NAME, TOMOYO_ID_ACL, TOMOYO_ID_DOMAIN, TOMOYO_MAX_POLICY }; +/* Index numbers for domain's attributes. */ +enum tomoyo_domain_info_flags_index { + /* Quota warnning flag. */ + TOMOYO_DIF_QUOTA_WARNED, + /* + * This domain was unable to create a new domain at + * tomoyo_find_next_domain() because the name of the domain to be + * created was too long or it could not allocate memory. + * More than one process continued execve() without domain transition. + */ + TOMOYO_DIF_TRANSITION_FAILED, + TOMOYO_MAX_DOMAIN_INFO_FLAGS +}; + +/* Index numbers for audit type. 
*/ +enum tomoyo_grant_log { + /* Follow profile's configuration. */ + TOMOYO_GRANTLOG_AUTO, + /* Do not generate grant log. */ + TOMOYO_GRANTLOG_NO, + /* Generate grant_log. */ + TOMOYO_GRANTLOG_YES, +}; + +/* Index numbers for group entries. */ enum tomoyo_group_id { TOMOYO_PATH_GROUP, TOMOYO_NUMBER_GROUP, + TOMOYO_ADDRESS_GROUP, TOMOYO_MAX_GROUP }; -/* Keywords for ACLs. */ -#define TOMOYO_KEYWORD_AGGREGATOR "aggregator " -#define TOMOYO_KEYWORD_ALLOW_MOUNT "allow_mount " -#define TOMOYO_KEYWORD_ALLOW_READ "allow_read " -#define TOMOYO_KEYWORD_DELETE "delete " -#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite " -#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern " -#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain " -#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain " -#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain " -#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain " -#define TOMOYO_KEYWORD_PATH_GROUP "path_group " -#define TOMOYO_KEYWORD_NUMBER_GROUP "number_group " -#define TOMOYO_KEYWORD_SELECT "select " -#define TOMOYO_KEYWORD_USE_PROFILE "use_profile " -#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read" -#define TOMOYO_KEYWORD_QUOTA_EXCEEDED "quota_exceeded" -#define TOMOYO_KEYWORD_TRANSITION_FAILED "transition_failed" -/* A domain definition starts with <kernel>. */ -#define TOMOYO_ROOT_NAME "<kernel>" -#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1) - -/* Value type definition. */ -#define TOMOYO_VALUE_TYPE_INVALID 0 -#define TOMOYO_VALUE_TYPE_DECIMAL 1 -#define TOMOYO_VALUE_TYPE_OCTAL 2 -#define TOMOYO_VALUE_TYPE_HEXADECIMAL 3 +/* Index numbers for type of numeric values. */ +enum tomoyo_value_type { + TOMOYO_VALUE_TYPE_INVALID, + TOMOYO_VALUE_TYPE_DECIMAL, + TOMOYO_VALUE_TYPE_OCTAL, + TOMOYO_VALUE_TYPE_HEXADECIMAL, +}; +/* Index numbers for domain transition control keywords. */ enum tomoyo_transition_type { /* Do not change this order, */ + TOMOYO_TRANSITION_CONTROL_NO_RESET, + TOMOYO_TRANSITION_CONTROL_RESET, TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE, TOMOYO_TRANSITION_CONTROL_INITIALIZE, TOMOYO_TRANSITION_CONTROL_NO_KEEP, @@ -112,37 +227,35 @@ enum tomoyo_acl_entry_type_index { TOMOYO_TYPE_PATH_NUMBER_ACL, TOMOYO_TYPE_MKDEV_ACL, TOMOYO_TYPE_MOUNT_ACL, + TOMOYO_TYPE_INET_ACL, + TOMOYO_TYPE_UNIX_ACL, + TOMOYO_TYPE_ENV_ACL, + TOMOYO_TYPE_MANUAL_TASK_ACL, }; -/* Index numbers for File Controls. */ - -/* - * TOMOYO_TYPE_READ_WRITE is special. TOMOYO_TYPE_READ_WRITE is automatically - * set if both TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are set. - * Both TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are automatically set if - * TOMOYO_TYPE_READ_WRITE is set. - * TOMOYO_TYPE_READ_WRITE is automatically cleared if either TOMOYO_TYPE_READ - * or TOMOYO_TYPE_WRITE is cleared. - * Both TOMOYO_TYPE_READ and TOMOYO_TYPE_WRITE are automatically cleared if - * TOMOYO_TYPE_READ_WRITE is cleared. - */ - +/* Index numbers for access controls with one pathname. */ enum tomoyo_path_acl_index { - TOMOYO_TYPE_READ_WRITE, TOMOYO_TYPE_EXECUTE, TOMOYO_TYPE_READ, TOMOYO_TYPE_WRITE, + TOMOYO_TYPE_APPEND, TOMOYO_TYPE_UNLINK, + TOMOYO_TYPE_GETATTR, TOMOYO_TYPE_RMDIR, TOMOYO_TYPE_TRUNCATE, TOMOYO_TYPE_SYMLINK, - TOMOYO_TYPE_REWRITE, TOMOYO_TYPE_CHROOT, TOMOYO_TYPE_UMOUNT, TOMOYO_MAX_PATH_OPERATION }; -#define TOMOYO_RW_MASK ((1 << TOMOYO_TYPE_READ) | (1 << TOMOYO_TYPE_WRITE)) +/* Index numbers for /sys/kernel/security/tomoyo/stat interface. 
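The old TOMOYO_VALUE_TYPE_* macros become a proper enum in this hunk. As a rough illustration of what the three non-invalid classes correspond to, the classifier below follows the usual C prefix conventions ("0x..." is hexadecimal, a remaining leading "0" is octal, everything else decimal); the kernel's own parser, which is not visible in this hunk, additionally extracts the value and remembers which notation was used so it can be printed back the same way.

#include <stdio.h>

enum value_type { VALUE_INVALID, VALUE_DECIMAL, VALUE_OCTAL, VALUE_HEX };

static enum value_type classify(const char *s)
{
        if (!*s)
                return VALUE_INVALID;
        if (s[0] == '0' && (s[1] == 'x' || s[1] == 'X'))
                return VALUE_HEX;
        if (s[0] == '0' && s[1] != '\0')
                return VALUE_OCTAL;
        return VALUE_DECIMAL;
}

int main(void)
{
        printf("%d %d %d\n", classify("1024"), classify("0755"), classify("0xff"));
        return 0;
}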
*/ +enum tomoyo_memory_stat_type { + TOMOYO_MEMORY_POLICY, + TOMOYO_MEMORY_AUDIT, + TOMOYO_MEMORY_QUERY, + TOMOYO_MAX_MEMORY_STAT +}; enum tomoyo_mkdev_acl_index { TOMOYO_TYPE_MKBLOCK, @@ -150,6 +263,16 @@ enum tomoyo_mkdev_acl_index { TOMOYO_MAX_MKDEV_OPERATION }; +/* Index numbers for socket operations. */ +enum tomoyo_network_acl_index { + TOMOYO_NETWORK_BIND, /* bind() operation. */ + TOMOYO_NETWORK_LISTEN, /* listen() operation. */ + TOMOYO_NETWORK_CONNECT, /* connect() operation. */ + TOMOYO_NETWORK_SEND, /* send() operation. */ + TOMOYO_MAX_NETWORK_OPERATION +}; + +/* Index numbers for access controls with two pathnames. */ enum tomoyo_path2_acl_index { TOMOYO_TYPE_LINK, TOMOYO_TYPE_RENAME, @@ -157,6 +280,7 @@ enum tomoyo_path2_acl_index { TOMOYO_MAX_PATH2_OPERATION }; +/* Index numbers for access controls with one pathname and one number. */ enum tomoyo_path_number_acl_index { TOMOYO_TYPE_CREATE, TOMOYO_TYPE_MKDIR, @@ -169,31 +293,44 @@ enum tomoyo_path_number_acl_index { TOMOYO_MAX_PATH_NUMBER_OPERATION }; +/* Index numbers for /sys/kernel/security/tomoyo/ interfaces. */ enum tomoyo_securityfs_interface_index { TOMOYO_DOMAINPOLICY, TOMOYO_EXCEPTIONPOLICY, - TOMOYO_DOMAIN_STATUS, TOMOYO_PROCESS_STATUS, - TOMOYO_MEMINFO, - TOMOYO_SELFDOMAIN, + TOMOYO_STAT, + TOMOYO_AUDIT, TOMOYO_VERSION, TOMOYO_PROFILE, TOMOYO_QUERY, TOMOYO_MANAGER }; +/* Index numbers for special mount operations. */ +enum tomoyo_special_mount { + TOMOYO_MOUNT_BIND, /* mount --bind /source /dest */ + TOMOYO_MOUNT_MOVE, /* mount --move /old /new */ + TOMOYO_MOUNT_REMOUNT, /* mount -o remount /dir */ + TOMOYO_MOUNT_MAKE_UNBINDABLE, /* mount --make-unbindable /dir */ + TOMOYO_MOUNT_MAKE_PRIVATE, /* mount --make-private /dir */ + TOMOYO_MOUNT_MAKE_SLAVE, /* mount --make-slave /dir */ + TOMOYO_MOUNT_MAKE_SHARED, /* mount --make-shared /dir */ + TOMOYO_MAX_SPECIAL_MOUNT +}; + +/* Index numbers for functionality. */ enum tomoyo_mac_index { TOMOYO_MAC_FILE_EXECUTE, TOMOYO_MAC_FILE_OPEN, TOMOYO_MAC_FILE_CREATE, TOMOYO_MAC_FILE_UNLINK, + TOMOYO_MAC_FILE_GETATTR, TOMOYO_MAC_FILE_MKDIR, TOMOYO_MAC_FILE_RMDIR, TOMOYO_MAC_FILE_MKFIFO, TOMOYO_MAC_FILE_MKSOCK, TOMOYO_MAC_FILE_TRUNCATE, TOMOYO_MAC_FILE_SYMLINK, - TOMOYO_MAC_FILE_REWRITE, TOMOYO_MAC_FILE_MKBLOCK, TOMOYO_MAC_FILE_MKCHAR, TOMOYO_MAC_FILE_LINK, @@ -206,41 +343,87 @@ enum tomoyo_mac_index { TOMOYO_MAC_FILE_MOUNT, TOMOYO_MAC_FILE_UMOUNT, TOMOYO_MAC_FILE_PIVOT_ROOT, + TOMOYO_MAC_NETWORK_INET_STREAM_BIND, + TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN, + TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT, + TOMOYO_MAC_NETWORK_INET_DGRAM_BIND, + TOMOYO_MAC_NETWORK_INET_DGRAM_SEND, + TOMOYO_MAC_NETWORK_INET_RAW_BIND, + TOMOYO_MAC_NETWORK_INET_RAW_SEND, + TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND, + TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN, + TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT, + TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND, + TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND, + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND, + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN, + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT, + TOMOYO_MAC_ENVIRON, TOMOYO_MAX_MAC_INDEX }; +/* Index numbers for category of functionality. */ enum tomoyo_mac_category_index { TOMOYO_MAC_CATEGORY_FILE, + TOMOYO_MAC_CATEGORY_NETWORK, + TOMOYO_MAC_CATEGORY_MISC, TOMOYO_MAX_MAC_CATEGORY_INDEX }; -#define TOMOYO_RETRY_REQUEST 1 /* Retry this request. */ - -/********** Structure definitions. **********/ - /* - * tomoyo_acl_head is a structure which is used for holding elements not in - * domain policy. - * It has following fields. + * Retry this request. 
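The tomoyo_special_mount entries map directly onto the mount(2) requests named in their comments. For reference, this is how those requests look from userspace; the paths are placeholders, the program must run as root, and failures are simply reported.

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
        /* TOMOYO_MOUNT_BIND: mount --bind /source /dest */
        if (mount("/source", "/dest", NULL, MS_BIND, NULL))
                perror("bind");
        /* TOMOYO_MOUNT_MOVE: mount --move /old /new */
        if (mount("/old", "/new", NULL, MS_MOVE, NULL))
                perror("move");
        /* TOMOYO_MOUNT_REMOUNT: mount -o remount /dir */
        if (mount(NULL, "/dir", NULL, MS_REMOUNT, NULL))
                perror("remount");
        /* TOMOYO_MOUNT_MAKE_SHARED: mount --make-shared /dir */
        if (mount(NULL, "/dir", NULL, MS_SHARED, NULL))
                perror("make-shared");
        return 0;
}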
Returned by tomoyo_supervisor() if policy violation has + * occurred in enforcing mode and the userspace daemon decided to retry. * - * (1) "list" which is linked to tomoyo_policy_list[] . - * (2) "is_deleted" is a bool which is true if marked as deleted, false - * otherwise. + * We must choose a positive value in order to distinguish "granted" (which is + * 0) and "rejected" (which is a negative value) and "retry". */ +#define TOMOYO_RETRY_REQUEST 1 + +/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */ +enum tomoyo_policy_stat_type { + /* Do not change this order. */ + TOMOYO_STAT_POLICY_UPDATES, + TOMOYO_STAT_POLICY_LEARNING, /* == TOMOYO_CONFIG_LEARNING */ + TOMOYO_STAT_POLICY_PERMISSIVE, /* == TOMOYO_CONFIG_PERMISSIVE */ + TOMOYO_STAT_POLICY_ENFORCING, /* == TOMOYO_CONFIG_ENFORCING */ + TOMOYO_MAX_POLICY_STAT +}; + +/* Index numbers for profile's PREFERENCE values. */ +enum tomoyo_pref_index { + TOMOYO_PREF_MAX_AUDIT_LOG, + TOMOYO_PREF_MAX_LEARNING_ENTRY, + TOMOYO_MAX_PREF +}; + +/********** Structure definitions. **********/ + +/* Common header for holding ACL entries. */ struct tomoyo_acl_head { struct list_head list; - bool is_deleted; + s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */ } __packed; -/* - * tomoyo_request_info is a structure which is used for holding - * - * (1) Domain information of current process. - * (2) How many retries are made for this request. - * (3) Profile number used for this request. - * (4) Access control mode of the profile. - */ +/* Common header for shared entries. */ +struct tomoyo_shared_acl_head { + struct list_head list; + atomic_t users; +} __packed; + +struct tomoyo_policy_namespace; + +/* Structure for request info. */ struct tomoyo_request_info { + /* + * For holding parameters specific to operations which deal files. + * NULL if not dealing files. + */ + struct tomoyo_obj_info *obj; + /* + * For holding parameters specific to execve() request. + * NULL if not dealing do_execve(). + */ + struct tomoyo_execve *ee; struct tomoyo_domain_info *domain; /* For holding parameters. */ union { @@ -248,11 +431,13 @@ struct tomoyo_request_info { const struct tomoyo_path_info *filename; /* For using wildcards at tomoyo_find_next_domain(). */ const struct tomoyo_path_info *matched_path; + /* One of values in "enum tomoyo_path_acl_index". */ u8 operation; } path; struct { const struct tomoyo_path_info *filename1; const struct tomoyo_path_info *filename2; + /* One of values in "enum tomoyo_path2_acl_index". */ u8 operation; } path2; struct { @@ -260,21 +445,49 @@ struct tomoyo_request_info { unsigned int mode; unsigned int major; unsigned int minor; + /* One of values in "enum tomoyo_mkdev_acl_index". */ u8 operation; } mkdev; struct { const struct tomoyo_path_info *filename; unsigned long number; + /* + * One of values in + * "enum tomoyo_path_number_acl_index". + */ u8 operation; } path_number; struct { + const struct tomoyo_path_info *name; + } environ; + struct { + const __be32 *address; + u16 port; + /* One of values smaller than TOMOYO_SOCK_MAX. */ + u8 protocol; + /* One of values in "enum tomoyo_network_acl_index". */ + u8 operation; + bool is_ipv6; + } inet_network; + struct { + const struct tomoyo_path_info *address; + /* One of values smaller than TOMOYO_SOCK_MAX. */ + u8 protocol; + /* One of values in "enum tomoyo_network_acl_index". 
*/ + u8 operation; + } unix_network; + struct { const struct tomoyo_path_info *type; const struct tomoyo_path_info *dir; const struct tomoyo_path_info *dev; unsigned long flags; int need_dev; } mount; + struct { + const struct tomoyo_path_info *domainname; + } task; } param; + struct tomoyo_acl_info *matched_acl; u8 param_type; bool granted; u8 retry; @@ -283,26 +496,7 @@ struct tomoyo_request_info { u8 type; }; -/* - * tomoyo_path_info is a structure which is used for holding a string data - * used by TOMOYO. - * This structure has several fields for supporting pattern matching. - * - * (1) "name" is the '\0' terminated string data. - * (2) "hash" is full_name_hash(name, strlen(name)). - * This allows tomoyo_pathcmp() to compare by hash before actually compare - * using strcmp(). - * (3) "const_len" is the length of the initial segment of "name" which - * consists entirely of non wildcard characters. In other words, the length - * which we can compare two strings using strncmp(). - * (4) "is_dir" is a bool which is true if "name" ends with "/", - * false otherwise. - * TOMOYO distinguishes directory and non-directory. A directory ends with - * "/" and non-directory does not end with "/". - * (5) "is_patterned" is a bool which is true if "name" contains wildcard - * characters, false otherwise. This allows TOMOYO to use "hash" and - * strcmp() for string comparison if "is_patterned" is false. - */ +/* Structure for holding a token. */ struct tomoyo_path_info { const char *name; u32 hash; /* = full_name_hash(name, strlen(name)) */ @@ -311,36 +505,39 @@ struct tomoyo_path_info { bool is_patterned; /* = tomoyo_path_contains_pattern(name) */ }; -/* - * tomoyo_name is a structure which is used for linking - * "struct tomoyo_path_info" into tomoyo_name_list . - */ +/* Structure for holding string data. */ struct tomoyo_name { - struct list_head list; - atomic_t users; + struct tomoyo_shared_acl_head head; struct tomoyo_path_info entry; }; +/* Structure for holding a word. */ struct tomoyo_name_union { + /* Either @filename or @group is NULL. */ const struct tomoyo_path_info *filename; struct tomoyo_group *group; - u8 is_group; }; +/* Structure for holding a number. */ struct tomoyo_number_union { unsigned long values[2]; - struct tomoyo_group *group; - u8 min_type; - u8 max_type; - u8 is_group; + struct tomoyo_group *group; /* Maybe NULL. */ + /* One of values in "enum tomoyo_value_type". */ + u8 value_type[2]; +}; + +/* Structure for holding an IP address. */ +struct tomoyo_ipaddr_union { + struct in6_addr ip[2]; /* Big endian. */ + struct tomoyo_group *group; /* Pointer to address group. */ + bool is_ipv6; /* Valid only if @group == NULL. */ }; -/* Structure for "path_group"/"number_group" directive. */ +/* Structure for "path_group"/"number_group"/"address_group" directive. */ struct tomoyo_group { - struct list_head list; + struct tomoyo_shared_acl_head head; const struct tomoyo_path_info *group_name; struct list_head member_list; - atomic_t users; }; /* Structure for "path_group" directive. */ @@ -355,130 +552,177 @@ struct tomoyo_number_group { struct tomoyo_number_union number; }; -/* - * tomoyo_acl_info is a structure which is used for holding - * - * (1) "list" which is linked to the ->acl_info_list of - * "struct tomoyo_domain_info" - * (2) "is_deleted" is a bool which is true if this domain is marked as - * "deleted", false otherwise. - * (3) "type" which tells type of the entry. 
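The long field-by-field comment on tomoyo_path_info is collapsed above, but the interesting detail survives in the field initialisers: "hash" caches full_name_hash() so that string comparisons can bail out on a hash mismatch before paying for strcmp(). A sketch of that fast path (the structure and function names here are illustrative, not the kernel's):

#include <stdbool.h>
#include <string.h>

struct path_info {
        const char *name;
        unsigned int hash;      /* kernel: full_name_hash(name, strlen(name)) */
};

/* Equal hashes are necessary (not sufficient) for equal strings, so the cheap
 * integer compare filters out almost all calls to strcmp(). */
static bool path_equal(const struct path_info *a, const struct path_info *b)
{
        return a->hash == b->hash && !strcmp(a->name, b->name);
}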
- * - * Packing "struct tomoyo_acl_info" allows - * "struct tomoyo_path_acl" to embed "u16" and "struct tomoyo_path2_acl" - * "struct tomoyo_path_number_acl" "struct tomoyo_mkdev_acl" to embed - * "u8" without enlarging their structure size. - */ +/* Structure for "address_group" directive. */ +struct tomoyo_address_group { + struct tomoyo_acl_head head; + /* Structure for holding an IP address. */ + struct tomoyo_ipaddr_union address; +}; + +/* Subset of "struct stat". Used by conditional ACL and audit logs. */ +struct tomoyo_mini_stat { + kuid_t uid; + kgid_t gid; + ino_t ino; + umode_t mode; + dev_t dev; + dev_t rdev; +}; + +/* Structure for dumping argv[] and envp[] of "struct linux_binprm". */ +struct tomoyo_page_dump { + struct page *page; /* Previously dumped page. */ + char *data; /* Contents of "page". Size is PAGE_SIZE. */ +}; + +/* Structure for attribute checks in addition to pathname checks. */ +struct tomoyo_obj_info { + /* + * True if tomoyo_get_attributes() was already called, false otherwise. + */ + bool validate_done; + /* True if @stat[] is valid. */ + bool stat_valid[TOMOYO_MAX_PATH_STAT]; + /* First pathname. Initialized with { NULL, NULL } if no path. */ + struct path path1; + /* Second pathname. Initialized with { NULL, NULL } if no path. */ + struct path path2; + /* + * Information on @path1, @path1's parent directory, @path2, @path2's + * parent directory. + */ + struct tomoyo_mini_stat stat[TOMOYO_MAX_PATH_STAT]; + /* + * Content of symbolic link to be created. NULL for operations other + * than symlink(). + */ + struct tomoyo_path_info *symlink_target; +}; + +/* Structure for argv[]. */ +struct tomoyo_argv { + unsigned long index; + const struct tomoyo_path_info *value; + bool is_not; +}; + +/* Structure for envp[]. */ +struct tomoyo_envp { + const struct tomoyo_path_info *name; + const struct tomoyo_path_info *value; + bool is_not; +}; + +/* Structure for execve() operation. */ +struct tomoyo_execve { + struct tomoyo_request_info r; + struct tomoyo_obj_info obj; + struct linux_binprm *bprm; + const struct tomoyo_path_info *transition; + /* For dumping argv[] and envp[]. */ + struct tomoyo_page_dump dump; + /* For temporary use. */ + char *tmp; /* Size is TOMOYO_EXEC_TMPSIZE bytes */ +}; + +/* Structure for entries which follows "struct tomoyo_condition". */ +struct tomoyo_condition_element { + /* + * Left hand operand. A "struct tomoyo_argv" for TOMOYO_ARGV_ENTRY, a + * "struct tomoyo_envp" for TOMOYO_ENVP_ENTRY is attached to the tail + * of the array of this struct. + */ + u8 left; + /* + * Right hand operand. A "struct tomoyo_number_union" for + * TOMOYO_NUMBER_UNION, a "struct tomoyo_name_union" for + * TOMOYO_NAME_UNION is attached to the tail of the array of this + * struct. + */ + u8 right; + /* Equation operator. True if equals or overlaps, false otherwise. */ + bool equals; +}; + +/* Structure for optional arguments. */ +struct tomoyo_condition { + struct tomoyo_shared_acl_head head; + u32 size; /* Memory size allocated for this entry. */ + u16 condc; /* Number of conditions in this struct. */ + u16 numbers_count; /* Number of "struct tomoyo_number_union values". */ + u16 names_count; /* Number of "struct tomoyo_name_union names". */ + u16 argc; /* Number of "struct tomoyo_argv". */ + u16 envc; /* Number of "struct tomoyo_envp". */ + u8 grant_log; /* One of values in "enum tomoyo_grant_log". */ + const struct tomoyo_path_info *transit; /* Maybe NULL. 
*/ + /* + * struct tomoyo_condition_element condition[condc]; + * struct tomoyo_number_union values[numbers_count]; + * struct tomoyo_name_union names[names_count]; + * struct tomoyo_argv argv[argc]; + * struct tomoyo_envp envp[envc]; + */ +}; + +/* Common header for individual entries. */ struct tomoyo_acl_info { struct list_head list; - bool is_deleted; - u8 type; /* = one of values in "enum tomoyo_acl_entry_type_index". */ + struct tomoyo_condition *cond; /* Maybe NULL. */ + s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */ + u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */ } __packed; -/* - * tomoyo_domain_info is a structure which is used for holding permissions - * (e.g. "allow_read /lib/libc-2.5.so") given to each domain. - * It has following fields. - * - * (1) "list" which is linked to tomoyo_domain_list . - * (2) "acl_info_list" which is linked to "struct tomoyo_acl_info". - * (3) "domainname" which holds the name of the domain. - * (4) "profile" which remembers profile number assigned to this domain. - * (5) "is_deleted" is a bool which is true if this domain is marked as - * "deleted", false otherwise. - * (6) "quota_warned" is a bool which is used for suppressing warning message - * when learning mode learned too much entries. - * (7) "ignore_global_allow_read" is a bool which is true if this domain - * should ignore "allow_read" directive in exception policy. - * (8) "transition_failed" is a bool which is set to true when this domain was - * unable to create a new domain at tomoyo_find_next_domain() because the - * name of the domain to be created was too long or it could not allocate - * memory. If set to true, more than one process continued execve() - * without domain transition. - * (9) "users" is an atomic_t that holds how many "struct cred"->security - * are referring this "struct tomoyo_domain_info". If is_deleted == true - * and users == 0, this struct will be kfree()d upon next garbage - * collection. - * - * A domain's lifecycle is an analogy of files on / directory. - * Multiple domains with the same domainname cannot be created (as with - * creating files with the same filename fails with -EEXIST). - * If a process reached a domain, that process can reside in that domain after - * that domain is marked as "deleted" (as with a process can access an already - * open()ed file after that file was unlink()ed). - */ +/* Structure for domain information. */ struct tomoyo_domain_info { struct list_head list; struct list_head acl_info_list; /* Name of this domain. Never NULL. */ const struct tomoyo_path_info *domainname; + /* Namespace for this domain. Never NULL. */ + struct tomoyo_policy_namespace *ns; u8 profile; /* Profile number to use. */ + u8 group; /* Group number to use. */ bool is_deleted; /* Delete flag. */ - bool quota_warned; /* Quota warnning flag. */ - bool ignore_global_allow_read; /* Ignore "allow_read" flag. */ - bool transition_failed; /* Domain transition failed flag. */ + bool flags[TOMOYO_MAX_DOMAIN_INFO_FLAGS]; atomic_t users; /* Number of referring credentials. */ }; /* - * tomoyo_path_acl is a structure which is used for holding an - * entry with one pathname operation (e.g. open(), mkdir()). - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "perm" which is a bitmask of permitted operations. - * (3) "name" is the pathname. 
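struct tomoyo_condition above is a fixed header followed, in the same allocation, by five variable-length arrays (condition[condc], values[numbers_count], names[names_count], argv[argc], envp[envc]), which is why the header records a count for each. The pointer arithmetic that addresses such a tail looks like the sketch below; the structure names are simplified stand-ins for the kernel ones and alignment handling is omitted.

#include <stddef.h>

struct cond_header {
        unsigned short condc;
        unsigned short numbers_count;
        /* further counts omitted */
};

struct cond_element {
        unsigned char left;
        unsigned char right;
        _Bool equals;
};

struct number_union {
        unsigned long values[2];
};

/* The first array starts immediately after the header ... */
static struct cond_element *cond_elements(struct cond_header *h)
{
        return (struct cond_element *)(h + 1);
}

/* ... and each later array starts where the previous one ends. */
static struct number_union *cond_numbers(struct cond_header *h)
{
        return (struct number_union *)(cond_elements(h) + h->condc);
}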
- * - * Directives held by this structure are "allow_read/write", "allow_execute", - * "allow_read", "allow_write", "allow_unlink", "allow_rmdir", - * "allow_truncate", "allow_symlink", "allow_rewrite", "allow_chroot" and - * "allow_unmount". + * Structure for "task manual_domain_transition" directive. + */ +struct tomoyo_task_acl { + struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */ + /* Pointer to domainname. */ + const struct tomoyo_path_info *domainname; +}; + +/* + * Structure for "file execute", "file read", "file write", "file append", + * "file unlink", "file getattr", "file rmdir", "file truncate", + * "file symlink", "file chroot" and "file unmount" directive. */ struct tomoyo_path_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */ - u16 perm; + u16 perm; /* Bitmask of values in "enum tomoyo_path_acl_index". */ struct tomoyo_name_union name; }; /* - * tomoyo_path_number_acl is a structure which is used for holding an - * entry with one pathname and one number operation. - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "perm" which is a bitmask of permitted operations. - * (3) "name" is the pathname. - * (4) "number" is the numeric value. - * - * Directives held by this structure are "allow_create", "allow_mkdir", - * "allow_ioctl", "allow_mkfifo", "allow_mksock", "allow_chmod", "allow_chown" - * and "allow_chgrp". - * + * Structure for "file create", "file mkdir", "file mkfifo", "file mksock", + * "file ioctl", "file chmod", "file chown" and "file chgrp" directive. */ struct tomoyo_path_number_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_NUMBER_ACL */ + /* Bitmask of values in "enum tomoyo_path_number_acl_index". */ u8 perm; struct tomoyo_name_union name; struct tomoyo_number_union number; }; -/* - * tomoyo_mkdev_acl is a structure which is used for holding an - * entry with one pathname and three numbers operation. - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "perm" which is a bitmask of permitted operations. - * (3) "mode" is the create mode. - * (4) "major" is the major number of device node. - * (5) "minor" is the minor number of device node. - * - * Directives held by this structure are "allow_mkchar", "allow_mkblock". - * - */ +/* Structure for "file mkblock" and "file mkchar" directive. */ struct tomoyo_mkdev_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MKDEV_ACL */ - u8 perm; + u8 perm; /* Bitmask of values in "enum tomoyo_mkdev_acl_index". */ struct tomoyo_name_union name; struct tomoyo_number_union mode; struct tomoyo_number_union major; @@ -486,38 +730,16 @@ struct tomoyo_mkdev_acl { }; /* - * tomoyo_path2_acl is a structure which is used for holding an - * entry with two pathnames operation (i.e. link(), rename() and pivot_root()). - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "perm" which is a bitmask of permitted operations. - * (3) "name1" is the source/old pathname. - * (4) "name2" is the destination/new pathname. - * - * Directives held by this structure are "allow_rename", "allow_link" and - * "allow_pivot_root". + * Structure for "file rename", "file link" and "file pivot_root" directive. */ struct tomoyo_path2_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */ - u8 perm; + u8 perm; /* Bitmask of values in "enum tomoyo_path2_acl_index". 
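tomoyo_path_acl and the other *_acl structures above store their permitted operations as a bitmask over the corresponding enum, so a single ACL entry for one pathname can carry several grants at once. A tiny illustration (the local enum mirrors the first few tomoyo_path_acl_index values):

#include <stdio.h>

enum path_acl_index { TYPE_EXECUTE, TYPE_READ, TYPE_WRITE, TYPE_APPEND };

int main(void)
{
        unsigned short perm = 0;

        perm |= 1 << TYPE_READ;        /* grant "file read ..."   */
        perm |= 1 << TYPE_APPEND;      /* grant "file append ..." */
        printf("read:  %s\n", perm & (1 << TYPE_READ) ? "allowed" : "denied");
        printf("write: %s\n", perm & (1 << TYPE_WRITE) ? "allowed" : "denied");
        return 0;
}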
*/ struct tomoyo_name_union name1; struct tomoyo_name_union name2; }; -/* - * tomoyo_mount_acl is a structure which is used for holding an - * entry for mount operation. - * It has following fields. - * - * (1) "head" which is a "struct tomoyo_acl_info". - * (2) "dev_name" is the device name. - * (3) "dir_name" is the mount point. - * (4) "fs_type" is the filesystem type. - * (5) "flags" is the mount flags. - * - * Directive held by this structure is "allow_mount". - */ +/* Structure for "file mount" directive. */ struct tomoyo_mount_acl { struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MOUNT_ACL */ struct tomoyo_name_union dev_name; @@ -526,7 +748,38 @@ struct tomoyo_mount_acl { struct tomoyo_number_union flags; }; -#define TOMOYO_MAX_IO_READ_QUEUE 32 +/* Structure for "misc env" directive in domain policy. */ +struct tomoyo_env_acl { + struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_ENV_ACL */ + const struct tomoyo_path_info *env; /* environment variable */ +}; + +/* Structure for "network inet" directive. */ +struct tomoyo_inet_acl { + struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */ + u8 protocol; + u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */ + struct tomoyo_ipaddr_union address; + struct tomoyo_number_union port; +}; + +/* Structure for "network unix" directive. */ +struct tomoyo_unix_acl { + struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */ + u8 protocol; + u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */ + struct tomoyo_name_union name; +}; + +/* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */ +struct tomoyo_acl_param { + char *data; + struct list_head *list; + struct tomoyo_policy_namespace *ns; + bool is_delete; +}; + +#define TOMOYO_MAX_IO_READ_QUEUE 64 /* * Structure for reading/writing policy via /sys/kernel/security/tomoyo @@ -535,98 +788,58 @@ struct tomoyo_mount_acl { struct tomoyo_io_buffer { void (*read) (struct tomoyo_io_buffer *); int (*write) (struct tomoyo_io_buffer *); - int (*poll) (struct file *file, poll_table *wait); + unsigned int (*poll) (struct file *file, poll_table *wait); /* Exclusive lock for this structure. */ struct mutex io_sem; - /* Index returned by tomoyo_read_lock(). */ - int reader_idx; char __user *read_user_buf; - int read_user_buf_avail; + size_t read_user_buf_avail; struct { + struct list_head *ns; struct list_head *domain; struct list_head *group; struct list_head *acl; - int avail; - int step; - int query_index; + size_t avail; + unsigned int step; + unsigned int query_index; u16 index; + u16 cond_index; + u8 acl_group_index; + u8 cond_step; u8 bit; u8 w_pos; bool eof; bool print_this_domain_only; - bool print_execute_only; + bool print_transition_related_only; + bool print_cond_part; const char *w[TOMOYO_MAX_IO_READ_QUEUE]; } r; - /* The position currently writing to. */ - struct tomoyo_domain_info *write_var1; + struct { + struct tomoyo_policy_namespace *ns; + /* The position currently writing to. */ + struct tomoyo_domain_info *domain; + /* Bytes available for writing. */ + size_t avail; + bool is_delete; + } w; /* Buffer for reading. */ char *read_buf; /* Size of read buffer. */ - int readbuf_size; + size_t readbuf_size; /* Buffer for writing. */ char *write_buf; - /* Bytes available for writing. */ - int write_avail; /* Size of write buffer. */ - int writebuf_size; + size_t writebuf_size; /* Type of this interface. */ - u8 type; -}; - -/* - * tomoyo_readable_file is a structure which is used for holding - * "allow_read" entries. 
- * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "filename" is a pathname which is allowed to open(O_RDONLY). - */ -struct tomoyo_readable_file { - struct tomoyo_acl_head head; - const struct tomoyo_path_info *filename; -}; - -/* - * tomoyo_no_pattern is a structure which is used for holding - * "file_pattern" entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "pattern" is a pathname pattern which is used for converting pathnames - * to pathname patterns during learning mode. - */ -struct tomoyo_no_pattern { - struct tomoyo_acl_head head; - const struct tomoyo_path_info *pattern; -}; - -/* - * tomoyo_no_rewrite is a structure which is used for holding - * "deny_rewrite" entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "pattern" is a pathname which is by default not permitted to modify - * already existing content. - */ -struct tomoyo_no_rewrite { - struct tomoyo_acl_head head; - const struct tomoyo_path_info *pattern; + enum tomoyo_securityfs_interface_index type; + /* Users counter protected by tomoyo_io_buffer_list_lock. */ + u8 users; + /* List for telling GC not to kfree() elements. */ + struct list_head list; }; /* - * tomoyo_transition_control is a structure which is used for holding - * "initialize_domain"/"no_initialize_domain"/"keep_domain"/"no_keep_domain" - * entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "type" is type of this entry. - * (3) "is_last_name" is a bool which is true if "domainname" is "the last - * component of a domainname", false otherwise. - * (4) "domainname" which is "a domainname" or "the last component of a - * domainname". - * (5) "program" which is a program's pathname. + * Structure for "initialize_domain"/"no_initialize_domain"/"keep_domain"/ + * "no_keep_domain" keyword. */ struct tomoyo_transition_control { struct tomoyo_acl_head head; @@ -637,35 +850,16 @@ struct tomoyo_transition_control { const struct tomoyo_path_info *program; /* Maybe NULL */ }; -/* - * tomoyo_aggregator is a structure which is used for holding - * "aggregator" entries. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "original_name" which is originally requested name. - * (3) "aggregated_name" which is name to rewrite. - */ +/* Structure for "aggregator" keyword. */ struct tomoyo_aggregator { struct tomoyo_acl_head head; const struct tomoyo_path_info *original_name; const struct tomoyo_path_info *aggregated_name; }; -/* - * tomoyo_manager is a structure which is used for holding list of - * domainnames or programs which are permitted to modify configuration via - * /sys/kernel/security/tomoyo/ interface. - * It has following fields. - * - * (1) "head" is "struct tomoyo_acl_head". - * (2) "is_domain" is a bool which is true if "manager" is a domainname, false - * otherwise. - * (3) "manager" is a domainname or a program's pathname. - */ +/* Structure for policy manager. */ struct tomoyo_manager { struct tomoyo_acl_head head; - bool is_domain; /* True if manager is a domainname. */ /* A path to program or a domainname. */ const struct tomoyo_path_info *manager; }; @@ -677,6 +871,7 @@ struct tomoyo_preference { bool permissive_verbose; }; +/* Structure for /sys/kernel/security/tomnoyo/profile interface. 
*/ struct tomoyo_profile { const struct tomoyo_path_info *comment; struct tomoyo_preference *learning; @@ -685,323 +880,443 @@ struct tomoyo_profile { struct tomoyo_preference preference; u8 default_config; u8 config[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX]; + unsigned int pref[TOMOYO_MAX_PREF]; +}; + +/* Structure for representing YYYY/MM/DD hh/mm/ss. */ +struct tomoyo_time { + u16 year; + u8 month; + u8 day; + u8 hour; + u8 min; + u8 sec; +}; + +/* Structure for policy namespace. */ +struct tomoyo_policy_namespace { + /* Profile table. Memory is allocated as needed. */ + struct tomoyo_profile *profile_ptr[TOMOYO_MAX_PROFILES]; + /* List of "struct tomoyo_group". */ + struct list_head group_list[TOMOYO_MAX_GROUP]; + /* List of policy. */ + struct list_head policy_list[TOMOYO_MAX_POLICY]; + /* The global ACL referred by "use_group" keyword. */ + struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS]; + /* List for connecting to tomoyo_namespace_list list. */ + struct list_head namespace_list; + /* Profile version. Currently only 20110903 is defined. */ + unsigned int profile_version; + /* Name of this namespace (e.g. "<kernel>", "</usr/sbin/httpd>" ). */ + const char *name; }; /********** Function prototypes. **********/ -/* Check whether the given string starts with the given keyword. */ -bool tomoyo_str_starts(char **src, const char *find); -/* Get tomoyo_realpath() of current process. */ -const char *tomoyo_get_exe(void); -/* Format string. */ -void tomoyo_normalize_line(unsigned char *buffer); -/* Print warning or error message on console. */ -void tomoyo_warn_log(struct tomoyo_request_info *r, const char *fmt, ...) - __attribute__ ((format(printf, 2, 3))); -/* Check all profiles currently assigned to domains are defined. */ -void tomoyo_check_profile(void); -/* Open operation for /sys/kernel/security/tomoyo/ interface. */ -int tomoyo_open_control(const u8 type, struct file *file); -/* Close /sys/kernel/security/tomoyo/ interface. */ -int tomoyo_close_control(struct file *file); -/* Poll operation for /sys/kernel/security/tomoyo/ interface. */ -int tomoyo_poll_control(struct file *file, poll_table *wait); -/* Read operation for /sys/kernel/security/tomoyo/ interface. */ -int tomoyo_read_control(struct file *file, char __user *buffer, - const int buffer_len); -/* Write operation for /sys/kernel/security/tomoyo/ interface. */ -int tomoyo_write_control(struct file *file, const char __user *buffer, - const int buffer_len); -/* Check whether the domain has too many ACL entries to hold. */ -bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r); -/* Print out of memory warning message. */ -void tomoyo_warn_oom(const char *function); -/* Check whether the given name matches the given name_union. */ -const struct tomoyo_path_info * -tomoyo_compare_name_union(const struct tomoyo_path_info *name, - const struct tomoyo_name_union *ptr); -/* Check whether the given number matches the given number_union. */ +bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, + const struct tomoyo_group *group); bool tomoyo_compare_number_union(const unsigned long value, const struct tomoyo_number_union *ptr); -int tomoyo_get_mode(const u8 profile, const u8 index); -void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...) - __attribute__ ((format(printf, 2, 3))); -/* Check whether the domainname is correct. 
*/ +bool tomoyo_condition(struct tomoyo_request_info *r, + const struct tomoyo_condition *cond); bool tomoyo_correct_domain(const unsigned char *domainname); -/* Check whether the token is correct. */ bool tomoyo_correct_path(const char *filename); bool tomoyo_correct_word(const char *string); -/* Check whether the token can be a domainname. */ bool tomoyo_domain_def(const unsigned char *buffer); -bool tomoyo_parse_name_union(const char *filename, - struct tomoyo_name_union *ptr); -/* Check whether the given filename matches the given path_group. */ -const struct tomoyo_path_info * -tomoyo_path_matches_group(const struct tomoyo_path_info *pathname, - const struct tomoyo_group *group); -/* Check whether the given value matches the given number_group. */ +bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r); +bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, + struct tomoyo_page_dump *dump); +bool tomoyo_memory_ok(void *ptr); bool tomoyo_number_matches_group(const unsigned long min, const unsigned long max, const struct tomoyo_group *group); -/* Check whether the given filename matches the given pattern. */ +bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param, + struct tomoyo_ipaddr_union *ptr); +bool tomoyo_parse_name_union(struct tomoyo_acl_param *param, + struct tomoyo_name_union *ptr); +bool tomoyo_parse_number_union(struct tomoyo_acl_param *param, + struct tomoyo_number_union *ptr); bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, const struct tomoyo_path_info *pattern); - -bool tomoyo_parse_number_union(char *data, struct tomoyo_number_union *num); -/* Tokenize a line. */ -bool tomoyo_tokenize(char *buffer, char *w[], size_t size); -/* Write domain policy violation warning message to console? */ -bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain); -/* Fill "struct tomoyo_request_info". */ -int tomoyo_init_request_info(struct tomoyo_request_info *r, - struct tomoyo_domain_info *domain, - const u8 index); -/* Check permission for mount operation. */ -int tomoyo_mount_permission(char *dev_name, struct path *path, char *type, - unsigned long flags, void *data_page); -/* Create "aggregator" entry in exception policy. */ -int tomoyo_write_aggregator(char *data, const bool is_delete); -int tomoyo_write_transition_control(char *data, const bool is_delete, - const u8 type); -/* - * Create "allow_read/write", "allow_execute", "allow_read", "allow_write", - * "allow_create", "allow_unlink", "allow_mkdir", "allow_rmdir", - * "allow_mkfifo", "allow_mksock", "allow_mkblock", "allow_mkchar", - * "allow_truncate", "allow_symlink", "allow_rewrite", "allow_rename" and - * "allow_link" entry in domain policy. - */ -int tomoyo_write_file(char *data, struct tomoyo_domain_info *domain, - const bool is_delete); -/* Create "allow_read" entry in exception policy. */ -int tomoyo_write_globally_readable(char *data, const bool is_delete); -/* Create "allow_mount" entry in domain policy. */ -int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain, - const bool is_delete); -/* Create "deny_rewrite" entry in exception policy. */ -int tomoyo_write_no_rewrite(char *data, const bool is_delete); -/* Create "file_pattern" entry in exception policy. */ -int tomoyo_write_pattern(char *data, const bool is_delete); -/* Create "path_group"/"number_group" entry in exception policy. */ -int tomoyo_write_group(char *data, const bool is_delete, const u8 type); -int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) 
- __attribute__ ((format(printf, 2, 3))); -/* Find a domain by the given name. */ -struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname); -/* Find or create a domain by the given name. */ -struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, - const u8 profile); -struct tomoyo_profile *tomoyo_profile(const u8 profile); -/* - * Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group". - */ -struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 type); - -/* Check mode for specified functionality. */ -unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain, - const u8 index); -/* Fill in "struct tomoyo_path_info" members. */ -void tomoyo_fill_path_info(struct tomoyo_path_info *ptr); -/* Run policy loader when /sbin/init starts. */ -void tomoyo_load_policy(const char *filename); - -void tomoyo_put_number_union(struct tomoyo_number_union *ptr); - -/* Convert binary string to ascii string. */ +bool tomoyo_permstr(const char *string, const char *keyword); +bool tomoyo_str_starts(char **src, const char *find); char *tomoyo_encode(const char *str); - -/* - * Returns realpath(3) of the given pathname except that - * ignores chroot'ed root and does not follow the final symlink. - */ -char *tomoyo_realpath_nofollow(const char *pathname); -/* - * Returns realpath(3) of the given pathname except that - * ignores chroot'ed root and the pathname is already solved. - */ +char *tomoyo_encode2(const char *str, int str_len); +char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args); +char *tomoyo_read_token(struct tomoyo_acl_param *param); char *tomoyo_realpath_from_path(struct path *path); -/* Get patterned pathname. */ -const char *tomoyo_pattern(const struct tomoyo_path_info *filename); - -/* Check memory quota. */ -bool tomoyo_memory_ok(void *ptr); -void *tomoyo_commit_ok(void *data, const unsigned int size); - -/* - * Keep the given name on the RAM. - * The RAM is shared, so NEVER try to modify or kfree() the returned name. - */ +char *tomoyo_realpath_nofollow(const char *pathname); +const char *tomoyo_get_exe(void); +const char *tomoyo_yesno(const unsigned int value); +const struct tomoyo_path_info *tomoyo_compare_name_union +(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr); +const struct tomoyo_path_info *tomoyo_get_domainname +(struct tomoyo_acl_param *param); const struct tomoyo_path_info *tomoyo_get_name(const char *name); - -/* Check for memory usage. */ -void tomoyo_read_memory_counter(struct tomoyo_io_buffer *head); - -/* Set memory quota. */ -int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head); - -/* Initialize mm related code. 
*/ -void __init tomoyo_mm_init(void); -int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, - const struct tomoyo_path_info *filename); +const struct tomoyo_path_info *tomoyo_path_matches_group +(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group); int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, struct path *path, const int flag); -int tomoyo_path_number_perm(const u8 operation, struct path *path, - unsigned long number); +void tomoyo_close_control(struct tomoyo_io_buffer *head); +int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env); +int tomoyo_execute_permission(struct tomoyo_request_info *r, + const struct tomoyo_path_info *filename); +int tomoyo_find_next_domain(struct linux_binprm *bprm); +int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile, + const u8 index); +int tomoyo_init_request_info(struct tomoyo_request_info *r, + struct tomoyo_domain_info *domain, + const u8 index); int tomoyo_mkdev_perm(const u8 operation, struct path *path, const unsigned int mode, unsigned int dev); -int tomoyo_path_perm(const u8 operation, struct path *path); +int tomoyo_mount_permission(const char *dev_name, struct path *path, + const char *type, unsigned long flags, + void *data_page); +int tomoyo_open_control(const u8 type, struct file *file); int tomoyo_path2_perm(const u8 operation, struct path *path1, struct path *path2); -int tomoyo_find_next_domain(struct linux_binprm *bprm); - -void tomoyo_print_ulong(char *buffer, const int buffer_len, - const unsigned long value, const u8 type); - -/* Drop refcount on tomoyo_name_union. */ -void tomoyo_put_name_union(struct tomoyo_name_union *ptr); - -/* Run garbage collector. */ -void tomoyo_run_gc(void); - -void tomoyo_memory_free(void *ptr); - +int tomoyo_path_number_perm(const u8 operation, struct path *path, + unsigned long number); +int tomoyo_path_perm(const u8 operation, struct path *path, + const char *target); +unsigned int tomoyo_poll_control(struct file *file, poll_table *wait); +unsigned int tomoyo_poll_log(struct file *file, poll_table *wait); +int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr, + int addr_len); +int tomoyo_socket_connect_permission(struct socket *sock, + struct sockaddr *addr, int addr_len); +int tomoyo_socket_listen_permission(struct socket *sock); +int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg, + int size); +int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...) 
+ __printf(2, 3); int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, - bool is_delete, struct tomoyo_domain_info *domain, - bool (*check_duplicate) (const struct tomoyo_acl_info - *, - const struct tomoyo_acl_info - *), - bool (*merge_duplicate) (struct tomoyo_acl_info *, - struct tomoyo_acl_info *, - const bool)); + struct tomoyo_acl_param *param, + bool (*check_duplicate) + (const struct tomoyo_acl_info *, + const struct tomoyo_acl_info *), + bool (*merge_duplicate) + (struct tomoyo_acl_info *, struct tomoyo_acl_info *, + const bool)); int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, - bool is_delete, struct list_head *list, - bool (*check_duplicate) (const struct tomoyo_acl_head - *, - const struct tomoyo_acl_head - *)); + struct tomoyo_acl_param *param, + bool (*check_duplicate) + (const struct tomoyo_acl_head *, + const struct tomoyo_acl_head *)); +int tomoyo_write_aggregator(struct tomoyo_acl_param *param); +int tomoyo_write_file(struct tomoyo_acl_param *param); +int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type); +int tomoyo_write_misc(struct tomoyo_acl_param *param); +int tomoyo_write_inet_network(struct tomoyo_acl_param *param); +int tomoyo_write_transition_control(struct tomoyo_acl_param *param, + const u8 type); +int tomoyo_write_unix_network(struct tomoyo_acl_param *param); +ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer, + const int buffer_len); +ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head, + const char __user *buffer, const int buffer_len); +struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param); +struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, + const bool transit); +struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname); +struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param, + const u8 idx); +struct tomoyo_policy_namespace *tomoyo_assign_namespace +(const char *domainname); +struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns, + const u8 profile); +unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain, + const u8 index); +u8 tomoyo_parse_ulong(unsigned long *result, char **str); +void *tomoyo_commit_ok(void *data, const unsigned int size); +void __init tomoyo_load_builtin_policy(void); +void __init tomoyo_mm_init(void); void tomoyo_check_acl(struct tomoyo_request_info *r, bool (*check_entry) (struct tomoyo_request_info *, const struct tomoyo_acl_info *)); +void tomoyo_check_profile(void); +void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp); +void tomoyo_del_condition(struct list_head *element); +void tomoyo_fill_path_info(struct tomoyo_path_info *ptr); +void tomoyo_get_attributes(struct tomoyo_obj_info *obj); +void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns); +void tomoyo_load_policy(const char *filename); +void tomoyo_normalize_line(unsigned char *buffer); +void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register); +void tomoyo_print_ip(char *buf, const unsigned int size, + const struct tomoyo_ipaddr_union *ptr); +void tomoyo_print_ulong(char *buffer, const int buffer_len, + const unsigned long value, const u8 type); +void tomoyo_put_name_union(struct tomoyo_name_union *ptr); +void tomoyo_put_number_union(struct tomoyo_number_union *ptr); +void tomoyo_read_log(struct tomoyo_io_buffer *head); +void tomoyo_update_stat(const u8 index); +void tomoyo_warn_oom(const char *function); +void 
tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...) + __printf(2, 3); +void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt, + va_list args); /********** External variable definitions. **********/ -/* Lock for GC. */ -extern struct srcu_struct tomoyo_ss; - -/* The list for "struct tomoyo_domain_info". */ +extern bool tomoyo_policy_loaded; +extern const char * const tomoyo_condition_keyword +[TOMOYO_MAX_CONDITION_KEYWORD]; +extern const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS]; +extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX + + TOMOYO_MAX_MAC_CATEGORY_INDEX]; +extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE]; +extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION]; +extern const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX]; +extern const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION]; +extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX]; +extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION]; +extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION]; +extern const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION]; +extern struct list_head tomoyo_condition_list; extern struct list_head tomoyo_domain_list; - -extern struct list_head tomoyo_policy_list[TOMOYO_MAX_POLICY]; -extern struct list_head tomoyo_group_list[TOMOYO_MAX_GROUP]; extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH]; - -/* Lock for protecting policy. */ +extern struct list_head tomoyo_namespace_list; extern struct mutex tomoyo_policy_lock; - -/* Has /sbin/init started? */ -extern bool tomoyo_policy_loaded; - -/* The kernel's domain. */ +extern struct srcu_struct tomoyo_ss; extern struct tomoyo_domain_info tomoyo_kernel_domain; - -extern const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION]; -extern const char *tomoyo_mkdev_keyword[TOMOYO_MAX_MKDEV_OPERATION]; -extern const char *tomoyo_path2_keyword[TOMOYO_MAX_PATH2_OPERATION]; -extern const char *tomoyo_path_number_keyword[TOMOYO_MAX_PATH_NUMBER_OPERATION]; - -extern unsigned int tomoyo_quota_for_query; -extern unsigned int tomoyo_query_memory_size; +extern struct tomoyo_policy_namespace tomoyo_kernel_namespace; +extern unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT]; +extern unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT]; /********** Inlined functions. **********/ +/** + * tomoyo_read_lock - Take lock for protecting policy. + * + * Returns index number for tomoyo_read_unlock(). + */ static inline int tomoyo_read_lock(void) { return srcu_read_lock(&tomoyo_ss); } +/** + * tomoyo_read_unlock - Release lock for protecting policy. + * + * @idx: Index number returned by tomoyo_read_lock(). + * + * Returns nothing. + */ static inline void tomoyo_read_unlock(int idx) { srcu_read_unlock(&tomoyo_ss, idx); } -/* strcmp() for "struct tomoyo_path_info" structure. */ -static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a, - const struct tomoyo_path_info *b) +/** + * tomoyo_sys_getppid - Copy of getppid(). + * + * Returns parent process's PID. + * + * Alpha does not have getppid() defined. To be able to build this module on + * Alpha, I have to copy getppid() from kernel/timer.c. + */ +static inline pid_t tomoyo_sys_getppid(void) { - return a->hash != b->hash || strcmp(a->name, b->name); + pid_t pid; + rcu_read_lock(); + pid = task_tgid_vnr(rcu_dereference(current->real_parent)); + rcu_read_unlock(); + return pid; } /** - * tomoyo_valid - Check whether the character is a valid char. 
+ * tomoyo_sys_getpid - Copy of getpid(). * - * @c: The character to check. + * Returns current thread's PID. * - * Returns true if @c is a valid character, false otherwise. + * Alpha does not have getpid() defined. To be able to build this module on + * Alpha, I have to copy getpid() from kernel/timer.c. */ -static inline bool tomoyo_valid(const unsigned char c) +static inline pid_t tomoyo_sys_getpid(void) { - return c > ' ' && c < 127; + return task_tgid_vnr(current); } /** - * tomoyo_invalid - Check whether the character is an invalid char. + * tomoyo_pathcmp - strcmp() for "struct tomoyo_path_info" structure. * - * @c: The character to check. + * @a: Pointer to "struct tomoyo_path_info". + * @b: Pointer to "struct tomoyo_path_info". * - * Returns true if @c is an invalid character, false otherwise. + * Returns true if @a == @b, false otherwise. */ -static inline bool tomoyo_invalid(const unsigned char c) +static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a, + const struct tomoyo_path_info *b) { - return c && (c <= ' ' || c >= 127); + return a->hash != b->hash || strcmp(a->name, b->name); } +/** + * tomoyo_put_name - Drop reference on "struct tomoyo_name". + * + * @name: Pointer to "struct tomoyo_path_info". Maybe NULL. + * + * Returns nothing. + */ static inline void tomoyo_put_name(const struct tomoyo_path_info *name) { if (name) { struct tomoyo_name *ptr = container_of(name, typeof(*ptr), entry); - atomic_dec(&ptr->users); + atomic_dec(&ptr->head.users); } } +/** + * tomoyo_put_condition - Drop reference on "struct tomoyo_condition". + * + * @cond: Pointer to "struct tomoyo_condition". Maybe NULL. + * + * Returns nothing. + */ +static inline void tomoyo_put_condition(struct tomoyo_condition *cond) +{ + if (cond) + atomic_dec(&cond->head.users); +} + +/** + * tomoyo_put_group - Drop reference on "struct tomoyo_group". + * + * @group: Pointer to "struct tomoyo_group". Maybe NULL. + * + * Returns nothing. + */ static inline void tomoyo_put_group(struct tomoyo_group *group) { if (group) - atomic_dec(&group->users); + atomic_dec(&group->head.users); } +/** + * tomoyo_domain - Get "struct tomoyo_domain_info" for current thread. + * + * Returns pointer to "struct tomoyo_domain_info" for current thread. + */ static inline struct tomoyo_domain_info *tomoyo_domain(void) { return current_cred()->security; } +/** + * tomoyo_real_domain - Get "struct tomoyo_domain_info" for specified thread. + * + * @task: Pointer to "struct task_struct". + * + * Returns pointer to "struct tomoyo_security" for specified thread. + */ static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct *task) { return task_cred_xxx(task, security); } -static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *p1, - const struct tomoyo_acl_info *p2) +/** + * tomoyo_same_name_union - Check for duplicated "struct tomoyo_name_union" entry. + * + * @a: Pointer to "struct tomoyo_name_union". + * @b: Pointer to "struct tomoyo_name_union". + * + * Returns true if @a == @b, false otherwise. + */ +static inline bool tomoyo_same_name_union +(const struct tomoyo_name_union *a, const struct tomoyo_name_union *b) { - return p1->type == p2->type; + return a->filename == b->filename && a->group == b->group; } -static inline bool tomoyo_same_name_union -(const struct tomoyo_name_union *p1, const struct tomoyo_name_union *p2) +/** + * tomoyo_same_number_union - Check for duplicated "struct tomoyo_number_union" entry. + * + * @a: Pointer to "struct tomoyo_number_union". 
+ * @b: Pointer to "struct tomoyo_number_union". + * + * Returns true if @a == @b, false otherwise. + */ +static inline bool tomoyo_same_number_union +(const struct tomoyo_number_union *a, const struct tomoyo_number_union *b) { - return p1->filename == p2->filename && p1->group == p2->group && - p1->is_group == p2->is_group; + return a->values[0] == b->values[0] && a->values[1] == b->values[1] && + a->group == b->group && a->value_type[0] == b->value_type[0] && + a->value_type[1] == b->value_type[1]; } -static inline bool tomoyo_same_number_union -(const struct tomoyo_number_union *p1, const struct tomoyo_number_union *p2) +/** + * tomoyo_same_ipaddr_union - Check for duplicated "struct tomoyo_ipaddr_union" entry. + * + * @a: Pointer to "struct tomoyo_ipaddr_union". + * @b: Pointer to "struct tomoyo_ipaddr_union". + * + * Returns true if @a == @b, false otherwise. + */ +static inline bool tomoyo_same_ipaddr_union +(const struct tomoyo_ipaddr_union *a, const struct tomoyo_ipaddr_union *b) +{ + return !memcmp(a->ip, b->ip, sizeof(a->ip)) && a->group == b->group && + a->is_ipv6 == b->is_ipv6; +} + +/** + * tomoyo_current_namespace - Get "struct tomoyo_policy_namespace" for current thread. + * + * Returns pointer to "struct tomoyo_policy_namespace" for current thread. + */ +static inline struct tomoyo_policy_namespace *tomoyo_current_namespace(void) { - return p1->values[0] == p2->values[0] && p1->values[1] == p2->values[1] - && p1->group == p2->group && p1->min_type == p2->min_type && - p1->max_type == p2->max_type && p1->is_group == p2->is_group; + return tomoyo_domain()->ns; } +#if defined(CONFIG_SLOB) + +/** + * tomoyo_round2 - Round up to power of 2 for calculating memory usage. + * + * @size: Size to be rounded up. + * + * Returns @size. + * + * Since SLOB does not round up, this function simply returns @size. + */ +static inline int tomoyo_round2(size_t size) +{ + return size; +} + +#else + +/** + * tomoyo_round2 - Round up to power of 2 for calculating memory usage. + * + * @size: Size to be rounded up. + * + * Returns rounded size. + * + * Strictly speaking, SLAB may be able to allocate (e.g.) 96 bytes instead of + * (e.g.) 128 bytes. + */ +static inline int tomoyo_round2(size_t size) +{ +#if PAGE_SIZE == 4096 + size_t bsize = 32; +#else + size_t bsize = 64; +#endif + if (!size) + return 0; + while (size > bsize) + bsize <<= 1; + return bsize; +} + +#endif + /** * list_for_each_cookie - iterate over a list with cookie. * @pos: the &struct list_head to use as a loop cursor. diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c new file mode 100644 index 00000000000..63681e8be62 --- /dev/null +++ b/security/tomoyo/condition.c @@ -0,0 +1,1094 @@ +/* + * security/tomoyo/condition.c + * + * Copyright (C) 2005-2011 NTT DATA CORPORATION + */ + +#include "common.h" +#include <linux/slab.h> + +/* List of "struct tomoyo_condition". */ +LIST_HEAD(tomoyo_condition_list); + +/** + * tomoyo_argv - Check argv[] in "struct linux_binprm". + * + * @index: Index number of @arg_ptr. + * @arg_ptr: Contents of argv[@index]. + * @argc: Length of @argv. + * @argv: Pointer to "struct tomoyo_argv". + * @checked: Set to true if @argv[@index] was found. + * + * Returns true on success, false otherwise.
+ */ +static bool tomoyo_argv(const unsigned int index, const char *arg_ptr, + const int argc, const struct tomoyo_argv *argv, + u8 *checked) +{ + int i; + struct tomoyo_path_info arg; + arg.name = arg_ptr; + for (i = 0; i < argc; argv++, checked++, i++) { + bool result; + if (index != argv->index) + continue; + *checked = 1; + tomoyo_fill_path_info(&arg); + result = tomoyo_path_matches_pattern(&arg, argv->value); + if (argv->is_not) + result = !result; + if (!result) + return false; + } + return true; +} + +/** + * tomoyo_envp - Check envp[] in "struct linux_binprm". + * + * @env_name: The name of environment variable. + * @env_value: The value of environment variable. + * @envc: Length of @envp. + * @envp: Pointer to "struct tomoyo_envp". + * @checked: Set to true if @envp[@env_name] was found. + * + * Returns true on success, false otherwise. + */ +static bool tomoyo_envp(const char *env_name, const char *env_value, + const int envc, const struct tomoyo_envp *envp, + u8 *checked) +{ + int i; + struct tomoyo_path_info name; + struct tomoyo_path_info value; + name.name = env_name; + tomoyo_fill_path_info(&name); + value.name = env_value; + tomoyo_fill_path_info(&value); + for (i = 0; i < envc; envp++, checked++, i++) { + bool result; + if (!tomoyo_path_matches_pattern(&name, envp->name)) + continue; + *checked = 1; + if (envp->value) { + result = tomoyo_path_matches_pattern(&value, + envp->value); + if (envp->is_not) + result = !result; + } else { + result = true; + if (!envp->is_not) + result = !result; + } + if (!result) + return false; + } + return true; +} + +/** + * tomoyo_scan_bprm - Scan "struct linux_binprm". + * + * @ee: Pointer to "struct tomoyo_execve". + * @argc: Length of @argv. + * @argv: Pointer to "struct tomoyo_argv". + * @envc: Length of @envp. + * @envp: Pointer to "struct tomoyo_envp". + * + * Returns true on success, false otherwise. + */ +static bool tomoyo_scan_bprm(struct tomoyo_execve *ee, + const u16 argc, const struct tomoyo_argv *argv, + const u16 envc, const struct tomoyo_envp *envp) +{ + struct linux_binprm *bprm = ee->bprm; + struct tomoyo_page_dump *dump = &ee->dump; + char *arg_ptr = ee->tmp; + int arg_len = 0; + unsigned long pos = bprm->p; + int offset = pos % PAGE_SIZE; + int argv_count = bprm->argc; + int envp_count = bprm->envc; + bool result = true; + u8 local_checked[32]; + u8 *checked; + if (argc + envc <= sizeof(local_checked)) { + checked = local_checked; + memset(local_checked, 0, sizeof(local_checked)); + } else { + checked = kzalloc(argc + envc, GFP_NOFS); + if (!checked) + return false; + } + while (argv_count || envp_count) { + if (!tomoyo_dump_page(bprm, pos, dump)) { + result = false; + goto out; + } + pos += PAGE_SIZE - offset; + while (offset < PAGE_SIZE) { + /* Read. */ + const char *kaddr = dump->data; + const unsigned char c = kaddr[offset++]; + if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) { + if (c == '\\') { + arg_ptr[arg_len++] = '\\'; + arg_ptr[arg_len++] = '\\'; + } else if (c > ' ' && c < 127) { + arg_ptr[arg_len++] = c; + } else { + arg_ptr[arg_len++] = '\\'; + arg_ptr[arg_len++] = (c >> 6) + '0'; + arg_ptr[arg_len++] = + ((c >> 3) & 7) + '0'; + arg_ptr[arg_len++] = (c & 7) + '0'; + } + } else { + arg_ptr[arg_len] = '\0'; + } + if (c) + continue; + /* Check. 
*/ + if (argv_count) { + if (!tomoyo_argv(bprm->argc - argv_count, + arg_ptr, argc, argv, + checked)) { + result = false; + break; + } + argv_count--; + } else if (envp_count) { + char *cp = strchr(arg_ptr, '='); + if (cp) { + *cp = '\0'; + if (!tomoyo_envp(arg_ptr, cp + 1, + envc, envp, + checked + argc)) { + result = false; + break; + } + } + envp_count--; + } else { + break; + } + arg_len = 0; + } + offset = 0; + if (!result) + break; + } +out: + if (result) { + int i; + /* Check not-yet-checked entries. */ + for (i = 0; i < argc; i++) { + if (checked[i]) + continue; + /* + * Return true only if all unchecked indexes in + * bprm->argv[] are not matched. + */ + if (argv[i].is_not) + continue; + result = false; + break; + } + for (i = 0; i < envc; envp++, i++) { + if (checked[argc + i]) + continue; + /* + * Return true only if all unchecked environ variables + * in bprm->envp[] are either undefined or not matched. + */ + if ((!envp->value && !envp->is_not) || + (envp->value && envp->is_not)) + continue; + result = false; + break; + } + } + if (checked != local_checked) + kfree(checked); + return result; +} + +/** + * tomoyo_scan_exec_realpath - Check "exec.realpath" parameter of "struct tomoyo_condition". + * + * @file: Pointer to "struct file". + * @ptr: Pointer to "struct tomoyo_name_union". + * @match: True if "exec.realpath=", false if "exec.realpath!=". + * + * Returns true on success, false otherwise. + */ +static bool tomoyo_scan_exec_realpath(struct file *file, + const struct tomoyo_name_union *ptr, + const bool match) +{ + bool result; + struct tomoyo_path_info exe; + if (!file) + return false; + exe.name = tomoyo_realpath_from_path(&file->f_path); + if (!exe.name) + return false; + tomoyo_fill_path_info(&exe); + result = tomoyo_compare_name_union(&exe, ptr); + kfree(exe.name); + return result == match; +} + +/** + * tomoyo_get_dqword - tomoyo_get_name() for a quoted string. + * + * @start: String to save. + * + * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise. + */ +static const struct tomoyo_path_info *tomoyo_get_dqword(char *start) +{ + char *cp = start + strlen(start) - 1; + if (cp == start || *start++ != '"' || *cp != '"') + return NULL; + *cp = '\0'; + if (*start && !tomoyo_correct_word(start)) + return NULL; + return tomoyo_get_name(start); +} + +/** + * tomoyo_parse_name_union_quoted - Parse a quoted word. + * + * @param: Pointer to "struct tomoyo_acl_param". + * @ptr: Pointer to "struct tomoyo_name_union". + * + * Returns true on success, false otherwise. + */ +static bool tomoyo_parse_name_union_quoted(struct tomoyo_acl_param *param, + struct tomoyo_name_union *ptr) +{ + char *filename = param->data; + if (*filename == '@') + return tomoyo_parse_name_union(param, ptr); + ptr->filename = tomoyo_get_dqword(filename); + return ptr->filename != NULL; +} + +/** + * tomoyo_parse_argv - Parse an argv[] condition part. + * + * @left: Lefthand value. + * @right: Righthand value. + * @argv: Pointer to "struct tomoyo_argv". + * + * Returns true on success, false otherwise. + */ +static bool tomoyo_parse_argv(char *left, char *right, + struct tomoyo_argv *argv) +{ + if (tomoyo_parse_ulong(&argv->index, &left) != + TOMOYO_VALUE_TYPE_DECIMAL || *left++ != ']' || *left) + return false; + argv->value = tomoyo_get_dqword(right); + return argv->value != NULL; +} + +/** + * tomoyo_parse_envp - Parse an envp[] condition part. + * + * @left: Lefthand value. + * @right: Righthand value. + * @envp: Pointer to "struct tomoyo_envp". 
+ * + * Returns true on success, false otherwise. + */ +static bool tomoyo_parse_envp(char *left, char *right, + struct tomoyo_envp *envp) +{ + const struct tomoyo_path_info *name; + const struct tomoyo_path_info *value; + char *cp = left + strlen(left) - 1; + if (*cp-- != ']' || *cp != '"') + goto out; + *cp = '\0'; + if (!tomoyo_correct_word(left)) + goto out; + name = tomoyo_get_name(left); + if (!name) + goto out; + if (!strcmp(right, "NULL")) { + value = NULL; + } else { + value = tomoyo_get_dqword(right); + if (!value) { + tomoyo_put_name(name); + goto out; + } + } + envp->name = name; + envp->value = value; + return true; +out: + return false; +} + +/** + * tomoyo_same_condition - Check for duplicated "struct tomoyo_condition" entry. + * + * @a: Pointer to "struct tomoyo_condition". + * @b: Pointer to "struct tomoyo_condition". + * + * Returns true if @a == @b, false otherwise. + */ +static inline bool tomoyo_same_condition(const struct tomoyo_condition *a, + const struct tomoyo_condition *b) +{ + return a->size == b->size && a->condc == b->condc && + a->numbers_count == b->numbers_count && + a->names_count == b->names_count && + a->argc == b->argc && a->envc == b->envc && + a->grant_log == b->grant_log && a->transit == b->transit && + !memcmp(a + 1, b + 1, a->size - sizeof(*a)); +} + +/** + * tomoyo_condition_type - Get condition type. + * + * @word: Keyword string. + * + * Returns one of values in "enum tomoyo_conditions_index" on success, + * TOMOYO_MAX_CONDITION_KEYWORD otherwise. + */ +static u8 tomoyo_condition_type(const char *word) +{ + u8 i; + for (i = 0; i < TOMOYO_MAX_CONDITION_KEYWORD; i++) { + if (!strcmp(word, tomoyo_condition_keyword[i])) + break; + } + return i; +} + +/* Define this to enable debug mode. */ +/* #define DEBUG_CONDITION */ + +#ifdef DEBUG_CONDITION +#define dprintk printk +#else +#define dprintk(...) do { } while (0) +#endif + +/** + * tomoyo_commit_condition - Commit "struct tomoyo_condition". + * + * @entry: Pointer to "struct tomoyo_condition". + * + * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise. + * + * This function merges duplicated entries. This function returns NULL if + * @entry is not duplicated but memory quota for policy has exceeded. + */ +static struct tomoyo_condition *tomoyo_commit_condition +(struct tomoyo_condition *entry) +{ + struct tomoyo_condition *ptr; + bool found = false; + if (mutex_lock_interruptible(&tomoyo_policy_lock)) { + dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__); + ptr = NULL; + found = true; + goto out; + } + list_for_each_entry(ptr, &tomoyo_condition_list, head.list) { + if (!tomoyo_same_condition(ptr, entry) || + atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) + continue; + /* Same entry found. Share this entry. */ + atomic_inc(&ptr->head.users); + found = true; + break; + } + if (!found) { + if (tomoyo_memory_ok(entry)) { + atomic_set(&entry->head.users, 1); + list_add(&entry->head.list, &tomoyo_condition_list); + } else { + found = true; + ptr = NULL; + } + } + mutex_unlock(&tomoyo_policy_lock); +out: + if (found) { + tomoyo_del_condition(&entry->head.list); + kfree(entry); + entry = ptr; + } + return entry; +} + +/** + * tomoyo_get_transit_preference - Parse domain transition preference for execve(). + * + * @param: Pointer to "struct tomoyo_acl_param". + * @e: Pointer to "struct tomoyo_condition". + * + * Returns the condition string part. 
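The exec.argv[] and exec.envp[] values handled above must be written as double-quoted words; tomoyo_get_dqword() strips the quotes in place before the string is interned via tomoyo_get_name(). A minimal userspace sketch of that quote handling, illustrative only and not part of the patch:

#include <stdio.h>
#include <string.h>

/* Strip one pair of surrounding double quotes in place, the way
 * tomoyo_get_dqword() does. Returns NULL when the token is not a
 * properly quoted word. */
static char *strip_dquotes(char *start)
{
    char *end = start + strlen(start) - 1;

    if (end <= start || *start != '"' || *end != '"')
        return NULL;
    *end = '\0';        /* drop the trailing quote */
    return start + 1;   /* skip the leading quote  */
}

int main(void)
{
    char ok[] = "\"/bin/true\"";
    char bad[] = "\"unterminated";

    printf("%s\n", strip_dquotes(ok));                            /* /bin/true */
    printf("%s\n", strip_dquotes(bad) ? "accepted" : "rejected"); /* rejected  */
    return 0;
}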
+ */ +static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param, + struct tomoyo_condition *e) +{ + char * const pos = param->data; + bool flag; + if (*pos == '<') { + e->transit = tomoyo_get_domainname(param); + goto done; + } + { + char *cp = strchr(pos, ' '); + if (cp) + *cp = '\0'; + flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") || + !strcmp(pos, "initialize") || !strcmp(pos, "reset") || + !strcmp(pos, "child") || !strcmp(pos, "parent"); + if (cp) + *cp = ' '; + } + if (!flag) + return pos; + e->transit = tomoyo_get_name(tomoyo_read_token(param)); +done: + if (e->transit) + return param->data; + /* + * Return a bad read-only condition string that will let + * tomoyo_get_condition() return NULL. + */ + return "/"; +} + +/** + * tomoyo_get_condition - Parse condition part. + * + * @param: Pointer to "struct tomoyo_acl_param". + * + * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise. + */ +struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param) +{ + struct tomoyo_condition *entry = NULL; + struct tomoyo_condition_element *condp = NULL; + struct tomoyo_number_union *numbers_p = NULL; + struct tomoyo_name_union *names_p = NULL; + struct tomoyo_argv *argv = NULL; + struct tomoyo_envp *envp = NULL; + struct tomoyo_condition e = { }; + char * const start_of_string = + tomoyo_get_transit_preference(param, &e); + char * const end_of_string = start_of_string + strlen(start_of_string); + char *pos; +rerun: + pos = start_of_string; + while (1) { + u8 left = -1; + u8 right = -1; + char *left_word = pos; + char *cp; + char *right_word; + bool is_not; + if (!*left_word) + break; + /* + * Since left-hand condition does not allow use of "path_group" + * or "number_group" and environment variable's names do not + * accept '=', it is guaranteed that the original line consists + * of one or more repetition of $left$operator$right blocks + * where "$left is free from '=' and ' '" and "$operator is + * either '=' or '!='" and "$right is free from ' '". + * Therefore, we can reconstruct the original line at the end + * of dry run even if we overwrite $operator with '\0'. + */ + cp = strchr(pos, ' '); + if (cp) { + *cp = '\0'; /* Will restore later. */ + pos = cp + 1; + } else { + pos = ""; + } + right_word = strchr(left_word, '='); + if (!right_word || right_word == left_word) + goto out; + is_not = *(right_word - 1) == '!'; + if (is_not) + *(right_word++ - 1) = '\0'; /* Will restore later. */ + else if (*(right_word + 1) != '=') + *right_word++ = '\0'; /* Will restore later. */ + else + goto out; + dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word, + is_not ? "!" 
: "", right_word); + if (!strcmp(left_word, "grant_log")) { + if (entry) { + if (is_not || + entry->grant_log != TOMOYO_GRANTLOG_AUTO) + goto out; + else if (!strcmp(right_word, "yes")) + entry->grant_log = TOMOYO_GRANTLOG_YES; + else if (!strcmp(right_word, "no")) + entry->grant_log = TOMOYO_GRANTLOG_NO; + else + goto out; + } + continue; + } + if (!strncmp(left_word, "exec.argv[", 10)) { + if (!argv) { + e.argc++; + e.condc++; + } else { + e.argc--; + e.condc--; + left = TOMOYO_ARGV_ENTRY; + argv->is_not = is_not; + if (!tomoyo_parse_argv(left_word + 10, + right_word, argv++)) + goto out; + } + goto store_value; + } + if (!strncmp(left_word, "exec.envp[\"", 11)) { + if (!envp) { + e.envc++; + e.condc++; + } else { + e.envc--; + e.condc--; + left = TOMOYO_ENVP_ENTRY; + envp->is_not = is_not; + if (!tomoyo_parse_envp(left_word + 11, + right_word, envp++)) + goto out; + } + goto store_value; + } + left = tomoyo_condition_type(left_word); + dprintk(KERN_WARNING "%u: <%s> left=%u\n", __LINE__, left_word, + left); + if (left == TOMOYO_MAX_CONDITION_KEYWORD) { + if (!numbers_p) { + e.numbers_count++; + } else { + e.numbers_count--; + left = TOMOYO_NUMBER_UNION; + param->data = left_word; + if (*left_word == '@' || + !tomoyo_parse_number_union(param, + numbers_p++)) + goto out; + } + } + if (!condp) + e.condc++; + else + e.condc--; + if (left == TOMOYO_EXEC_REALPATH || + left == TOMOYO_SYMLINK_TARGET) { + if (!names_p) { + e.names_count++; + } else { + e.names_count--; + right = TOMOYO_NAME_UNION; + param->data = right_word; + if (!tomoyo_parse_name_union_quoted(param, + names_p++)) + goto out; + } + goto store_value; + } + right = tomoyo_condition_type(right_word); + if (right == TOMOYO_MAX_CONDITION_KEYWORD) { + if (!numbers_p) { + e.numbers_count++; + } else { + e.numbers_count--; + right = TOMOYO_NUMBER_UNION; + param->data = right_word; + if (!tomoyo_parse_number_union(param, + numbers_p++)) + goto out; + } + } +store_value: + if (!condp) { + dprintk(KERN_WARNING "%u: dry_run left=%u right=%u " + "match=%u\n", __LINE__, left, right, !is_not); + continue; + } + condp->left = left; + condp->right = right; + condp->equals = !is_not; + dprintk(KERN_WARNING "%u: left=%u right=%u match=%u\n", + __LINE__, condp->left, condp->right, + condp->equals); + condp++; + } + dprintk(KERN_INFO "%u: cond=%u numbers=%u names=%u ac=%u ec=%u\n", + __LINE__, e.condc, e.numbers_count, e.names_count, e.argc, + e.envc); + if (entry) { + BUG_ON(e.names_count | e.numbers_count | e.argc | e.envc | + e.condc); + return tomoyo_commit_condition(entry); + } + e.size = sizeof(*entry) + + e.condc * sizeof(struct tomoyo_condition_element) + + e.numbers_count * sizeof(struct tomoyo_number_union) + + e.names_count * sizeof(struct tomoyo_name_union) + + e.argc * sizeof(struct tomoyo_argv) + + e.envc * sizeof(struct tomoyo_envp); + entry = kzalloc(e.size, GFP_NOFS); + if (!entry) + goto out2; + *entry = e; + e.transit = NULL; + condp = (struct tomoyo_condition_element *) (entry + 1); + numbers_p = (struct tomoyo_number_union *) (condp + e.condc); + names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count); + argv = (struct tomoyo_argv *) (names_p + e.names_count); + envp = (struct tomoyo_envp *) (argv + e.argc); + { + bool flag = false; + for (pos = start_of_string; pos < end_of_string; pos++) { + if (*pos) + continue; + if (flag) /* Restore " ". */ + *pos = ' '; + else if (*(pos + 1) == '=') /* Restore "!=". */ + *pos = '!'; + else /* Restore "=". 
*/ + *pos = '='; + flag = !flag; + } + } + goto rerun; +out: + dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__); + if (entry) { + tomoyo_del_condition(&entry->head.list); + kfree(entry); + } +out2: + tomoyo_put_name(e.transit); + return NULL; +} + +/** + * tomoyo_get_attributes - Revalidate "struct inode". + * + * @obj: Pointer to "struct tomoyo_obj_info". + * + * Returns nothing. + */ +void tomoyo_get_attributes(struct tomoyo_obj_info *obj) +{ + u8 i; + struct dentry *dentry = NULL; + + for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) { + struct inode *inode; + switch (i) { + case TOMOYO_PATH1: + dentry = obj->path1.dentry; + if (!dentry) + continue; + break; + case TOMOYO_PATH2: + dentry = obj->path2.dentry; + if (!dentry) + continue; + break; + default: + if (!dentry) + continue; + dentry = dget_parent(dentry); + break; + } + inode = dentry->d_inode; + if (inode) { + struct tomoyo_mini_stat *stat = &obj->stat[i]; + stat->uid = inode->i_uid; + stat->gid = inode->i_gid; + stat->ino = inode->i_ino; + stat->mode = inode->i_mode; + stat->dev = inode->i_sb->s_dev; + stat->rdev = inode->i_rdev; + obj->stat_valid[i] = true; + } + if (i & 1) /* i == TOMOYO_PATH1_PARENT || + i == TOMOYO_PATH2_PARENT */ + dput(dentry); + } +} + +/** + * tomoyo_condition - Check condition part. + * + * @r: Pointer to "struct tomoyo_request_info". + * @cond: Pointer to "struct tomoyo_condition". Maybe NULL. + * + * Returns true on success, false otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +bool tomoyo_condition(struct tomoyo_request_info *r, + const struct tomoyo_condition *cond) +{ + u32 i; + unsigned long min_v[2] = { 0, 0 }; + unsigned long max_v[2] = { 0, 0 }; + const struct tomoyo_condition_element *condp; + const struct tomoyo_number_union *numbers_p; + const struct tomoyo_name_union *names_p; + const struct tomoyo_argv *argv; + const struct tomoyo_envp *envp; + struct tomoyo_obj_info *obj; + u16 condc; + u16 argc; + u16 envc; + struct linux_binprm *bprm = NULL; + if (!cond) + return true; + condc = cond->condc; + argc = cond->argc; + envc = cond->envc; + obj = r->obj; + if (r->ee) + bprm = r->ee->bprm; + if (!bprm && (argc || envc)) + return false; + condp = (struct tomoyo_condition_element *) (cond + 1); + numbers_p = (const struct tomoyo_number_union *) (condp + condc); + names_p = (const struct tomoyo_name_union *) + (numbers_p + cond->numbers_count); + argv = (const struct tomoyo_argv *) (names_p + cond->names_count); + envp = (const struct tomoyo_envp *) (argv + argc); + for (i = 0; i < condc; i++) { + const bool match = condp->equals; + const u8 left = condp->left; + const u8 right = condp->right; + bool is_bitop[2] = { false, false }; + u8 j; + condp++; + /* Check argv[] and envp[] later. */ + if (left == TOMOYO_ARGV_ENTRY || left == TOMOYO_ENVP_ENTRY) + continue; + /* Check string expressions. */ + if (right == TOMOYO_NAME_UNION) { + const struct tomoyo_name_union *ptr = names_p++; + switch (left) { + struct tomoyo_path_info *symlink; + struct tomoyo_execve *ee; + struct file *file; + case TOMOYO_SYMLINK_TARGET: + symlink = obj ? obj->symlink_target : NULL; + if (!symlink || + !tomoyo_compare_name_union(symlink, ptr) + == match) + goto out; + break; + case TOMOYO_EXEC_REALPATH: + ee = r->ee; + file = ee ? ee->bprm->file : NULL; + if (!tomoyo_scan_exec_realpath(file, ptr, + match)) + goto out; + break; + } + continue; + } + /* Check numeric or bit-op expressions. */ + for (j = 0; j < 2; j++) { + const u8 index = j ? 
right : left; + unsigned long value = 0; + switch (index) { + case TOMOYO_TASK_UID: + value = from_kuid(&init_user_ns, current_uid()); + break; + case TOMOYO_TASK_EUID: + value = from_kuid(&init_user_ns, current_euid()); + break; + case TOMOYO_TASK_SUID: + value = from_kuid(&init_user_ns, current_suid()); + break; + case TOMOYO_TASK_FSUID: + value = from_kuid(&init_user_ns, current_fsuid()); + break; + case TOMOYO_TASK_GID: + value = from_kgid(&init_user_ns, current_gid()); + break; + case TOMOYO_TASK_EGID: + value = from_kgid(&init_user_ns, current_egid()); + break; + case TOMOYO_TASK_SGID: + value = from_kgid(&init_user_ns, current_sgid()); + break; + case TOMOYO_TASK_FSGID: + value = from_kgid(&init_user_ns, current_fsgid()); + break; + case TOMOYO_TASK_PID: + value = tomoyo_sys_getpid(); + break; + case TOMOYO_TASK_PPID: + value = tomoyo_sys_getppid(); + break; + case TOMOYO_TYPE_IS_SOCKET: + value = S_IFSOCK; + break; + case TOMOYO_TYPE_IS_SYMLINK: + value = S_IFLNK; + break; + case TOMOYO_TYPE_IS_FILE: + value = S_IFREG; + break; + case TOMOYO_TYPE_IS_BLOCK_DEV: + value = S_IFBLK; + break; + case TOMOYO_TYPE_IS_DIRECTORY: + value = S_IFDIR; + break; + case TOMOYO_TYPE_IS_CHAR_DEV: + value = S_IFCHR; + break; + case TOMOYO_TYPE_IS_FIFO: + value = S_IFIFO; + break; + case TOMOYO_MODE_SETUID: + value = S_ISUID; + break; + case TOMOYO_MODE_SETGID: + value = S_ISGID; + break; + case TOMOYO_MODE_STICKY: + value = S_ISVTX; + break; + case TOMOYO_MODE_OWNER_READ: + value = S_IRUSR; + break; + case TOMOYO_MODE_OWNER_WRITE: + value = S_IWUSR; + break; + case TOMOYO_MODE_OWNER_EXECUTE: + value = S_IXUSR; + break; + case TOMOYO_MODE_GROUP_READ: + value = S_IRGRP; + break; + case TOMOYO_MODE_GROUP_WRITE: + value = S_IWGRP; + break; + case TOMOYO_MODE_GROUP_EXECUTE: + value = S_IXGRP; + break; + case TOMOYO_MODE_OTHERS_READ: + value = S_IROTH; + break; + case TOMOYO_MODE_OTHERS_WRITE: + value = S_IWOTH; + break; + case TOMOYO_MODE_OTHERS_EXECUTE: + value = S_IXOTH; + break; + case TOMOYO_EXEC_ARGC: + if (!bprm) + goto out; + value = bprm->argc; + break; + case TOMOYO_EXEC_ENVC: + if (!bprm) + goto out; + value = bprm->envc; + break; + case TOMOYO_NUMBER_UNION: + /* Fetch values later. 
*/ + break; + default: + if (!obj) + goto out; + if (!obj->validate_done) { + tomoyo_get_attributes(obj); + obj->validate_done = true; + } + { + u8 stat_index; + struct tomoyo_mini_stat *stat; + switch (index) { + case TOMOYO_PATH1_UID: + case TOMOYO_PATH1_GID: + case TOMOYO_PATH1_INO: + case TOMOYO_PATH1_MAJOR: + case TOMOYO_PATH1_MINOR: + case TOMOYO_PATH1_TYPE: + case TOMOYO_PATH1_DEV_MAJOR: + case TOMOYO_PATH1_DEV_MINOR: + case TOMOYO_PATH1_PERM: + stat_index = TOMOYO_PATH1; + break; + case TOMOYO_PATH2_UID: + case TOMOYO_PATH2_GID: + case TOMOYO_PATH2_INO: + case TOMOYO_PATH2_MAJOR: + case TOMOYO_PATH2_MINOR: + case TOMOYO_PATH2_TYPE: + case TOMOYO_PATH2_DEV_MAJOR: + case TOMOYO_PATH2_DEV_MINOR: + case TOMOYO_PATH2_PERM: + stat_index = TOMOYO_PATH2; + break; + case TOMOYO_PATH1_PARENT_UID: + case TOMOYO_PATH1_PARENT_GID: + case TOMOYO_PATH1_PARENT_INO: + case TOMOYO_PATH1_PARENT_PERM: + stat_index = + TOMOYO_PATH1_PARENT; + break; + case TOMOYO_PATH2_PARENT_UID: + case TOMOYO_PATH2_PARENT_GID: + case TOMOYO_PATH2_PARENT_INO: + case TOMOYO_PATH2_PARENT_PERM: + stat_index = + TOMOYO_PATH2_PARENT; + break; + default: + goto out; + } + if (!obj->stat_valid[stat_index]) + goto out; + stat = &obj->stat[stat_index]; + switch (index) { + case TOMOYO_PATH1_UID: + case TOMOYO_PATH2_UID: + case TOMOYO_PATH1_PARENT_UID: + case TOMOYO_PATH2_PARENT_UID: + value = from_kuid(&init_user_ns, stat->uid); + break; + case TOMOYO_PATH1_GID: + case TOMOYO_PATH2_GID: + case TOMOYO_PATH1_PARENT_GID: + case TOMOYO_PATH2_PARENT_GID: + value = from_kgid(&init_user_ns, stat->gid); + break; + case TOMOYO_PATH1_INO: + case TOMOYO_PATH2_INO: + case TOMOYO_PATH1_PARENT_INO: + case TOMOYO_PATH2_PARENT_INO: + value = stat->ino; + break; + case TOMOYO_PATH1_MAJOR: + case TOMOYO_PATH2_MAJOR: + value = MAJOR(stat->dev); + break; + case TOMOYO_PATH1_MINOR: + case TOMOYO_PATH2_MINOR: + value = MINOR(stat->dev); + break; + case TOMOYO_PATH1_TYPE: + case TOMOYO_PATH2_TYPE: + value = stat->mode & S_IFMT; + break; + case TOMOYO_PATH1_DEV_MAJOR: + case TOMOYO_PATH2_DEV_MAJOR: + value = MAJOR(stat->rdev); + break; + case TOMOYO_PATH1_DEV_MINOR: + case TOMOYO_PATH2_DEV_MINOR: + value = MINOR(stat->rdev); + break; + case TOMOYO_PATH1_PERM: + case TOMOYO_PATH2_PERM: + case TOMOYO_PATH1_PARENT_PERM: + case TOMOYO_PATH2_PARENT_PERM: + value = stat->mode & S_IALLUGO; + break; + } + } + break; + } + max_v[j] = value; + min_v[j] = value; + switch (index) { + case TOMOYO_MODE_SETUID: + case TOMOYO_MODE_SETGID: + case TOMOYO_MODE_STICKY: + case TOMOYO_MODE_OWNER_READ: + case TOMOYO_MODE_OWNER_WRITE: + case TOMOYO_MODE_OWNER_EXECUTE: + case TOMOYO_MODE_GROUP_READ: + case TOMOYO_MODE_GROUP_WRITE: + case TOMOYO_MODE_GROUP_EXECUTE: + case TOMOYO_MODE_OTHERS_READ: + case TOMOYO_MODE_OTHERS_WRITE: + case TOMOYO_MODE_OTHERS_EXECUTE: + is_bitop[j] = true; + } + } + if (left == TOMOYO_NUMBER_UNION) { + /* Fetch values now. */ + const struct tomoyo_number_union *ptr = numbers_p++; + min_v[0] = ptr->values[0]; + max_v[0] = ptr->values[1]; + } + if (right == TOMOYO_NUMBER_UNION) { + /* Fetch values now. */ + const struct tomoyo_number_union *ptr = numbers_p++; + if (ptr->group) { + if (tomoyo_number_matches_group(min_v[0], + max_v[0], + ptr->group) + == match) + continue; + } else { + if ((min_v[0] <= ptr->values[1] && + max_v[0] >= ptr->values[0]) == match) + continue; + } + goto out; + } + /* + * Bit operation is valid only when counterpart value + * represents permission. 
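The comment above is the key to this loop: a condition operand is normally an inclusive [min, max] range, and two operands match when their ranges overlap, but when exactly one side names a single mode bit (setuid, group write, and so on) and the other is a permission value, the test degenerates to a bitwise AND. A small userspace illustration of the two comparison forms, with made-up values rather than anything taken from the patch:

#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>

/* Range form: "task.uid=1000-1999" matches when the single observed
 * value (a degenerate range) overlaps the configured range. */
static bool range_matches(unsigned long min0, unsigned long max0,
                          unsigned long min1, unsigned long max1)
{
    return min0 <= max1 && max0 >= min1;
}

/* Bit form: a mode-bit keyword against a permission value is an AND. */
static bool bit_matches(unsigned long perm, unsigned long bit)
{
    return (perm & bit) != 0;
}

int main(void)
{
    unsigned long uid = 1000;
    unsigned long mode = 04755;  /* a setuid executable */

    printf("%d\n", range_matches(uid, uid, 1000, 1999));  /* 1 */
    printf("%d\n", bit_matches(mode & 07777, S_ISUID));   /* 1 */
    return 0;
}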
+ */ + if (is_bitop[0] && is_bitop[1]) { + goto out; + } else if (is_bitop[0]) { + switch (right) { + case TOMOYO_PATH1_PERM: + case TOMOYO_PATH1_PARENT_PERM: + case TOMOYO_PATH2_PERM: + case TOMOYO_PATH2_PARENT_PERM: + if (!(max_v[0] & max_v[1]) == !match) + continue; + } + goto out; + } else if (is_bitop[1]) { + switch (left) { + case TOMOYO_PATH1_PERM: + case TOMOYO_PATH1_PARENT_PERM: + case TOMOYO_PATH2_PERM: + case TOMOYO_PATH2_PARENT_PERM: + if (!(max_v[0] & max_v[1]) == !match) + continue; + } + goto out; + } + /* Normal value range comparison. */ + if ((min_v[0] <= max_v[1] && max_v[0] >= min_v[1]) == match) + continue; +out: + return false; + } + /* Check argv[] and envp[] now. */ + if (r->ee && (argc || envc)) + return tomoyo_scan_bprm(r->ee, argc, argv, envc, envp); + return true; +} diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c index 35388408e47..38651454ed0 100644 --- a/security/tomoyo/domain.c +++ b/security/tomoyo/domain.c @@ -1,9 +1,7 @@ /* * security/tomoyo/domain.c * - * Domain transition functions for TOMOYO. - * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include "common.h" @@ -20,8 +18,7 @@ struct tomoyo_domain_info tomoyo_kernel_domain; * * @new_entry: Pointer to "struct tomoyo_acl_info". * @size: Size of @new_entry in bytes. - * @is_delete: True if it is a delete request. - * @list: Pointer to "struct list_head". + * @param: Pointer to "struct tomoyo_acl_param". * @check_duplicate: Callback function to find duplicated entry. * * Returns 0 on success, negative value otherwise. @@ -29,25 +26,28 @@ struct tomoyo_domain_info tomoyo_kernel_domain; * Caller holds tomoyo_read_lock(). */ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, - bool is_delete, struct list_head *list, + struct tomoyo_acl_param *param, bool (*check_duplicate) (const struct tomoyo_acl_head *, const struct tomoyo_acl_head *)) { - int error = is_delete ? -ENOENT : -ENOMEM; + int error = param->is_delete ? -ENOENT : -ENOMEM; struct tomoyo_acl_head *entry; + struct list_head *list = param->list; if (mutex_lock_interruptible(&tomoyo_policy_lock)) return -ENOMEM; list_for_each_entry_rcu(entry, list, list) { + if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) + continue; if (!check_duplicate(entry, new_entry)) continue; - entry->is_deleted = is_delete; + entry->is_deleted = param->is_delete; error = 0; break; } - if (error && !is_delete) { + if (error && !param->is_delete) { entry = tomoyo_commit_ok(new_entry, size); if (entry) { list_add_tail_rcu(&entry->list, list); @@ -59,12 +59,25 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, } /** + * tomoyo_same_acl_head - Check for duplicated "struct tomoyo_acl_info" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b, false otherwise. + */ +static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *a, + const struct tomoyo_acl_info *b) +{ + return a->type == b->type && a->cond == b->cond; +} + +/** * tomoyo_update_domain - Update an entry for domain policy. * * @new_entry: Pointer to "struct tomoyo_acl_info". * @size: Size of @new_entry in bytes. - * @is_delete: True if it is a delete request. - * @domain: Pointer to "struct tomoyo_domain_info". + * @param: Pointer to "struct tomoyo_acl_param". * @check_duplicate: Callback function to find duplicated entry. * @merge_duplicate: Callback function to merge duplicated entry. 
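Both tomoyo_update_policy() and tomoyo_update_domain() in this hunk follow the same shape: walk the list for a duplicate via the callback, toggle is_deleted on a hit, and only append a freshly committed copy when adding something new. A stripped-down userspace sketch of that pattern, with the mutex, RCU and the garbage-collector marker omitted and all names illustrative:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry {
    struct entry *next;
    bool is_deleted;
    char name[32];
};

static int update(struct entry **list, const char *name, bool is_delete)
{
    struct entry *e;

    for (e = *list; e; e = e->next) {
        if (strcmp(e->name, name))      /* check_duplicate() stand-in */
            continue;
        e->is_deleted = is_delete;      /* reuse the existing slot */
        return 0;
    }
    if (is_delete)
        return -2;                      /* -ENOENT in the kernel code */
    e = calloc(1, sizeof(*e));
    if (!e)
        return -12;                     /* -ENOMEM */
    snprintf(e->name, sizeof(e->name), "%s", name);
    e->next = *list;                    /* prepend; ordering ignored here */
    *list = e;
    return 0;
}

int main(void)
{
    struct entry *list = NULL;
    int a = update(&list, "/bin/sh", false);  /* adds            -> 0  */
    int b = update(&list, "/bin/sh", true);   /* marks deleted   -> 0  */
    int c = update(&list, "/bin/cat", true);  /* nothing matches -> -2 */

    printf("%d %d %d\n", a, b, c);
    return 0;
}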
* @@ -73,7 +86,7 @@ int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size, * Caller holds tomoyo_read_lock(). */ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, - bool is_delete, struct tomoyo_domain_info *domain, + struct tomoyo_acl_param *param, bool (*check_duplicate) (const struct tomoyo_acl_info *, const struct tomoyo_acl_info @@ -82,13 +95,32 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, struct tomoyo_acl_info *, const bool)) { + const bool is_delete = param->is_delete; int error = is_delete ? -ENOENT : -ENOMEM; struct tomoyo_acl_info *entry; + struct list_head * const list = param->list; + if (param->data[0]) { + new_entry->cond = tomoyo_get_condition(param); + if (!new_entry->cond) + return -EINVAL; + /* + * Domain transition preference is allowed for only + * "file execute" entries. + */ + if (new_entry->cond->transit && + !(new_entry->type == TOMOYO_TYPE_PATH_ACL && + container_of(new_entry, struct tomoyo_path_acl, head) + ->perm == 1 << TOMOYO_TYPE_EXECUTE)) + goto out; + } if (mutex_lock_interruptible(&tomoyo_policy_lock)) - return error; - list_for_each_entry_rcu(entry, &domain->acl_info_list, list) { - if (!check_duplicate(entry, new_entry)) + goto out; + list_for_each_entry_rcu(entry, list, list) { + if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS) + continue; + if (!tomoyo_same_acl_head(entry, new_entry) || + !check_duplicate(entry, new_entry)) continue; if (merge_duplicate) entry->is_deleted = merge_duplicate(entry, new_entry, @@ -101,28 +133,51 @@ int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size, if (error && !is_delete) { entry = tomoyo_commit_ok(new_entry, size); if (entry) { - list_add_tail_rcu(&entry->list, &domain->acl_info_list); + list_add_tail_rcu(&entry->list, list); error = 0; } } mutex_unlock(&tomoyo_policy_lock); +out: + tomoyo_put_condition(new_entry->cond); return error; } +/** + * tomoyo_check_acl - Do permission check. + * + * @r: Pointer to "struct tomoyo_request_info". + * @check_entry: Callback function to check type specific parameters. + * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ void tomoyo_check_acl(struct tomoyo_request_info *r, bool (*check_entry) (struct tomoyo_request_info *, const struct tomoyo_acl_info *)) { const struct tomoyo_domain_info *domain = r->domain; struct tomoyo_acl_info *ptr; + bool retried = false; + const struct list_head *list = &domain->acl_info_list; - list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) { +retry: + list_for_each_entry_rcu(ptr, list, list) { if (ptr->is_deleted || ptr->type != r->param_type) continue; - if (check_entry(r, ptr)) { - r->granted = true; - return; - } + if (!check_entry(r, ptr)) + continue; + if (!tomoyo_condition(r, ptr->cond)) + continue; + r->matched_acl = ptr; + r->granted = true; + return; + } + if (!retried) { + retried = true; + list = &domain->ns->acl_group[domain->group]; + goto retry; } r->granted = false; } @@ -130,24 +185,29 @@ void tomoyo_check_acl(struct tomoyo_request_info *r, /* The list for "struct tomoyo_domain_info". */ LIST_HEAD(tomoyo_domain_list); -struct list_head tomoyo_policy_list[TOMOYO_MAX_POLICY]; -struct list_head tomoyo_group_list[TOMOYO_MAX_GROUP]; - /** * tomoyo_last_word - Get last component of a domainname. * - * @domainname: Domainname to check. + * @name: Domainname to check. * * Returns the last word of @domainname. 
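tomoyo_last_word(), whose body follows, simply returns the text after the final space of a domainname, i.e. the path of the most recently executed program. A tiny userspace illustration, not part of the patch:

#include <stdio.h>
#include <string.h>

static const char *last_word(const char *name)
{
    const char *cp = strrchr(name, ' ');
    return cp ? cp + 1 : name;
}

int main(void)
{
    /* prints "/bin/bash" */
    printf("%s\n", last_word("<kernel> /usr/sbin/sshd /bin/bash"));
    return 0;
}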
*/ static const char *tomoyo_last_word(const char *name) { - const char *cp = strrchr(name, ' '); - if (cp) - return cp + 1; - return name; + const char *cp = strrchr(name, ' '); + if (cp) + return cp + 1; + return name; } +/** + * tomoyo_same_transition_control - Check for duplicated "struct tomoyo_transition_control" entry. + * + * @a: Pointer to "struct tomoyo_acl_head". + * @b: Pointer to "struct tomoyo_acl_head". + * + * Returns true if @a == @b, false otherwise. + */ static bool tomoyo_same_transition_control(const struct tomoyo_acl_head *a, const struct tomoyo_acl_head *b) { @@ -163,30 +223,36 @@ static bool tomoyo_same_transition_control(const struct tomoyo_acl_head *a, } /** - * tomoyo_update_transition_control_entry - Update "struct tomoyo_transition_control" list. + * tomoyo_write_transition_control - Write "struct tomoyo_transition_control" list. * - * @domainname: The name of domain. Maybe NULL. - * @program: The name of program. Maybe NULL. - * @type: Type of transition. - * @is_delete: True if it is a delete request. + * @param: Pointer to "struct tomoyo_acl_param". + * @type: Type of this entry. * * Returns 0 on success, negative value otherwise. */ -static int tomoyo_update_transition_control_entry(const char *domainname, - const char *program, - const u8 type, - const bool is_delete) +int tomoyo_write_transition_control(struct tomoyo_acl_param *param, + const u8 type) { struct tomoyo_transition_control e = { .type = type }; - int error = is_delete ? -ENOENT : -ENOMEM; - if (program) { + int error = param->is_delete ? -ENOENT : -ENOMEM; + char *program = param->data; + char *domainname = strstr(program, " from "); + if (domainname) { + *domainname = '\0'; + domainname += 6; + } else if (type == TOMOYO_TRANSITION_CONTROL_NO_KEEP || + type == TOMOYO_TRANSITION_CONTROL_KEEP) { + domainname = program; + program = NULL; + } + if (program && strcmp(program, "any")) { if (!tomoyo_correct_path(program)) return -EINVAL; e.program = tomoyo_get_name(program); if (!e.program) goto out; } - if (domainname) { + if (domainname && strcmp(domainname, "any")) { if (!tomoyo_correct_domain(domainname)) { if (!tomoyo_correct_path(domainname)) goto out; @@ -196,126 +262,136 @@ static int tomoyo_update_transition_control_entry(const char *domainname, if (!e.domainname) goto out; } - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list - [TOMOYO_ID_TRANSITION_CONTROL], + param->list = ¶m->ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL]; + error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_same_transition_control); - out: +out: tomoyo_put_name(e.domainname); tomoyo_put_name(e.program); return error; } /** - * tomoyo_write_transition_control - Write "struct tomoyo_transition_control" list. + * tomoyo_scan_transition - Try to find specific domain transition type. * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * @type: Type of this entry. + * @list: Pointer to "struct list_head". + * @domainname: The name of current domain. + * @program: The name of requested program. + * @last_name: The last component of @domainname. + * @type: One of values in "enum tomoyo_transition_type". * - * Returns 0 on success, negative value otherwise. + * Returns true if found one, false otherwise. + * + * Caller holds tomoyo_read_lock(). 
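tomoyo_write_transition_control() above now does its own tokenizing: the policy line is split at the literal " from " marker, the left part naming the program and the right part the domain, with "any" accepted as a wildcard on either side. A userspace sketch of just that split, using a made-up policy line:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char line[] = "/bin/mount from <kernel> /usr/sbin/init";
    char *program = line;
    char *domainname = strstr(program, " from ");

    if (domainname) {
        *domainname = '\0';  /* terminate the program part */
        domainname += 6;     /* skip the " from " marker   */
    }
    printf("program    = %s\n", program);                            /* /bin/mount */
    printf("domainname = %s\n", domainname ? domainname : "(none)"); /* <kernel> /usr/sbin/init */
    return 0;
}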
*/ -int tomoyo_write_transition_control(char *data, const bool is_delete, - const u8 type) +static inline bool tomoyo_scan_transition +(const struct list_head *list, const struct tomoyo_path_info *domainname, + const struct tomoyo_path_info *program, const char *last_name, + const enum tomoyo_transition_type type) { - char *domainname = strstr(data, " from "); - if (domainname) { - *domainname = '\0'; - domainname += 6; - } else if (type == TOMOYO_TRANSITION_CONTROL_NO_KEEP || - type == TOMOYO_TRANSITION_CONTROL_KEEP) { - domainname = data; - data = NULL; + const struct tomoyo_transition_control *ptr; + list_for_each_entry_rcu(ptr, list, head.list) { + if (ptr->head.is_deleted || ptr->type != type) + continue; + if (ptr->domainname) { + if (!ptr->is_last_name) { + if (ptr->domainname != domainname) + continue; + } else { + /* + * Use direct strcmp() since this is + * unlikely used. + */ + if (strcmp(ptr->domainname->name, last_name)) + continue; + } + } + if (ptr->program && tomoyo_pathcmp(ptr->program, program)) + continue; + return true; } - return tomoyo_update_transition_control_entry(domainname, data, type, - is_delete); + return false; } /** * tomoyo_transition_type - Get domain transition type. * - * @domainname: The name of domain. - * @program: The name of program. + * @ns: Pointer to "struct tomoyo_policy_namespace". + * @domainname: The name of current domain. + * @program: The name of requested program. * - * Returns TOMOYO_TRANSITION_CONTROL_INITIALIZE if executing @program - * reinitializes domain transition, TOMOYO_TRANSITION_CONTROL_KEEP if executing - * @program suppresses domain transition, others otherwise. + * Returns TOMOYO_TRANSITION_CONTROL_TRANSIT if executing @program causes + * domain transition across namespaces, TOMOYO_TRANSITION_CONTROL_INITIALIZE if + * executing @program reinitializes domain transition within that namespace, + * TOMOYO_TRANSITION_CONTROL_KEEP if executing @program stays at @domainname , + * others otherwise. * * Caller holds tomoyo_read_lock(). */ -static u8 tomoyo_transition_type(const struct tomoyo_path_info *domainname, - const struct tomoyo_path_info *program) +static enum tomoyo_transition_type tomoyo_transition_type +(const struct tomoyo_policy_namespace *ns, + const struct tomoyo_path_info *domainname, + const struct tomoyo_path_info *program) { - const struct tomoyo_transition_control *ptr; const char *last_name = tomoyo_last_word(domainname->name); - u8 type; - for (type = 0; type < TOMOYO_MAX_TRANSITION_TYPE; type++) { - next: - list_for_each_entry_rcu(ptr, &tomoyo_policy_list - [TOMOYO_ID_TRANSITION_CONTROL], - head.list) { - if (ptr->head.is_deleted || ptr->type != type) - continue; - if (ptr->domainname) { - if (!ptr->is_last_name) { - if (ptr->domainname != domainname) - continue; - } else { - /* - * Use direct strcmp() since this is - * unlikely used. - */ - if (strcmp(ptr->domainname->name, - last_name)) - continue; - } - } - if (ptr->program && - tomoyo_pathcmp(ptr->program, program)) - continue; - if (type == TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE) { - /* - * Do not check for initialize_domain if - * no_initialize_domain matched. 
- */ - type = TOMOYO_TRANSITION_CONTROL_NO_KEEP; - goto next; - } - goto done; + enum tomoyo_transition_type type = TOMOYO_TRANSITION_CONTROL_NO_RESET; + while (type < TOMOYO_MAX_TRANSITION_TYPE) { + const struct list_head * const list = + &ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL]; + if (!tomoyo_scan_transition(list, domainname, program, + last_name, type)) { + type++; + continue; } + if (type != TOMOYO_TRANSITION_CONTROL_NO_RESET && + type != TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE) + break; + /* + * Do not check for reset_domain if no_reset_domain matched. + * Do not check for initialize_domain if no_initialize_domain + * matched. + */ + type++; + type++; } - done: return type; } +/** + * tomoyo_same_aggregator - Check for duplicated "struct tomoyo_aggregator" entry. + * + * @a: Pointer to "struct tomoyo_acl_head". + * @b: Pointer to "struct tomoyo_acl_head". + * + * Returns true if @a == @b, false otherwise. + */ static bool tomoyo_same_aggregator(const struct tomoyo_acl_head *a, const struct tomoyo_acl_head *b) { - const struct tomoyo_aggregator *p1 = container_of(a, typeof(*p1), head); - const struct tomoyo_aggregator *p2 = container_of(b, typeof(*p2), head); + const struct tomoyo_aggregator *p1 = container_of(a, typeof(*p1), + head); + const struct tomoyo_aggregator *p2 = container_of(b, typeof(*p2), + head); return p1->original_name == p2->original_name && p1->aggregated_name == p2->aggregated_name; } /** - * tomoyo_update_aggregator_entry - Update "struct tomoyo_aggregator" list. + * tomoyo_write_aggregator - Write "struct tomoyo_aggregator" list. * - * @original_name: The original program's name. - * @aggregated_name: The program name to use. - * @is_delete: True if it is a delete request. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_update_aggregator_entry(const char *original_name, - const char *aggregated_name, - const bool is_delete) +int tomoyo_write_aggregator(struct tomoyo_acl_param *param) { struct tomoyo_aggregator e = { }; - int error = is_delete ? -ENOENT : -ENOMEM; - - if (!tomoyo_correct_path(original_name) || + int error = param->is_delete ? -ENOENT : -ENOMEM; + const char *original_name = tomoyo_read_token(param); + const char *aggregated_name = tomoyo_read_token(param); + if (!tomoyo_correct_word(original_name) || !tomoyo_correct_path(aggregated_name)) return -EINVAL; e.original_name = tomoyo_get_name(original_name); @@ -323,83 +399,269 @@ static int tomoyo_update_aggregator_entry(const char *original_name, if (!e.original_name || !e.aggregated_name || e.aggregated_name->is_patterned) /* No patterns allowed. */ goto out; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list[TOMOYO_ID_AGGREGATOR], + param->list = ¶m->ns->policy_list[TOMOYO_ID_AGGREGATOR]; + error = tomoyo_update_policy(&e.head, sizeof(e), param, tomoyo_same_aggregator); - out: +out: tomoyo_put_name(e.original_name); tomoyo_put_name(e.aggregated_name); return error; } /** - * tomoyo_write_aggregator - Write "struct tomoyo_aggregator" list. + * tomoyo_find_namespace - Find specified namespace. * - * @data: String to parse. - * @is_delete: True if it is a delete request. + * @name: Name of namespace to find. + * @len: Length of @name. * - * Returns 0 on success, negative value otherwise. + * Returns pointer to "struct tomoyo_policy_namespace" if found, + * NULL otherwise. * * Caller holds tomoyo_read_lock(). 
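+ * A namespace name is the first space-delimited component of a domainname
+ * (for example the "<kernel>" part of "<kernel> /usr/sbin/sshd", assuming
+ * the conventional root namespace name), so the comparison covers @len
+ * bytes and then requires either end of string or a ' '.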
*/ -int tomoyo_write_aggregator(char *data, const bool is_delete) +static struct tomoyo_policy_namespace *tomoyo_find_namespace +(const char *name, const unsigned int len) { - char *cp = strchr(data, ' '); + struct tomoyo_policy_namespace *ns; + list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { + if (strncmp(name, ns->name, len) || + (name[len] && name[len] != ' ')) + continue; + return ns; + } + return NULL; +} - if (!cp) - return -EINVAL; - *cp++ = '\0'; - return tomoyo_update_aggregator_entry(data, cp, is_delete); +/** + * tomoyo_assign_namespace - Create a new namespace. + * + * @domainname: Name of namespace to create. + * + * Returns pointer to "struct tomoyo_policy_namespace" on success, + * NULL otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +struct tomoyo_policy_namespace *tomoyo_assign_namespace(const char *domainname) +{ + struct tomoyo_policy_namespace *ptr; + struct tomoyo_policy_namespace *entry; + const char *cp = domainname; + unsigned int len = 0; + while (*cp && *cp++ != ' ') + len++; + ptr = tomoyo_find_namespace(domainname, len); + if (ptr) + return ptr; + if (len >= TOMOYO_EXEC_TMPSIZE - 10 || !tomoyo_domain_def(domainname)) + return NULL; + entry = kzalloc(sizeof(*entry) + len + 1, GFP_NOFS); + if (!entry) + return NULL; + if (mutex_lock_interruptible(&tomoyo_policy_lock)) + goto out; + ptr = tomoyo_find_namespace(domainname, len); + if (!ptr && tomoyo_memory_ok(entry)) { + char *name = (char *) (entry + 1); + ptr = entry; + memmove(name, domainname, len); + name[len] = '\0'; + entry->name = name; + tomoyo_init_policy_namespace(entry); + entry = NULL; + } + mutex_unlock(&tomoyo_policy_lock); +out: + kfree(entry); + return ptr; +} + +/** + * tomoyo_namespace_jump - Check for namespace jump. + * + * @domainname: Name of domain. + * + * Returns true if namespace differs, false otherwise. + */ +static bool tomoyo_namespace_jump(const char *domainname) +{ + const char *namespace = tomoyo_current_namespace()->name; + const int len = strlen(namespace); + return strncmp(domainname, namespace, len) || + (domainname[len] && domainname[len] != ' '); } /** - * tomoyo_assign_domain - Create a domain. + * tomoyo_assign_domain - Create a domain or a namespace. * * @domainname: The name of domain. - * @profile: Profile number to assign if the domain was newly created. + * @transit: True if transit to domain found or created. * * Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise. * * Caller holds tomoyo_read_lock(). */ struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, - const u8 profile) + const bool transit) { - struct tomoyo_domain_info *entry; - struct tomoyo_domain_info *domain = NULL; - const struct tomoyo_path_info *saved_domainname; - bool found = false; - - if (!tomoyo_correct_domain(domainname)) + struct tomoyo_domain_info e = { }; + struct tomoyo_domain_info *entry = tomoyo_find_domain(domainname); + bool created = false; + if (entry) { + if (transit) { + /* + * Since namespace is created at runtime, profiles may + * not be created by the moment the process transits to + * that domain. Do not perform domain transition if + * profile for that domain is not yet created. + */ + if (tomoyo_policy_loaded && + !entry->ns->profile_ptr[entry->profile]) + return NULL; + } + return entry; + } + /* Requested domain does not exist. */ + /* Don't create requested domain if domainname is invalid. 
*/ + if (strlen(domainname) >= TOMOYO_EXEC_TMPSIZE - 10 || + !tomoyo_correct_domain(domainname)) + return NULL; + /* + * Since definition of profiles and acl_groups may differ across + * namespaces, do not inherit "use_profile" and "use_group" settings + * by automatically creating requested domain upon domain transition. + */ + if (transit && tomoyo_namespace_jump(domainname)) + return NULL; + e.ns = tomoyo_assign_namespace(domainname); + if (!e.ns) return NULL; - saved_domainname = tomoyo_get_name(domainname); - if (!saved_domainname) + /* + * "use_profile" and "use_group" settings for automatically created + * domains are inherited from current domain. These are 0 for manually + * created domains. + */ + if (transit) { + const struct tomoyo_domain_info *domain = tomoyo_domain(); + e.profile = domain->profile; + e.group = domain->group; + } + e.domainname = tomoyo_get_name(domainname); + if (!e.domainname) return NULL; - entry = kzalloc(sizeof(*entry), GFP_NOFS); if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { - if (domain->is_deleted || - tomoyo_pathcmp(saved_domainname, domain->domainname)) - continue; - found = true; - break; - } - if (!found && tomoyo_memory_ok(entry)) { - INIT_LIST_HEAD(&entry->acl_info_list); - entry->domainname = saved_domainname; - saved_domainname = NULL; - entry->profile = profile; - list_add_tail_rcu(&entry->list, &tomoyo_domain_list); - domain = entry; - entry = NULL; - found = true; + entry = tomoyo_find_domain(domainname); + if (!entry) { + entry = tomoyo_commit_ok(&e, sizeof(e)); + if (entry) { + INIT_LIST_HEAD(&entry->acl_info_list); + list_add_tail_rcu(&entry->list, &tomoyo_domain_list); + created = true; + } } mutex_unlock(&tomoyo_policy_lock); - out: - tomoyo_put_name(saved_domainname); - kfree(entry); - return found ? domain : NULL; +out: + tomoyo_put_name(e.domainname); + if (entry && transit) { + if (created) { + struct tomoyo_request_info r; + tomoyo_init_request_info(&r, entry, + TOMOYO_MAC_FILE_EXECUTE); + r.granted = false; + tomoyo_write_log(&r, "use_profile %u\n", + entry->profile); + tomoyo_write_log(&r, "use_group %u\n", entry->group); + tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES); + } + } + return entry; +} + +/** + * tomoyo_environ - Check permission for environment variable names. + * + * @ee: Pointer to "struct tomoyo_execve". + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_environ(struct tomoyo_execve *ee) +{ + struct tomoyo_request_info *r = &ee->r; + struct linux_binprm *bprm = ee->bprm; + /* env_page.data is allocated by tomoyo_dump_page(). */ + struct tomoyo_page_dump env_page = { }; + char *arg_ptr; /* Size is TOMOYO_EXEC_TMPSIZE bytes */ + int arg_len = 0; + unsigned long pos = bprm->p; + int offset = pos % PAGE_SIZE; + int argv_count = bprm->argc; + int envp_count = bprm->envc; + int error = -ENOMEM; + + ee->r.type = TOMOYO_MAC_ENVIRON; + ee->r.profile = r->domain->profile; + ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile, + TOMOYO_MAC_ENVIRON); + if (!r->mode || !envp_count) + return 0; + arg_ptr = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS); + if (!arg_ptr) + goto out; + while (error == -ENOMEM) { + if (!tomoyo_dump_page(bprm, pos, &env_page)) + goto out; + pos += PAGE_SIZE - offset; + /* Read. 
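+ * Skip the remaining argv[] strings first, then copy each environment
+ * string into arg_ptr with '=' replaced by '\0' (so that only the
+ * variable's name is checked), '\' doubled, and bytes outside the
+ * printable ASCII range escaped as \ooo.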
*/ + while (argv_count && offset < PAGE_SIZE) { + if (!env_page.data[offset++]) + argv_count--; + } + if (argv_count) { + offset = 0; + continue; + } + while (offset < PAGE_SIZE) { + const unsigned char c = env_page.data[offset++]; + + if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) { + if (c == '=') { + arg_ptr[arg_len++] = '\0'; + } else if (c == '\\') { + arg_ptr[arg_len++] = '\\'; + arg_ptr[arg_len++] = '\\'; + } else if (c > ' ' && c < 127) { + arg_ptr[arg_len++] = c; + } else { + arg_ptr[arg_len++] = '\\'; + arg_ptr[arg_len++] = (c >> 6) + '0'; + arg_ptr[arg_len++] + = ((c >> 3) & 7) + '0'; + arg_ptr[arg_len++] = (c & 7) + '0'; + } + } else { + arg_ptr[arg_len] = '\0'; + } + if (c) + continue; + if (tomoyo_env_perm(r, arg_ptr)) { + error = -EPERM; + break; + } + if (!--envp_count) { + error = 0; + break; + } + arg_len = 0; + } + offset = 0; + } +out: + if (r->mode != TOMOYO_CONFIG_ENFORCING) + error = 0; + kfree(env_page.data); + kfree(arg_ptr); + return error; } /** @@ -413,54 +675,54 @@ struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname, */ int tomoyo_find_next_domain(struct linux_binprm *bprm) { - struct tomoyo_request_info r; - char *tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS); struct tomoyo_domain_info *old_domain = tomoyo_domain(); struct tomoyo_domain_info *domain = NULL; const char *original_name = bprm->filename; - u8 mode; - bool is_enforce; int retval = -ENOMEM; - bool need_kfree = false; - struct tomoyo_path_info rn = { }; /* real name */ - - mode = tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE); - is_enforce = (mode == TOMOYO_CONFIG_ENFORCING); - if (!tmp) - goto out; + bool reject_on_transition_failure = false; + const struct tomoyo_path_info *candidate; + struct tomoyo_path_info exename; + struct tomoyo_execve *ee = kzalloc(sizeof(*ee), GFP_NOFS); - retry: - if (need_kfree) { - kfree(rn.name); - need_kfree = false; + if (!ee) + return -ENOMEM; + ee->tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS); + if (!ee->tmp) { + kfree(ee); + return -ENOMEM; } + /* ee->dump->data is allocated by tomoyo_dump_page(). */ + tomoyo_init_request_info(&ee->r, NULL, TOMOYO_MAC_FILE_EXECUTE); + ee->r.ee = ee; + ee->bprm = bprm; + ee->r.obj = &ee->obj; + ee->obj.path1 = bprm->file->f_path; /* Get symlink's pathname of program. */ retval = -ENOENT; - rn.name = tomoyo_realpath_nofollow(original_name); - if (!rn.name) + exename.name = tomoyo_realpath_nofollow(original_name); + if (!exename.name) goto out; - tomoyo_fill_path_info(&rn); - need_kfree = true; - + tomoyo_fill_path_info(&exename); +retry: /* Check 'aggregator' directive. */ { struct tomoyo_aggregator *ptr; - list_for_each_entry_rcu(ptr, &tomoyo_policy_list - [TOMOYO_ID_AGGREGATOR], head.list) { + struct list_head *list = + &old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR]; + /* Check 'aggregator' directive. */ + candidate = &exename; + list_for_each_entry_rcu(ptr, list, head.list) { if (ptr->head.is_deleted || - !tomoyo_path_matches_pattern(&rn, + !tomoyo_path_matches_pattern(&exename, ptr->original_name)) continue; - kfree(rn.name); - need_kfree = false; - /* This is OK because it is read only. */ - rn = *ptr->aggregated_name; + candidate = ptr->aggregated_name; break; } } /* Check execute permission. 
*/ - retval = tomoyo_path_permission(&r, TOMOYO_TYPE_EXECUTE, &rn); + retval = tomoyo_execute_permission(&ee->r, candidate); if (retval == TOMOYO_RETRY_REQUEST) goto retry; if (retval < 0) @@ -471,22 +733,65 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) * wildcard) rather than the pathname passed to execve() * (which never contains wildcard). */ - if (r.param.path.matched_path) { - if (need_kfree) - kfree(rn.name); - need_kfree = false; - /* This is OK because it is read only. */ - rn = *r.param.path.matched_path; - } + if (ee->r.param.path.matched_path) + candidate = ee->r.param.path.matched_path; - /* Calculate domain to transit to. */ - switch (tomoyo_transition_type(old_domain->domainname, &rn)) { + /* + * Check for domain transition preference if "file execute" matched. + * If preference is given, make do_execve() fail if domain transition + * has failed, for domain transition preference should be used with + * destination domain defined. + */ + if (ee->transition) { + const char *domainname = ee->transition->name; + reject_on_transition_failure = true; + if (!strcmp(domainname, "keep")) + goto force_keep_domain; + if (!strcmp(domainname, "child")) + goto force_child_domain; + if (!strcmp(domainname, "reset")) + goto force_reset_domain; + if (!strcmp(domainname, "initialize")) + goto force_initialize_domain; + if (!strcmp(domainname, "parent")) { + char *cp; + strncpy(ee->tmp, old_domain->domainname->name, + TOMOYO_EXEC_TMPSIZE - 1); + cp = strrchr(ee->tmp, ' '); + if (cp) + *cp = '\0'; + } else if (*domainname == '<') + strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1); + else + snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", + old_domain->domainname->name, domainname); + goto force_jump_domain; + } + /* + * No domain transition preference specified. + * Calculate domain to transit to. + */ + switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname, + candidate)) { + case TOMOYO_TRANSITION_CONTROL_RESET: +force_reset_domain: + /* Transit to the root of specified namespace. */ + snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>", + candidate->name); + /* + * Make do_execve() fail if domain transition across namespaces + * has failed. + */ + reject_on_transition_failure = true; + break; case TOMOYO_TRANSITION_CONTROL_INITIALIZE: - /* Transit to the child of tomoyo_kernel_domain domain. */ - snprintf(tmp, TOMOYO_EXEC_TMPSIZE - 1, TOMOYO_ROOT_NAME " " - "%s", rn.name); +force_initialize_domain: + /* Transit to the child of current namespace's root. */ + snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", + old_domain->ns->name, candidate->name); break; case TOMOYO_TRANSITION_CONTROL_KEEP: +force_keep_domain: /* Keep current domain. */ domain = old_domain; break; @@ -500,43 +805,97 @@ int tomoyo_find_next_domain(struct linux_binprm *bprm) * before /sbin/init. */ domain = old_domain; - } else { - /* Normal domain transition. */ - snprintf(tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", - old_domain->domainname->name, rn.name); + break; } +force_child_domain: + /* Normal domain transition. 
*/ + snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s", + old_domain->domainname->name, candidate->name); break; } - if (domain || strlen(tmp) >= TOMOYO_EXEC_TMPSIZE - 10) - goto done; - domain = tomoyo_find_domain(tmp); +force_jump_domain: + if (!domain) + domain = tomoyo_assign_domain(ee->tmp, true); if (domain) - goto done; - if (is_enforce) { - int error = tomoyo_supervisor(&r, "# wants to create domain\n" - "%s\n", tmp); - if (error == TOMOYO_RETRY_REQUEST) - goto retry; - if (error < 0) - goto done; + retval = 0; + else if (reject_on_transition_failure) { + printk(KERN_WARNING "ERROR: Domain '%s' not ready.\n", + ee->tmp); + retval = -ENOMEM; + } else if (ee->r.mode == TOMOYO_CONFIG_ENFORCING) + retval = -ENOMEM; + else { + retval = 0; + if (!old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED]) { + old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED] = true; + ee->r.granted = false; + tomoyo_write_log(&ee->r, "%s", tomoyo_dif + [TOMOYO_DIF_TRANSITION_FAILED]); + printk(KERN_WARNING + "ERROR: Domain '%s' not defined.\n", ee->tmp); + } } - domain = tomoyo_assign_domain(tmp, old_domain->profile); - done: - if (domain) - goto out; - printk(KERN_WARNING "TOMOYO-ERROR: Domain '%s' not defined.\n", tmp); - if (is_enforce) - retval = -EPERM; - else - old_domain->transition_failed = true; out: if (!domain) domain = old_domain; /* Update reference count on "struct tomoyo_domain_info". */ atomic_inc(&domain->users); bprm->cred->security = domain; - if (need_kfree) - kfree(rn.name); - kfree(tmp); + kfree(exename.name); + if (!retval) { + ee->r.domain = domain; + retval = tomoyo_environ(ee); + } + kfree(ee->tmp); + kfree(ee->dump.data); + kfree(ee); return retval; } + +/** + * tomoyo_dump_page - Dump a page to buffer. + * + * @bprm: Pointer to "struct linux_binprm". + * @pos: Location to dump. + * @dump: Poiner to "struct tomoyo_page_dump". + * + * Returns true on success, false otherwise. + */ +bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos, + struct tomoyo_page_dump *dump) +{ + struct page *page; + + /* dump->data is released by tomoyo_find_next_domain(). */ + if (!dump->data) { + dump->data = kzalloc(PAGE_SIZE, GFP_NOFS); + if (!dump->data) + return false; + } + /* Same with get_arg_page(bprm, pos, 0) in fs/exec.c */ +#ifdef CONFIG_MMU + if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0) + return false; +#else + page = bprm->page[pos / PAGE_SIZE]; +#endif + if (page != dump->page) { + const unsigned int offset = pos % PAGE_SIZE; + /* + * Maybe kmap()/kunmap() should be used here. + * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic(). + * So do I. + */ + char *kaddr = kmap_atomic(page); + + dump->page = page; + memcpy(dump->data + offset, kaddr + offset, + PAGE_SIZE - offset); + kunmap_atomic(kaddr); + } + /* Same with put_arg_page(page) in fs/exec.c */ +#ifdef CONFIG_MMU + put_page(page); +#endif + return true; +} diff --git a/security/tomoyo/environ.c b/security/tomoyo/environ.c new file mode 100644 index 00000000000..ad4c6e18a43 --- /dev/null +++ b/security/tomoyo/environ.c @@ -0,0 +1,122 @@ +/* + * security/tomoyo/environ.c + * + * Copyright (C) 2005-2011 NTT DATA CORPORATION + */ + +#include "common.h" + +/** + * tomoyo_check_env_acl - Check permission for environment variable's name. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. 
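+ * The requested name is matched against acl->env with
+ * tomoyo_path_matches_pattern(), so the wildcard syntax accepted for
+ * pathnames (e.g. a hypothetical "misc env LD_\*" entry) also applies to
+ * environment variable names.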
+ */ +static bool tomoyo_check_env_acl(struct tomoyo_request_info *r, + const struct tomoyo_acl_info *ptr) +{ + const struct tomoyo_env_acl *acl = + container_of(ptr, typeof(*acl), head); + + return tomoyo_path_matches_pattern(r->param.environ.name, acl->env); +} + +/** + * tomoyo_audit_env_log - Audit environment variable name log. + * + * @r: Pointer to "struct tomoyo_request_info". + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_audit_env_log(struct tomoyo_request_info *r) +{ + return tomoyo_supervisor(r, "misc env %s\n", + r->param.environ.name->name); +} + +/** + * tomoyo_env_perm - Check permission for environment variable's name. + * + * @r: Pointer to "struct tomoyo_request_info". + * @env: The name of environment variable. + * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env) +{ + struct tomoyo_path_info environ; + int error; + + if (!env || !*env) + return 0; + environ.name = env; + tomoyo_fill_path_info(&environ); + r->param_type = TOMOYO_TYPE_ENV_ACL; + r->param.environ.name = &environ; + do { + tomoyo_check_acl(r, tomoyo_check_env_acl); + error = tomoyo_audit_env_log(r); + } while (error == TOMOYO_RETRY_REQUEST); + return error; +} + +/** + * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b, false otherwise. + */ +static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a, + const struct tomoyo_acl_info *b) +{ + const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head); + const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head); + + return p1->env == p2->env; +} + +/** + * tomoyo_write_env - Write "struct tomoyo_env_acl" list. + * + * @param: Pointer to "struct tomoyo_acl_param". + * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +static int tomoyo_write_env(struct tomoyo_acl_param *param) +{ + struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL }; + int error = -ENOMEM; + const char *data = tomoyo_read_token(param); + + if (!tomoyo_correct_word(data) || strchr(data, '=')) + return -EINVAL; + e.env = tomoyo_get_name(data); + if (!e.env) + return error; + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_env_acl, NULL); + tomoyo_put_name(e.env); + return error; +} + +/** + * tomoyo_write_misc - Update environment variable list. + * + * @param: Pointer to "struct tomoyo_acl_param". + * + * Returns 0 on success, negative value otherwise. + */ +int tomoyo_write_misc(struct tomoyo_acl_param *param) +{ + if (tomoyo_str_starts(¶m->data, "env ")) + return tomoyo_write_env(param); + return -EINVAL; +} diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c index 9d32f182301..40039079074 100644 --- a/security/tomoyo/file.c +++ b/security/tomoyo/file.c @@ -1,80 +1,51 @@ /* * security/tomoyo/file.c * - * Pathname restriction functions. - * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include "common.h" #include <linux/slab.h> -/* Keyword array for operations with one pathname. 
*/ -const char *tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = { - [TOMOYO_TYPE_READ_WRITE] = "read/write", - [TOMOYO_TYPE_EXECUTE] = "execute", - [TOMOYO_TYPE_READ] = "read", - [TOMOYO_TYPE_WRITE] = "write", - [TOMOYO_TYPE_UNLINK] = "unlink", - [TOMOYO_TYPE_RMDIR] = "rmdir", - [TOMOYO_TYPE_TRUNCATE] = "truncate", - [TOMOYO_TYPE_SYMLINK] = "symlink", - [TOMOYO_TYPE_REWRITE] = "rewrite", - [TOMOYO_TYPE_CHROOT] = "chroot", - [TOMOYO_TYPE_UMOUNT] = "unmount", -}; - -/* Keyword array for operations with one pathname and three numbers. */ -const char *tomoyo_mkdev_keyword[TOMOYO_MAX_MKDEV_OPERATION] = { - [TOMOYO_TYPE_MKBLOCK] = "mkblock", - [TOMOYO_TYPE_MKCHAR] = "mkchar", -}; - -/* Keyword array for operations with two pathnames. */ -const char *tomoyo_path2_keyword[TOMOYO_MAX_PATH2_OPERATION] = { - [TOMOYO_TYPE_LINK] = "link", - [TOMOYO_TYPE_RENAME] = "rename", - [TOMOYO_TYPE_PIVOT_ROOT] = "pivot_root", -}; - -/* Keyword array for operations with one pathname and one number. */ -const char *tomoyo_path_number_keyword[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { - [TOMOYO_TYPE_CREATE] = "create", - [TOMOYO_TYPE_MKDIR] = "mkdir", - [TOMOYO_TYPE_MKFIFO] = "mkfifo", - [TOMOYO_TYPE_MKSOCK] = "mksock", - [TOMOYO_TYPE_IOCTL] = "ioctl", - [TOMOYO_TYPE_CHMOD] = "chmod", - [TOMOYO_TYPE_CHOWN] = "chown", - [TOMOYO_TYPE_CHGRP] = "chgrp", -}; - +/* + * Mapping table from "enum tomoyo_path_acl_index" to "enum tomoyo_mac_index". + */ static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = { - [TOMOYO_TYPE_READ_WRITE] = TOMOYO_MAC_FILE_OPEN, [TOMOYO_TYPE_EXECUTE] = TOMOYO_MAC_FILE_EXECUTE, [TOMOYO_TYPE_READ] = TOMOYO_MAC_FILE_OPEN, [TOMOYO_TYPE_WRITE] = TOMOYO_MAC_FILE_OPEN, + [TOMOYO_TYPE_APPEND] = TOMOYO_MAC_FILE_OPEN, [TOMOYO_TYPE_UNLINK] = TOMOYO_MAC_FILE_UNLINK, + [TOMOYO_TYPE_GETATTR] = TOMOYO_MAC_FILE_GETATTR, [TOMOYO_TYPE_RMDIR] = TOMOYO_MAC_FILE_RMDIR, [TOMOYO_TYPE_TRUNCATE] = TOMOYO_MAC_FILE_TRUNCATE, [TOMOYO_TYPE_SYMLINK] = TOMOYO_MAC_FILE_SYMLINK, - [TOMOYO_TYPE_REWRITE] = TOMOYO_MAC_FILE_REWRITE, [TOMOYO_TYPE_CHROOT] = TOMOYO_MAC_FILE_CHROOT, [TOMOYO_TYPE_UMOUNT] = TOMOYO_MAC_FILE_UMOUNT, }; -static const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = { +/* + * Mapping table from "enum tomoyo_mkdev_acl_index" to "enum tomoyo_mac_index". + */ +const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = { [TOMOYO_TYPE_MKBLOCK] = TOMOYO_MAC_FILE_MKBLOCK, [TOMOYO_TYPE_MKCHAR] = TOMOYO_MAC_FILE_MKCHAR, }; -static const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = { +/* + * Mapping table from "enum tomoyo_path2_acl_index" to "enum tomoyo_mac_index". + */ +const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = { [TOMOYO_TYPE_LINK] = TOMOYO_MAC_FILE_LINK, [TOMOYO_TYPE_RENAME] = TOMOYO_MAC_FILE_RENAME, [TOMOYO_TYPE_PIVOT_ROOT] = TOMOYO_MAC_FILE_PIVOT_ROOT, }; -static const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { +/* + * Mapping table from "enum tomoyo_path_number_acl_index" to + * "enum tomoyo_mac_index". + */ +const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { [TOMOYO_TYPE_CREATE] = TOMOYO_MAC_FILE_CREATE, [TOMOYO_TYPE_MKDIR] = TOMOYO_MAC_FILE_MKDIR, [TOMOYO_TYPE_MKFIFO] = TOMOYO_MAC_FILE_MKFIFO, @@ -85,41 +56,76 @@ static const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = { [TOMOYO_TYPE_CHGRP] = TOMOYO_MAC_FILE_CHGRP, }; +/** + * tomoyo_put_name_union - Drop reference on "struct tomoyo_name_union". + * + * @ptr: Pointer to "struct tomoyo_name_union". + * + * Returns nothing. 
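+ * Both the group and the filename reference are dropped unconditionally;
+ * this relies on tomoyo_put_group() and tomoyo_put_name() accepting a
+ * NULL pointer for whichever member is not in use.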
+ */ void tomoyo_put_name_union(struct tomoyo_name_union *ptr) { - if (!ptr) - return; - if (ptr->is_group) - tomoyo_put_group(ptr->group); - else - tomoyo_put_name(ptr->filename); + tomoyo_put_group(ptr->group); + tomoyo_put_name(ptr->filename); } +/** + * tomoyo_compare_name_union - Check whether a name matches "struct tomoyo_name_union" or not. + * + * @name: Pointer to "struct tomoyo_path_info". + * @ptr: Pointer to "struct tomoyo_name_union". + * + * Returns "struct tomoyo_path_info" if @name matches @ptr, NULL otherwise. + */ const struct tomoyo_path_info * tomoyo_compare_name_union(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr) { - if (ptr->is_group) + if (ptr->group) return tomoyo_path_matches_group(name, ptr->group); if (tomoyo_path_matches_pattern(name, ptr->filename)) return ptr->filename; return NULL; } +/** + * tomoyo_put_number_union - Drop reference on "struct tomoyo_number_union". + * + * @ptr: Pointer to "struct tomoyo_number_union". + * + * Returns nothing. + */ void tomoyo_put_number_union(struct tomoyo_number_union *ptr) { - if (ptr && ptr->is_group) - tomoyo_put_group(ptr->group); + tomoyo_put_group(ptr->group); } +/** + * tomoyo_compare_number_union - Check whether a value matches "struct tomoyo_number_union" or not. + * + * @value: Number to check. + * @ptr: Pointer to "struct tomoyo_number_union". + * + * Returns true if @value matches @ptr, false otherwise. + */ bool tomoyo_compare_number_union(const unsigned long value, const struct tomoyo_number_union *ptr) { - if (ptr->is_group) + if (ptr->group) return tomoyo_number_matches_group(value, value, ptr->group); return value >= ptr->values[0] && value <= ptr->values[1]; } +/** + * tomoyo_add_slash - Add trailing '/' if needed. + * + * @buf: Pointer to "struct tomoyo_path_info". + * + * Returns nothing. + * + * @buf must be generated by tomoyo_encode() because this function does not + * allocate memory for adding '/'. + */ static void tomoyo_add_slash(struct tomoyo_path_info *buf) { if (buf->is_dir) @@ -132,24 +138,6 @@ static void tomoyo_add_slash(struct tomoyo_path_info *buf) } /** - * tomoyo_strendswith - Check whether the token ends with the given token. - * - * @name: The token to check. - * @tail: The token to find. - * - * Returns true if @name ends with @tail, false otherwise. - */ -static bool tomoyo_strendswith(const char *name, const char *tail) -{ - int len; - - if (!name || !tail) - return false; - len = strlen(name) - strlen(tail); - return len >= 0 && !strcmp(name + len, tail); -} - -/** * tomoyo_get_realpath - Get realpath. * * @buf: Pointer to "struct tomoyo_path_info". 
@@ -164,7 +152,7 @@ static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, struct path *path) tomoyo_fill_path_info(buf); return true; } - return false; + return false; } /** @@ -176,13 +164,9 @@ static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, struct path *path) */ static int tomoyo_audit_path_log(struct tomoyo_request_info *r) { - const char *operation = tomoyo_path_keyword[r->param.path.operation]; - const struct tomoyo_path_info *filename = r->param.path.filename; - if (r->granted) - return 0; - tomoyo_warn_log(r, "%s %s", operation, filename->name); - return tomoyo_supervisor(r, "allow_%s %s\n", operation, - tomoyo_pattern(filename)); + return tomoyo_supervisor(r, "file %s %s\n", tomoyo_path_keyword + [r->param.path.operation], + r->param.path.filename->name); } /** @@ -194,16 +178,10 @@ static int tomoyo_audit_path_log(struct tomoyo_request_info *r) */ static int tomoyo_audit_path2_log(struct tomoyo_request_info *r) { - const char *operation = tomoyo_path2_keyword[r->param.path2.operation]; - const struct tomoyo_path_info *filename1 = r->param.path2.filename1; - const struct tomoyo_path_info *filename2 = r->param.path2.filename2; - if (r->granted) - return 0; - tomoyo_warn_log(r, "%s %s %s", operation, filename1->name, - filename2->name); - return tomoyo_supervisor(r, "allow_%s %s %s\n", operation, - tomoyo_pattern(filename1), - tomoyo_pattern(filename2)); + return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords + [tomoyo_pp2mac[r->param.path2.operation]], + r->param.path2.filename1->name, + r->param.path2.filename2->name); } /** @@ -215,24 +193,18 @@ static int tomoyo_audit_path2_log(struct tomoyo_request_info *r) */ static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r) { - const char *operation = tomoyo_mkdev_keyword[r->param.mkdev.operation]; - const struct tomoyo_path_info *filename = r->param.mkdev.filename; - const unsigned int major = r->param.mkdev.major; - const unsigned int minor = r->param.mkdev.minor; - const unsigned int mode = r->param.mkdev.mode; - if (r->granted) - return 0; - tomoyo_warn_log(r, "%s %s 0%o %u %u", operation, filename->name, mode, - major, minor); - return tomoyo_supervisor(r, "allow_%s %s 0%o %u %u\n", operation, - tomoyo_pattern(filename), mode, major, minor); + return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n", + tomoyo_mac_keywords + [tomoyo_pnnn2mac[r->param.mkdev.operation]], + r->param.mkdev.filename->name, + r->param.mkdev.mode, r->param.mkdev.major, + r->param.mkdev.minor); } /** * tomoyo_audit_path_number_log - Audit path/number request log. * - * @r: Pointer to "struct tomoyo_request_info". - * @error: Error code. + * @r: Pointer to "struct tomoyo_request_info". * * Returns 0 on success, negative value otherwise. 
*/ @@ -240,11 +212,7 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) { const u8 type = r->param.path_number.operation; u8 radix; - const struct tomoyo_path_info *filename = r->param.path_number.filename; - const char *operation = tomoyo_path_number_keyword[type]; char buffer[64]; - if (r->granted) - return 0; switch (type) { case TOMOYO_TYPE_CREATE: case TOMOYO_TYPE_MKDIR: @@ -262,251 +230,23 @@ static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r) } tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number, radix); - tomoyo_warn_log(r, "%s %s %s", operation, filename->name, buffer); - return tomoyo_supervisor(r, "allow_%s %s %s\n", operation, - tomoyo_pattern(filename), buffer); -} - -static bool tomoyo_same_globally_readable(const struct tomoyo_acl_head *a, - const struct tomoyo_acl_head *b) -{ - return container_of(a, struct tomoyo_readable_file, - head)->filename == - container_of(b, struct tomoyo_readable_file, - head)->filename; -} - -/** - * tomoyo_update_globally_readable_entry - Update "struct tomoyo_readable_file" list. - * - * @filename: Filename unconditionally permitted to open() for reading. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -static int tomoyo_update_globally_readable_entry(const char *filename, - const bool is_delete) -{ - struct tomoyo_readable_file e = { }; - int error; - - if (!tomoyo_correct_word(filename)) - return -EINVAL; - e.filename = tomoyo_get_name(filename); - if (!e.filename) - return -ENOMEM; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list - [TOMOYO_ID_GLOBALLY_READABLE], - tomoyo_same_globally_readable); - tomoyo_put_name(e.filename); - return error; -} - -/** - * tomoyo_globally_readable_file - Check if the file is unconditionnaly permitted to be open()ed for reading. - * - * @filename: The filename to check. - * - * Returns true if any domain can open @filename for reading, false otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -static bool tomoyo_globally_readable_file(const struct tomoyo_path_info * - filename) -{ - struct tomoyo_readable_file *ptr; - bool found = false; - - list_for_each_entry_rcu(ptr, &tomoyo_policy_list - [TOMOYO_ID_GLOBALLY_READABLE], head.list) { - if (!ptr->head.is_deleted && - tomoyo_path_matches_pattern(filename, ptr->filename)) { - found = true; - break; - } - } - return found; -} - -/** - * tomoyo_write_globally_readable - Write "struct tomoyo_readable_file" list. - * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -int tomoyo_write_globally_readable(char *data, const bool is_delete) -{ - return tomoyo_update_globally_readable_entry(data, is_delete); -} - -static bool tomoyo_same_pattern(const struct tomoyo_acl_head *a, - const struct tomoyo_acl_head *b) -{ - return container_of(a, struct tomoyo_no_pattern, head)->pattern == - container_of(b, struct tomoyo_no_pattern, head)->pattern; -} - -/** - * tomoyo_update_file_pattern_entry - Update "struct tomoyo_no_pattern" list. - * - * @pattern: Pathname pattern. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). 
- */ -static int tomoyo_update_file_pattern_entry(const char *pattern, - const bool is_delete) -{ - struct tomoyo_no_pattern e = { }; - int error; - - if (!tomoyo_correct_word(pattern)) - return -EINVAL; - e.pattern = tomoyo_get_name(pattern); - if (!e.pattern) - return -ENOMEM; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list[TOMOYO_ID_PATTERN], - tomoyo_same_pattern); - tomoyo_put_name(e.pattern); - return error; -} - -/** - * tomoyo_pattern - Get patterned pathname. - * - * @filename: The filename to find patterned pathname. - * - * Returns pointer to pathname pattern if matched, @filename otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -const char *tomoyo_pattern(const struct tomoyo_path_info *filename) -{ - struct tomoyo_no_pattern *ptr; - const struct tomoyo_path_info *pattern = NULL; - - list_for_each_entry_rcu(ptr, &tomoyo_policy_list[TOMOYO_ID_PATTERN], - head.list) { - if (ptr->head.is_deleted) - continue; - if (!tomoyo_path_matches_pattern(filename, ptr->pattern)) - continue; - pattern = ptr->pattern; - if (tomoyo_strendswith(pattern->name, "/\\*")) { - /* Do nothing. Try to find the better match. */ - } else { - /* This would be the better match. Use this. */ - break; - } - } - if (pattern) - filename = pattern; - return filename->name; -} - -/** - * tomoyo_write_pattern - Write "struct tomoyo_no_pattern" list. - * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -int tomoyo_write_pattern(char *data, const bool is_delete) -{ - return tomoyo_update_file_pattern_entry(data, is_delete); -} - -static bool tomoyo_same_no_rewrite(const struct tomoyo_acl_head *a, - const struct tomoyo_acl_head *b) -{ - return container_of(a, struct tomoyo_no_rewrite, head)->pattern - == container_of(b, struct tomoyo_no_rewrite, head) - ->pattern; -} - -/** - * tomoyo_update_no_rewrite_entry - Update "struct tomoyo_no_rewrite" list. - * - * @pattern: Pathname pattern that are not rewritable by default. - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -static int tomoyo_update_no_rewrite_entry(const char *pattern, - const bool is_delete) -{ - struct tomoyo_no_rewrite e = { }; - int error; - - if (!tomoyo_correct_word(pattern)) - return -EINVAL; - e.pattern = tomoyo_get_name(pattern); - if (!e.pattern) - return -ENOMEM; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - &tomoyo_policy_list[TOMOYO_ID_NO_REWRITE], - tomoyo_same_no_rewrite); - tomoyo_put_name(e.pattern); - return error; -} - -/** - * tomoyo_no_rewrite_file - Check if the given pathname is not permitted to be rewrited. - * - * @filename: Filename to check. - * - * Returns true if @filename is specified by "deny_rewrite" directive, - * false otherwise. - * - * Caller holds tomoyo_read_lock(). 
- */ -static bool tomoyo_no_rewrite_file(const struct tomoyo_path_info *filename) -{ - struct tomoyo_no_rewrite *ptr; - bool found = false; - - list_for_each_entry_rcu(ptr, &tomoyo_policy_list[TOMOYO_ID_NO_REWRITE], - head.list) { - if (ptr->head.is_deleted) - continue; - if (!tomoyo_path_matches_pattern(filename, ptr->pattern)) - continue; - found = true; - break; - } - return found; + return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords + [tomoyo_pn2mac[type]], + r->param.path_number.filename->name, buffer); } /** - * tomoyo_write_no_rewrite - Write "struct tomoyo_no_rewrite" list. + * tomoyo_check_path_acl - Check permission for path operation. * - * @data: String to parse. - * @is_delete: True if it is a delete request. + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". * - * Returns 0 on success, negative value otherwise. + * Returns true if granted, false otherwise. * - * Caller holds tomoyo_read_lock(). + * To be able to use wildcard for domain transition, this function sets + * matching entry on success. Since the caller holds tomoyo_read_lock(), + * it is safe to set matching entry. */ -int tomoyo_write_no_rewrite(char *data, const bool is_delete) -{ - return tomoyo_update_no_rewrite_entry(data, is_delete); -} - static bool tomoyo_check_path_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { @@ -521,6 +261,14 @@ static bool tomoyo_check_path_acl(struct tomoyo_request_info *r, return false; } +/** + * tomoyo_check_path_number_acl - Check permission for path number operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { @@ -533,6 +281,14 @@ static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r, &acl->name); } +/** + * tomoyo_check_path2_acl - Check permission for path path operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { @@ -544,8 +300,16 @@ static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r, &acl->name2); } +/** + * tomoyo_check_mkdev_acl - Check permission for path number number number operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r, - const struct tomoyo_acl_info *ptr) + const struct tomoyo_acl_info *ptr) { const struct tomoyo_mkdev_acl *acl = container_of(ptr, typeof(*acl), head); @@ -560,15 +324,31 @@ static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r, &acl->name); } +/** + * tomoyo_same_path_acl - Check for duplicated "struct tomoyo_path_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. 
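+ * Permission bits are kept in ->perm as a bitmask of "1 << operation"
+ * values and are combined by tomoyo_merge_path_acl() when two entries
+ * compare equal here.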
+ */ static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_acl_head(&p1->head, &p2->head) && - tomoyo_same_name_union(&p1->name, &p2->name); + return tomoyo_same_name_union(&p1->name, &p2->name); } +/** + * tomoyo_merge_path_acl - Merge duplicated "struct tomoyo_path_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) @@ -577,19 +357,10 @@ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a, ->perm; u16 perm = *a_perm; const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm; - if (is_delete) { + if (is_delete) perm &= ~b_perm; - if ((perm & TOMOYO_RW_MASK) != TOMOYO_RW_MASK) - perm &= ~(1 << TOMOYO_TYPE_READ_WRITE); - else if (!(perm & (1 << TOMOYO_TYPE_READ_WRITE))) - perm &= ~TOMOYO_RW_MASK; - } else { + else perm |= b_perm; - if ((perm & TOMOYO_RW_MASK) == TOMOYO_RW_MASK) - perm |= (1 << TOMOYO_TYPE_READ_WRITE); - else if (perm & (1 << TOMOYO_TYPE_READ_WRITE)) - perm |= TOMOYO_RW_MASK; - } *a_perm = perm; return !perm; } @@ -597,52 +368,62 @@ static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a, /** * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list. * - * @type: Type of operation. - * @filename: Filename. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @perm: Permission. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_update_path_acl(const u8 type, const char *filename, - struct tomoyo_domain_info * const domain, - const bool is_delete) +static int tomoyo_update_path_acl(const u16 perm, + struct tomoyo_acl_param *param) { struct tomoyo_path_acl e = { .head.type = TOMOYO_TYPE_PATH_ACL, - .perm = 1 << type + .perm = perm }; int error; - if (e.perm == (1 << TOMOYO_TYPE_READ_WRITE)) - e.perm |= TOMOYO_RW_MASK; - if (!tomoyo_parse_name_union(filename, &e.name)) - return -EINVAL; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_path_acl, - tomoyo_merge_path_acl); + if (!tomoyo_parse_name_union(param, &e.name)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_path_acl, + tomoyo_merge_path_acl); tomoyo_put_name_union(&e.name); return error; } +/** + * tomoyo_same_mkdev_acl - Check for duplicated "struct tomoyo_mkdev_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. 
+ */ static bool tomoyo_same_mkdev_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { - const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), - head); - const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), - head); - return tomoyo_same_acl_head(&p1->head, &p2->head) - && tomoyo_same_name_union(&p1->name, &p2->name) - && tomoyo_same_number_union(&p1->mode, &p2->mode) - && tomoyo_same_number_union(&p1->major, &p2->major) - && tomoyo_same_number_union(&p1->minor, &p2->minor); + const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head); + const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head); + return tomoyo_same_name_union(&p1->name, &p2->name) && + tomoyo_same_number_union(&p1->mode, &p2->mode) && + tomoyo_same_number_union(&p1->major, &p2->major) && + tomoyo_same_number_union(&p1->minor, &p2->minor); } +/** + * tomoyo_merge_mkdev_acl - Merge duplicated "struct tomoyo_mkdev_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a, - struct tomoyo_acl_info *b, - const bool is_delete) + struct tomoyo_acl_info *b, + const bool is_delete) { u8 *const a_perm = &container_of(a, struct tomoyo_mkdev_acl, head)->perm; @@ -660,37 +441,30 @@ static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a, /** * tomoyo_update_mkdev_acl - Update "struct tomoyo_mkdev_acl" list. * - * @type: Type of operation. - * @filename: Filename. - * @mode: Create mode. - * @major: Device major number. - * @minor: Device minor number. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @perm: Permission. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_update_mkdev_acl(const u8 type, const char *filename, - char *mode, char *major, char *minor, - struct tomoyo_domain_info * const - domain, const bool is_delete) +static int tomoyo_update_mkdev_acl(const u8 perm, + struct tomoyo_acl_param *param) { struct tomoyo_mkdev_acl e = { .head.type = TOMOYO_TYPE_MKDEV_ACL, - .perm = 1 << type + .perm = perm }; - int error = is_delete ? -ENOENT : -ENOMEM; - if (!tomoyo_parse_name_union(filename, &e.name) || - !tomoyo_parse_number_union(mode, &e.mode) || - !tomoyo_parse_number_union(major, &e.major) || - !tomoyo_parse_number_union(minor, &e.minor)) - goto out; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_mkdev_acl, - tomoyo_merge_mkdev_acl); - out: + int error; + if (!tomoyo_parse_name_union(param, &e.name) || + !tomoyo_parse_number_union(param, &e.mode) || + !tomoyo_parse_number_union(param, &e.major) || + !tomoyo_parse_number_union(param, &e.minor)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_mkdev_acl, + tomoyo_merge_mkdev_acl); tomoyo_put_name_union(&e.name); tomoyo_put_number_union(&e.mode); tomoyo_put_number_union(&e.major); @@ -698,16 +472,32 @@ static int tomoyo_update_mkdev_acl(const u8 type, const char *filename, return error; } +/** + * tomoyo_same_path2_acl - Check for duplicated "struct tomoyo_path2_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. 
+ */ static bool tomoyo_same_path2_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head); const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_acl_head(&p1->head, &p2->head) - && tomoyo_same_name_union(&p1->name1, &p2->name1) - && tomoyo_same_name_union(&p1->name2, &p2->name2); + return tomoyo_same_name_union(&p1->name1, &p2->name1) && + tomoyo_same_name_union(&p1->name2, &p2->name2); } +/** + * tomoyo_merge_path2_acl - Merge duplicated "struct tomoyo_path2_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) @@ -727,33 +517,28 @@ static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a, /** * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list. * - * @type: Type of operation. - * @filename1: First filename. - * @filename2: Second filename. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @perm: Permission. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_update_path2_acl(const u8 type, const char *filename1, - const char *filename2, - struct tomoyo_domain_info * const domain, - const bool is_delete) +static int tomoyo_update_path2_acl(const u8 perm, + struct tomoyo_acl_param *param) { struct tomoyo_path2_acl e = { .head.type = TOMOYO_TYPE_PATH2_ACL, - .perm = 1 << type + .perm = perm }; - int error = is_delete ? -ENOENT : -ENOMEM; - if (!tomoyo_parse_name_union(filename1, &e.name1) || - !tomoyo_parse_name_union(filename2, &e.name2)) - goto out; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_path2_acl, - tomoyo_merge_path2_acl); - out: + int error; + if (!tomoyo_parse_name_union(param, &e.name1) || + !tomoyo_parse_name_union(param, &e.name2)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_path2_acl, + tomoyo_merge_path2_acl); tomoyo_put_name_union(&e.name1); tomoyo_put_name_union(&e.name2); return error; @@ -770,14 +555,13 @@ static int tomoyo_update_path2_acl(const u8 type, const char *filename1, * * Caller holds tomoyo_read_lock(). */ -int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, - const struct tomoyo_path_info *filename) +static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, + const struct tomoyo_path_info *filename) { int error; - next: r->type = tomoyo_p2mac[operation]; - r->mode = tomoyo_get_mode(r->profile, r->type); + r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type); if (r->mode == TOMOYO_CONFIG_DISABLED) return 0; r->param_type = TOMOYO_TYPE_PATH_ACL; @@ -785,30 +569,50 @@ int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation, r->param.path.operation = operation; do { tomoyo_check_acl(r, tomoyo_check_path_acl); - if (!r->granted && operation == TOMOYO_TYPE_READ && - !r->domain->ignore_global_allow_read && - tomoyo_globally_readable_file(filename)) - r->granted = true; error = tomoyo_audit_path_log(r); - /* - * Do not retry for execute request, for alias may have - * changed. 
- */ - } while (error == TOMOYO_RETRY_REQUEST && - operation != TOMOYO_TYPE_EXECUTE); + } while (error == TOMOYO_RETRY_REQUEST); + return error; +} + +/** + * tomoyo_execute_permission - Check permission for execute operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @filename: Filename to check. + * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +int tomoyo_execute_permission(struct tomoyo_request_info *r, + const struct tomoyo_path_info *filename) +{ /* - * Since "allow_truncate" doesn't imply "allow_rewrite" permission, - * we need to check "allow_rewrite" permission if the filename is - * specified by "deny_rewrite" keyword. + * Unlike other permission checks, this check is done regardless of + * profile mode settings in order to check for domain transition + * preference. */ - if (!error && operation == TOMOYO_TYPE_TRUNCATE && - tomoyo_no_rewrite_file(filename)) { - operation = TOMOYO_TYPE_REWRITE; - goto next; - } - return error; + r->type = TOMOYO_MAC_FILE_EXECUTE; + r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type); + r->param_type = TOMOYO_TYPE_PATH_ACL; + r->param.path.filename = filename; + r->param.path.operation = TOMOYO_TYPE_EXECUTE; + tomoyo_check_acl(r, tomoyo_check_path_acl); + r->ee->transition = r->matched_acl && r->matched_acl->cond ? + r->matched_acl->cond->transit : NULL; + if (r->mode != TOMOYO_CONFIG_DISABLED) + return tomoyo_audit_path_log(r); + return 0; } +/** + * tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. + */ static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a, const struct tomoyo_acl_info *b) { @@ -816,11 +620,19 @@ static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a, head); const struct tomoyo_path_number_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_acl_head(&p1->head, &p2->head) - && tomoyo_same_name_union(&p1->name, &p2->name) - && tomoyo_same_number_union(&p1->number, &p2->number); + return tomoyo_same_name_union(&p1->name, &p2->name) && + tomoyo_same_number_union(&p1->number, &p2->number); } +/** + * tomoyo_merge_path_number_acl - Merge duplicated "struct tomoyo_path_number_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a, struct tomoyo_acl_info *b, const bool is_delete) @@ -841,33 +653,26 @@ static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a, /** * tomoyo_update_path_number_acl - Update ioctl/chmod/chown/chgrp ACL. * - * @type: Type of operation. - * @filename: Filename. - * @number: Number. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @perm: Permission. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. 
*/ -static int tomoyo_update_path_number_acl(const u8 type, const char *filename, - char *number, - struct tomoyo_domain_info * const - domain, - const bool is_delete) +static int tomoyo_update_path_number_acl(const u8 perm, + struct tomoyo_acl_param *param) { struct tomoyo_path_number_acl e = { .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL, - .perm = 1 << type + .perm = perm }; - int error = is_delete ? -ENOENT : -ENOMEM; - if (!tomoyo_parse_name_union(filename, &e.name)) - return -EINVAL; - if (!tomoyo_parse_number_union(number, &e.number)) - goto out; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_path_number_acl, - tomoyo_merge_path_number_acl); - out: + int error; + if (!tomoyo_parse_name_union(param, &e.name) || + !tomoyo_parse_number_union(param, &e.number)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_path_number_acl, + tomoyo_merge_path_number_acl); tomoyo_put_name_union(&e.name); tomoyo_put_number_union(&e.number); return error; @@ -886,16 +691,20 @@ int tomoyo_path_number_perm(const u8 type, struct path *path, unsigned long number) { struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { + .path1 = *path, + }; int error = -ENOMEM; struct tomoyo_path_info buf; int idx; if (tomoyo_init_request_info(&r, NULL, tomoyo_pn2mac[type]) - == TOMOYO_CONFIG_DISABLED || !path->mnt || !path->dentry) + == TOMOYO_CONFIG_DISABLED || !path->dentry) return 0; idx = tomoyo_read_lock(); if (!tomoyo_get_realpath(&buf, path)) goto out; + r.obj = &obj; if (type == TOMOYO_TYPE_MKDIR) tomoyo_add_slash(&buf); r.param_type = TOMOYO_TYPE_PATH_NUMBER_ACL; @@ -927,51 +736,33 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, struct path *path, const int flag) { const u8 acc_mode = ACC_MODE(flag); - int error = -ENOMEM; + int error = 0; struct tomoyo_path_info buf; struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { + .path1 = *path, + }; int idx; - if (!path->mnt || - (path->dentry->d_inode && S_ISDIR(path->dentry->d_inode->i_mode))) - return 0; buf.name = NULL; r.mode = TOMOYO_CONFIG_DISABLED; idx = tomoyo_read_lock(); - if (!tomoyo_get_realpath(&buf, path)) - goto out; - error = 0; - /* - * If the filename is specified by "deny_rewrite" keyword, - * we need to check "allow_rewrite" permission when the filename is not - * opened for append mode or the filename is truncated at open time. - */ - if ((acc_mode & MAY_WRITE) && !(flag & O_APPEND) - && tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_REWRITE) + if (acc_mode && + tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN) != TOMOYO_CONFIG_DISABLED) { if (!tomoyo_get_realpath(&buf, path)) { error = -ENOMEM; goto out; } - if (tomoyo_no_rewrite_file(&buf)) - error = tomoyo_path_permission(&r, TOMOYO_TYPE_REWRITE, + r.obj = &obj; + if (acc_mode & MAY_READ) + error = tomoyo_path_permission(&r, TOMOYO_TYPE_READ, + &buf); + if (!error && (acc_mode & MAY_WRITE)) + error = tomoyo_path_permission(&r, (flag & O_APPEND) ? 
+ TOMOYO_TYPE_APPEND : + TOMOYO_TYPE_WRITE, &buf); - } - if (!error && acc_mode && - tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN) - != TOMOYO_CONFIG_DISABLED) { - u8 operation; - if (!buf.name && !tomoyo_get_realpath(&buf, path)) { - error = -ENOMEM; - goto out; - } - if (acc_mode == (MAY_READ | MAY_WRITE)) - operation = TOMOYO_TYPE_READ_WRITE; - else if (acc_mode == MAY_READ) - operation = TOMOYO_TYPE_READ; - else - operation = TOMOYO_TYPE_WRITE; - error = tomoyo_path_permission(&r, operation, &buf); } out: kfree(buf.name); @@ -982,47 +773,57 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain, } /** - * tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "rewrite", "chroot" and "unmount". + * tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "append", "chroot" and "unmount". * * @operation: Type of operation. * @path: Pointer to "struct path". + * @target: Symlink's target if @operation is TOMOYO_TYPE_SYMLINK, + * NULL otherwise. * * Returns 0 on success, negative value otherwise. */ -int tomoyo_path_perm(const u8 operation, struct path *path) +int tomoyo_path_perm(const u8 operation, struct path *path, const char *target) { - int error = -ENOMEM; - struct tomoyo_path_info buf; struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { + .path1 = *path, + }; + int error; + struct tomoyo_path_info buf; + bool is_enforce; + struct tomoyo_path_info symlink_target; int idx; - if (!path->mnt) - return 0; if (tomoyo_init_request_info(&r, NULL, tomoyo_p2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; + is_enforce = (r.mode == TOMOYO_CONFIG_ENFORCING); + error = -ENOMEM; buf.name = NULL; idx = tomoyo_read_lock(); if (!tomoyo_get_realpath(&buf, path)) goto out; + r.obj = &obj; switch (operation) { - case TOMOYO_TYPE_REWRITE: - if (!tomoyo_no_rewrite_file(&buf)) { - error = 0; - goto out; - } - break; case TOMOYO_TYPE_RMDIR: case TOMOYO_TYPE_CHROOT: - case TOMOYO_TYPE_UMOUNT: tomoyo_add_slash(&buf); break; + case TOMOYO_TYPE_SYMLINK: + symlink_target.name = tomoyo_encode(target); + if (!symlink_target.name) + goto out; + tomoyo_fill_path_info(&symlink_target); + obj.symlink_target = &symlink_target; + break; } error = tomoyo_path_permission(&r, operation, &buf); + if (operation == TOMOYO_TYPE_SYMLINK) + kfree(symlink_target.name); out: kfree(buf.name); tomoyo_read_unlock(idx); - if (r.mode != TOMOYO_CONFIG_ENFORCING) + if (!is_enforce) error = 0; return error; } @@ -1038,20 +839,23 @@ int tomoyo_path_perm(const u8 operation, struct path *path) * Returns 0 on success, negative value otherwise. 
*/ int tomoyo_mkdev_perm(const u8 operation, struct path *path, - const unsigned int mode, unsigned int dev) + const unsigned int mode, unsigned int dev) { struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { + .path1 = *path, + }; int error = -ENOMEM; struct tomoyo_path_info buf; int idx; - if (!path->mnt || - tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation]) + if (tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; idx = tomoyo_read_lock(); error = -ENOMEM; if (tomoyo_get_realpath(&buf, path)) { + r.obj = &obj; dev = new_decode_dev(dev); r.param_type = TOMOYO_TYPE_MKDEV_ACL; r.param.mkdev.filename = &buf; @@ -1085,10 +889,13 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1, struct tomoyo_path_info buf1; struct tomoyo_path_info buf2; struct tomoyo_request_info r; + struct tomoyo_obj_info obj = { + .path1 = *path1, + .path2 = *path2, + }; int idx; - if (!path1->mnt || !path2->mnt || - tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation]) + if (tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation]) == TOMOYO_CONFIG_DISABLED) return 0; buf1.name = NULL; @@ -1100,16 +907,17 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1, switch (operation) { struct dentry *dentry; case TOMOYO_TYPE_RENAME: - case TOMOYO_TYPE_LINK: + case TOMOYO_TYPE_LINK: dentry = path1->dentry; - if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode)) - break; - /* fall through */ - case TOMOYO_TYPE_PIVOT_ROOT: - tomoyo_add_slash(&buf1); - tomoyo_add_slash(&buf2); + if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode)) + break; + /* fall through */ + case TOMOYO_TYPE_PIVOT_ROOT: + tomoyo_add_slash(&buf1); + tomoyo_add_slash(&buf2); break; - } + } + r.obj = &obj; r.param_type = TOMOYO_TYPE_PATH2_ACL; r.param.path2.operation = operation; r.param.path2.filename1 = &buf1; @@ -1128,53 +936,91 @@ int tomoyo_path2_perm(const u8 operation, struct path *path1, } /** + * tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b, false otherwise. + */ +static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a, + const struct tomoyo_acl_info *b) +{ + const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head); + const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head); + return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) && + tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) && + tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) && + tomoyo_same_number_union(&p1->flags, &p2->flags); +} + +/** + * tomoyo_update_mount_acl - Write "struct tomoyo_mount_acl" list. + * + * @param: Pointer to "struct tomoyo_acl_param". + * + * Returns 0 on success, negative value otherwise. + * + * Caller holds tomoyo_read_lock(). 
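+ *
+ * Expects @param to supply four tokens in order: device name, mount point,
+ * filesystem type and mount flags.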
+ */ +static int tomoyo_update_mount_acl(struct tomoyo_acl_param *param) +{ + struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL }; + int error; + if (!tomoyo_parse_name_union(param, &e.dev_name) || + !tomoyo_parse_name_union(param, &e.dir_name) || + !tomoyo_parse_name_union(param, &e.fs_type) || + !tomoyo_parse_number_union(param, &e.flags)) + error = -EINVAL; + else + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_mount_acl, NULL); + tomoyo_put_name_union(&e.dev_name); + tomoyo_put_name_union(&e.dir_name); + tomoyo_put_name_union(&e.fs_type); + tomoyo_put_number_union(&e.flags); + return error; +} + +/** * tomoyo_write_file - Update file related list. * - * @data: String to parse. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. + * @param: Pointer to "struct tomoyo_acl_param". * * Returns 0 on success, negative value otherwise. * * Caller holds tomoyo_read_lock(). */ -int tomoyo_write_file(char *data, struct tomoyo_domain_info *domain, - const bool is_delete) +int tomoyo_write_file(struct tomoyo_acl_param *param) { - char *w[5]; + u16 perm = 0; u8 type; - if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[1][0]) - return -EINVAL; - if (strncmp(w[0], "allow_", 6)) - goto out; - w[0] += 6; - for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++) { - if (strcmp(w[0], tomoyo_path_keyword[type])) - continue; - return tomoyo_update_path_acl(type, w[1], domain, is_delete); - } - if (!w[2][0]) - goto out; - for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++) { - if (strcmp(w[0], tomoyo_path2_keyword[type])) - continue; - return tomoyo_update_path2_acl(type, w[1], w[2], domain, - is_delete); - } - for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++) { - if (strcmp(w[0], tomoyo_path_number_keyword[type])) - continue; - return tomoyo_update_path_number_acl(type, w[1], w[2], domain, - is_delete); - } - if (!w[3][0] || !w[4][0]) - goto out; - for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++) { - if (strcmp(w[0], tomoyo_mkdev_keyword[type])) - continue; - return tomoyo_update_mkdev_acl(type, w[1], w[2], w[3], - w[4], domain, is_delete); - } - out: + const char *operation = tomoyo_read_token(param); + for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++) + if (tomoyo_permstr(operation, tomoyo_path_keyword[type])) + perm |= 1 << type; + if (perm) + return tomoyo_update_path_acl(perm, param); + for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++) + if (tomoyo_permstr(operation, + tomoyo_mac_keywords[tomoyo_pp2mac[type]])) + perm |= 1 << type; + if (perm) + return tomoyo_update_path2_acl(perm, param); + for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++) + if (tomoyo_permstr(operation, + tomoyo_mac_keywords[tomoyo_pn2mac[type]])) + perm |= 1 << type; + if (perm) + return tomoyo_update_path_number_acl(perm, param); + for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++) + if (tomoyo_permstr(operation, + tomoyo_mac_keywords[tomoyo_pnnn2mac[type]])) + perm |= 1 << type; + if (perm) + return tomoyo_update_mkdev_acl(perm, param); + if (tomoyo_permstr(operation, + tomoyo_mac_keywords[TOMOYO_MAC_FILE_MOUNT])) + return tomoyo_update_mount_acl(param); return -EINVAL; } diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c index a877e4c3b10..986a6a75686 100644 --- a/security/tomoyo/gc.c +++ b/security/tomoyo/gc.c @@ -1,59 +1,109 @@ /* * security/tomoyo/gc.c * - * Implementation of the Domain-Based Mandatory Access Control. 
- * - * Copyright (C) 2005-2010 NTT DATA CORPORATION - * + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include "common.h" #include <linux/kthread.h> #include <linux/slab.h> -struct tomoyo_gc { - struct list_head list; - int type; - struct list_head *element; -}; -static LIST_HEAD(tomoyo_gc_queue); -static DEFINE_MUTEX(tomoyo_gc_mutex); - -/* Caller holds tomoyo_policy_lock mutex. */ -static bool tomoyo_add_to_gc(const int type, struct list_head *element) +/** + * tomoyo_memory_free - Free memory for elements. + * + * @ptr: Pointer to allocated memory. + * + * Returns nothing. + * + * Caller holds tomoyo_policy_lock mutex. + */ +static inline void tomoyo_memory_free(void *ptr) { - struct tomoyo_gc *entry = kzalloc(sizeof(*entry), GFP_ATOMIC); - if (!entry) - return false; - entry->type = type; - entry->element = element; - list_add(&entry->list, &tomoyo_gc_queue); - list_del_rcu(element); - return true; + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr); + kfree(ptr); } -static void tomoyo_del_allow_read(struct list_head *element) -{ - struct tomoyo_readable_file *ptr = - container_of(element, typeof(*ptr), head.list); - tomoyo_put_name(ptr->filename); -} +/* The list for "struct tomoyo_io_buffer". */ +static LIST_HEAD(tomoyo_io_buffer_list); +/* Lock for protecting tomoyo_io_buffer_list. */ +static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock); -static void tomoyo_del_file_pattern(struct list_head *element) +/** + * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not. + * + * @element: Pointer to "struct list_head". + * + * Returns true if @element is used by /sys/kernel/security/tomoyo/ users, + * false otherwise. + */ +static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element) { - struct tomoyo_no_pattern *ptr = - container_of(element, typeof(*ptr), head.list); - tomoyo_put_name(ptr->pattern); + struct tomoyo_io_buffer *head; + bool in_use = false; + + spin_lock(&tomoyo_io_buffer_list_lock); + list_for_each_entry(head, &tomoyo_io_buffer_list, list) { + head->users++; + spin_unlock(&tomoyo_io_buffer_list_lock); + mutex_lock(&head->io_sem); + if (head->r.domain == element || head->r.group == element || + head->r.acl == element || &head->w.domain->list == element) + in_use = true; + mutex_unlock(&head->io_sem); + spin_lock(&tomoyo_io_buffer_list_lock); + head->users--; + if (in_use) + break; + } + spin_unlock(&tomoyo_io_buffer_list_lock); + return in_use; } -static void tomoyo_del_no_rewrite(struct list_head *element) +/** + * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not. + * + * @string: String to check. + * + * Returns true if @string is used by /sys/kernel/security/tomoyo/ users, + * false otherwise. 
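+ *
+ * A string is considered in use while any pending read cursor (r.w[]) of an
+ * open interface points into it.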
+ */ +static bool tomoyo_name_used_by_io_buffer(const char *string) { - struct tomoyo_no_rewrite *ptr = - container_of(element, typeof(*ptr), head.list); - tomoyo_put_name(ptr->pattern); + struct tomoyo_io_buffer *head; + const size_t size = strlen(string) + 1; + bool in_use = false; + + spin_lock(&tomoyo_io_buffer_list_lock); + list_for_each_entry(head, &tomoyo_io_buffer_list, list) { + int i; + head->users++; + spin_unlock(&tomoyo_io_buffer_list_lock); + mutex_lock(&head->io_sem); + for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) { + const char *w = head->r.w[i]; + if (w < string || w > string + size) + continue; + in_use = true; + break; + } + mutex_unlock(&head->io_sem); + spin_lock(&tomoyo_io_buffer_list_lock); + head->users--; + if (in_use) + break; + } + spin_unlock(&tomoyo_io_buffer_list_lock); + return in_use; } -static void tomoyo_del_transition_control(struct list_head *element) +/** + * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ +static inline void tomoyo_del_transition_control(struct list_head *element) { struct tomoyo_transition_control *ptr = container_of(element, typeof(*ptr), head.list); @@ -61,7 +111,14 @@ static void tomoyo_del_transition_control(struct list_head *element) tomoyo_put_name(ptr->program); } -static void tomoyo_del_aggregator(struct list_head *element) +/** + * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ +static inline void tomoyo_del_aggregator(struct list_head *element) { struct tomoyo_aggregator *ptr = container_of(element, typeof(*ptr), head.list); @@ -69,17 +126,32 @@ static void tomoyo_del_aggregator(struct list_head *element) tomoyo_put_name(ptr->aggregated_name); } -static void tomoyo_del_manager(struct list_head *element) +/** + * tomoyo_del_manager - Delete members in "struct tomoyo_manager". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ +static inline void tomoyo_del_manager(struct list_head *element) { struct tomoyo_manager *ptr = container_of(element, typeof(*ptr), head.list); tomoyo_put_name(ptr->manager); } +/** + * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. 
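+ *
+ * Also drops the reference to the "struct tomoyo_condition" attached to
+ * @element, if any.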
+ */ static void tomoyo_del_acl(struct list_head *element) { struct tomoyo_acl_info *acl = container_of(element, typeof(*acl), list); + tomoyo_put_condition(acl->cond); switch (acl->type) { case TOMOYO_TYPE_PATH_ACL: { @@ -124,231 +196,460 @@ static void tomoyo_del_acl(struct list_head *element) tomoyo_put_number_union(&entry->flags); } break; + case TOMOYO_TYPE_ENV_ACL: + { + struct tomoyo_env_acl *entry = + container_of(acl, typeof(*entry), head); + + tomoyo_put_name(entry->env); + } + break; + case TOMOYO_TYPE_INET_ACL: + { + struct tomoyo_inet_acl *entry = + container_of(acl, typeof(*entry), head); + + tomoyo_put_group(entry->address.group); + tomoyo_put_number_union(&entry->port); + } + break; + case TOMOYO_TYPE_UNIX_ACL: + { + struct tomoyo_unix_acl *entry = + container_of(acl, typeof(*entry), head); + + tomoyo_put_name_union(&entry->name); + } + break; + case TOMOYO_TYPE_MANUAL_TASK_ACL: + { + struct tomoyo_task_acl *entry = + container_of(acl, typeof(*entry), head); + tomoyo_put_name(entry->domainname); + } + break; } } -static bool tomoyo_del_domain(struct list_head *element) +/** + * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + * + * Caller holds tomoyo_policy_lock mutex. + */ +static inline void tomoyo_del_domain(struct list_head *element) { struct tomoyo_domain_info *domain = container_of(element, typeof(*domain), list); struct tomoyo_acl_info *acl; struct tomoyo_acl_info *tmp; /* - * Since we don't protect whole execve() operation using SRCU, - * we need to recheck domain->users at this point. - * - * (1) Reader starts SRCU section upon execve(). - * (2) Reader traverses tomoyo_domain_list and finds this domain. - * (3) Writer marks this domain as deleted. - * (4) Garbage collector removes this domain from tomoyo_domain_list - * because this domain is marked as deleted and used by nobody. - * (5) Reader saves reference to this domain into - * "struct linux_binprm"->cred->security . - * (6) Reader finishes SRCU section, although execve() operation has - * not finished yet. - * (7) Garbage collector waits for SRCU synchronization. - * (8) Garbage collector kfree() this domain because this domain is - * used by nobody. - * (9) Reader finishes execve() operation and restores this domain from - * "struct linux_binprm"->cred->security. - * - * By updating domain->users at (5), we can solve this race problem - * by rechecking domain->users at (8). + * Since this domain is referenced from neither + * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete + * elements without checking for is_deleted flag. */ - if (atomic_read(&domain->users)) - return false; list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) { tomoyo_del_acl(&acl->list); tomoyo_memory_free(acl); } tomoyo_put_name(domain->domainname); - return true; } +/** + * tomoyo_del_condition - Delete members in "struct tomoyo_condition". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. 
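+ *
+ * The numbers/names/argv/envp arrays stored after the structure are released
+ * here as well.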
+ */ +void tomoyo_del_condition(struct list_head *element) +{ + struct tomoyo_condition *cond = container_of(element, typeof(*cond), + head.list); + const u16 condc = cond->condc; + const u16 numbers_count = cond->numbers_count; + const u16 names_count = cond->names_count; + const u16 argc = cond->argc; + const u16 envc = cond->envc; + unsigned int i; + const struct tomoyo_condition_element *condp + = (const struct tomoyo_condition_element *) (cond + 1); + struct tomoyo_number_union *numbers_p + = (struct tomoyo_number_union *) (condp + condc); + struct tomoyo_name_union *names_p + = (struct tomoyo_name_union *) (numbers_p + numbers_count); + const struct tomoyo_argv *argv + = (const struct tomoyo_argv *) (names_p + names_count); + const struct tomoyo_envp *envp + = (const struct tomoyo_envp *) (argv + argc); + for (i = 0; i < numbers_count; i++) + tomoyo_put_number_union(numbers_p++); + for (i = 0; i < names_count; i++) + tomoyo_put_name_union(names_p++); + for (i = 0; i < argc; argv++, i++) + tomoyo_put_name(argv->value); + for (i = 0; i < envc; envp++, i++) { + tomoyo_put_name(envp->name); + tomoyo_put_name(envp->value); + } +} -static void tomoyo_del_name(struct list_head *element) +/** + * tomoyo_del_name - Delete members in "struct tomoyo_name". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ +static inline void tomoyo_del_name(struct list_head *element) { - const struct tomoyo_name *ptr = - container_of(element, typeof(*ptr), list); + /* Nothing to do. */ } -static void tomoyo_del_path_group(struct list_head *element) +/** + * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ +static inline void tomoyo_del_path_group(struct list_head *element) { struct tomoyo_path_group *member = container_of(element, typeof(*member), head.list); tomoyo_put_name(member->member_name); } -static void tomoyo_del_group(struct list_head *element) +/** + * tomoyo_del_group - Delete "struct tomoyo_group". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ +static inline void tomoyo_del_group(struct list_head *element) { struct tomoyo_group *group = - container_of(element, typeof(*group), list); + container_of(element, typeof(*group), head.list); tomoyo_put_name(group->group_name); } -static void tomoyo_del_number_group(struct list_head *element) +/** + * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ +static inline void tomoyo_del_address_group(struct list_head *element) { - struct tomoyo_number_group *member = - container_of(element, typeof(*member), head.list); + /* Nothing to do. */ } -static bool tomoyo_collect_member(struct list_head *member_list, int id) +/** + * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group". + * + * @element: Pointer to "struct list_head". + * + * Returns nothing. + */ +static inline void tomoyo_del_number_group(struct list_head *element) +{ + /* Nothing to do. */ +} + +/** + * tomoyo_try_to_gc - Try to kfree() an entry. + * + * @type: One of values in "enum tomoyo_policy_id". + * @element: Pointer to "struct list_head". + * + * Returns nothing. + * + * Caller holds tomoyo_policy_lock mutex. 
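+ *
+ * Temporarily releases tomoyo_policy_lock in order to wait for an SRCU grace
+ * period before freeing, and re-acquires it before returning.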
+ */ +static void tomoyo_try_to_gc(const enum tomoyo_policy_id type, + struct list_head *element) +{ + /* + * __list_del_entry() guarantees that the list element became no longer + * reachable from the list which the element was originally on (e.g. + * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the + * list element became no longer referenced by syscall users. + */ + __list_del_entry(element); + mutex_unlock(&tomoyo_policy_lock); + synchronize_srcu(&tomoyo_ss); + /* + * However, there are two users which may still be using the list + * element. We need to defer until both users forget this element. + * + * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl} + * and "struct tomoyo_io_buffer"->w.domain forget this element. + */ + if (tomoyo_struct_used_by_io_buffer(element)) + goto reinject; + switch (type) { + case TOMOYO_ID_TRANSITION_CONTROL: + tomoyo_del_transition_control(element); + break; + case TOMOYO_ID_MANAGER: + tomoyo_del_manager(element); + break; + case TOMOYO_ID_AGGREGATOR: + tomoyo_del_aggregator(element); + break; + case TOMOYO_ID_GROUP: + tomoyo_del_group(element); + break; + case TOMOYO_ID_PATH_GROUP: + tomoyo_del_path_group(element); + break; + case TOMOYO_ID_ADDRESS_GROUP: + tomoyo_del_address_group(element); + break; + case TOMOYO_ID_NUMBER_GROUP: + tomoyo_del_number_group(element); + break; + case TOMOYO_ID_CONDITION: + tomoyo_del_condition(element); + break; + case TOMOYO_ID_NAME: + /* + * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[] + * forget this element. + */ + if (tomoyo_name_used_by_io_buffer + (container_of(element, typeof(struct tomoyo_name), + head.list)->entry.name)) + goto reinject; + tomoyo_del_name(element); + break; + case TOMOYO_ID_ACL: + tomoyo_del_acl(element); + break; + case TOMOYO_ID_DOMAIN: + /* + * Don't kfree() until all "struct cred"->security forget this + * element. + */ + if (atomic_read(&container_of + (element, typeof(struct tomoyo_domain_info), + list)->users)) + goto reinject; + break; + case TOMOYO_MAX_POLICY: + break; + } + mutex_lock(&tomoyo_policy_lock); + if (type == TOMOYO_ID_DOMAIN) + tomoyo_del_domain(element); + tomoyo_memory_free(element); + return; +reinject: + /* + * We can safely reinject this element here bacause + * (1) Appending list elements and removing list elements are protected + * by tomoyo_policy_lock mutex. + * (2) Only this function removes list elements and this function is + * exclusively executed by tomoyo_gc_mutex mutex. + * are true. + */ + mutex_lock(&tomoyo_policy_lock); + list_add_rcu(element, element->prev); +} + +/** + * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head". + * + * @id: One of values in "enum tomoyo_policy_id". + * @member_list: Pointer to "struct list_head". + * + * Returns nothing. + */ +static void tomoyo_collect_member(const enum tomoyo_policy_id id, + struct list_head *member_list) { struct tomoyo_acl_head *member; - list_for_each_entry(member, member_list, list) { + struct tomoyo_acl_head *tmp; + list_for_each_entry_safe(member, tmp, member_list, list) { if (!member->is_deleted) continue; - if (!tomoyo_add_to_gc(id, &member->list)) - return false; + member->is_deleted = TOMOYO_GC_IN_PROGRESS; + tomoyo_try_to_gc(id, &member->list); } - return true; } -static bool tomoyo_collect_acl(struct tomoyo_domain_info *domain) +/** + * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info". + * + * @list: Pointer to "struct list_head". + * + * Returns nothing. 
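+ *
+ * Caller holds tomoyo_policy_lock mutex.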
+ */ +static void tomoyo_collect_acl(struct list_head *list) { struct tomoyo_acl_info *acl; - list_for_each_entry(acl, &domain->acl_info_list, list) { + struct tomoyo_acl_info *tmp; + list_for_each_entry_safe(acl, tmp, list, list) { if (!acl->is_deleted) continue; - if (!tomoyo_add_to_gc(TOMOYO_ID_ACL, &acl->list)) - return false; + acl->is_deleted = TOMOYO_GC_IN_PROGRESS; + tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list); } - return true; } +/** + * tomoyo_collect_entry - Try to kfree() deleted elements. + * + * Returns nothing. + */ static void tomoyo_collect_entry(void) { int i; - if (mutex_lock_interruptible(&tomoyo_policy_lock)) - return; - for (i = 0; i < TOMOYO_MAX_POLICY; i++) { - if (!tomoyo_collect_member(&tomoyo_policy_list[i], i)) - goto unlock; - } + enum tomoyo_policy_id id; + struct tomoyo_policy_namespace *ns; + mutex_lock(&tomoyo_policy_lock); { struct tomoyo_domain_info *domain; - list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) { - if (!tomoyo_collect_acl(domain)) - goto unlock; + struct tomoyo_domain_info *tmp; + list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list, + list) { + tomoyo_collect_acl(&domain->acl_info_list); if (!domain->is_deleted || atomic_read(&domain->users)) continue; - /* - * Nobody is referring this domain. But somebody may - * refer this domain after successful execve(). - * We recheck domain->users after SRCU synchronization. - */ - if (!tomoyo_add_to_gc(TOMOYO_ID_DOMAIN, &domain->list)) - goto unlock; + tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list); } } - for (i = 0; i < TOMOYO_MAX_HASH; i++) { - struct tomoyo_name *ptr; - list_for_each_entry_rcu(ptr, &tomoyo_name_list[i], list) { - if (atomic_read(&ptr->users)) + list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { + for (id = 0; id < TOMOYO_MAX_POLICY; id++) + tomoyo_collect_member(id, &ns->policy_list[id]); + for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++) + tomoyo_collect_acl(&ns->acl_group[i]); + } + { + struct tomoyo_shared_acl_head *ptr; + struct tomoyo_shared_acl_head *tmp; + list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list, + list) { + if (atomic_read(&ptr->users) > 0) continue; - if (!tomoyo_add_to_gc(TOMOYO_ID_NAME, &ptr->list)) - goto unlock; + atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS); + tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list); } } - for (i = 0; i < TOMOYO_MAX_GROUP; i++) { - struct list_head *list = &tomoyo_group_list[i]; - int id; - struct tomoyo_group *group; - switch (i) { - case 0: - id = TOMOYO_ID_PATH_GROUP; - break; - default: - id = TOMOYO_ID_NUMBER_GROUP; - break; + list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) { + for (i = 0; i < TOMOYO_MAX_GROUP; i++) { + struct list_head *list = &ns->group_list[i]; + struct tomoyo_group *group; + struct tomoyo_group *tmp; + switch (i) { + case 0: + id = TOMOYO_ID_PATH_GROUP; + break; + case 1: + id = TOMOYO_ID_NUMBER_GROUP; + break; + default: + id = TOMOYO_ID_ADDRESS_GROUP; + break; + } + list_for_each_entry_safe(group, tmp, list, head.list) { + tomoyo_collect_member(id, &group->member_list); + if (!list_empty(&group->member_list) || + atomic_read(&group->head.users) > 0) + continue; + atomic_set(&group->head.users, + TOMOYO_GC_IN_PROGRESS); + tomoyo_try_to_gc(TOMOYO_ID_GROUP, + &group->head.list); + } } - list_for_each_entry(group, list, list) { - if (!tomoyo_collect_member(&group->member_list, id)) - goto unlock; - if (!list_empty(&group->member_list) || - atomic_read(&group->users)) + } + for (i = 0; i < TOMOYO_MAX_HASH; i++) { + struct list_head *list = 
&tomoyo_name_list[i]; + struct tomoyo_shared_acl_head *ptr; + struct tomoyo_shared_acl_head *tmp; + list_for_each_entry_safe(ptr, tmp, list, list) { + if (atomic_read(&ptr->users) > 0) continue; - if (!tomoyo_add_to_gc(TOMOYO_ID_GROUP, &group->list)) - goto unlock; + atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS); + tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list); } } - unlock: mutex_unlock(&tomoyo_policy_lock); } -static void tomoyo_kfree_entry(void) +/** + * tomoyo_gc_thread - Garbage collector thread function. + * + * @unused: Unused. + * + * Returns 0. + */ +static int tomoyo_gc_thread(void *unused) { - struct tomoyo_gc *p; - struct tomoyo_gc *tmp; + /* Garbage collector thread is exclusive. */ + static DEFINE_MUTEX(tomoyo_gc_mutex); + if (!mutex_trylock(&tomoyo_gc_mutex)) + goto out; + tomoyo_collect_entry(); + { + struct tomoyo_io_buffer *head; + struct tomoyo_io_buffer *tmp; - list_for_each_entry_safe(p, tmp, &tomoyo_gc_queue, list) { - struct list_head *element = p->element; - switch (p->type) { - case TOMOYO_ID_TRANSITION_CONTROL: - tomoyo_del_transition_control(element); - break; - case TOMOYO_ID_AGGREGATOR: - tomoyo_del_aggregator(element); - break; - case TOMOYO_ID_GLOBALLY_READABLE: - tomoyo_del_allow_read(element); - break; - case TOMOYO_ID_PATTERN: - tomoyo_del_file_pattern(element); - break; - case TOMOYO_ID_NO_REWRITE: - tomoyo_del_no_rewrite(element); - break; - case TOMOYO_ID_MANAGER: - tomoyo_del_manager(element); - break; - case TOMOYO_ID_NAME: - tomoyo_del_name(element); - break; - case TOMOYO_ID_ACL: - tomoyo_del_acl(element); - break; - case TOMOYO_ID_DOMAIN: - if (!tomoyo_del_domain(element)) + spin_lock(&tomoyo_io_buffer_list_lock); + list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list, + list) { + if (head->users) continue; - break; - case TOMOYO_ID_PATH_GROUP: - tomoyo_del_path_group(element); - break; - case TOMOYO_ID_GROUP: - tomoyo_del_group(element); - break; - case TOMOYO_ID_NUMBER_GROUP: - tomoyo_del_number_group(element); - break; + list_del(&head->list); + kfree(head->read_buf); + kfree(head->write_buf); + kfree(head); } - tomoyo_memory_free(element); - list_del(&p->list); - kfree(p); + spin_unlock(&tomoyo_io_buffer_list_lock); } + mutex_unlock(&tomoyo_gc_mutex); +out: + /* This acts as do_exit(0). */ + return 0; } -static int tomoyo_gc_thread(void *unused) +/** + * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users. + * + * @head: Pointer to "struct tomoyo_io_buffer". + * @is_register: True if register, false if unregister. + * + * Returns nothing. 
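+ *
+ * Unregistering an interface that was opened for writing starts the garbage
+ * collector thread.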
+ */ +void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register) { - daemonize("GC for TOMOYO"); - if (mutex_trylock(&tomoyo_gc_mutex)) { - int i; - for (i = 0; i < 10; i++) { - tomoyo_collect_entry(); - if (list_empty(&tomoyo_gc_queue)) - break; - synchronize_srcu(&tomoyo_ss); - tomoyo_kfree_entry(); + bool is_write = false; + + spin_lock(&tomoyo_io_buffer_list_lock); + if (is_register) { + head->users = 1; + list_add(&head->list, &tomoyo_io_buffer_list); + } else { + is_write = head->write_buf != NULL; + if (!--head->users) { + list_del(&head->list); + kfree(head->read_buf); + kfree(head->write_buf); + kfree(head); } - mutex_unlock(&tomoyo_gc_mutex); } - do_exit(0); -} - -void tomoyo_run_gc(void) -{ - struct task_struct *task = kthread_create(tomoyo_gc_thread, NULL, - "GC for TOMOYO"); - if (!IS_ERR(task)) - wake_up_process(task); + spin_unlock(&tomoyo_io_buffer_list_lock); + if (is_write) { + struct task_struct *task = kthread_create(tomoyo_gc_thread, + NULL, + "GC for TOMOYO"); + if (!IS_ERR(task)) + wake_up_process(task); + } } diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c index e94352ce723..50092534ec5 100644 --- a/security/tomoyo/group.c +++ b/security/tomoyo/group.c @@ -1,21 +1,37 @@ /* * security/tomoyo/group.c * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/slab.h> #include "common.h" +/** + * tomoyo_same_path_group - Check for duplicated "struct tomoyo_path_group" entry. + * + * @a: Pointer to "struct tomoyo_acl_head". + * @b: Pointer to "struct tomoyo_acl_head". + * + * Returns true if @a == @b, false otherwise. + */ static bool tomoyo_same_path_group(const struct tomoyo_acl_head *a, - const struct tomoyo_acl_head *b) + const struct tomoyo_acl_head *b) { return container_of(a, struct tomoyo_path_group, head)->member_name == container_of(b, struct tomoyo_path_group, head)->member_name; } +/** + * tomoyo_same_number_group - Check for duplicated "struct tomoyo_number_group" entry. + * + * @a: Pointer to "struct tomoyo_acl_head". + * @b: Pointer to "struct tomoyo_acl_head". + * + * Returns true if @a == @b, false otherwise. + */ static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a, - const struct tomoyo_acl_head *b) + const struct tomoyo_acl_head *b) { return !memcmp(&container_of(a, struct tomoyo_number_group, head) ->number, @@ -26,50 +42,70 @@ static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a, } /** - * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group" list. + * tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry. * - * @data: String to parse. - * @is_delete: True if it is a delete request. - * @type: Type of this group. + * @a: Pointer to "struct tomoyo_acl_head". + * @b: Pointer to "struct tomoyo_acl_head". + * + * Returns true if @a == @b, false otherwise. + */ +static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a, + const struct tomoyo_acl_head *b) +{ + const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1), + head); + const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2), + head); + + return tomoyo_same_ipaddr_union(&p1->address, &p2->address); +} + +/** + * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list. + * + * @param: Pointer to "struct tomoyo_acl_param". + * @type: Type of this group. * * Returns 0 on success, negative value otherwise. 
*/ -int tomoyo_write_group(char *data, const bool is_delete, const u8 type) +int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type) { - struct tomoyo_group *group; - struct list_head *member; - char *w[2]; + struct tomoyo_group *group = tomoyo_get_group(param, type); int error = -EINVAL; - if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[1][0]) - return -EINVAL; - group = tomoyo_get_group(w[0], type); if (!group) return -ENOMEM; - member = &group->member_list; + param->list = &group->member_list; if (type == TOMOYO_PATH_GROUP) { struct tomoyo_path_group e = { }; - e.member_name = tomoyo_get_name(w[1]); + e.member_name = tomoyo_get_name(tomoyo_read_token(param)); if (!e.member_name) { error = -ENOMEM; goto out; } - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - member, tomoyo_same_path_group); + error = tomoyo_update_policy(&e.head, sizeof(e), param, + tomoyo_same_path_group); tomoyo_put_name(e.member_name); } else if (type == TOMOYO_NUMBER_GROUP) { struct tomoyo_number_group e = { }; - if (w[1][0] == '@' - || !tomoyo_parse_number_union(w[1], &e.number) - || e.number.values[0] > e.number.values[1]) + if (param->data[0] == '@' || + !tomoyo_parse_number_union(param, &e.number)) goto out; - error = tomoyo_update_policy(&e.head, sizeof(e), is_delete, - member, tomoyo_same_number_group); + error = tomoyo_update_policy(&e.head, sizeof(e), param, + tomoyo_same_number_group); /* * tomoyo_put_number_union() is not needed because - * w[1][0] != '@'. + * param->data[0] != '@'. */ + } else { + struct tomoyo_address_group e = { }; + + if (param->data[0] == '@' || + !tomoyo_parse_ipaddr_union(param, &e.address)) + goto out; + error = tomoyo_update_policy(&e.head, sizeof(e), param, + tomoyo_same_address_group); } - out: +out: tomoyo_put_group(group); return error; } @@ -77,8 +113,8 @@ int tomoyo_write_group(char *data, const bool is_delete, const u8 type) /** * tomoyo_path_matches_group - Check whether the given pathname matches members of the given pathname group. * - * @pathname: The name of pathname. - * @group: Pointer to "struct tomoyo_path_group". + * @pathname: The name of pathname. + * @group: Pointer to "struct tomoyo_path_group". * * Returns matched member's pathname if @pathname matches pathnames in @group, * NULL otherwise. @@ -128,3 +164,35 @@ bool tomoyo_number_matches_group(const unsigned long min, } return matched; } + +/** + * tomoyo_address_matches_group - Check whether the given address matches members of the given address group. + * + * @is_ipv6: True if @address is an IPv6 address. + * @address: An IPv4 or IPv6 address. + * @group: Pointer to "struct tomoyo_address_group". + * + * Returns true if @address matches addresses in @group group, false otherwise. + * + * Caller holds tomoyo_read_lock(). + */ +bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address, + const struct tomoyo_group *group) +{ + struct tomoyo_address_group *member; + bool matched = false; + const u8 size = is_ipv6 ? 
16 : 4; + + list_for_each_entry_rcu(member, &group->member_list, head.list) { + if (member->head.is_deleted) + continue; + if (member->address.is_ipv6 != is_ipv6) + continue; + if (memcmp(&member->address.ip[0], address, size) > 0 || + memcmp(address, &member->address.ip[1], size) > 0) + continue; + matched = true; + break; + } + return matched; +} diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c index bbada7ca1b9..078fac0bb4c 100644 --- a/security/tomoyo/load_policy.c +++ b/security/tomoyo/load_policy.c @@ -1,15 +1,32 @@ /* * security/tomoyo/load_policy.c * - * Policy loader launcher for TOMOYO. - * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include "common.h" -/* path to policy loader */ -static const char *tomoyo_loader = "/sbin/tomoyo-init"; +#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER + +/* + * Path to the policy loader. (default = CONFIG_SECURITY_TOMOYO_POLICY_LOADER) + */ +static const char *tomoyo_loader; + +/** + * tomoyo_loader_setup - Set policy loader. + * + * @str: Program to use as a policy loader (e.g. /sbin/tomoyo-init ). + * + * Returns 0. + */ +static int __init tomoyo_loader_setup(char *str) +{ + tomoyo_loader = str; + return 0; +} + +__setup("TOMOYO_loader=", tomoyo_loader_setup); /** * tomoyo_policy_loader_exists - Check whether /sbin/tomoyo-init exists. @@ -18,24 +35,38 @@ static const char *tomoyo_loader = "/sbin/tomoyo-init"; */ static bool tomoyo_policy_loader_exists(void) { - /* - * Don't activate MAC if the policy loader doesn't exist. - * If the initrd includes /sbin/init but real-root-dev has not - * mounted on / yet, activating MAC will block the system since - * policies are not loaded yet. - * Thus, let do_execve() call this function everytime. - */ struct path path; - + if (!tomoyo_loader) + tomoyo_loader = CONFIG_SECURITY_TOMOYO_POLICY_LOADER; if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) { - printk(KERN_INFO "Not activating Mandatory Access Control now " - "since %s doesn't exist.\n", tomoyo_loader); + printk(KERN_INFO "Not activating Mandatory Access Control " + "as %s does not exist.\n", tomoyo_loader); return false; } path_put(&path); return true; } +/* + * Path to the trigger. (default = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER) + */ +static const char *tomoyo_trigger; + +/** + * tomoyo_trigger_setup - Set trigger for activation. + * + * @str: Program to use as an activation trigger (e.g. /sbin/init ). + * + * Returns 0. + */ +static int __init tomoyo_trigger_setup(char *str) +{ + tomoyo_trigger = str; + return 0; +} + +__setup("TOMOYO_trigger=", tomoyo_trigger_setup); + /** * tomoyo_load_policy - Run external policy loader to load policy. * @@ -51,24 +82,19 @@ static bool tomoyo_policy_loader_exists(void) */ void tomoyo_load_policy(const char *filename) { + static bool done; char *argv[2]; char *envp[3]; - if (tomoyo_policy_loaded) + if (tomoyo_policy_loaded || done) return; - /* - * Check filename is /sbin/init or /sbin/tomoyo-start. - * /sbin/tomoyo-start is a dummy filename in case where /sbin/init can't - * be passed. - * You can create /sbin/tomoyo-start by - * "ln -s /bin/true /sbin/tomoyo-start". - */ - if (strcmp(filename, "/sbin/init") && - strcmp(filename, "/sbin/tomoyo-start")) + if (!tomoyo_trigger) + tomoyo_trigger = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER; + if (strcmp(filename, tomoyo_trigger)) return; if (!tomoyo_policy_loader_exists()) return; - + done = true; printk(KERN_INFO "Calling %s to load policy. 
Please wait.\n", tomoyo_loader); argv[0] = (char *) tomoyo_loader; @@ -76,6 +102,8 @@ void tomoyo_load_policy(const char *filename) envp[0] = "HOME=/"; envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin"; envp[2] = NULL; - call_usermodehelper(argv[0], argv, envp, 1); + call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC); tomoyo_check_profile(); } + +#endif diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c index 297612669c7..0e995716cc2 100644 --- a/security/tomoyo/memory.c +++ b/security/tomoyo/memory.c @@ -1,9 +1,7 @@ /* * security/tomoyo/memory.c * - * Memory management functions for TOMOYO. - * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/hash.h> @@ -29,10 +27,10 @@ void tomoyo_warn_oom(const char *function) panic("MAC Initialization failed.\n"); } -/* Memory allocated for policy. */ -static atomic_t tomoyo_policy_memory_size; -/* Quota for holding policy. */ -static unsigned int tomoyo_quota_for_policy; +/* Memoy currently used by policy/audit log/query. */ +unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT]; +/* Memory quota for "policy"/"audit log"/"query". */ +unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT]; /** * tomoyo_memory_ok - Check memory quota. @@ -42,18 +40,20 @@ static unsigned int tomoyo_quota_for_policy; * Returns true on success, false otherwise. * * Returns true if @ptr is not NULL and quota not exceeded, false otherwise. + * + * Caller holds tomoyo_policy_lock mutex. */ bool tomoyo_memory_ok(void *ptr) { - size_t s = ptr ? ksize(ptr) : 0; - atomic_add(s, &tomoyo_policy_memory_size); - if (ptr && (!tomoyo_quota_for_policy || - atomic_read(&tomoyo_policy_memory_size) - <= tomoyo_quota_for_policy)) { - memset(ptr, 0, s); - return true; + if (ptr) { + const size_t s = ksize(ptr); + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s; + if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] || + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <= + tomoyo_memory_quota[TOMOYO_MEMORY_POLICY]) + return true; + tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s; } - atomic_sub(s, &tomoyo_policy_memory_size); tomoyo_warn_oom(__func__); return false; } @@ -66,6 +66,8 @@ bool tomoyo_memory_ok(void *ptr) * * Returns pointer to allocated memory on success, NULL otherwise. * @data is zero-cleared on success. + * + * Caller holds tomoyo_policy_lock mutex. */ void *tomoyo_commit_ok(void *data, const unsigned int size) { @@ -75,32 +77,25 @@ void *tomoyo_commit_ok(void *data, const unsigned int size) memset(data, 0, size); return ptr; } - return NULL; -} - -/** - * tomoyo_memory_free - Free memory for elements. - * - * @ptr: Pointer to allocated memory. - */ -void tomoyo_memory_free(void *ptr) -{ - atomic_sub(ksize(ptr), &tomoyo_policy_memory_size); kfree(ptr); + return NULL; } /** * tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group". * - * @group_name: The name of address group. - * @idx: Index number. + * @param: Pointer to "struct tomoyo_acl_param". + * @idx: Index number. * * Returns pointer to "struct tomoyo_group" on success, NULL otherwise. 
*/ -struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 idx) +struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param, + const u8 idx) { struct tomoyo_group e = { }; struct tomoyo_group *group = NULL; + struct list_head *list; + const char *group_name = tomoyo_read_token(param); bool found = false; if (!tomoyo_correct_word(group_name) || idx >= TOMOYO_MAX_GROUP) return NULL; @@ -109,10 +104,12 @@ struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 idx) return NULL; if (mutex_lock_interruptible(&tomoyo_policy_lock)) goto out; - list_for_each_entry(group, &tomoyo_group_list[idx], list) { - if (e.group_name != group->group_name) + list = ¶m->ns->group_list[idx]; + list_for_each_entry(group, list, head.list) { + if (e.group_name != group->group_name || + atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS) continue; - atomic_inc(&group->users); + atomic_inc(&group->head.users); found = true; break; } @@ -120,15 +117,14 @@ struct tomoyo_group *tomoyo_get_group(const char *group_name, const u8 idx) struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e)); if (entry) { INIT_LIST_HEAD(&entry->member_list); - atomic_set(&entry->users, 1); - list_add_tail_rcu(&entry->list, - &tomoyo_group_list[idx]); + atomic_set(&entry->head.users, 1); + list_add_tail_rcu(&entry->head.list, list); group = entry; found = true; } } mutex_unlock(&tomoyo_policy_lock); - out: +out: tomoyo_put_name(e.group_name); return found ? group : NULL; } @@ -153,7 +149,6 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name) struct tomoyo_name *ptr; unsigned int hash; int len; - int allocated_len; struct list_head *head; if (!name) @@ -163,120 +158,44 @@ const struct tomoyo_path_info *tomoyo_get_name(const char *name) head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)]; if (mutex_lock_interruptible(&tomoyo_policy_lock)) return NULL; - list_for_each_entry(ptr, head, list) { - if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name)) + list_for_each_entry(ptr, head, head.list) { + if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) || + atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) continue; - atomic_inc(&ptr->users); + atomic_inc(&ptr->head.users); goto out; } ptr = kzalloc(sizeof(*ptr) + len, GFP_NOFS); - allocated_len = ptr ? ksize(ptr) : 0; - if (!ptr || (tomoyo_quota_for_policy && - atomic_read(&tomoyo_policy_memory_size) + allocated_len - > tomoyo_quota_for_policy)) { + if (tomoyo_memory_ok(ptr)) { + ptr->entry.name = ((char *) ptr) + sizeof(*ptr); + memmove((char *) ptr->entry.name, name, len); + atomic_set(&ptr->head.users, 1); + tomoyo_fill_path_info(&ptr->entry); + list_add_tail(&ptr->head.list, head); + } else { kfree(ptr); ptr = NULL; - tomoyo_warn_oom(__func__); - goto out; } - atomic_add(allocated_len, &tomoyo_policy_memory_size); - ptr->entry.name = ((char *) ptr) + sizeof(*ptr); - memmove((char *) ptr->entry.name, name, len); - atomic_set(&ptr->users, 1); - tomoyo_fill_path_info(&ptr->entry); - list_add_tail(&ptr->list, head); - out: +out: mutex_unlock(&tomoyo_policy_lock); return ptr ? &ptr->entry : NULL; } +/* Initial namespace.*/ +struct tomoyo_policy_namespace tomoyo_kernel_namespace; + /** * tomoyo_mm_init - Initialize mm related code. 
*/ void __init tomoyo_mm_init(void) { int idx; - - for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++) - INIT_LIST_HEAD(&tomoyo_policy_list[idx]); - for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++) - INIT_LIST_HEAD(&tomoyo_group_list[idx]); for (idx = 0; idx < TOMOYO_MAX_HASH; idx++) INIT_LIST_HEAD(&tomoyo_name_list[idx]); + tomoyo_kernel_namespace.name = "<kernel>"; + tomoyo_init_policy_namespace(&tomoyo_kernel_namespace); + tomoyo_kernel_domain.ns = &tomoyo_kernel_namespace; INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list); - tomoyo_kernel_domain.domainname = tomoyo_get_name(TOMOYO_ROOT_NAME); + tomoyo_kernel_domain.domainname = tomoyo_get_name("<kernel>"); list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list); - idx = tomoyo_read_lock(); - if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain) - panic("Can't register tomoyo_kernel_domain"); - { - /* Load built-in policy. */ - tomoyo_write_transition_control("/sbin/hotplug", false, - TOMOYO_TRANSITION_CONTROL_INITIALIZE); - tomoyo_write_transition_control("/sbin/modprobe", false, - TOMOYO_TRANSITION_CONTROL_INITIALIZE); - } - tomoyo_read_unlock(idx); -} - - -/* Memory allocated for query lists. */ -unsigned int tomoyo_query_memory_size; -/* Quota for holding query lists. */ -unsigned int tomoyo_quota_for_query; - -/** - * tomoyo_read_memory_counter - Check for memory usage in bytes. - * - * @head: Pointer to "struct tomoyo_io_buffer". - * - * Returns memory usage. - */ -void tomoyo_read_memory_counter(struct tomoyo_io_buffer *head) -{ - if (!head->r.eof) { - const unsigned int policy - = atomic_read(&tomoyo_policy_memory_size); - const unsigned int query = tomoyo_query_memory_size; - char buffer[64]; - - memset(buffer, 0, sizeof(buffer)); - if (tomoyo_quota_for_policy) - snprintf(buffer, sizeof(buffer) - 1, - " (Quota: %10u)", - tomoyo_quota_for_policy); - else - buffer[0] = '\0'; - tomoyo_io_printf(head, "Policy: %10u%s\n", policy, - buffer); - if (tomoyo_quota_for_query) - snprintf(buffer, sizeof(buffer) - 1, - " (Quota: %10u)", - tomoyo_quota_for_query); - else - buffer[0] = '\0'; - tomoyo_io_printf(head, "Query lists: %10u%s\n", query, - buffer); - tomoyo_io_printf(head, "Total: %10u\n", policy + query); - head->r.eof = true; - } -} - -/** - * tomoyo_write_memory_quota - Set memory quota. - * - * @head: Pointer to "struct tomoyo_io_buffer". - * - * Returns 0. - */ -int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head) -{ - char *data = head->write_buf; - unsigned int size; - - if (sscanf(data, "Policy: %u", &size) == 1) - tomoyo_quota_for_policy = size; - else if (sscanf(data, "Query lists: %u", &size) == 1) - tomoyo_quota_for_query = size; - return 0; } diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c index 82bf8c2390b..390c646013c 100644 --- a/security/tomoyo/mount.c +++ b/security/tomoyo/mount.c @@ -1,28 +1,22 @@ /* * security/tomoyo/mount.c * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/slab.h> #include "common.h" -/* Keywords for mount restrictions. 
*/ - -/* Allow to call 'mount --bind /source_dir /dest_dir' */ -#define TOMOYO_MOUNT_BIND_KEYWORD "--bind" -/* Allow to call 'mount --move /old_dir /new_dir ' */ -#define TOMOYO_MOUNT_MOVE_KEYWORD "--move" -/* Allow to call 'mount -o remount /dir ' */ -#define TOMOYO_MOUNT_REMOUNT_KEYWORD "--remount" -/* Allow to call 'mount --make-unbindable /dir' */ -#define TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD "--make-unbindable" -/* Allow to call 'mount --make-private /dir' */ -#define TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD "--make-private" -/* Allow to call 'mount --make-slave /dir' */ -#define TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD "--make-slave" -/* Allow to call 'mount --make-shared /dir' */ -#define TOMOYO_MOUNT_MAKE_SHARED_KEYWORD "--make-shared" +/* String table for special mount operations. */ +static const char * const tomoyo_mounts[TOMOYO_MAX_SPECIAL_MOUNT] = { + [TOMOYO_MOUNT_BIND] = "--bind", + [TOMOYO_MOUNT_MOVE] = "--move", + [TOMOYO_MOUNT_REMOUNT] = "--remount", + [TOMOYO_MOUNT_MAKE_UNBINDABLE] = "--make-unbindable", + [TOMOYO_MOUNT_MAKE_PRIVATE] = "--make-private", + [TOMOYO_MOUNT_MAKE_SLAVE] = "--make-slave", + [TOMOYO_MOUNT_MAKE_SHARED] = "--make-shared", +}; /** * tomoyo_audit_mount_log - Audit mount log. @@ -33,50 +27,42 @@ */ static int tomoyo_audit_mount_log(struct tomoyo_request_info *r) { - const char *dev = r->param.mount.dev->name; - const char *dir = r->param.mount.dir->name; - const char *type = r->param.mount.type->name; - const unsigned long flags = r->param.mount.flags; - if (r->granted) - return 0; - if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD)) - tomoyo_warn_log(r, "mount -o remount %s 0x%lX", dir, flags); - else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD) - || !strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD)) - tomoyo_warn_log(r, "mount %s %s %s 0x%lX", type, dev, dir, - flags); - else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD)) - tomoyo_warn_log(r, "mount %s %s 0x%lX", type, dir, flags); - else - tomoyo_warn_log(r, "mount -t %s %s %s 0x%lX", type, dev, dir, - flags); - return tomoyo_supervisor(r, - TOMOYO_KEYWORD_ALLOW_MOUNT "%s %s %s 0x%lX\n", - tomoyo_pattern(r->param.mount.dev), - tomoyo_pattern(r->param.mount.dir), type, - flags); + return tomoyo_supervisor(r, "file mount %s %s %s 0x%lX\n", + r->param.mount.dev->name, + r->param.mount.dir->name, + r->param.mount.type->name, + r->param.mount.flags); } +/** + * tomoyo_check_mount_acl - Check permission for path path path number operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. 
+ */ static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r, const struct tomoyo_acl_info *ptr) { const struct tomoyo_mount_acl *acl = container_of(ptr, typeof(*acl), head); - return tomoyo_compare_number_union(r->param.mount.flags, &acl->flags) && - tomoyo_compare_name_union(r->param.mount.type, &acl->fs_type) && - tomoyo_compare_name_union(r->param.mount.dir, &acl->dir_name) && + return tomoyo_compare_number_union(r->param.mount.flags, + &acl->flags) && + tomoyo_compare_name_union(r->param.mount.type, + &acl->fs_type) && + tomoyo_compare_name_union(r->param.mount.dir, + &acl->dir_name) && (!r->param.mount.need_dev || - tomoyo_compare_name_union(r->param.mount.dev, &acl->dev_name)); + tomoyo_compare_name_union(r->param.mount.dev, + &acl->dev_name)); } /** * tomoyo_mount_acl - Check permission for mount() operation. * * @r: Pointer to "struct tomoyo_request_info". - * @dev_name: Name of device file. + * @dev_name: Name of device file. Maybe NULL. * @dir: Pointer to "struct path". * @type: Name of filesystem type. * @flags: Mount options. @@ -85,9 +71,12 @@ static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r, * * Caller holds tomoyo_read_lock(). */ -static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, - struct path *dir, char *type, unsigned long flags) +static int tomoyo_mount_acl(struct tomoyo_request_info *r, + const char *dev_name, + struct path *dir, const char *type, + unsigned long flags) { + struct tomoyo_obj_info obj = { }; struct path path; struct file_system_type *fstype = NULL; const char *requested_type = NULL; @@ -98,6 +87,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, struct tomoyo_path_info rdir; int need_dev = 0; int error = -ENOMEM; + r->obj = &obj; /* Get fstype. */ requested_type = tomoyo_encode(type); @@ -107,6 +97,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, tomoyo_fill_path_info(&rtype); /* Get mount point. */ + obj.path2 = *dir; requested_dir_name = tomoyo_realpath_from_path(dir); if (!requested_dir_name) { error = -ENOMEM; @@ -116,15 +107,15 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, tomoyo_fill_path_info(&rdir); /* Compare fs name. */ - if (!strcmp(type, TOMOYO_MOUNT_REMOUNT_KEYWORD)) { + if (type == tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]) { /* dev_name is ignored. */ - } else if (!strcmp(type, TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MAKE_SHARED_KEYWORD)) { + } else if (type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE] || + type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]) { /* dev_name is ignored. */ - } else if (!strcmp(type, TOMOYO_MOUNT_BIND_KEYWORD) || - !strcmp(type, TOMOYO_MOUNT_MOVE_KEYWORD)) { + } else if (type == tomoyo_mounts[TOMOYO_MOUNT_BIND] || + type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) { need_dev = -1; /* dev_name is a directory */ } else { fstype = get_fs_type(type); @@ -138,10 +129,11 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, } if (need_dev) { /* Get mount point or device file. 
*/ - if (kern_path(dev_name, LOOKUP_FOLLOW, &path)) { + if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) { error = -ENOENT; goto out; } + obj.path1 = path; requested_dev_name = tomoyo_realpath_from_path(&path); if (!requested_dev_name) { error = -ENOENT; @@ -175,22 +167,26 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name, if (fstype) put_filesystem(fstype); kfree(requested_type); + /* Drop refcount obtained by kern_path(). */ + if (obj.path1.dentry) + path_put(&obj.path1); return error; } /** * tomoyo_mount_permission - Check permission for mount() operation. * - * @dev_name: Name of device file. + * @dev_name: Name of device file. Maybe NULL. * @path: Pointer to "struct path". - * @type: Name of filesystem type. May be NULL. + * @type: Name of filesystem type. Maybe NULL. * @flags: Mount options. - * @data_page: Optional data. May be NULL. + * @data_page: Optional data. Maybe NULL. * * Returns 0 on success, negative value otherwise. */ -int tomoyo_mount_permission(char *dev_name, struct path *path, char *type, - unsigned long flags, void *data_page) +int tomoyo_mount_permission(const char *dev_name, struct path *path, + const char *type, unsigned long flags, + void *data_page) { struct tomoyo_request_info r; int error; @@ -202,32 +198,34 @@ int tomoyo_mount_permission(char *dev_name, struct path *path, char *type, if ((flags & MS_MGC_MSK) == MS_MGC_VAL) flags &= ~MS_MGC_MSK; if (flags & MS_REMOUNT) { - type = TOMOYO_MOUNT_REMOUNT_KEYWORD; + type = tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]; flags &= ~MS_REMOUNT; - } - if (flags & MS_MOVE) { - type = TOMOYO_MOUNT_MOVE_KEYWORD; - flags &= ~MS_MOVE; - } - if (flags & MS_BIND) { - type = TOMOYO_MOUNT_BIND_KEYWORD; + } else if (flags & MS_BIND) { + type = tomoyo_mounts[TOMOYO_MOUNT_BIND]; flags &= ~MS_BIND; - } - if (flags & MS_UNBINDABLE) { - type = TOMOYO_MOUNT_MAKE_UNBINDABLE_KEYWORD; - flags &= ~MS_UNBINDABLE; - } - if (flags & MS_PRIVATE) { - type = TOMOYO_MOUNT_MAKE_PRIVATE_KEYWORD; + } else if (flags & MS_SHARED) { + if (flags & (MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) + return -EINVAL; + type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]; + flags &= ~MS_SHARED; + } else if (flags & MS_PRIVATE) { + if (flags & (MS_SHARED | MS_SLAVE | MS_UNBINDABLE)) + return -EINVAL; + type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE]; flags &= ~MS_PRIVATE; - } - if (flags & MS_SLAVE) { - type = TOMOYO_MOUNT_MAKE_SLAVE_KEYWORD; + } else if (flags & MS_SLAVE) { + if (flags & (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE)) + return -EINVAL; + type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE]; flags &= ~MS_SLAVE; - } - if (flags & MS_SHARED) { - type = TOMOYO_MOUNT_MAKE_SHARED_KEYWORD; - flags &= ~MS_SHARED; + } else if (flags & MS_UNBINDABLE) { + if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE)) + return -EINVAL; + type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE]; + flags &= ~MS_UNBINDABLE; + } else if (flags & MS_MOVE) { + type = tomoyo_mounts[TOMOYO_MOUNT_MOVE]; + flags &= ~MS_MOVE; } if (!type) type = "<NULL>"; @@ -236,49 +234,3 @@ int tomoyo_mount_permission(char *dev_name, struct path *path, char *type, tomoyo_read_unlock(idx); return error; } - -static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a, - const struct tomoyo_acl_info *b) -{ - const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head); - const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head); - return tomoyo_same_acl_head(&p1->head, &p2->head) && - tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) && - 
tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) && - tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) && - tomoyo_same_number_union(&p1->flags, &p2->flags); -} - -/** - * tomoyo_write_mount - Write "struct tomoyo_mount_acl" list. - * - * @data: String to parse. - * @domain: Pointer to "struct tomoyo_domain_info". - * @is_delete: True if it is a delete request. - * - * Returns 0 on success, negative value otherwise. - * - * Caller holds tomoyo_read_lock(). - */ -int tomoyo_write_mount(char *data, struct tomoyo_domain_info *domain, - const bool is_delete) -{ - struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL }; - int error = is_delete ? -ENOENT : -ENOMEM; - char *w[4]; - if (!tomoyo_tokenize(data, w, sizeof(w)) || !w[3][0]) - return -EINVAL; - if (!tomoyo_parse_name_union(w[0], &e.dev_name) || - !tomoyo_parse_name_union(w[1], &e.dir_name) || - !tomoyo_parse_name_union(w[2], &e.fs_type) || - !tomoyo_parse_number_union(w[3], &e.flags)) - goto out; - error = tomoyo_update_domain(&e.head, sizeof(e), is_delete, domain, - tomoyo_same_mount_acl, NULL); - out: - tomoyo_put_name_union(&e.dev_name); - tomoyo_put_name_union(&e.dir_name); - tomoyo_put_name_union(&e.fs_type); - tomoyo_put_number_union(&e.flags); - return error; -} diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c new file mode 100644 index 00000000000..97527710a72 --- /dev/null +++ b/security/tomoyo/network.c @@ -0,0 +1,771 @@ +/* + * security/tomoyo/network.c + * + * Copyright (C) 2005-2011 NTT DATA CORPORATION + */ + +#include "common.h" +#include <linux/slab.h> + +/* Structure for holding inet domain socket's address. */ +struct tomoyo_inet_addr_info { + __be16 port; /* In network byte order. */ + const __be32 *address; /* In network byte order. */ + bool is_ipv6; +}; + +/* Structure for holding unix domain socket's address. */ +struct tomoyo_unix_addr_info { + u8 *addr; /* This may not be '\0' terminated string. */ + unsigned int addr_len; +}; + +/* Structure for holding socket address. */ +struct tomoyo_addr_info { + u8 protocol; + u8 operation; + struct tomoyo_inet_addr_info inet; + struct tomoyo_unix_addr_info unix0; +}; + +/* String table for socket's protocols. */ +const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = { + [SOCK_STREAM] = "stream", + [SOCK_DGRAM] = "dgram", + [SOCK_RAW] = "raw", + [SOCK_SEQPACKET] = "seqpacket", + [0] = " ", /* Dummy for avoiding NULL pointer dereference. */ + [4] = " ", /* Dummy for avoiding NULL pointer dereference. */ +}; + +/** + * tomoyo_parse_ipaddr_union - Parse an IP address. + * + * @param: Pointer to "struct tomoyo_acl_param". + * @ptr: Pointer to "struct tomoyo_ipaddr_union". + * + * Returns true on success, false otherwise. 
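As an illustrative sketch of what the in4_pton()/in6_pton() calls in the body below accept, the address token is either a single address or a min-max range, in IPv4 or IPv6 form, for example:

    10.0.0.1
    10.0.0.0-10.0.0.255
    ::1
    fe80::1-fe80::ffff

A lone address is widened into a degenerate range (ip[0] copied into ip[1]), so the range comparisons used later never need a special case.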
+ */ +bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param, + struct tomoyo_ipaddr_union *ptr) +{ + u8 * const min = ptr->ip[0].in6_u.u6_addr8; + u8 * const max = ptr->ip[1].in6_u.u6_addr8; + char *address = tomoyo_read_token(param); + const char *end; + + if (!strchr(address, ':') && + in4_pton(address, -1, min, '-', &end) > 0) { + ptr->is_ipv6 = false; + if (!*end) + ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0]; + else if (*end++ != '-' || + in4_pton(end, -1, max, '\0', &end) <= 0 || *end) + return false; + return true; + } + if (in6_pton(address, -1, min, '-', &end) > 0) { + ptr->is_ipv6 = true; + if (!*end) + memmove(max, min, sizeof(u16) * 8); + else if (*end++ != '-' || + in6_pton(end, -1, max, '\0', &end) <= 0 || *end) + return false; + return true; + } + return false; +} + +/** + * tomoyo_print_ipv4 - Print an IPv4 address. + * + * @buffer: Buffer to write to. + * @buffer_len: Size of @buffer. + * @min_ip: Pointer to __be32. + * @max_ip: Pointer to __be32. + * + * Returns nothing. + */ +static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len, + const __be32 *min_ip, const __be32 *max_ip) +{ + snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip, + *min_ip == *max_ip ? '\0' : '-', max_ip); +} + +/** + * tomoyo_print_ipv6 - Print an IPv6 address. + * + * @buffer: Buffer to write to. + * @buffer_len: Size of @buffer. + * @min_ip: Pointer to "struct in6_addr". + * @max_ip: Pointer to "struct in6_addr". + * + * Returns nothing. + */ +static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len, + const struct in6_addr *min_ip, + const struct in6_addr *max_ip) +{ + snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip, + !memcmp(min_ip, max_ip, 16) ? '\0' : '-', max_ip); +} + +/** + * tomoyo_print_ip - Print an IP address. + * + * @buf: Buffer to write to. + * @size: Size of @buf. + * @ptr: Pointer to "struct ipaddr_union". + * + * Returns nothing. + */ +void tomoyo_print_ip(char *buf, const unsigned int size, + const struct tomoyo_ipaddr_union *ptr) +{ + if (ptr->is_ipv6) + tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]); + else + tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0], + &ptr->ip[1].s6_addr32[0]); +} + +/* + * Mapping table from "enum tomoyo_network_acl_index" to + * "enum tomoyo_mac_index" for inet domain socket. + */ +static const u8 tomoyo_inet2mac +[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = { + [SOCK_STREAM] = { + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_STREAM_BIND, + [TOMOYO_NETWORK_LISTEN] = + TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN, + [TOMOYO_NETWORK_CONNECT] = + TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT, + }, + [SOCK_DGRAM] = { + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND, + [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND, + }, + [SOCK_RAW] = { + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_RAW_BIND, + [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_RAW_SEND, + }, +}; + +/* + * Mapping table from "enum tomoyo_network_acl_index" to + * "enum tomoyo_mac_index" for unix domain socket. 
+ */ +static const u8 tomoyo_unix2mac +[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = { + [SOCK_STREAM] = { + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND, + [TOMOYO_NETWORK_LISTEN] = + TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN, + [TOMOYO_NETWORK_CONNECT] = + TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT, + }, + [SOCK_DGRAM] = { + [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND, + [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND, + }, + [SOCK_SEQPACKET] = { + [TOMOYO_NETWORK_BIND] = + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND, + [TOMOYO_NETWORK_LISTEN] = + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN, + [TOMOYO_NETWORK_CONNECT] = + TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT, + }, +}; + +/** + * tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. + */ +static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a, + const struct tomoyo_acl_info *b) +{ + const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head); + const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head); + + return p1->protocol == p2->protocol && + tomoyo_same_ipaddr_union(&p1->address, &p2->address) && + tomoyo_same_number_union(&p1->port, &p2->port); +} + +/** + * tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * + * Returns true if @a == @b except permission bits, false otherwise. + */ +static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a, + const struct tomoyo_acl_info *b) +{ + const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head); + const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head); + + return p1->protocol == p2->protocol && + tomoyo_same_name_union(&p1->name, &p2->name); +} + +/** + * tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ +static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a, + struct tomoyo_acl_info *b, + const bool is_delete) +{ + u8 * const a_perm = + &container_of(a, struct tomoyo_inet_acl, head)->perm; + u8 perm = *a_perm; + const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm; + + if (is_delete) + perm &= ~b_perm; + else + perm |= b_perm; + *a_perm = perm; + return !perm; +} + +/** + * tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry. + * + * @a: Pointer to "struct tomoyo_acl_info". + * @b: Pointer to "struct tomoyo_acl_info". + * @is_delete: True for @a &= ~@b, false for @a |= @b. + * + * Returns true if @a is empty, false otherwise. + */ +static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a, + struct tomoyo_acl_info *b, + const bool is_delete) +{ + u8 * const a_perm = + &container_of(a, struct tomoyo_unix_acl, head)->perm; + u8 perm = *a_perm; + const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm; + + if (is_delete) + perm &= ~b_perm; + else + perm |= b_perm; + *a_perm = perm; + return !perm; +} + +/** + * tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list. + * + * @param: Pointer to "struct tomoyo_acl_param". + * + * Returns 0 on success, negative value otherwise. 
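A rough sketch of the input this parser expects, based on the token reads in its body (the leading directive text is consumed by the policy parser before this function runs, which is outside this hunk): a protocol keyword from tomoyo_proto_keyword[], one or more operation keywords joined by '/' (bind, listen, connect, send, per the TOMOYO_NETWORK_* indices; the keyword table itself is defined elsewhere), then an address token or an '@'-prefixed address group, then a port or port range. Illustrative lines only:

    stream bind/listen 10.0.0.0-10.255.255.255 1024-65535
    dgram send @DNS_SERVERS 53

Unknown protocol or operation keywords, and ports above 65535, make the function return -EINVAL.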
+ * + * Caller holds tomoyo_read_lock(). + */ +int tomoyo_write_inet_network(struct tomoyo_acl_param *param) +{ + struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL }; + int error = -EINVAL; + u8 type; + const char *protocol = tomoyo_read_token(param); + const char *operation = tomoyo_read_token(param); + + for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++) + if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol])) + break; + for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++) + if (tomoyo_permstr(operation, tomoyo_socket_keyword[type])) + e.perm |= 1 << type; + if (e.protocol == TOMOYO_SOCK_MAX || !e.perm) + return -EINVAL; + if (param->data[0] == '@') { + param->data++; + e.address.group = + tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP); + if (!e.address.group) + return -ENOMEM; + } else { + if (!tomoyo_parse_ipaddr_union(param, &e.address)) + goto out; + } + if (!tomoyo_parse_number_union(param, &e.port) || + e.port.values[1] > 65535) + goto out; + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_inet_acl, + tomoyo_merge_inet_acl); +out: + tomoyo_put_group(e.address.group); + tomoyo_put_number_union(&e.port); + return error; +} + +/** + * tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list. + * + * @param: Pointer to "struct tomoyo_acl_param". + * + * Returns 0 on success, negative value otherwise. + */ +int tomoyo_write_unix_network(struct tomoyo_acl_param *param) +{ + struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL }; + int error; + u8 type; + const char *protocol = tomoyo_read_token(param); + const char *operation = tomoyo_read_token(param); + + for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++) + if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol])) + break; + for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++) + if (tomoyo_permstr(operation, tomoyo_socket_keyword[type])) + e.perm |= 1 << type; + if (e.protocol == TOMOYO_SOCK_MAX || !e.perm) + return -EINVAL; + if (!tomoyo_parse_name_union(param, &e.name)) + return -EINVAL; + error = tomoyo_update_domain(&e.head, sizeof(e), param, + tomoyo_same_unix_acl, + tomoyo_merge_unix_acl); + tomoyo_put_name_union(&e.name); + return error; +} + +/** + * tomoyo_audit_net_log - Audit network log. + * + * @r: Pointer to "struct tomoyo_request_info". + * @family: Name of socket family ("inet" or "unix"). + * @protocol: Name of protocol in @family. + * @operation: Name of socket operation. + * @address: Name of address. + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_audit_net_log(struct tomoyo_request_info *r, + const char *family, const u8 protocol, + const u8 operation, const char *address) +{ + return tomoyo_supervisor(r, "network %s %s %s %s\n", family, + tomoyo_proto_keyword[protocol], + tomoyo_socket_keyword[operation], address); +} + +/** + * tomoyo_audit_inet_log - Audit INET network log. + * + * @r: Pointer to "struct tomoyo_request_info". + * + * Returns 0 on success, negative value otherwise. 
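Given the "network %s %s %s %s" format in tomoyo_audit_net_log() just above, a stream-socket connect to 192.168.1.10 port 80 that needs supervisor attention would be reported as a line of roughly this shape (assuming the operation keyword table maps TOMOYO_NETWORK_CONNECT to "connect"):

    network inet stream connect 192.168.1.10 80

which also matches the syntax these ACLs use in domain policy on the writing side.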
+ */ +static int tomoyo_audit_inet_log(struct tomoyo_request_info *r) +{ + char buf[128]; + int len; + const __be32 *address = r->param.inet_network.address; + + if (r->param.inet_network.is_ipv6) + tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *) + address, (const struct in6_addr *) address); + else + tomoyo_print_ipv4(buf, sizeof(buf), address, address); + len = strlen(buf); + snprintf(buf + len, sizeof(buf) - len, " %u", + r->param.inet_network.port); + return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol, + r->param.inet_network.operation, buf); +} + +/** + * tomoyo_audit_unix_log - Audit UNIX network log. + * + * @r: Pointer to "struct tomoyo_request_info". + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_audit_unix_log(struct tomoyo_request_info *r) +{ + return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol, + r->param.unix_network.operation, + r->param.unix_network.address->name); +} + +/** + * tomoyo_check_inet_acl - Check permission for inet domain socket operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ +static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r, + const struct tomoyo_acl_info *ptr) +{ + const struct tomoyo_inet_acl *acl = + container_of(ptr, typeof(*acl), head); + const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4; + + if (!(acl->perm & (1 << r->param.inet_network.operation)) || + !tomoyo_compare_number_union(r->param.inet_network.port, + &acl->port)) + return false; + if (acl->address.group) + return tomoyo_address_matches_group + (r->param.inet_network.is_ipv6, + r->param.inet_network.address, acl->address.group); + return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 && + memcmp(&acl->address.ip[0], + r->param.inet_network.address, size) <= 0 && + memcmp(r->param.inet_network.address, + &acl->address.ip[1], size) <= 0; +} + +/** + * tomoyo_check_unix_acl - Check permission for unix domain socket operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ +static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r, + const struct tomoyo_acl_info *ptr) +{ + const struct tomoyo_unix_acl *acl = + container_of(ptr, typeof(*acl), head); + + return (acl->perm & (1 << r->param.unix_network.operation)) && + tomoyo_compare_name_union(r->param.unix_network.address, + &acl->name); +} + +/** + * tomoyo_inet_entry - Check permission for INET network operation. + * + * @address: Pointer to "struct tomoyo_addr_info". + * + * Returns 0 on success, negative value otherwise. 
+ */ +static int tomoyo_inet_entry(const struct tomoyo_addr_info *address) +{ + const int idx = tomoyo_read_lock(); + struct tomoyo_request_info r; + int error = 0; + const u8 type = tomoyo_inet2mac[address->protocol][address->operation]; + + if (type && tomoyo_init_request_info(&r, NULL, type) + != TOMOYO_CONFIG_DISABLED) { + r.param_type = TOMOYO_TYPE_INET_ACL; + r.param.inet_network.protocol = address->protocol; + r.param.inet_network.operation = address->operation; + r.param.inet_network.is_ipv6 = address->inet.is_ipv6; + r.param.inet_network.address = address->inet.address; + r.param.inet_network.port = ntohs(address->inet.port); + do { + tomoyo_check_acl(&r, tomoyo_check_inet_acl); + error = tomoyo_audit_inet_log(&r); + } while (error == TOMOYO_RETRY_REQUEST); + } + tomoyo_read_unlock(idx); + return error; +} + +/** + * tomoyo_check_inet_address - Check permission for inet domain socket's operation. + * + * @addr: Pointer to "struct sockaddr". + * @addr_len: Size of @addr. + * @port: Port number. + * @address: Pointer to "struct tomoyo_addr_info". + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_check_inet_address(const struct sockaddr *addr, + const unsigned int addr_len, + const u16 port, + struct tomoyo_addr_info *address) +{ + struct tomoyo_inet_addr_info *i = &address->inet; + + switch (addr->sa_family) { + case AF_INET6: + if (addr_len < SIN6_LEN_RFC2133) + goto skip; + i->is_ipv6 = true; + i->address = (__be32 *) + ((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr; + i->port = ((struct sockaddr_in6 *) addr)->sin6_port; + break; + case AF_INET: + if (addr_len < sizeof(struct sockaddr_in)) + goto skip; + i->is_ipv6 = false; + i->address = (__be32 *) + &((struct sockaddr_in *) addr)->sin_addr; + i->port = ((struct sockaddr_in *) addr)->sin_port; + break; + default: + goto skip; + } + if (address->protocol == SOCK_RAW) + i->port = htons(port); + return tomoyo_inet_entry(address); +skip: + return 0; +} + +/** + * tomoyo_unix_entry - Check permission for UNIX network operation. + * + * @address: Pointer to "struct tomoyo_addr_info". + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_unix_entry(const struct tomoyo_addr_info *address) +{ + const int idx = tomoyo_read_lock(); + struct tomoyo_request_info r; + int error = 0; + const u8 type = tomoyo_unix2mac[address->protocol][address->operation]; + + if (type && tomoyo_init_request_info(&r, NULL, type) + != TOMOYO_CONFIG_DISABLED) { + char *buf = address->unix0.addr; + int len = address->unix0.addr_len - sizeof(sa_family_t); + + if (len <= 0) { + buf = "anonymous"; + len = 9; + } else if (buf[0]) { + len = strnlen(buf, len); + } + buf = tomoyo_encode2(buf, len); + if (buf) { + struct tomoyo_path_info addr; + + addr.name = buf; + tomoyo_fill_path_info(&addr); + r.param_type = TOMOYO_TYPE_UNIX_ACL; + r.param.unix_network.protocol = address->protocol; + r.param.unix_network.operation = address->operation; + r.param.unix_network.address = &addr; + do { + tomoyo_check_acl(&r, tomoyo_check_unix_acl); + error = tomoyo_audit_unix_log(&r); + } while (error == TOMOYO_RETRY_REQUEST); + kfree(buf); + } else + error = -ENOMEM; + } + tomoyo_read_unlock(idx); + return error; +} + +/** + * tomoyo_check_unix_address - Check permission for unix domain socket's operation. + * + * @addr: Pointer to "struct sockaddr". + * @addr_len: Size of @addr. + * @address: Pointer to "struct tomoyo_addr_info". + * + * Returns 0 on success, negative value otherwise. 
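For the unix side handled in tomoyo_unix_entry() above, the address that ends up in the audit/ACL line depends on the sun_path contents: an unnamed socket (nothing beyond sa_family_t) is logged under the fixed name "anonymous", a pathname socket is cut at its terminating NUL via strnlen(), and an abstract socket (leading NUL byte) keeps its full length and goes through tomoyo_encode2(), so its non-printable bytes appear as escape sequences. Illustrative line for a pathname socket:

    network unix stream connect /run/app.sock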
+ */ +static int tomoyo_check_unix_address(struct sockaddr *addr, + const unsigned int addr_len, + struct tomoyo_addr_info *address) +{ + struct tomoyo_unix_addr_info *u = &address->unix0; + + if (addr->sa_family != AF_UNIX) + return 0; + u->addr = ((struct sockaddr_un *) addr)->sun_path; + u->addr_len = addr_len; + return tomoyo_unix_entry(address); +} + +/** + * tomoyo_kernel_service - Check whether I'm kernel service or not. + * + * Returns true if I'm kernel service, false otherwise. + */ +static bool tomoyo_kernel_service(void) +{ + /* Nothing to do if I am a kernel service. */ + return segment_eq(get_fs(), KERNEL_DS); +} + +/** + * tomoyo_sock_family - Get socket's family. + * + * @sk: Pointer to "struct sock". + * + * Returns one of PF_INET, PF_INET6, PF_UNIX or 0. + */ +static u8 tomoyo_sock_family(struct sock *sk) +{ + u8 family; + + if (tomoyo_kernel_service()) + return 0; + family = sk->sk_family; + switch (family) { + case PF_INET: + case PF_INET6: + case PF_UNIX: + return family; + default: + return 0; + } +} + +/** + * tomoyo_socket_listen_permission - Check permission for listening a socket. + * + * @sock: Pointer to "struct socket". + * + * Returns 0 on success, negative value otherwise. + */ +int tomoyo_socket_listen_permission(struct socket *sock) +{ + struct tomoyo_addr_info address; + const u8 family = tomoyo_sock_family(sock->sk); + const unsigned int type = sock->type; + struct sockaddr_storage addr; + int addr_len; + + if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET)) + return 0; + { + const int error = sock->ops->getname(sock, (struct sockaddr *) + &addr, &addr_len, 0); + + if (error) + return error; + } + address.protocol = type; + address.operation = TOMOYO_NETWORK_LISTEN; + if (family == PF_UNIX) + return tomoyo_check_unix_address((struct sockaddr *) &addr, + addr_len, &address); + return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len, + 0, &address); +} + +/** + * tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket. + * + * @sock: Pointer to "struct socket". + * @addr: Pointer to "struct sockaddr". + * @addr_len: Size of @addr. + * + * Returns 0 on success, negative value otherwise. + */ +int tomoyo_socket_connect_permission(struct socket *sock, + struct sockaddr *addr, int addr_len) +{ + struct tomoyo_addr_info address; + const u8 family = tomoyo_sock_family(sock->sk); + const unsigned int type = sock->type; + + if (!family) + return 0; + address.protocol = type; + switch (type) { + case SOCK_DGRAM: + case SOCK_RAW: + address.operation = TOMOYO_NETWORK_SEND; + break; + case SOCK_STREAM: + case SOCK_SEQPACKET: + address.operation = TOMOYO_NETWORK_CONNECT; + break; + default: + return 0; + } + if (family == PF_UNIX) + return tomoyo_check_unix_address(addr, addr_len, &address); + return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol, + &address); +} + +/** + * tomoyo_socket_bind_permission - Check permission for setting the local address of a socket. + * + * @sock: Pointer to "struct socket". + * @addr: Pointer to "struct sockaddr". + * @addr_len: Size of @addr. + * + * Returns 0 on success, negative value otherwise. 
+ */ +int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + struct tomoyo_addr_info address; + const u8 family = tomoyo_sock_family(sock->sk); + const unsigned int type = sock->type; + + if (!family) + return 0; + switch (type) { + case SOCK_STREAM: + case SOCK_DGRAM: + case SOCK_RAW: + case SOCK_SEQPACKET: + address.protocol = type; + address.operation = TOMOYO_NETWORK_BIND; + break; + default: + return 0; + } + if (family == PF_UNIX) + return tomoyo_check_unix_address(addr, addr_len, &address); + return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol, + &address); +} + +/** + * tomoyo_socket_sendmsg_permission - Check permission for sending a datagram. + * + * @sock: Pointer to "struct socket". + * @msg: Pointer to "struct msghdr". + * @size: Unused. + * + * Returns 0 on success, negative value otherwise. + */ +int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg, + int size) +{ + struct tomoyo_addr_info address; + const u8 family = tomoyo_sock_family(sock->sk); + const unsigned int type = sock->type; + + if (!msg->msg_name || !family || + (type != SOCK_DGRAM && type != SOCK_RAW)) + return 0; + address.protocol = type; + address.operation = TOMOYO_NETWORK_SEND; + if (family == PF_UNIX) + return tomoyo_check_unix_address((struct sockaddr *) + msg->msg_name, + msg->msg_namelen, &address); + return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name, + msg->msg_namelen, + sock->sk->sk_protocol, &address); +} diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c index 1d0bf8fa192..a3386d11942 100644 --- a/security/tomoyo/realpath.c +++ b/security/tomoyo/realpath.c @@ -1,32 +1,26 @@ /* * security/tomoyo/realpath.c * - * Pathname calculation functions for TOMOYO. - * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ -#include <linux/types.h> -#include <linux/mount.h> -#include <linux/mnt_namespace.h> -#include <linux/fs_struct.h> -#include <linux/magic.h> -#include <linux/slab.h> -#include <net/sock.h> #include "common.h" +#include <linux/magic.h> /** - * tomoyo_encode: Convert binary string to ascii string. + * tomoyo_encode2 - Encode binary string to ascii string. * - * @str: String in binary format. + * @str: String in binary format. + * @str_len: Size of @str in byte. * * Returns pointer to @str in ascii format on success, NULL otherwise. * * This function uses kzalloc(), so caller must kfree() if this function * didn't return NULL. */ -char *tomoyo_encode(const char *str) +char *tomoyo_encode2(const char *str, int str_len) { + int i; int len = 0; const char *p = str; char *cp; @@ -34,8 +28,9 @@ char *tomoyo_encode(const char *str) if (!p) return NULL; - while (*p) { - const unsigned char c = *p++; + for (i = 0; i < str_len; i++) { + const unsigned char c = p[i]; + if (c == '\\') len += 2; else if (c > ' ' && c < 127) @@ -50,8 +45,8 @@ char *tomoyo_encode(const char *str) return NULL; cp0 = cp; p = str; - while (*p) { - const unsigned char c = *p++; + for (i = 0; i < str_len; i++) { + const unsigned char c = p[i]; if (c == '\\') { *cp++ = '\\'; @@ -69,6 +64,175 @@ char *tomoyo_encode(const char *str) } /** + * tomoyo_encode - Encode binary string to ascii string. + * + * @str: String in binary format. + * + * Returns pointer to @str in ascii format on success, NULL otherwise. + * + * This function uses kzalloc(), so caller must kfree() if this function + * didn't return NULL. 
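The length accounting in tomoyo_encode2() near the top of this file (2 bytes for a backslash, 4 bytes for anything outside the printable ASCII range 0x21-0x7e) implies the usual TOMOYO escaping: backslashes are doubled and every other non-printable byte becomes a three-digit octal escape. So a pathname containing a space, say /tmp/a b, would be stored and matched as /tmp/a\040b. This is an inference from the code shown here; the emission loop itself is elided by the hunk context.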
+ */ +char *tomoyo_encode(const char *str) +{ + return str ? tomoyo_encode2(str, strlen(str)) : NULL; +} + +/** + * tomoyo_get_absolute_path - Get the path of a dentry but ignores chroot'ed root. + * + * @path: Pointer to "struct path". + * @buffer: Pointer to buffer to return value in. + * @buflen: Sizeof @buffer. + * + * Returns the buffer on success, an error code otherwise. + * + * If dentry is a directory, trailing '/' is appended. + */ +static char *tomoyo_get_absolute_path(struct path *path, char * const buffer, + const int buflen) +{ + char *pos = ERR_PTR(-ENOMEM); + if (buflen >= 256) { + /* go to whatever namespace root we are under */ + pos = d_absolute_path(path, buffer, buflen - 1); + if (!IS_ERR(pos) && *pos == '/' && pos[1]) { + struct inode *inode = path->dentry->d_inode; + if (inode && S_ISDIR(inode->i_mode)) { + buffer[buflen - 2] = '/'; + buffer[buflen - 1] = '\0'; + } + } + } + return pos; +} + +/** + * tomoyo_get_dentry_path - Get the path of a dentry. + * + * @dentry: Pointer to "struct dentry". + * @buffer: Pointer to buffer to return value in. + * @buflen: Sizeof @buffer. + * + * Returns the buffer on success, an error code otherwise. + * + * If dentry is a directory, trailing '/' is appended. + */ +static char *tomoyo_get_dentry_path(struct dentry *dentry, char * const buffer, + const int buflen) +{ + char *pos = ERR_PTR(-ENOMEM); + if (buflen >= 256) { + pos = dentry_path_raw(dentry, buffer, buflen - 1); + if (!IS_ERR(pos) && *pos == '/' && pos[1]) { + struct inode *inode = dentry->d_inode; + if (inode && S_ISDIR(inode->i_mode)) { + buffer[buflen - 2] = '/'; + buffer[buflen - 1] = '\0'; + } + } + } + return pos; +} + +/** + * tomoyo_get_local_path - Get the path of a dentry. + * + * @dentry: Pointer to "struct dentry". + * @buffer: Pointer to buffer to return value in. + * @buflen: Sizeof @buffer. + * + * Returns the buffer on success, an error code otherwise. + */ +static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer, + const int buflen) +{ + struct super_block *sb = dentry->d_sb; + char *pos = tomoyo_get_dentry_path(dentry, buffer, buflen); + if (IS_ERR(pos)) + return pos; + /* Convert from $PID to self if $PID is current thread. */ + if (sb->s_magic == PROC_SUPER_MAGIC && *pos == '/') { + char *ep; + const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10); + if (*ep == '/' && pid && pid == + task_tgid_nr_ns(current, sb->s_fs_info)) { + pos = ep - 5; + if (pos < buffer) + goto out; + memmove(pos, "/self", 5); + } + goto prepend_filesystem_name; + } + /* Use filesystem name for unnamed devices. */ + if (!MAJOR(sb->s_dev)) + goto prepend_filesystem_name; + { + struct inode *inode = sb->s_root->d_inode; + /* + * Use filesystem name if filesystem does not support rename() + * operation. + */ + if (!inode->i_op->rename) + goto prepend_filesystem_name; + } + /* Prepend device name. */ + { + char name[64]; + int name_len; + const dev_t dev = sb->s_dev; + name[sizeof(name) - 1] = '\0'; + snprintf(name, sizeof(name) - 1, "dev(%u,%u):", MAJOR(dev), + MINOR(dev)); + name_len = strlen(name); + pos -= name_len; + if (pos < buffer) + goto out; + memmove(pos, name, name_len); + return pos; + } + /* Prepend filesystem name. 
*/ +prepend_filesystem_name: + { + const char *name = sb->s_type->name; + const int name_len = strlen(name); + pos -= name_len + 1; + if (pos < buffer) + goto out; + memmove(pos, name, name_len); + pos[name_len] = ':'; + } + return pos; +out: + return ERR_PTR(-ENOMEM); +} + +/** + * tomoyo_get_socket_name - Get the name of a socket. + * + * @path: Pointer to "struct path". + * @buffer: Pointer to buffer to return value in. + * @buflen: Sizeof @buffer. + * + * Returns the buffer. + */ +static char *tomoyo_get_socket_name(struct path *path, char * const buffer, + const int buflen) +{ + struct inode *inode = path->dentry->d_inode; + struct socket *sock = inode ? SOCKET_I(inode) : NULL; + struct sock *sk = sock ? sock->sk : NULL; + if (sk) { + snprintf(buffer, buflen, "socket:[family=%u:type=%u:" + "protocol=%u]", sk->sk_family, sk->sk_type, + sk->sk_protocol); + } else { + snprintf(buffer, buflen, "socket:[unknown]"); + } + return buffer; +} + +/** * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root. * * @path: Pointer to "struct path". @@ -89,55 +253,50 @@ char *tomoyo_realpath_from_path(struct path *path) char *name = NULL; unsigned int buf_len = PAGE_SIZE / 2; struct dentry *dentry = path->dentry; - bool is_dir; + struct super_block *sb; if (!dentry) return NULL; - is_dir = dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode); + sb = dentry->d_sb; while (1) { - struct path ns_root = { .mnt = NULL, .dentry = NULL }; char *pos; + struct inode *inode; buf_len <<= 1; kfree(buf); buf = kmalloc(buf_len, GFP_NOFS); if (!buf) break; + /* To make sure that pos is '\0' terminated. */ + buf[buf_len - 1] = '\0'; /* Get better name for socket. */ - if (dentry->d_sb && dentry->d_sb->s_magic == SOCKFS_MAGIC) { - struct inode *inode = dentry->d_inode; - struct socket *sock = inode ? SOCKET_I(inode) : NULL; - struct sock *sk = sock ? sock->sk : NULL; - if (sk) { - snprintf(buf, buf_len - 1, "socket:[family=%u:" - "type=%u:protocol=%u]", sk->sk_family, - sk->sk_type, sk->sk_protocol); - } else { - snprintf(buf, buf_len - 1, "socket:[unknown]"); - } - name = tomoyo_encode(buf); - break; + if (sb->s_magic == SOCKFS_MAGIC) { + pos = tomoyo_get_socket_name(path, buf, buf_len - 1); + goto encode; } - /* For "socket:[\$]" and "pipe:[\$]". */ + /* For "pipe:[\$]". */ if (dentry->d_op && dentry->d_op->d_dname) { pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1); - if (IS_ERR(pos)) - continue; - name = tomoyo_encode(pos); - break; + goto encode; } - /* If we don't have a vfsmount, we can't calculate. */ - if (!path->mnt) - break; - /* go to whatever namespace root we are under */ - pos = __d_path(path, &ns_root, buf, buf_len); - /* Prepend "/proc" prefix if using internal proc vfs mount. */ - if (!IS_ERR(pos) && (path->mnt->mnt_flags & MNT_INTERNAL) && - (path->mnt->mnt_sb->s_magic == PROC_SUPER_MAGIC)) { - pos -= 5; - if (pos >= buf) - memcpy(pos, "/proc", 5); - else - pos = ERR_PTR(-ENOMEM); + inode = sb->s_root->d_inode; + /* + * Get local name for filesystems without rename() operation + * or dentry without vfsmount. + */ + if (!path->mnt || !inode->i_op->rename) + pos = tomoyo_get_local_path(path->dentry, buf, + buf_len - 1); + /* Get absolute name for the rest. */ + else { + pos = tomoyo_get_absolute_path(path, buf, buf_len - 1); + /* + * Fall back to local name if absolute name is not + * available. 
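The local-name fallback above, together with tomoyo_get_local_path() earlier in this hunk, produces names that are relative to the filesystem root and carry their origin as a prefix. Illustrative shapes only (exact names depend on the filesystem):

    proc:/self/status        procfs entry of the calling thread, /$PID rewritten to /self
    sysfs:/kernel/security   filesystem-name prefix for unnamed or rename-less filesystems
    dev(8,1):/some/path      device-number prefix for everything else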
+ */ + if (pos == ERR_PTR(-EINVAL)) + pos = tomoyo_get_local_path(path->dentry, buf, + buf_len - 1); } +encode: if (IS_ERR(pos)) continue; name = tomoyo_encode(pos); @@ -146,16 +305,6 @@ char *tomoyo_realpath_from_path(struct path *path) kfree(buf); if (!name) tomoyo_warn_oom(__func__); - else if (is_dir && *name) { - /* Append trailing '/' if dentry is a directory. */ - char *pos = name + strlen(name) - 1; - if (*pos != '/') - /* - * This is OK because tomoyo_encode() reserves space - * for appending "/". - */ - *++pos = '/'; - } return name; } diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c index e43d5554b50..179a955b319 100644 --- a/security/tomoyo/securityfs_if.c +++ b/security/tomoyo/securityfs_if.c @@ -1,15 +1,131 @@ /* - * security/tomoyo/common.c + * security/tomoyo/securityfs_if.c * - * Securityfs interface for TOMOYO. - * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/security.h> #include "common.h" /** + * tomoyo_check_task_acl - Check permission for task operation. + * + * @r: Pointer to "struct tomoyo_request_info". + * @ptr: Pointer to "struct tomoyo_acl_info". + * + * Returns true if granted, false otherwise. + */ +static bool tomoyo_check_task_acl(struct tomoyo_request_info *r, + const struct tomoyo_acl_info *ptr) +{ + const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl), + head); + return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname); +} + +/** + * tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface. + * + * @file: Pointer to "struct file". + * @buf: Domainname to transit to. + * @count: Size of @buf. + * @ppos: Unused. + * + * Returns @count on success, negative value otherwise. + * + * If domain transition was permitted but the domain transition failed, this + * function returns error rather than terminating current thread with SIGKILL. + */ +static ssize_t tomoyo_write_self(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) +{ + char *data; + int error; + if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10) + return -ENOMEM; + data = kzalloc(count + 1, GFP_NOFS); + if (!data) + return -ENOMEM; + if (copy_from_user(data, buf, count)) { + error = -EFAULT; + goto out; + } + tomoyo_normalize_line(data); + if (tomoyo_correct_domain(data)) { + const int idx = tomoyo_read_lock(); + struct tomoyo_path_info name; + struct tomoyo_request_info r; + name.name = data; + tomoyo_fill_path_info(&name); + /* Check "task manual_domain_transition" permission. */ + tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE); + r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL; + r.param.task.domainname = &name; + tomoyo_check_acl(&r, tomoyo_check_task_acl); + if (!r.granted) + error = -EPERM; + else { + struct tomoyo_domain_info *new_domain = + tomoyo_assign_domain(data, true); + if (!new_domain) { + error = -ENOENT; + } else { + struct cred *cred = prepare_creds(); + if (!cred) { + error = -ENOMEM; + } else { + struct tomoyo_domain_info *old_domain = + cred->security; + cred->security = new_domain; + atomic_inc(&new_domain->users); + atomic_dec(&old_domain->users); + commit_creds(cred); + error = 0; + } + } + } + tomoyo_read_unlock(idx); + } else + error = -EINVAL; +out: + kfree(data); + return error ? error : count; +} + +/** + * tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface. + * + * @file: Pointer to "struct file". + * @buf: Domainname which current thread belongs to. 
+ * @count: Size of @buf. + * @ppos: Bytes read by now. + * + * Returns read size on success, negative value otherwise. + */ +static ssize_t tomoyo_read_self(struct file *file, char __user *buf, + size_t count, loff_t *ppos) +{ + const char *domain = tomoyo_domain()->domainname->name; + loff_t len = strlen(domain); + loff_t pos = *ppos; + if (pos >= len || !count) + return 0; + len -= pos; + if (count < len) + len = count; + if (copy_to_user(buf, domain + pos, len)) + return -EFAULT; + *ppos += len; + return len; +} + +/* Operations for /sys/kernel/security/tomoyo/self_domain interface. */ +static const struct file_operations tomoyo_self_operations = { + .write = tomoyo_write_self, + .read = tomoyo_read_self, +}; + +/** * tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface. * * @inode: Pointer to "struct inode". @@ -19,7 +135,7 @@ */ static int tomoyo_open(struct inode *inode, struct file *file) { - const int key = ((u8 *) file->f_path.dentry->d_inode->i_private) + const int key = ((u8 *) file_inode(file)->i_private) - ((u8 *) NULL); return tomoyo_open_control(key, file); } @@ -27,23 +143,23 @@ static int tomoyo_open(struct inode *inode, struct file *file) /** * tomoyo_release - close() for /sys/kernel/security/tomoyo/ interface. * - * @inode: Pointer to "struct inode". * @file: Pointer to "struct file". * - * Returns 0 on success, negative value otherwise. */ static int tomoyo_release(struct inode *inode, struct file *file) { - return tomoyo_close_control(file); + tomoyo_close_control(file->private_data); + return 0; } /** - * tomoyo_poll - poll() for /proc/ccs/ interface. + * tomoyo_poll - poll() for /sys/kernel/security/tomoyo/ interface. * * @file: Pointer to "struct file". - * @wait: Pointer to "poll_table". + * @wait: Pointer to "poll_table". Maybe NULL. * - * Returns 0 on success, negative value otherwise. + * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write, + * POLLOUT | POLLWRNORM otherwise. */ static unsigned int tomoyo_poll(struct file *file, poll_table *wait) { @@ -63,7 +179,7 @@ static unsigned int tomoyo_poll(struct file *file, poll_table *wait) static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { - return tomoyo_read_control(file, buf, count); + return tomoyo_read_control(file->private_data, buf, count); } /** @@ -79,7 +195,7 @@ static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count, static ssize_t tomoyo_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { - return tomoyo_write_control(file, buf, count); + return tomoyo_write_control(file->private_data, buf, count); } /* @@ -108,7 +224,7 @@ static const struct file_operations tomoyo_operations = { * * Returns nothing. 
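Together, tomoyo_write_self() and tomoyo_read_self() above back the self_domain entry registered with securityfs a little further down. A usage sketch, with illustrative domain names:

    # cat /sys/kernel/security/tomoyo/self_domain
    <kernel> /usr/sbin/sshd
    # echo '<kernel> /usr/sbin/sshd /bin/bash' > /sys/kernel/security/tomoyo/self_domain

The write succeeds only when the current domain has a matching "task manual_domain_transition" ACL (checked through tomoyo_check_task_acl()); otherwise it fails with -EPERM, and as the comment above notes, a failed transition returns an error instead of killing the writer the way an execve-time failure would.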
*/ -static void __init tomoyo_create_entry(const char *name, const mode_t mode, +static void __init tomoyo_create_entry(const char *name, const umode_t mode, struct dentry *parent, const u8 key) { securityfs_create_file(name, mode, parent, ((u8 *) NULL) + key, @@ -135,20 +251,21 @@ static int __init tomoyo_initerface_init(void) TOMOYO_DOMAINPOLICY); tomoyo_create_entry("exception_policy", 0600, tomoyo_dir, TOMOYO_EXCEPTIONPOLICY); - tomoyo_create_entry("self_domain", 0400, tomoyo_dir, - TOMOYO_SELFDOMAIN); - tomoyo_create_entry(".domain_status", 0600, tomoyo_dir, - TOMOYO_DOMAIN_STATUS); + tomoyo_create_entry("audit", 0400, tomoyo_dir, + TOMOYO_AUDIT); tomoyo_create_entry(".process_status", 0600, tomoyo_dir, TOMOYO_PROCESS_STATUS); - tomoyo_create_entry("meminfo", 0600, tomoyo_dir, - TOMOYO_MEMINFO); + tomoyo_create_entry("stat", 0644, tomoyo_dir, + TOMOYO_STAT); tomoyo_create_entry("profile", 0600, tomoyo_dir, TOMOYO_PROFILE); tomoyo_create_entry("manager", 0600, tomoyo_dir, TOMOYO_MANAGER); tomoyo_create_entry("version", 0400, tomoyo_dir, TOMOYO_VERSION); + securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL, + &tomoyo_self_operations); + tomoyo_load_builtin_policy(); return 0; } diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c index 95d3f957223..f0b756e27fe 100644 --- a/security/tomoyo/tomoyo.c +++ b/security/tomoyo/tomoyo.c @@ -1,20 +1,35 @@ /* * security/tomoyo/tomoyo.c * - * LSM hooks for TOMOYO Linux. - * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/security.h> #include "common.h" +/** + * tomoyo_cred_alloc_blank - Target for security_cred_alloc_blank(). + * + * @new: Pointer to "struct cred". + * @gfp: Memory allocation flags. + * + * Returns 0. + */ static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp) { new->security = NULL; return 0; } +/** + * tomoyo_cred_prepare - Target for security_prepare_creds(). + * + * @new: Pointer to "struct cred". + * @old: Pointer to "struct cred". + * @gfp: Memory allocation flags. + * + * Returns 0. + */ static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp) { @@ -25,11 +40,22 @@ static int tomoyo_cred_prepare(struct cred *new, const struct cred *old, return 0; } +/** + * tomoyo_cred_transfer - Target for security_transfer_creds(). + * + * @new: Pointer to "struct cred". + * @old: Pointer to "struct cred". + */ static void tomoyo_cred_transfer(struct cred *new, const struct cred *old) { tomoyo_cred_prepare(new, old, 0); } +/** + * tomoyo_cred_free - Target for security_cred_free(). + * + * @cred: Pointer to "struct cred". + */ static void tomoyo_cred_free(struct cred *cred) { struct tomoyo_domain_info *domain = cred->security; @@ -37,6 +63,13 @@ static void tomoyo_cred_free(struct cred *cred) atomic_dec(&domain->users); } +/** + * tomoyo_bprm_set_creds - Target for security_bprm_set_creds(). + * + * @bprm: Pointer to "struct linux_binprm". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) { int rc; @@ -51,12 +84,14 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) */ if (bprm->cred_prepared) return 0; +#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER /* * Load policy if /sbin/tomoyo-init exists and /sbin/init is requested * for the first time. */ if (!tomoyo_policy_loaded) tomoyo_load_policy(bprm->filename); +#endif /* * Release reference to "struct tomoyo_domain_info" stored inside * "bprm->cred->security". 
New reference to "struct tomoyo_domain_info" @@ -73,6 +108,13 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm) return 0; } +/** + * tomoyo_bprm_check_security - Target for security_bprm_check(). + * + * @bprm: Pointer to "struct linux_binprm". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_bprm_check_security(struct linux_binprm *bprm) { struct tomoyo_domain_info *domain = bprm->cred->security; @@ -90,43 +132,109 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm) /* * Read permission is checked against interpreters using next domain. */ - return tomoyo_check_open_permission(domain, &bprm->file->f_path, O_RDONLY); + return tomoyo_check_open_permission(domain, &bprm->file->f_path, + O_RDONLY); } +/** + * tomoyo_inode_getattr - Target for security_inode_getattr(). + * + * @mnt: Pointer to "struct vfsmount". + * @dentry: Pointer to "struct dentry". + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry) +{ + struct path path = { mnt, dentry }; + return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL); +} + +/** + * tomoyo_path_truncate - Target for security_path_truncate(). + * + * @path: Pointer to "struct path". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_truncate(struct path *path) { - return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path); + return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL); } +/** + * tomoyo_path_unlink - Target for security_path_unlink(). + * + * @parent: Pointer to "struct path". + * @dentry: Pointer to "struct dentry". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry) { struct path path = { parent->mnt, dentry }; - return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path); + return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL); } +/** + * tomoyo_path_mkdir - Target for security_path_mkdir(). + * + * @parent: Pointer to "struct path". + * @dentry: Pointer to "struct dentry". + * @mode: DAC permission mode. + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry, - int mode) + umode_t mode) { struct path path = { parent->mnt, dentry }; return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path, mode & S_IALLUGO); } +/** + * tomoyo_path_rmdir - Target for security_path_rmdir(). + * + * @parent: Pointer to "struct path". + * @dentry: Pointer to "struct dentry". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry) { struct path path = { parent->mnt, dentry }; - return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path); + return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL); } +/** + * tomoyo_path_symlink - Target for security_path_symlink(). + * + * @parent: Pointer to "struct path". + * @dentry: Pointer to "struct dentry". + * @old_name: Symlink's content. + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry, const char *old_name) { struct path path = { parent->mnt, dentry }; - return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path); + return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name); } +/** + * tomoyo_path_mknod - Target for security_path_mknod(). + * + * @parent: Pointer to "struct path". + * @dentry: Pointer to "struct dentry". + * @mode: DAC permission mode. 
+ * @dev: Device attributes. + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry, - int mode, unsigned int dev) + umode_t mode, unsigned int dev) { struct path path = { parent->mnt, dentry }; int type = TOMOYO_TYPE_CREATE; @@ -155,6 +263,15 @@ static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry, return tomoyo_path_number_perm(type, &path, perm); } +/** + * tomoyo_path_link - Target for security_path_link(). + * + * @old_dentry: Pointer to "struct dentry". + * @new_dir: Pointer to "struct path". + * @new_dentry: Pointer to "struct dentry". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir, struct dentry *new_dentry) { @@ -163,6 +280,16 @@ static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir, return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2); } +/** + * tomoyo_path_rename - Target for security_path_rename(). + * + * @old_parent: Pointer to "struct path". + * @old_dentry: Pointer to "struct dentry". + * @new_parent: Pointer to "struct path". + * @new_dentry: Pointer to "struct dentry". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_rename(struct path *old_parent, struct dentry *old_dentry, struct path *new_parent, @@ -173,15 +300,33 @@ static int tomoyo_path_rename(struct path *old_parent, return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2); } +/** + * tomoyo_file_fcntl - Target for security_file_fcntl(). + * + * @file: Pointer to "struct file". + * @cmd: Command for fcntl(). + * @arg: Argument for @cmd. + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd, unsigned long arg) { - if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND)) - return tomoyo_path_perm(TOMOYO_TYPE_REWRITE, &file->f_path); - return 0; + if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))) + return 0; + return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path, + O_WRONLY | (arg & O_APPEND)); } -static int tomoyo_dentry_open(struct file *f, const struct cred *cred) +/** + * tomoyo_file_open - Target for security_file_open(). + * + * @f: Pointer to "struct file". + * @cred: Pointer to "struct cred". + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_file_open(struct file *f, const struct cred *cred) { int flags = f->f_flags; /* Don't check read permission here if called from do_execve(). */ @@ -190,52 +335,170 @@ static int tomoyo_dentry_open(struct file *f, const struct cred *cred) return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags); } +/** + * tomoyo_file_ioctl - Target for security_file_ioctl(). + * + * @file: Pointer to "struct file". + * @cmd: Command for ioctl(). + * @arg: Argument for @cmd. + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { return tomoyo_path_number_perm(TOMOYO_TYPE_IOCTL, &file->f_path, cmd); } -static int tomoyo_path_chmod(struct dentry *dentry, struct vfsmount *mnt, - mode_t mode) +/** + * tomoyo_path_chmod - Target for security_path_chmod(). + * + * @path: Pointer to "struct path". + * @mode: DAC permission mode. + * + * Returns 0 on success, negative value otherwise. 
+ */ +static int tomoyo_path_chmod(struct path *path, umode_t mode) { - struct path path = { mnt, dentry }; - return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, &path, + return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path, mode & S_IALLUGO); } -static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid) +/** + * tomoyo_path_chown - Target for security_path_chown(). + * + * @path: Pointer to "struct path". + * @uid: Owner ID. + * @gid: Group ID. + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid) { int error = 0; - if (uid != (uid_t) -1) - error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, uid); - if (!error && gid != (gid_t) -1) - error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, gid); + if (uid_valid(uid)) + error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, + from_kuid(&init_user_ns, uid)); + if (!error && gid_valid(gid)) + error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, + from_kgid(&init_user_ns, gid)); return error; } +/** + * tomoyo_path_chroot - Target for security_path_chroot(). + * + * @path: Pointer to "struct path". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_path_chroot(struct path *path) { - return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path); + return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL); } -static int tomoyo_sb_mount(char *dev_name, struct path *path, - char *type, unsigned long flags, void *data) +/** + * tomoyo_sb_mount - Target for security_sb_mount(). + * + * @dev_name: Name of device file. Maybe NULL. + * @path: Pointer to "struct path". + * @type: Name of filesystem type. Maybe NULL. + * @flags: Mount options. + * @data: Optional data. Maybe NULL. + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_sb_mount(const char *dev_name, struct path *path, + const char *type, unsigned long flags, void *data) { return tomoyo_mount_permission(dev_name, path, type, flags, data); } +/** + * tomoyo_sb_umount - Target for security_sb_umount(). + * + * @mnt: Pointer to "struct vfsmount". + * @flags: Unmount options. + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_sb_umount(struct vfsmount *mnt, int flags) { struct path path = { mnt, mnt->mnt_root }; - return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path); + return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL); } +/** + * tomoyo_sb_pivotroot - Target for security_sb_pivotroot(). + * + * @old_path: Pointer to "struct path". + * @new_path: Pointer to "struct path". + * + * Returns 0 on success, negative value otherwise. + */ static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path) { return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path); } +/** + * tomoyo_socket_listen - Check permission for listen(). + * + * @sock: Pointer to "struct socket". + * @backlog: Backlog parameter. + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_socket_listen(struct socket *sock, int backlog) +{ + return tomoyo_socket_listen_permission(sock); +} + +/** + * tomoyo_socket_connect - Check permission for connect(). + * + * @sock: Pointer to "struct socket". + * @addr: Pointer to "struct sockaddr". + * @addr_len: Size of @addr. + * + * Returns 0 on success, negative value otherwise. 
+ */ +static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + return tomoyo_socket_connect_permission(sock, addr, addr_len); +} + +/** + * tomoyo_socket_bind - Check permission for bind(). + * + * @sock: Pointer to "struct socket". + * @addr: Pointer to "struct sockaddr". + * @addr_len: Size of @addr. + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr, + int addr_len) +{ + return tomoyo_socket_bind_permission(sock, addr, addr_len); +} + +/** + * tomoyo_socket_sendmsg - Check permission for sendmsg(). + * + * @sock: Pointer to "struct socket". + * @msg: Pointer to "struct msghdr". + * @size: Size of message. + * + * Returns 0 on success, negative value otherwise. + */ +static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg, + int size) +{ + return tomoyo_socket_sendmsg_permission(sock, msg, size); +} + /* * tomoyo_security_ops is a "struct security_operations" which is used for * registering TOMOYO. @@ -249,7 +512,7 @@ static struct security_operations tomoyo_security_ops = { .bprm_set_creds = tomoyo_bprm_set_creds, .bprm_check_security = tomoyo_bprm_check_security, .file_fcntl = tomoyo_file_fcntl, - .dentry_open = tomoyo_dentry_open, + .file_open = tomoyo_file_open, .path_truncate = tomoyo_path_truncate, .path_unlink = tomoyo_path_unlink, .path_mkdir = tomoyo_path_mkdir, @@ -258,6 +521,7 @@ static struct security_operations tomoyo_security_ops = { .path_mknod = tomoyo_path_mknod, .path_link = tomoyo_path_link, .path_rename = tomoyo_path_rename, + .inode_getattr = tomoyo_inode_getattr, .file_ioctl = tomoyo_file_ioctl, .path_chmod = tomoyo_path_chmod, .path_chown = tomoyo_path_chown, @@ -265,11 +529,20 @@ static struct security_operations tomoyo_security_ops = { .sb_mount = tomoyo_sb_mount, .sb_umount = tomoyo_sb_umount, .sb_pivotroot = tomoyo_sb_pivotroot, + .socket_bind = tomoyo_socket_bind, + .socket_connect = tomoyo_socket_connect, + .socket_listen = tomoyo_socket_listen, + .socket_sendmsg = tomoyo_socket_sendmsg, }; /* Lock for GC. */ -struct srcu_struct tomoyo_ss; +DEFINE_SRCU(tomoyo_ss); +/** + * tomoyo_init - Register TOMOYO Linux as a LSM module. + * + * Returns 0. + */ static int __init tomoyo_init(void) { struct cred *cred = (struct cred *) current_cred(); @@ -277,8 +550,7 @@ static int __init tomoyo_init(void) if (!security_module_enable(&tomoyo_security_ops)) return 0; /* register ourselves with the security framework */ - if (register_security(&tomoyo_security_ops) || - init_srcu_struct(&tomoyo_ss)) + if (register_security(&tomoyo_security_ops)) panic("Failure registering TOMOYO Linux"); printk(KERN_INFO "TOMOYO Linux initialized\n"); cred->security = &tomoyo_kernel_domain; diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c index 9bfc1ee8222..2952ba576fb 100644 --- a/security/tomoyo/util.c +++ b/security/tomoyo/util.c @@ -1,9 +1,7 @@ /* * security/tomoyo/util.c * - * Utility functions for TOMOYO. - * - * Copyright (C) 2005-2010 NTT DATA CORPORATION + * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include <linux/slab.h> @@ -15,18 +13,188 @@ DEFINE_MUTEX(tomoyo_policy_lock); /* Has /sbin/init started? */ bool tomoyo_policy_loaded; +/* + * Mapping table from "enum tomoyo_mac_index" to + * "enum tomoyo_mac_category_index". 
+ */ +const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = { + /* CONFIG::file group */ + [TOMOYO_MAC_FILE_EXECUTE] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_OPEN] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CREATE] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_UNLINK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_GETATTR] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKDIR] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_RMDIR] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKFIFO] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKSOCK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_TRUNCATE] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_SYMLINK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKBLOCK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MKCHAR] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_LINK] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_RENAME] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CHMOD] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CHOWN] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CHGRP] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_IOCTL] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_CHROOT] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE, + [TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE, + /* CONFIG::network group */ + [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = + TOMOYO_MAC_CATEGORY_NETWORK, + [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = + TOMOYO_MAC_CATEGORY_NETWORK, + /* CONFIG::misc group */ + [TOMOYO_MAC_ENVIRON] = TOMOYO_MAC_CATEGORY_MISC, +}; + +/** + * tomoyo_convert_time - Convert time_t to YYYY/MM/DD hh/mm/ss. + * + * @time: Seconds since 1970/01/01 00:00:00. + * @stamp: Pointer to "struct tomoyo_time". + * + * Returns nothing. + * + * This function does not handle Y2038 problem. + */ +void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp) +{ + static const u16 tomoyo_eom[2][12] = { + { 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 }, + { 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 } + }; + u16 y; + u8 m; + bool r; + stamp->sec = time % 60; + time /= 60; + stamp->min = time % 60; + time /= 60; + stamp->hour = time % 24; + time /= 24; + for (y = 1970; ; y++) { + const unsigned short days = (y & 3) ? 
365 : 366;
+ if (time < days)
+ break;
+ time -= days;
+ }
+ r = (y & 3) == 0;
+ for (m = 0; m < 11 && time >= tomoyo_eom[r][m]; m++)
+ ;
+ if (m)
+ time -= tomoyo_eom[r][m - 1];
+ stamp->year = y;
+ stamp->month = ++m;
+ stamp->day = ++time;
+}
+
+/**
+ * tomoyo_permstr - Find permission keywords.
+ *
+ * @string: String representation for permissions in foo/bar/buz format.
+ * @keyword: Keyword to find from @string.
+ *
+ * Returns true if @keyword was found in @string, false otherwise.
+ *
+ * This function assumes that strncmp(w1, w2, strlen(w1)) != 0 if w1 != w2.
+ */
+bool tomoyo_permstr(const char *string, const char *keyword)
+{
+ const char *cp = strstr(string, keyword);
+ if (cp)
+ return cp == string || *(cp - 1) == '/';
+ return false;
+}
+
+/**
+ * tomoyo_read_token - Read a word from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a word on success, "" otherwise.
+ *
+ * To allow the caller to skip NULL check, this function returns "" rather than
+ * NULL if there are no more words to read.
+ */
+char *tomoyo_read_token(struct tomoyo_acl_param *param)
+{
+ char *pos = param->data;
+ char *del = strchr(pos, ' ');
+ if (del)
+ *del++ = '\0';
+ else
+ del = pos + strlen(pos);
+ param->data = del;
+ return pos;
+}
+
+/**
+ * tomoyo_get_domainname - Read a domainname from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a domainname on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param)
+{
+ char *start = param->data;
+ char *pos = start;
+ while (*pos) {
+ if (*pos++ != ' ' || *pos++ == '/')
+ continue;
+ pos -= 2;
+ *pos++ = '\0';
+ break;
+ }
+ param->data = pos;
+ if (tomoyo_correct_domain(start))
+ return tomoyo_get_name(start);
+ return NULL;
+}
+
 /**
 * tomoyo_parse_ulong - Parse an "unsigned long" value.
 *
 * @result: Pointer to "unsigned long".
 * @str: Pointer to string to parse.
 *
- * Returns value type on success, 0 otherwise.
+ * Returns one of values in "enum tomoyo_value_type".
 *
 * The @src is updated to point the first character after the value
 * on success.
 */
-static u8 tomoyo_parse_ulong(unsigned long *result, char **str)
+u8 tomoyo_parse_ulong(unsigned long *result, char **str)
 {
 const char *cp = *str;
 char *ep;
@@ -43,7 +211,7 @@ static u8 tomoyo_parse_ulong(unsigned long *result, char **str)
 }
 *result = simple_strtoul(cp, &ep, base);
 if (cp == ep)
- return 0;
+ return TOMOYO_VALUE_TYPE_INVALID;
 *str = ep;
 switch (base) {
 case 16:
@@ -81,63 +249,65 @@ void tomoyo_print_ulong(char *buffer, const int buffer_len,
 /**
 * tomoyo_parse_name_union - Parse a tomoyo_name_union.
 *
- * @filename: Name or name group.
- * @ptr: Pointer to "struct tomoyo_name_union".
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_name_union".
 *
 * Returns true on success, false otherwise.
*/ -bool tomoyo_parse_name_union(const char *filename, +bool tomoyo_parse_name_union(struct tomoyo_acl_param *param, struct tomoyo_name_union *ptr) { - if (!tomoyo_correct_word(filename)) - return false; - if (filename[0] == '@') { - ptr->group = tomoyo_get_group(filename + 1, TOMOYO_PATH_GROUP); - ptr->is_group = true; + char *filename; + if (param->data[0] == '@') { + param->data++; + ptr->group = tomoyo_get_group(param, TOMOYO_PATH_GROUP); return ptr->group != NULL; } + filename = tomoyo_read_token(param); + if (!tomoyo_correct_word(filename)) + return false; ptr->filename = tomoyo_get_name(filename); - ptr->is_group = false; return ptr->filename != NULL; } /** * tomoyo_parse_number_union - Parse a tomoyo_number_union. * - * @data: Number or number range or number group. - * @ptr: Pointer to "struct tomoyo_number_union". + * @param: Pointer to "struct tomoyo_acl_param". + * @ptr: Pointer to "struct tomoyo_number_union". * * Returns true on success, false otherwise. */ -bool tomoyo_parse_number_union(char *data, struct tomoyo_number_union *num) +bool tomoyo_parse_number_union(struct tomoyo_acl_param *param, + struct tomoyo_number_union *ptr) { + char *data; u8 type; unsigned long v; - memset(num, 0, sizeof(*num)); - if (data[0] == '@') { - if (!tomoyo_correct_word(data)) - return false; - num->group = tomoyo_get_group(data + 1, TOMOYO_NUMBER_GROUP); - num->is_group = true; - return num->group != NULL; + memset(ptr, 0, sizeof(*ptr)); + if (param->data[0] == '@') { + param->data++; + ptr->group = tomoyo_get_group(param, TOMOYO_NUMBER_GROUP); + return ptr->group != NULL; } + data = tomoyo_read_token(param); type = tomoyo_parse_ulong(&v, &data); - if (!type) + if (type == TOMOYO_VALUE_TYPE_INVALID) return false; - num->values[0] = v; - num->min_type = type; + ptr->values[0] = v; + ptr->value_type[0] = type; if (!*data) { - num->values[1] = v; - num->max_type = type; + ptr->values[1] = v; + ptr->value_type[1] = type; return true; } if (*data++ != '-') return false; type = tomoyo_parse_ulong(&v, &data); - if (!type || *data) + if (type == TOMOYO_VALUE_TYPE_INVALID || *data || ptr->values[0] > v) return false; - num->values[1] = v; - num->max_type = type; + ptr->values[1] = v; + ptr->value_type[1] = type; return true; } @@ -185,6 +355,30 @@ static inline u8 tomoyo_make_byte(const u8 c1, const u8 c2, const u8 c3) } /** + * tomoyo_valid - Check whether the character is a valid char. + * + * @c: The character to check. + * + * Returns true if @c is a valid character, false otherwise. + */ +static inline bool tomoyo_valid(const unsigned char c) +{ + return c > ' ' && c < 127; +} + +/** + * tomoyo_invalid - Check whether the character is an invalid char. + * + * @c: The character to check. + * + * Returns true if @c is an invalid character, false otherwise. + */ +static inline bool tomoyo_invalid(const unsigned char c) +{ + return c && (c <= ' ' || c >= 127); +} + +/** * tomoyo_str_starts - Check whether the given string starts with the given keyword. * * @src: Pointer to pointer to the string. @@ -238,36 +432,9 @@ void tomoyo_normalize_line(unsigned char *buffer) } /** - * tomoyo_tokenize - Tokenize string. - * - * @buffer: The line to tokenize. - * @w: Pointer to "char *". - * @size: Sizeof @w . - * - * Returns true on success, false otherwise. 
- */ -bool tomoyo_tokenize(char *buffer, char *w[], size_t size) -{ - int count = size / sizeof(char *); - int i; - for (i = 0; i < count; i++) - w[i] = ""; - for (i = 0; i < count; i++) { - char *cp = strchr(buffer, ' '); - if (cp) - *cp = '\0'; - w[i] = buffer; - if (!cp) - break; - buffer = cp + 1; - } - return i < count || !*buffer; -} - -/** * tomoyo_correct_word2 - Validate a string. * - * @string: The string to check. May be non-'\0'-terminated. + * @string: The string to check. Maybe non-'\0'-terminated. * @len: Length of @string. * * Check whether the given string follows the naming rules. @@ -325,13 +492,13 @@ static bool tomoyo_correct_word2(const char *string, size_t len) if (d < '0' || d > '7' || e < '0' || e > '7') break; c = tomoyo_make_byte(c, d, e); - if (tomoyo_invalid(c)) - continue; /* pattern is not \000 */ + if (c <= ' ' || c >= 127) + continue; } goto out; } else if (in_repetition && c == '/') { goto out; - } else if (tomoyo_invalid(c)) { + } else if (c <= ' ' || c >= 127) { goto out; } } @@ -377,26 +544,21 @@ bool tomoyo_correct_path(const char *filename) */ bool tomoyo_correct_domain(const unsigned char *domainname) { - if (!domainname || strncmp(domainname, TOMOYO_ROOT_NAME, - TOMOYO_ROOT_NAME_LEN)) - goto out; - domainname += TOMOYO_ROOT_NAME_LEN; - if (!*domainname) + if (!domainname || !tomoyo_domain_def(domainname)) + return false; + domainname = strchr(domainname, ' '); + if (!domainname++) return true; - if (*domainname++ != ' ') - goto out; while (1) { const unsigned char *cp = strchr(domainname, ' '); if (!cp) break; if (*domainname != '/' || - !tomoyo_correct_word2(domainname, cp - domainname - 1)) - goto out; + !tomoyo_correct_word2(domainname, cp - domainname)) + return false; domainname = cp + 1; } return tomoyo_correct_path(domainname); - out: - return false; } /** @@ -408,7 +570,19 @@ bool tomoyo_correct_domain(const unsigned char *domainname) */ bool tomoyo_domain_def(const unsigned char *buffer) { - return !strncmp(buffer, TOMOYO_ROOT_NAME, TOMOYO_ROOT_NAME_LEN); + const unsigned char *cp; + int len; + if (*buffer != '<') + return false; + cp = strchr(buffer, ' '); + if (!cp) + len = strlen(buffer); + else + len = cp - buffer; + if (buffer[len - 1] != '>' || + !tomoyo_correct_word2(buffer + 1, len - 2)) + return false; + return true; } /** @@ -775,18 +949,13 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename, const char *tomoyo_get_exe(void) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma; const char *cp = NULL; if (!mm) return NULL; down_read(&mm->mmap_sem); - for (vma = mm->mmap; vma; vma = vma->vm_next) { - if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) { - cp = tomoyo_realpath_from_path(&vma->vm_file->f_path); - break; - } - } + if (mm->exe_file) + cp = tomoyo_realpath_from_path(&mm->exe_file->f_path); up_read(&mm->mmap_sem); return cp; } @@ -794,22 +963,27 @@ const char *tomoyo_get_exe(void) /** * tomoyo_get_mode - Get MAC mode. * + * @ns: Pointer to "struct tomoyo_policy_namespace". * @profile: Profile number. * @index: Index number of functionality. * * Returns mode. 
*/ -int tomoyo_get_mode(const u8 profile, const u8 index) +int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile, + const u8 index) { u8 mode; - const u8 category = TOMOYO_MAC_CATEGORY_FILE; + struct tomoyo_profile *p; + if (!tomoyo_policy_loaded) return TOMOYO_CONFIG_DISABLED; - mode = tomoyo_profile(profile)->config[index]; + p = tomoyo_profile(ns, profile); + mode = p->config[index]; if (mode == TOMOYO_CONFIG_USE_DEFAULT) - mode = tomoyo_profile(profile)->config[category]; + mode = p->config[tomoyo_index2category[index] + + TOMOYO_MAX_MAC_INDEX]; if (mode == TOMOYO_CONFIG_USE_DEFAULT) - mode = tomoyo_profile(profile)->default_config; + mode = p->default_config; return mode & 3; } @@ -833,65 +1007,11 @@ int tomoyo_init_request_info(struct tomoyo_request_info *r, profile = domain->profile; r->profile = profile; r->type = index; - r->mode = tomoyo_get_mode(profile, index); + r->mode = tomoyo_get_mode(domain->ns, profile, index); return r->mode; } /** - * tomoyo_last_word - Get last component of a line. - * - * @line: A line. - * - * Returns the last word of a line. - */ -const char *tomoyo_last_word(const char *name) -{ - const char *cp = strrchr(name, ' '); - if (cp) - return cp + 1; - return name; -} - -/** - * tomoyo_warn_log - Print warning or error message on console. - * - * @r: Pointer to "struct tomoyo_request_info". - * @fmt: The printf()'s format string, followed by parameters. - */ -void tomoyo_warn_log(struct tomoyo_request_info *r, const char *fmt, ...) -{ - va_list args; - char *buffer; - const struct tomoyo_domain_info * const domain = r->domain; - const struct tomoyo_profile *profile = tomoyo_profile(domain->profile); - switch (r->mode) { - case TOMOYO_CONFIG_ENFORCING: - if (!profile->enforcing->enforcing_verbose) - return; - break; - case TOMOYO_CONFIG_PERMISSIVE: - if (!profile->permissive->permissive_verbose) - return; - break; - case TOMOYO_CONFIG_LEARNING: - if (!profile->learning->learning_verbose) - return; - break; - } - buffer = kmalloc(4096, GFP_NOFS); - if (!buffer) - return; - va_start(args, fmt); - vsnprintf(buffer, 4095, fmt, args); - va_end(args); - buffer[4095] = '\0'; - printk(KERN_WARNING "%s: Access %s denied for %s\n", - r->mode == TOMOYO_CONFIG_ENFORCING ? "ERROR" : "WARNING", buffer, - tomoyo_last_word(domain->domainname->name)); - kfree(buffer); -} - -/** * tomoyo_domain_quota_is_ok - Check for domain's quota. * * @r: Pointer to "struct tomoyo_request_info". 
@@ -911,52 +1031,54 @@ bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r) if (!domain) return true; list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) { + u16 perm; + u8 i; if (ptr->is_deleted) continue; switch (ptr->type) { - u16 perm; - u8 i; case TOMOYO_TYPE_PATH_ACL: perm = container_of(ptr, struct tomoyo_path_acl, head) ->perm; - for (i = 0; i < TOMOYO_MAX_PATH_OPERATION; i++) - if (perm & (1 << i)) - count++; - if (perm & (1 << TOMOYO_TYPE_READ_WRITE)) - count -= 2; break; case TOMOYO_TYPE_PATH2_ACL: perm = container_of(ptr, struct tomoyo_path2_acl, head) ->perm; - for (i = 0; i < TOMOYO_MAX_PATH2_OPERATION; i++) - if (perm & (1 << i)) - count++; break; case TOMOYO_TYPE_PATH_NUMBER_ACL: perm = container_of(ptr, struct tomoyo_path_number_acl, head)->perm; - for (i = 0; i < TOMOYO_MAX_PATH_NUMBER_OPERATION; i++) - if (perm & (1 << i)) - count++; break; case TOMOYO_TYPE_MKDEV_ACL: perm = container_of(ptr, struct tomoyo_mkdev_acl, head)->perm; - for (i = 0; i < TOMOYO_MAX_MKDEV_OPERATION; i++) - if (perm & (1 << i)) - count++; + break; + case TOMOYO_TYPE_INET_ACL: + perm = container_of(ptr, struct tomoyo_inet_acl, + head)->perm; + break; + case TOMOYO_TYPE_UNIX_ACL: + perm = container_of(ptr, struct tomoyo_unix_acl, + head)->perm; + break; + case TOMOYO_TYPE_MANUAL_TASK_ACL: + perm = 0; break; default: - count++; + perm = 1; } + for (i = 0; i < 16; i++) + if (perm & (1 << i)) + count++; } - if (count < tomoyo_profile(domain->profile)->learning-> - learning_max_entry) + if (count < tomoyo_profile(domain->ns, domain->profile)-> + pref[TOMOYO_PREF_MAX_LEARNING_ENTRY]) return true; - if (!domain->quota_warned) { - domain->quota_warned = true; - printk(KERN_WARNING "TOMOYO-WARNING: " - "Domain '%s' has so many ACLs to hold. " + if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) { + domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true; + /* r->granted = false; */ + tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]); + printk(KERN_WARNING "WARNING: " + "Domain '%s' has too many ACLs to hold. " "Stopped learning mode.\n", domain->domainname->name); } return false; diff --git a/security/yama/Kconfig b/security/yama/Kconfig new file mode 100644 index 00000000000..20ef5143c0c --- /dev/null +++ b/security/yama/Kconfig @@ -0,0 +1,21 @@ +config SECURITY_YAMA + bool "Yama support" + depends on SECURITY + select SECURITYFS + select SECURITY_PATH + default n + help + This selects Yama, which extends DAC support with additional + system-wide security settings beyond regular Linux discretionary + access controls. Currently available is ptrace scope restriction. + Further information can be found in Documentation/security/Yama.txt. + + If you are unsure how to answer this question, answer N. + +config SECURITY_YAMA_STACKED + bool "Yama stacked with other LSMs" + depends on SECURITY_YAMA + default n + help + When Yama is built into the kernel, force it to stack with the + selected primary LSM. diff --git a/security/yama/Makefile b/security/yama/Makefile new file mode 100644 index 00000000000..8b5e0658845 --- /dev/null +++ b/security/yama/Makefile @@ -0,0 +1,3 @@ +obj-$(CONFIG_SECURITY_YAMA) := yama.o + +yama-y := yama_lsm.o diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c new file mode 100644 index 00000000000..13c88fbcf03 --- /dev/null +++ b/security/yama/yama_lsm.c @@ -0,0 +1,443 @@ +/* + * Yama Linux Security Module + * + * Author: Kees Cook <keescook@chromium.org> + * + * Copyright (C) 2010 Canonical, Ltd. + * Copyright (C) 2011 The Chromium OS Authors. 
+ * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + */ + +#include <linux/security.h> +#include <linux/sysctl.h> +#include <linux/ptrace.h> +#include <linux/prctl.h> +#include <linux/ratelimit.h> +#include <linux/workqueue.h> + +#define YAMA_SCOPE_DISABLED 0 +#define YAMA_SCOPE_RELATIONAL 1 +#define YAMA_SCOPE_CAPABILITY 2 +#define YAMA_SCOPE_NO_ATTACH 3 + +static int ptrace_scope = YAMA_SCOPE_RELATIONAL; + +/* describe a ptrace relationship for potential exception */ +struct ptrace_relation { + struct task_struct *tracer; + struct task_struct *tracee; + bool invalid; + struct list_head node; + struct rcu_head rcu; +}; + +static LIST_HEAD(ptracer_relations); +static DEFINE_SPINLOCK(ptracer_relations_lock); + +static void yama_relation_cleanup(struct work_struct *work); +static DECLARE_WORK(yama_relation_work, yama_relation_cleanup); + +/** + * yama_relation_cleanup - remove invalid entries from the relation list + * + */ +static void yama_relation_cleanup(struct work_struct *work) +{ + struct ptrace_relation *relation; + + spin_lock(&ptracer_relations_lock); + rcu_read_lock(); + list_for_each_entry_rcu(relation, &ptracer_relations, node) { + if (relation->invalid) { + list_del_rcu(&relation->node); + kfree_rcu(relation, rcu); + } + } + rcu_read_unlock(); + spin_unlock(&ptracer_relations_lock); +} + +/** + * yama_ptracer_add - add/replace an exception for this tracer/tracee pair + * @tracer: the task_struct of the process doing the ptrace + * @tracee: the task_struct of the process to be ptraced + * + * Each tracee can have, at most, one tracer registered. Each time this + * is called, the prior registered tracer will be replaced for the tracee. + * + * Returns 0 if relationship was added, -ve on error. 
+ */ +static int yama_ptracer_add(struct task_struct *tracer, + struct task_struct *tracee) +{ + struct ptrace_relation *relation, *added; + + added = kmalloc(sizeof(*added), GFP_KERNEL); + if (!added) + return -ENOMEM; + + added->tracee = tracee; + added->tracer = tracer; + added->invalid = false; + + spin_lock(&ptracer_relations_lock); + rcu_read_lock(); + list_for_each_entry_rcu(relation, &ptracer_relations, node) { + if (relation->invalid) + continue; + if (relation->tracee == tracee) { + list_replace_rcu(&relation->node, &added->node); + kfree_rcu(relation, rcu); + goto out; + } + } + + list_add_rcu(&added->node, &ptracer_relations); + +out: + rcu_read_unlock(); + spin_unlock(&ptracer_relations_lock); + return 0; +} + +/** + * yama_ptracer_del - remove exceptions related to the given tasks + * @tracer: remove any relation where tracer task matches + * @tracee: remove any relation where tracee task matches + */ +static void yama_ptracer_del(struct task_struct *tracer, + struct task_struct *tracee) +{ + struct ptrace_relation *relation; + bool marked = false; + + rcu_read_lock(); + list_for_each_entry_rcu(relation, &ptracer_relations, node) { + if (relation->invalid) + continue; + if (relation->tracee == tracee || + (tracer && relation->tracer == tracer)) { + relation->invalid = true; + marked = true; + } + } + rcu_read_unlock(); + + if (marked) + schedule_work(&yama_relation_work); +} + +/** + * yama_task_free - check for task_pid to remove from exception list + * @task: task being removed + */ +void yama_task_free(struct task_struct *task) +{ + yama_ptracer_del(task, task); +} + +/** + * yama_task_prctl - check for Yama-specific prctl operations + * @option: operation + * @arg2: argument + * @arg3: argument + * @arg4: argument + * @arg5: argument + * + * Return 0 on success, -ve on error. -ENOSYS is returned when Yama + * does not handle the given option. + */ +int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3, + unsigned long arg4, unsigned long arg5) +{ + int rc; + struct task_struct *myself = current; + + rc = cap_task_prctl(option, arg2, arg3, arg4, arg5); + if (rc != -ENOSYS) + return rc; + + switch (option) { + case PR_SET_PTRACER: + /* Since a thread can call prctl(), find the group leader + * before calling _add() or _del() on it, since we want + * process-level granularity of control. The tracer group + * leader checking is handled later when walking the ancestry + * at the time of PTRACE_ATTACH check. + */ + rcu_read_lock(); + if (!thread_group_leader(myself)) + myself = rcu_dereference(myself->group_leader); + get_task_struct(myself); + rcu_read_unlock(); + + if (arg2 == 0) { + yama_ptracer_del(NULL, myself); + rc = 0; + } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) { + rc = yama_ptracer_add(NULL, myself); + } else { + struct task_struct *tracer; + + rcu_read_lock(); + tracer = find_task_by_vpid(arg2); + if (tracer) + get_task_struct(tracer); + else + rc = -EINVAL; + rcu_read_unlock(); + + if (tracer) { + rc = yama_ptracer_add(tracer, myself); + put_task_struct(tracer); + } + } + + put_task_struct(myself); + break; + } + + return rc; +} + +/** + * task_is_descendant - walk up a process family tree looking for a match + * @parent: the process to compare against while walking up from child + * @child: the process to start from while looking upwards for parent + * + * Returns 1 if child is a descendant of parent, 0 if not. 
+ */ +static int task_is_descendant(struct task_struct *parent, + struct task_struct *child) +{ + int rc = 0; + struct task_struct *walker = child; + + if (!parent || !child) + return 0; + + rcu_read_lock(); + if (!thread_group_leader(parent)) + parent = rcu_dereference(parent->group_leader); + while (walker->pid > 0) { + if (!thread_group_leader(walker)) + walker = rcu_dereference(walker->group_leader); + if (walker == parent) { + rc = 1; + break; + } + walker = rcu_dereference(walker->real_parent); + } + rcu_read_unlock(); + + return rc; +} + +/** + * ptracer_exception_found - tracer registered as exception for this tracee + * @tracer: the task_struct of the process attempting ptrace + * @tracee: the task_struct of the process to be ptraced + * + * Returns 1 if tracer has is ptracer exception ancestor for tracee. + */ +static int ptracer_exception_found(struct task_struct *tracer, + struct task_struct *tracee) +{ + int rc = 0; + struct ptrace_relation *relation; + struct task_struct *parent = NULL; + bool found = false; + + rcu_read_lock(); + if (!thread_group_leader(tracee)) + tracee = rcu_dereference(tracee->group_leader); + list_for_each_entry_rcu(relation, &ptracer_relations, node) { + if (relation->invalid) + continue; + if (relation->tracee == tracee) { + parent = relation->tracer; + found = true; + break; + } + } + + if (found && (parent == NULL || task_is_descendant(parent, tracer))) + rc = 1; + rcu_read_unlock(); + + return rc; +} + +/** + * yama_ptrace_access_check - validate PTRACE_ATTACH calls + * @child: task that current task is attempting to ptrace + * @mode: ptrace attach mode + * + * Returns 0 if following the ptrace is allowed, -ve on error. + */ +int yama_ptrace_access_check(struct task_struct *child, + unsigned int mode) +{ + int rc; + + /* If standard caps disallows it, so does Yama. We should + * only tighten restrictions further. + */ + rc = cap_ptrace_access_check(child, mode); + if (rc) + return rc; + + /* require ptrace target be a child of ptracer on attach */ + if (mode == PTRACE_MODE_ATTACH) { + switch (ptrace_scope) { + case YAMA_SCOPE_DISABLED: + /* No additional restrictions. */ + break; + case YAMA_SCOPE_RELATIONAL: + rcu_read_lock(); + if (!task_is_descendant(current, child) && + !ptracer_exception_found(current, child) && + !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) + rc = -EPERM; + rcu_read_unlock(); + break; + case YAMA_SCOPE_CAPABILITY: + rcu_read_lock(); + if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE)) + rc = -EPERM; + rcu_read_unlock(); + break; + case YAMA_SCOPE_NO_ATTACH: + default: + rc = -EPERM; + break; + } + } + + if (rc) { + printk_ratelimited(KERN_NOTICE + "ptrace of pid %d was attempted by: %s (pid %d)\n", + child->pid, current->comm, current->pid); + } + + return rc; +} + +/** + * yama_ptrace_traceme - validate PTRACE_TRACEME calls + * @parent: task that will become the ptracer of the current task + * + * Returns 0 if following the ptrace is allowed, -ve on error. + */ +int yama_ptrace_traceme(struct task_struct *parent) +{ + int rc; + + /* If standard caps disallows it, so does Yama. We should + * only tighten restrictions further. + */ + rc = cap_ptrace_traceme(parent); + if (rc) + return rc; + + /* Only disallow PTRACE_TRACEME on more aggressive settings. 
*/ + switch (ptrace_scope) { + case YAMA_SCOPE_CAPABILITY: + if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE)) + rc = -EPERM; + break; + case YAMA_SCOPE_NO_ATTACH: + rc = -EPERM; + break; + } + + if (rc) { + printk_ratelimited(KERN_NOTICE + "ptraceme of pid %d was attempted by: %s (pid %d)\n", + current->pid, parent->comm, parent->pid); + } + + return rc; +} + +#ifndef CONFIG_SECURITY_YAMA_STACKED +static struct security_operations yama_ops = { + .name = "yama", + + .ptrace_access_check = yama_ptrace_access_check, + .ptrace_traceme = yama_ptrace_traceme, + .task_prctl = yama_task_prctl, + .task_free = yama_task_free, +}; +#endif + +#ifdef CONFIG_SYSCTL +static int yama_dointvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + int rc; + + if (write && !capable(CAP_SYS_PTRACE)) + return -EPERM; + + rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos); + if (rc) + return rc; + + /* Lock the max value if it ever gets set. */ + if (write && *(int *)table->data == *(int *)table->extra2) + table->extra1 = table->extra2; + + return rc; +} + +static int zero; +static int max_scope = YAMA_SCOPE_NO_ATTACH; + +struct ctl_path yama_sysctl_path[] = { + { .procname = "kernel", }, + { .procname = "yama", }, + { } +}; + +static struct ctl_table yama_sysctl_table[] = { + { + .procname = "ptrace_scope", + .data = &ptrace_scope, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = yama_dointvec_minmax, + .extra1 = &zero, + .extra2 = &max_scope, + }, + { } +}; +#endif /* CONFIG_SYSCTL */ + +static __init int yama_init(void) +{ +#ifndef CONFIG_SECURITY_YAMA_STACKED + if (!security_module_enable(&yama_ops)) + return 0; +#endif + + printk(KERN_INFO "Yama: becoming mindful.\n"); + +#ifndef CONFIG_SECURITY_YAMA_STACKED + if (register_security(&yama_ops)) + panic("Yama: kernel registration failed.\n"); +#endif + +#ifdef CONFIG_SYSCTL + if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table)) + panic("Yama: sysctl registration failed.\n"); +#endif + + return 0; +} + +security_initcall(yama_init); |
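
For context (not part of the patch above), here is a minimal userspace sketch of how a process cooperates with Yama's default YAMA_SCOPE_RELATIONAL mode: a debuggee that wants a non-ancestor helper to be able to PTRACE_ATTACH registers that helper via prctl(PR_SET_PTRACER, ...) before the attach happens. The helper PID below is hypothetical.

#include <stdio.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <unistd.h>

#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61	/* normally provided by <linux/prctl.h> */
#endif

int main(void)
{
	pid_t helper = 1234;	/* hypothetical PID of a crash-handler/debugger process */

	/*
	 * With /proc/sys/kernel/yama/ptrace_scope == 1 (YAMA_SCOPE_RELATIONAL),
	 * only our ancestors may attach.  Registering "helper" adds an
	 * exception so that helper (or one of its descendants) may attach too.
	 * Passing PR_SET_PTRACER_ANY instead allows any process; passing 0
	 * clears the exception again.
	 */
	if (prctl(PR_SET_PTRACER, helper, 0, 0, 0) < 0)
		perror("prctl(PR_SET_PTRACER)");

	pause();	/* park here so the helper can attach and inspect us */
	return 0;
}

The exception is recorded against the thread-group leader (see yama_task_prctl() above) and is dropped again when either task exits, via yama_task_free() and yama_ptracer_del().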

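A similarly hedged sketch of driving the sysctl that yama_init() registers: reading /proc/sys/kernel/yama/ptrace_scope needs no privilege, writing requires CAP_SYS_PTRACE, and per yama_dointvec_minmax() above, once the maximum value 3 (YAMA_SCOPE_NO_ATTACH) has been written the setting stays locked at 3 for the rest of the boot.

#include <stdio.h>

#define SCOPE_FILE "/proc/sys/kernel/yama/ptrace_scope"

int main(void)
{
	FILE *f;
	int scope = -1;

	/* 0 = disabled, 1 = relational (default), 2 = capability, 3 = no attach */
	f = fopen(SCOPE_FILE, "r");
	if (!f || fscanf(f, "%d", &scope) != 1) {
		perror(SCOPE_FILE);
		return 1;
	}
	fclose(f);
	printf("current ptrace_scope: %d\n", scope);

	/* Tighten to "capability" mode; needs CAP_SYS_PTRACE.  Writing 3 would
	 * pin the value until reboot, since extra1 is raised to extra2. */
	f = fopen(SCOPE_FILE, "w");
	if (!f || fprintf(f, "2\n") < 0) {
		perror(SCOPE_FILE);
		return 1;
	}
	fclose(f);
	return 0;
}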