Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig  54
-rw-r--r--  security/Makefile  15
-rw-r--r--  security/apparmor/.gitignore  5
-rw-r--r--  security/apparmor/Kconfig  43
-rw-r--r--  security/apparmor/Makefile  70
-rw-r--r--  security/apparmor/apparmorfs.c  969
-rw-r--r--  security/apparmor/audit.c  209
-rw-r--r--  security/apparmor/capability.c  143
-rw-r--r--  security/apparmor/context.c  222
-rw-r--r--  security/apparmor/crypto.c  95
-rw-r--r--  security/apparmor/domain.c  854
-rw-r--r--  security/apparmor/file.c  458
-rw-r--r--  security/apparmor/include/apparmor.h  120
-rw-r--r--  security/apparmor/include/apparmorfs.h  104
-rw-r--r--  security/apparmor/include/audit.h  147
-rw-r--r--  security/apparmor/include/capability.h  48
-rw-r--r--  security/apparmor/include/context.h  178
-rw-r--r--  security/apparmor/include/crypto.h  36
-rw-r--r--  security/apparmor/include/domain.h  36
-rw-r--r--  security/apparmor/include/file.h  216
-rw-r--r--  security/apparmor/include/ipc.h  28
-rw-r--r--  security/apparmor/include/match.h  141
-rw-r--r--  security/apparmor/include/path.h  32
-rw-r--r--  security/apparmor/include/policy.h  408
-rw-r--r--  security/apparmor/include/policy_unpack.h  39
-rw-r--r--  security/apparmor/include/procattr.h  25
-rw-r--r--  security/apparmor/include/resource.h  50
-rw-r--r--  security/apparmor/include/sid.h  26
-rw-r--r--  security/apparmor/ipc.c  111
-rw-r--r--  security/apparmor/lib.c  106
-rw-r--r--  security/apparmor/lsm.c  949
-rw-r--r--  security/apparmor/match.c  428
-rw-r--r--  security/apparmor/path.c  236
-rw-r--r--  security/apparmor/policy.c  1301
-rw-r--r--  security/apparmor/policy_unpack.c  805
-rw-r--r--  security/apparmor/procattr.c  165
-rw-r--r--  security/apparmor/resource.c  154
-rw-r--r--  security/apparmor/sid.c  55
-rw-r--r--  security/capability.c  223
-rw-r--r--  security/commoncap.c  238
-rw-r--r--  security/device_cgroup.c  810
-rw-r--r--  security/inode.c  204
-rw-r--r--  security/integrity/Kconfig  48
-rw-r--r--  security/integrity/Makefile  15
-rw-r--r--  security/integrity/digsig.c  58
-rw-r--r--  security/integrity/digsig_asymmetric.c  104
-rw-r--r--  security/integrity/evm/Kconfig  52
-rw-r--r--  security/integrity/evm/Makefile  7
-rw-r--r--  security/integrity/evm/evm.h  53
-rw-r--r--  security/integrity/evm/evm_crypto.c  262
-rw-r--r--  security/integrity/evm/evm_main.c  485
-rw-r--r--  security/integrity/evm/evm_posix_acl.c  27
-rw-r--r--  security/integrity/evm/evm_secfs.c  104
-rw-r--r--  security/integrity/iint.c  172
-rw-r--r--  security/integrity/ima/Kconfig  97
-rw-r--r--  security/integrity/ima/Makefile  3
-rw-r--r--  security/integrity/ima/ima.h  195
-rw-r--r--  security/integrity/ima/ima_api.c  280
-rw-r--r--  security/integrity/ima/ima_appraise.c  387
-rw-r--r--  security/integrity/ima/ima_crypto.c  252
-rw-r--r--  security/integrity/ima/ima_fs.c  144
-rw-r--r--  security/integrity/ima/ima_iint.c  149
-rw-r--r--  security/integrity/ima/ima_init.c  56
-rw-r--r--  security/integrity/ima/ima_main.c  483
-rw-r--r--  security/integrity/ima/ima_policy.c  497
-rw-r--r--  security/integrity/ima/ima_queue.c  39
-rw-r--r--  security/integrity/ima/ima_template.c  190
-rw-r--r--  security/integrity/ima/ima_template_lib.c  342
-rw-r--r--  security/integrity/ima/ima_template_lib.h  49
-rw-r--r--  security/integrity/integrity.h  164
-rw-r--r--  security/integrity/integrity_audit.c (renamed from security/integrity/ima/ima_audit.c)  40
-rw-r--r--  security/keys/Kconfig  100
-rw-r--r--  security/keys/Makefile  12
-rw-r--r--  security/keys/big_key.c  207
-rw-r--r--  security/keys/compat.c  77
-rw-r--r--  security/keys/encrypted-keys/Makefile  10
-rw-r--r--  security/keys/encrypted-keys/ecryptfs_format.c  81
-rw-r--r--  security/keys/encrypted-keys/ecryptfs_format.h  30
-rw-r--r--  security/keys/encrypted-keys/encrypted.c  1040
-rw-r--r--  security/keys/encrypted-keys/encrypted.h  66
-rw-r--r--  security/keys/encrypted-keys/masterkey_trusted.c  47
-rw-r--r--  security/keys/gc.c  411
-rw-r--r--  security/keys/internal.h  141
-rw-r--r--  security/keys/key.c  719
-rw-r--r--  security/keys/keyctl.c  839
-rw-r--r--  security/keys/keyring.c  1727
-rw-r--r--  security/keys/permission.c  83
-rw-r--r--  security/keys/persistent.c  167
-rw-r--r--  security/keys/proc.c  92
-rw-r--r--  security/keys/process_keys.c  444
-rw-r--r--  security/keys/request_key.c  333
-rw-r--r--  security/keys/request_key_auth.c  104
-rw-r--r--  security/keys/sysctl.c  13
-rw-r--r--  security/keys/trusted.c  1163
-rw-r--r--  security/keys/trusted.h  134
-rw-r--r--  security/keys/user_defined.c  128
-rw-r--r--  security/lsm_audit.c  196
-rw-r--r--  security/min_addr.c  2
-rw-r--r--  security/security.c  548
-rw-r--r--  security/selinux/Makefile  23
-rw-r--r--  security/selinux/avc.c  245
-rw-r--r--  security/selinux/exports.c  48
-rw-r--r--  security/selinux/hooks.c  1951
-rw-r--r--  security/selinux/include/avc.h  102
-rw-r--r--  security/selinux/include/avc_ss.h  6
-rw-r--r--  security/selinux/include/classmap.h  31
-rw-r--r--  security/selinux/include/initial_sid_to_string.h  2
-rw-r--r--  security/selinux/include/netif.h  2
-rw-r--r--  security/selinux/include/netlabel.h  10
-rw-r--r--  security/selinux/include/netnode.h  2
-rw-r--r--  security/selinux/include/netport.h  2
-rw-r--r--  security/selinux/include/objsec.h  14
-rw-r--r--  security/selinux/include/security.h  77
-rw-r--r--  security/selinux/include/xfrm.h  61
-rw-r--r--  security/selinux/netif.c  29
-rw-r--r--  security/selinux/netlabel.c  56
-rw-r--r--  security/selinux/netlink.c  28
-rw-r--r--  security/selinux/netnode.c  39
-rw-r--r--  security/selinux/netport.c  33
-rw-r--r--  security/selinux/nlmsgtab.c  26
-rw-r--r--  security/selinux/selinuxfs.c  1136
-rw-r--r--  security/selinux/ss/Makefile  9
-rw-r--r--  security/selinux/ss/avtab.c  85
-rw-r--r--  security/selinux/ss/avtab.h  28
-rw-r--r--  security/selinux/ss/conditional.c  198
-rw-r--r--  security/selinux/ss/conditional.h  3
-rw-r--r--  security/selinux/ss/constraint.h  1
-rw-r--r--  security/selinux/ss/context.h  32
-rw-r--r--  security/selinux/ss/ebitmap.c  105
-rw-r--r--  security/selinux/ss/ebitmap.h  12
-rw-r--r--  security/selinux/ss/hashtab.c  3
-rw-r--r--  security/selinux/ss/mls.c  128
-rw-r--r--  security/selinux/ss/mls.h  7
-rw-r--r--  security/selinux/ss/mls_types.h  9
-rw-r--r--  security/selinux/ss/policydb.c  2627
-rw-r--r--  security/selinux/ss/policydb.h  98
-rw-r--r--  security/selinux/ss/services.c  1048
-rw-r--r--  security/selinux/ss/sidtab.c  39
-rw-r--r--  security/selinux/ss/sidtab.h  2
-rw-r--r--  security/selinux/ss/status.c  126
-rw-r--r--  security/selinux/ss/symtab.c  3
-rw-r--r--  security/selinux/xfrm.c  496
-rw-r--r--  security/smack/Kconfig  6
-rw-r--r--  security/smack/smack.h  270
-rw-r--r--  security/smack/smack_access.c  505
-rw-r--r--  security/smack/smack_lsm.c  1902
-rw-r--r--  security/smack/smackfs.c  1792
-rw-r--r--  security/tomoyo/.gitignore  2
-rw-r--r--  security/tomoyo/Kconfig  63
-rw-r--r--  security/tomoyo/Makefile  49
-rw-r--r--  security/tomoyo/audit.c  470
-rw-r--r--  security/tomoyo/common.c  3845
-rw-r--r--  security/tomoyo/common.h  1547
-rw-r--r--  security/tomoyo/condition.c  1094
-rw-r--r--  security/tomoyo/domain.c  1449
-rw-r--r--  security/tomoyo/environ.c  122
-rw-r--r--  security/tomoyo/file.c  1727
-rw-r--r--  security/tomoyo/gc.c  655
-rw-r--r--  security/tomoyo/group.c  198
-rw-r--r--  security/tomoyo/load_policy.c  109
-rw-r--r--  security/tomoyo/memory.c  201
-rw-r--r--  security/tomoyo/mount.c  236
-rw-r--r--  security/tomoyo/network.c  771
-rw-r--r--  security/tomoyo/realpath.c  666
-rw-r--r--  security/tomoyo/realpath.h  66
-rw-r--r--  security/tomoyo/securityfs_if.c  272
-rw-r--r--  security/tomoyo/tomoyo.c  454
-rw-r--r--  security/tomoyo/tomoyo.h  94
-rw-r--r--  security/tomoyo/util.c  1085
-rw-r--r--  security/yama/Kconfig  21
-rw-r--r--  security/yama/Makefile  3
-rw-r--r--  security/yama/yama_lsm.c  443
172 files changed, 40729 insertions, 12594 deletions
diff --git a/security/Kconfig b/security/Kconfig
index 226b9556b25..beb86b500ad 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -4,40 +4,19 @@
menu "Security options"
-config KEYS
- bool "Enable access key retention support"
- help
- This option provides support for retaining authentication tokens and
- access keys in the kernel.
-
- It also includes provision of methods by which such keys might be
- associated with a process so that network filesystems, encryption
- support and the like can find them.
-
- Furthermore, a special type of key is available that acts as keyring:
- a searchable sequence of keys. Each process is equipped with access
- to five standard keyrings: UID-specific, GID-specific, session,
- process and thread.
-
- If you are unsure as to whether this is required, answer N.
+source security/keys/Kconfig
-config KEYS_DEBUG_PROC_KEYS
- bool "Enable the /proc/keys file by which keys may be viewed"
- depends on KEYS
+config SECURITY_DMESG_RESTRICT
+ bool "Restrict unprivileged access to the kernel syslog"
+ default n
help
- This option turns on support for the /proc/keys file - through which
- can be listed all the keys on the system that are viewable by the
- reading process.
+ This enforces restrictions on unprivileged users reading the kernel
+ syslog via dmesg(8).
- The only keys included in the list are those that grant View
- permission to the reading process whether or not it possesses them.
- Note that LSM security checks are still performed, and may further
- filter out keys that the current process is not authorised to view.
+ If this option is not selected, no restrictions will be enforced
+ unless the dmesg_restrict sysctl is explicitly set to (1).
- Only key attributes are listed here; key payloads are not included in
- the resulting table.
-
- If you are unsure as to whether this is required, answer N.
+ If you are unsure how to answer this question, answer N.
config SECURITY
bool "Enable different security models"
@@ -124,6 +103,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
+ default 32768 if ARM || (ARM64 && COMPAT)
default 65536
help
This is the portion of low virtual memory which should be protected
@@ -140,14 +120,18 @@ config LSM_MMAP_MIN_ADDR
source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
+source security/apparmor/Kconfig
+source security/yama/Kconfig
-source security/integrity/ima/Kconfig
+source security/integrity/Kconfig
choice
prompt "Default security module"
default DEFAULT_SECURITY_SELINUX if SECURITY_SELINUX
default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
+ default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+ default DEFAULT_SECURITY_YAMA if SECURITY_YAMA
default DEFAULT_SECURITY_DAC
help
@@ -163,6 +147,12 @@ choice
config DEFAULT_SECURITY_TOMOYO
bool "TOMOYO" if SECURITY_TOMOYO=y
+ config DEFAULT_SECURITY_APPARMOR
+ bool "AppArmor" if SECURITY_APPARMOR=y
+
+ config DEFAULT_SECURITY_YAMA
+ bool "Yama" if SECURITY_YAMA=y
+
config DEFAULT_SECURITY_DAC
bool "Unix Discretionary Access Controls"
@@ -173,6 +163,8 @@ config DEFAULT_SECURITY
default "selinux" if DEFAULT_SECURITY_SELINUX
default "smack" if DEFAULT_SECURITY_SMACK
default "tomoyo" if DEFAULT_SECURITY_TOMOYO
+ default "apparmor" if DEFAULT_SECURITY_APPARMOR
+ default "yama" if DEFAULT_SECURITY_YAMA
default "" if DEFAULT_SECURITY_DAC
endmenu
diff --git a/security/Makefile b/security/Makefile
index da20a193c8d..05f1c934d74 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -6,6 +6,8 @@ obj-$(CONFIG_KEYS) += keys/
subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_SECURITY_SMACK) += smack
subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
+subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
+subdir-$(CONFIG_SECURITY_YAMA) += yama
# always enable default capabilities
obj-y += commoncap.o
@@ -14,13 +16,14 @@ obj-$(CONFIG_MMU) += min_addr.o
# Object file lists
obj-$(CONFIG_SECURITY) += security.o capability.o
obj-$(CONFIG_SECURITYFS) += inode.o
-# Must precede capability.o in order to stack properly.
-obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
-obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
+obj-$(CONFIG_SECURITY_SELINUX) += selinux/
+obj-$(CONFIG_SECURITY_SMACK) += smack/
obj-$(CONFIG_AUDIT) += lsm_audit.o
-obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
+obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/
+obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
+obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
-subdir-$(CONFIG_IMA) += integrity/ima
-obj-$(CONFIG_IMA) += integrity/ima/built-in.o
+subdir-$(CONFIG_INTEGRITY) += integrity
+obj-$(CONFIG_INTEGRITY) += integrity/
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
new file mode 100644
index 00000000000..9cdec70d72b
--- /dev/null
+++ b/security/apparmor/.gitignore
@@ -0,0 +1,5 @@
+#
+# Generated include files
+#
+capability_names.h
+rlim_names.h
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
new file mode 100644
index 00000000000..d49c53960b6
--- /dev/null
+++ b/security/apparmor/Kconfig
@@ -0,0 +1,43 @@
+config SECURITY_APPARMOR
+ bool "AppArmor support"
+ depends on SECURITY && NET
+ select AUDIT
+ select SECURITY_PATH
+ select SECURITYFS
+ select SECURITY_NETWORK
+ default n
+ help
+ This enables the AppArmor security module.
+ Required userspace tools (if they are not included in your
+ distribution) and further information may be found at
+ http://apparmor.wiki.kernel.org
+
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_APPARMOR_BOOTPARAM_VALUE
+ int "AppArmor boot parameter default value"
+ depends on SECURITY_APPARMOR
+ range 0 1
+ default 1
+ help
+ This option sets the default value for the kernel parameter
+ 'apparmor', which allows AppArmor to be enabled or disabled
+ at boot. If this option is set to 0 (zero), the AppArmor
+ kernel parameter will default to 0, disabling AppArmor at
+ boot. If this option is set to 1 (one), the AppArmor
+ kernel parameter will default to 1, enabling AppArmor at
+ boot.
+
+ If you are unsure how to answer this question, answer 1.
+
+config SECURITY_APPARMOR_HASH
+ bool "SHA1 hash of loaded profiles"
+ depends on SECURITY_APPARMOR
+ depends on CRYPTO
+ select CRYPTO_SHA1
+ default y
+
+ help
+ This option selects whether sha1 hashing is done against loaded
+ profiles and exported for inspection to user space via the apparmor
+ filesystem.
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
new file mode 100644
index 00000000000..d693df87481
--- /dev/null
+++ b/security/apparmor/Makefile
@@ -0,0 +1,70 @@
+# Makefile for AppArmor Linux Security Module
+#
+obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
+
+apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
+ path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
+ resource.o sid.o file.o
+apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
+
+clean-files := capability_names.h rlim_names.h
+
+
+# Build a lower case string table of capability names
+# Transforms lines from
+# #define CAP_DAC_OVERRIDE 1
+# to
+# [1] = "dac_override",
+quiet_cmd_make-caps = GEN $@
+cmd_make-caps = echo "static const char *const capability_names[] = {" > $@ ;\
+ sed $< >>$@ -r -n -e '/CAP_FS_MASK/d' \
+ -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/[\2] = "\L\1",/p';\
+ echo "};" >> $@ ;\
+ echo -n '\#define AA_FS_CAPS_MASK "' >> $@ ;\
+ sed $< -r -n -e '/CAP_FS_MASK/d' \
+ -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/\L\1/p' | \
+ tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
+
+
+# Build a lower case string table of rlimit names.
+# Transforms lines from
+# #define RLIMIT_STACK 3 /* max stack size */
+# to
+# [RLIMIT_STACK] = "stack",
+#
+# and build a second integer table (with the second sed cmd), that maps
+# RLIMIT defines to the order defined in asm-generic/resource.h This is
+# required by policy load to map policy ordering of RLIMITs to internal
+# ordering for architectures that redefine an RLIMIT.
+# Transforms lines from
+# #define RLIMIT_STACK 3 /* max stack size */
+# to
+# RLIMIT_STACK,
+#
+# and build the securityfs entries for the mapping.
+# Transforms lines from
+# #define RLIMIT_FSIZE 1 /* Maximum filesize */
+# #define RLIMIT_STACK 3 /* max stack size */
+# to
+# #define AA_FS_RLIMIT_MASK "fsize stack"
+quiet_cmd_make-rlim = GEN $@
+cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
+ > $@ ;\
+ sed $< >> $@ -r -n \
+ -e 's/^\# ?define[ \t]+(RLIMIT_([A-Z0-9_]+)).*/[\1] = "\L\2",/p';\
+ echo "};" >> $@ ;\
+ echo "static const int rlim_map[RLIM_NLIMITS] = {" >> $@ ;\
+ sed -r -n "s/^\# ?define[ \t]+(RLIMIT_[A-Z0-9_]+).*/\1,/p" $< >> $@ ;\
+ echo "};" >> $@ ; \
+ echo -n '\#define AA_FS_RLIMIT_MASK "' >> $@ ;\
+ sed -r -n 's/^\# ?define[ \t]+RLIMIT_([A-Z0-9_]+).*/\L\1/p' $< | \
+ tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
+
+$(obj)/capability.o : $(obj)/capability_names.h
+$(obj)/resource.o : $(obj)/rlim_names.h
+$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
+ $(src)/Makefile
+ $(call cmd,make-caps)
+$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
+ $(src)/Makefile
+ $(call cmd,make-rlim)
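For reference, the headers generated by the two rules above end up looking roughly like the following. This is an abbreviated, illustrative sketch only; the real entries and indices are produced at build time from the kernel's uapi capability.h and asm-generic/resource.h.

/* capability_names.h (illustrative sketch, abbreviated) */
static const char *const capability_names[] = {
	[0] = "chown",
	[1] = "dac_override",
	[2] = "dac_read_search",
	/* ... one lower-case entry per CAP_* define ... */
};
#define AA_FS_CAPS_MASK "chown dac_override dac_read_search ..."

/* rlim_names.h (illustrative sketch, abbreviated) */
static const char *const rlim_names[RLIM_NLIMITS] = {
	[RLIMIT_CPU] = "cpu",
	[RLIMIT_FSIZE] = "fsize",
	[RLIMIT_STACK] = "stack",
	/* ... */
};
static const int rlim_map[RLIM_NLIMITS] = {
	RLIMIT_CPU,
	RLIMIT_FSIZE,
	RLIMIT_STACK,
	/* ... */
};
#define AA_FS_RLIMIT_MASK "cpu fsize stack ..."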
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
new file mode 100644
index 00000000000..7db9954f1af
--- /dev/null
+++ b/security/apparmor/apparmorfs.c
@@ -0,0 +1,969 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /sys/kernel/security/apparmor interface functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/ctype.h>
+#include <linux/security.h>
+#include <linux/vmalloc.h>
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/uaccess.h>
+#include <linux/namei.h>
+#include <linux/capability.h>
+#include <linux/rcupdate.h>
+
+#include "include/apparmor.h"
+#include "include/apparmorfs.h"
+#include "include/audit.h"
+#include "include/context.h"
+#include "include/crypto.h"
+#include "include/policy.h"
+#include "include/resource.h"
+
+/**
+ * mangle_name - mangle a profile name to std profile layout form
+ * @name: profile name to mangle (NOT NULL)
+ * @target: buffer to store mangled name, same length as @name (MAYBE NULL)
+ *
+ * Returns: length of mangled name
+ */
+static int mangle_name(char *name, char *target)
+{
+ char *t = target;
+
+ while (*name == '/' || *name == '.')
+ name++;
+
+ if (target) {
+ for (; *name; name++) {
+ if (*name == '/')
+ *(t)++ = '.';
+ else if (isspace(*name))
+ *(t)++ = '_';
+ else if (isalnum(*name) || strchr("._-", *name))
+ *(t)++ = *name;
+ }
+
+ *t = 0;
+ } else {
+ int len = 0;
+ for (; *name; name++) {
+ if (isalnum(*name) || isspace(*name) ||
+ strchr("/._-", *name))
+ len++;
+ }
+
+ return len;
+ }
+
+ return t - target;
+}
+
+/**
+ * aa_simple_write_to_buffer - common routine for getting policy from user
+ * @op: operation doing the user buffer copy
+ * @userbuf: user buffer to copy data from (NOT NULL)
+ * @alloc_size: size of user buffer (REQUIRES: @alloc_size >= @copy_size)
+ * @copy_size: size of data to copy from user buffer
+ * @pos: position write is at in the file (NOT NULL)
+ *
+ * Returns: kernel buffer containing copy of user buffer data or an
+ * ERR_PTR on failure.
+ */
+static char *aa_simple_write_to_buffer(int op, const char __user *userbuf,
+ size_t alloc_size, size_t copy_size,
+ loff_t *pos)
+{
+ char *data;
+
+ BUG_ON(copy_size > alloc_size);
+
+ if (*pos != 0)
+ /* only writes from pos 0, that is complete writes */
+ return ERR_PTR(-ESPIPE);
+
+ /*
+ * Don't allow profile load/replace/remove from profiles that don't
+ * have CAP_MAC_ADMIN
+ */
+ if (!aa_may_manage_policy(op))
+ return ERR_PTR(-EACCES);
+
+ /* freed by caller to simple_write_to_buffer */
+ data = kvmalloc(alloc_size);
+ if (data == NULL)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(data, userbuf, copy_size)) {
+ kvfree(data);
+ return ERR_PTR(-EFAULT);
+ }
+
+ return data;
+}
+
+
+/* .load file hook fn to load policy */
+static ssize_t profile_load(struct file *f, const char __user *buf, size_t size,
+ loff_t *pos)
+{
+ char *data;
+ ssize_t error;
+
+ data = aa_simple_write_to_buffer(OP_PROF_LOAD, buf, size, size, pos);
+
+ error = PTR_ERR(data);
+ if (!IS_ERR(data)) {
+ error = aa_replace_profiles(data, size, PROF_ADD);
+ kvfree(data);
+ }
+
+ return error;
+}
+
+static const struct file_operations aa_fs_profile_load = {
+ .write = profile_load,
+ .llseek = default_llseek,
+};
+
+/* .replace file hook fn to load and/or replace policy */
+static ssize_t profile_replace(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *data;
+ ssize_t error;
+
+ data = aa_simple_write_to_buffer(OP_PROF_REPL, buf, size, size, pos);
+ error = PTR_ERR(data);
+ if (!IS_ERR(data)) {
+ error = aa_replace_profiles(data, size, PROF_REPLACE);
+ kvfree(data);
+ }
+
+ return error;
+}
+
+static const struct file_operations aa_fs_profile_replace = {
+ .write = profile_replace,
+ .llseek = default_llseek,
+};
+
+/* .remove file hook fn to remove loaded policy */
+static ssize_t profile_remove(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ char *data;
+ ssize_t error;
+
+ /*
+ * aa_remove_profile needs a null terminated string so 1 extra
+ * byte is allocated and the copied data is null terminated.
+ */
+ data = aa_simple_write_to_buffer(OP_PROF_RM, buf, size + 1, size, pos);
+
+ error = PTR_ERR(data);
+ if (!IS_ERR(data)) {
+ data[size] = 0;
+ error = aa_remove_profiles(data, size);
+ kvfree(data);
+ }
+
+ return error;
+}
+
+static const struct file_operations aa_fs_profile_remove = {
+ .write = profile_remove,
+ .llseek = default_llseek,
+};
+
+static int aa_fs_seq_show(struct seq_file *seq, void *v)
+{
+ struct aa_fs_entry *fs_file = seq->private;
+
+ if (!fs_file)
+ return 0;
+
+ switch (fs_file->v_type) {
+ case AA_FS_TYPE_BOOLEAN:
+ seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no");
+ break;
+ case AA_FS_TYPE_STRING:
+ seq_printf(seq, "%s\n", fs_file->v.string);
+ break;
+ case AA_FS_TYPE_U64:
+ seq_printf(seq, "%#08lx\n", fs_file->v.u64);
+ break;
+ default:
+ /* Ignore unprintable entry types. */
+ break;
+ }
+
+ return 0;
+}
+
+static int aa_fs_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aa_fs_seq_show, inode->i_private);
+}
+
+const struct file_operations aa_fs_seq_file_ops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int aa_fs_seq_profile_open(struct inode *inode, struct file *file,
+ int (*show)(struct seq_file *, void *))
+{
+ struct aa_replacedby *r = aa_get_replacedby(inode->i_private);
+ int error = single_open(file, show, r);
+
+ if (error) {
+ file->private_data = NULL;
+ aa_put_replacedby(r);
+ }
+
+ return error;
+}
+
+static int aa_fs_seq_profile_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = (struct seq_file *) file->private_data;
+ if (seq)
+ aa_put_replacedby(seq->private);
+ return single_release(inode, file);
+}
+
+static int aa_fs_seq_profname_show(struct seq_file *seq, void *v)
+{
+ struct aa_replacedby *r = seq->private;
+ struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+ seq_printf(seq, "%s\n", profile->base.name);
+ aa_put_profile(profile);
+
+ return 0;
+}
+
+static int aa_fs_seq_profname_open(struct inode *inode, struct file *file)
+{
+ return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profname_show);
+}
+
+static const struct file_operations aa_fs_profname_fops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_profname_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_profmode_show(struct seq_file *seq, void *v)
+{
+ struct aa_replacedby *r = seq->private;
+ struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+ seq_printf(seq, "%s\n", aa_profile_mode_names[profile->mode]);
+ aa_put_profile(profile);
+
+ return 0;
+}
+
+static int aa_fs_seq_profmode_open(struct inode *inode, struct file *file)
+{
+ return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profmode_show);
+}
+
+static const struct file_operations aa_fs_profmode_fops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_profmode_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_profattach_show(struct seq_file *seq, void *v)
+{
+ struct aa_replacedby *r = seq->private;
+ struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+ if (profile->attach)
+ seq_printf(seq, "%s\n", profile->attach);
+ else if (profile->xmatch)
+ seq_puts(seq, "<unknown>\n");
+ else
+ seq_printf(seq, "%s\n", profile->base.name);
+ aa_put_profile(profile);
+
+ return 0;
+}
+
+static int aa_fs_seq_profattach_open(struct inode *inode, struct file *file)
+{
+ return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profattach_show);
+}
+
+static const struct file_operations aa_fs_profattach_fops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_profattach_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
+{
+ struct aa_replacedby *r = seq->private;
+ struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+ unsigned int i, size = aa_hash_size();
+
+ if (profile->hash) {
+ for (i = 0; i < size; i++)
+ seq_printf(seq, "%.2x", profile->hash[i]);
+ seq_puts(seq, "\n");
+ }
+
+ return 0;
+}
+
+static int aa_fs_seq_hash_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aa_fs_seq_hash_show, inode->i_private);
+}
+
+static const struct file_operations aa_fs_seq_hash_fops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_hash_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/** fns to setup dynamic per profile/namespace files **/
+void __aa_fs_profile_rmdir(struct aa_profile *profile)
+{
+ struct aa_profile *child;
+ int i;
+
+ if (!profile)
+ return;
+
+ list_for_each_entry(child, &profile->base.profiles, base.list)
+ __aa_fs_profile_rmdir(child);
+
+ for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) {
+ struct aa_replacedby *r;
+ if (!profile->dents[i])
+ continue;
+
+ r = profile->dents[i]->d_inode->i_private;
+ securityfs_remove(profile->dents[i]);
+ aa_put_replacedby(r);
+ profile->dents[i] = NULL;
+ }
+}
+
+void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+ struct aa_profile *new)
+{
+ int i;
+
+ for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
+ new->dents[i] = old->dents[i];
+ old->dents[i] = NULL;
+ }
+}
+
+static struct dentry *create_profile_file(struct dentry *dir, const char *name,
+ struct aa_profile *profile,
+ const struct file_operations *fops)
+{
+ struct aa_replacedby *r = aa_get_replacedby(profile->replacedby);
+ struct dentry *dent;
+
+ dent = securityfs_create_file(name, S_IFREG | 0444, dir, r, fops);
+ if (IS_ERR(dent))
+ aa_put_replacedby(r);
+
+ return dent;
+}
+
+/* requires lock be held */
+int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+{
+ struct aa_profile *child;
+ struct dentry *dent = NULL, *dir;
+ int error;
+
+ if (!parent) {
+ struct aa_profile *p;
+ p = aa_deref_parent(profile);
+ dent = prof_dir(p);
+ /* adding to parent that previously didn't have children */
+ dent = securityfs_create_dir("profiles", dent);
+ if (IS_ERR(dent))
+ goto fail;
+ prof_child_dir(p) = parent = dent;
+ }
+
+ if (!profile->dirname) {
+ int len, id_len;
+ len = mangle_name(profile->base.name, NULL);
+ id_len = snprintf(NULL, 0, ".%ld", profile->ns->uniq_id);
+
+ profile->dirname = kmalloc(len + id_len + 1, GFP_KERNEL);
+ if (!profile->dirname)
+ goto fail;
+
+ mangle_name(profile->base.name, profile->dirname);
+ sprintf(profile->dirname + len, ".%ld", profile->ns->uniq_id++);
+ }
+
+ dent = securityfs_create_dir(profile->dirname, parent);
+ if (IS_ERR(dent))
+ goto fail;
+ prof_dir(profile) = dir = dent;
+
+ dent = create_profile_file(dir, "name", profile, &aa_fs_profname_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_NAME] = dent;
+
+ dent = create_profile_file(dir, "mode", profile, &aa_fs_profmode_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_MODE] = dent;
+
+ dent = create_profile_file(dir, "attach", profile,
+ &aa_fs_profattach_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_ATTACH] = dent;
+
+ if (profile->hash) {
+ dent = create_profile_file(dir, "sha1", profile,
+ &aa_fs_seq_hash_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_HASH] = dent;
+ }
+
+ list_for_each_entry(child, &profile->base.profiles, base.list) {
+ error = __aa_fs_profile_mkdir(child, prof_child_dir(profile));
+ if (error)
+ goto fail2;
+ }
+
+ return 0;
+
+fail:
+ error = PTR_ERR(dent);
+
+fail2:
+ __aa_fs_profile_rmdir(profile);
+
+ return error;
+}
+
+void __aa_fs_namespace_rmdir(struct aa_namespace *ns)
+{
+ struct aa_namespace *sub;
+ struct aa_profile *child;
+ int i;
+
+ if (!ns)
+ return;
+
+ list_for_each_entry(child, &ns->base.profiles, base.list)
+ __aa_fs_profile_rmdir(child);
+
+ list_for_each_entry(sub, &ns->sub_ns, base.list) {
+ mutex_lock(&sub->lock);
+ __aa_fs_namespace_rmdir(sub);
+ mutex_unlock(&sub->lock);
+ }
+
+ for (i = AAFS_NS_SIZEOF - 1; i >= 0; --i) {
+ securityfs_remove(ns->dents[i]);
+ ns->dents[i] = NULL;
+ }
+}
+
+int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent,
+ const char *name)
+{
+ struct aa_namespace *sub;
+ struct aa_profile *child;
+ struct dentry *dent, *dir;
+ int error;
+
+ if (!name)
+ name = ns->base.name;
+
+ dent = securityfs_create_dir(name, parent);
+ if (IS_ERR(dent))
+ goto fail;
+ ns_dir(ns) = dir = dent;
+
+ dent = securityfs_create_dir("profiles", dir);
+ if (IS_ERR(dent))
+ goto fail;
+ ns_subprofs_dir(ns) = dent;
+
+ dent = securityfs_create_dir("namespaces", dir);
+ if (IS_ERR(dent))
+ goto fail;
+ ns_subns_dir(ns) = dent;
+
+ list_for_each_entry(child, &ns->base.profiles, base.list) {
+ error = __aa_fs_profile_mkdir(child, ns_subprofs_dir(ns));
+ if (error)
+ goto fail2;
+ }
+
+ list_for_each_entry(sub, &ns->sub_ns, base.list) {
+ mutex_lock(&sub->lock);
+ error = __aa_fs_namespace_mkdir(sub, ns_subns_dir(ns), NULL);
+ mutex_unlock(&sub->lock);
+ if (error)
+ goto fail2;
+ }
+
+ return 0;
+
+fail:
+ error = PTR_ERR(dent);
+
+fail2:
+ __aa_fs_namespace_rmdir(ns);
+
+ return error;
+}
+
+
+#define list_entry_next(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+#define list_entry_is_head(pos, head, member) (&pos->member == (head))
+
+/**
+ * __next_namespace - find the next namespace to list
+ * @root: root namespace to stop search at (NOT NULL)
+ * @ns: current ns position (NOT NULL)
+ *
+ * Find the next namespace from @ns under @root and handle all locking needed
+ * while switching current namespace.
+ *
+ * Returns: next namespace or NULL if at last namespace under @root
+ * Requires: ns->parent->lock to be held
+ * NOTE: will not unlock root->lock
+ */
+static struct aa_namespace *__next_namespace(struct aa_namespace *root,
+ struct aa_namespace *ns)
+{
+ struct aa_namespace *parent, *next;
+
+ /* is next namespace a child */
+ if (!list_empty(&ns->sub_ns)) {
+ next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
+ mutex_lock(&next->lock);
+ return next;
+ }
+
+ /* check if the next ns is a sibling, parent, gp, .. */
+ parent = ns->parent;
+ while (ns != root) {
+ mutex_unlock(&ns->lock);
+ next = list_entry_next(ns, base.list);
+ if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
+ mutex_lock(&next->lock);
+ return next;
+ }
+ ns = parent;
+ parent = parent->parent;
+ }
+
+ return NULL;
+}
+
+/**
+ * __first_profile - find the first profile in a namespace
+ * @root: namespace that is root of profiles being displayed (NOT NULL)
+ * @ns: namespace to start in (NOT NULL)
+ *
+ * Returns: unrefcounted profile or NULL if no profile
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__first_profile(struct aa_namespace *root,
+ struct aa_namespace *ns)
+{
+ for (; ns; ns = __next_namespace(root, ns)) {
+ if (!list_empty(&ns->base.profiles))
+ return list_first_entry(&ns->base.profiles,
+ struct aa_profile, base.list);
+ }
+ return NULL;
+}
+
+/**
+ * __next_profile - step to the next profile in a profile tree
+ * @p: current profile in tree (NOT NULL)
+ *
+ * Perform a depth first traversal on the profile tree in a namespace
+ *
+ * Returns: next profile or NULL if done
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__next_profile(struct aa_profile *p)
+{
+ struct aa_profile *parent;
+ struct aa_namespace *ns = p->ns;
+
+ /* is next profile a child */
+ if (!list_empty(&p->base.profiles))
+ return list_first_entry(&p->base.profiles, typeof(*p),
+ base.list);
+
+ /* is next profile a sibling, parent sibling, gp, sibling, .. */
+ parent = rcu_dereference_protected(p->parent,
+ mutex_is_locked(&p->ns->lock));
+ while (parent) {
+ p = list_entry_next(p, base.list);
+ if (!list_entry_is_head(p, &parent->base.profiles, base.list))
+ return p;
+ p = parent;
+ parent = rcu_dereference_protected(parent->parent,
+ mutex_is_locked(&parent->ns->lock));
+ }
+
+ /* is next another profile in the namespace */
+ p = list_entry_next(p, base.list);
+ if (!list_entry_is_head(p, &ns->base.profiles, base.list))
+ return p;
+
+ return NULL;
+}
+
+/**
+ * next_profile - step to the next profile wherever it may be
+ * @root: root namespace (NOT NULL)
+ * @profile: current profile (NOT NULL)
+ *
+ * Returns: next profile or NULL if there isn't one
+ */
+static struct aa_profile *next_profile(struct aa_namespace *root,
+ struct aa_profile *profile)
+{
+ struct aa_profile *next = __next_profile(profile);
+ if (next)
+ return next;
+
+ /* finished all profiles in namespace move to next namespace */
+ return __first_profile(root, __next_namespace(root, profile->ns));
+}
+
+/**
+ * p_start - start a depth first traversal of profile tree
+ * @f: seq_file to fill
+ * @pos: current position
+ *
+ * Returns: first profile under current namespace or NULL if none found
+ *
+ * acquires first ns->lock
+ */
+static void *p_start(struct seq_file *f, loff_t *pos)
+{
+ struct aa_profile *profile = NULL;
+ struct aa_namespace *root = aa_current_profile()->ns;
+ loff_t l = *pos;
+ f->private = aa_get_namespace(root);
+
+
+ /* find the first profile */
+ mutex_lock(&root->lock);
+ profile = __first_profile(root, root);
+
+ /* skip to position */
+ for (; profile && l > 0; l--)
+ profile = next_profile(root, profile);
+
+ return profile;
+}
+
+/**
+ * p_next - read the next profile entry
+ * @f: seq_file to fill
+ * @p: profile previously returned
+ * @pos: current position
+ *
+ * Returns: next profile after @p or NULL if none
+ *
+ * may acquire/release locks in namespace tree as necessary
+ */
+static void *p_next(struct seq_file *f, void *p, loff_t *pos)
+{
+ struct aa_profile *profile = p;
+ struct aa_namespace *ns = f->private;
+ (*pos)++;
+
+ return next_profile(ns, profile);
+}
+
+/**
+ * p_stop - stop depth first traversal
+ * @f: seq_file we are filling
+ * @p: the last profile written
+ *
+ * Release all locking done by p_start/p_next on namespace tree
+ */
+static void p_stop(struct seq_file *f, void *p)
+{
+ struct aa_profile *profile = p;
+ struct aa_namespace *root = f->private, *ns;
+
+ if (profile) {
+ for (ns = profile->ns; ns && ns != root; ns = ns->parent)
+ mutex_unlock(&ns->lock);
+ }
+ mutex_unlock(&root->lock);
+ aa_put_namespace(root);
+}
+
+/**
+ * seq_show_profile - show a profile entry
+ * @f: seq_file to fill
+ * @p: current position (profile) (NOT NULL)
+ *
+ * Returns: error on failure
+ */
+static int seq_show_profile(struct seq_file *f, void *p)
+{
+ struct aa_profile *profile = (struct aa_profile *)p;
+ struct aa_namespace *root = f->private;
+
+ if (profile->ns != root)
+ seq_printf(f, ":%s://", aa_ns_name(root, profile->ns));
+ seq_printf(f, "%s (%s)\n", profile->base.hname,
+ aa_profile_mode_names[profile->mode]);
+
+ return 0;
+}
+
+static const struct seq_operations aa_fs_profiles_op = {
+ .start = p_start,
+ .next = p_next,
+ .stop = p_stop,
+ .show = seq_show_profile,
+};
+
+static int profiles_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &aa_fs_profiles_op);
+}
+
+static int profiles_release(struct inode *inode, struct file *file)
+{
+ return seq_release(inode, file);
+}
+
+static const struct file_operations aa_fs_profiles_fops = {
+ .open = profiles_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = profiles_release,
+};
+
+
+/** Base file system setup **/
+static struct aa_fs_entry aa_fs_entry_file[] = {
+ AA_FS_FILE_STRING("mask", "create read write exec append mmap_exec " \
+ "link lock"),
+ { }
+};
+
+static struct aa_fs_entry aa_fs_entry_domain[] = {
+ AA_FS_FILE_BOOLEAN("change_hat", 1),
+ AA_FS_FILE_BOOLEAN("change_hatv", 1),
+ AA_FS_FILE_BOOLEAN("change_onexec", 1),
+ AA_FS_FILE_BOOLEAN("change_profile", 1),
+ { }
+};
+
+static struct aa_fs_entry aa_fs_entry_policy[] = {
+ AA_FS_FILE_BOOLEAN("set_load", 1),
+ {}
+};
+
+static struct aa_fs_entry aa_fs_entry_features[] = {
+ AA_FS_DIR("policy", aa_fs_entry_policy),
+ AA_FS_DIR("domain", aa_fs_entry_domain),
+ AA_FS_DIR("file", aa_fs_entry_file),
+ AA_FS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
+ AA_FS_DIR("rlimit", aa_fs_entry_rlimit),
+ AA_FS_DIR("caps", aa_fs_entry_caps),
+ { }
+};
+
+static struct aa_fs_entry aa_fs_entry_apparmor[] = {
+ AA_FS_FILE_FOPS(".load", 0640, &aa_fs_profile_load),
+ AA_FS_FILE_FOPS(".replace", 0640, &aa_fs_profile_replace),
+ AA_FS_FILE_FOPS(".remove", 0640, &aa_fs_profile_remove),
+ AA_FS_FILE_FOPS("profiles", 0640, &aa_fs_profiles_fops),
+ AA_FS_DIR("features", aa_fs_entry_features),
+ { }
+};
+
+static struct aa_fs_entry aa_fs_entry =
+ AA_FS_DIR("apparmor", aa_fs_entry_apparmor);
+
+/**
+ * aafs_create_file - create a file entry in the apparmor securityfs
+ * @fs_file: aa_fs_entry to build an entry for (NOT NULL)
+ * @parent: the parent dentry in the securityfs
+ *
+ * Use aafs_remove_file to remove entries created with this fn.
+ */
+static int __init aafs_create_file(struct aa_fs_entry *fs_file,
+ struct dentry *parent)
+{
+ int error = 0;
+
+ fs_file->dentry = securityfs_create_file(fs_file->name,
+ S_IFREG | fs_file->mode,
+ parent, fs_file,
+ fs_file->file_ops);
+ if (IS_ERR(fs_file->dentry)) {
+ error = PTR_ERR(fs_file->dentry);
+ fs_file->dentry = NULL;
+ }
+ return error;
+}
+
+static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir);
+/**
+ * aafs_create_dir - recursively create a directory entry in the securityfs
+ * @fs_dir: aa_fs_entry (and all child entries) to build (NOT NULL)
+ * @parent: the parent dentry in the securityfs
+ *
+ * Use aafs_remove_dir to remove entries created with this fn.
+ */
+static int __init aafs_create_dir(struct aa_fs_entry *fs_dir,
+ struct dentry *parent)
+{
+ struct aa_fs_entry *fs_file;
+ struct dentry *dir;
+ int error;
+
+ dir = securityfs_create_dir(fs_dir->name, parent);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ fs_dir->dentry = dir;
+
+ for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
+ if (fs_file->v_type == AA_FS_TYPE_DIR)
+ error = aafs_create_dir(fs_file, fs_dir->dentry);
+ else
+ error = aafs_create_file(fs_file, fs_dir->dentry);
+ if (error)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ aafs_remove_dir(fs_dir);
+
+ return error;
+}
+
+/**
+ * aafs_remove_file - drop a single file entry in the apparmor securityfs
+ * @fs_file: aa_fs_entry to detach from the securityfs (NOT NULL)
+ */
+static void __init aafs_remove_file(struct aa_fs_entry *fs_file)
+{
+ if (!fs_file->dentry)
+ return;
+
+ securityfs_remove(fs_file->dentry);
+ fs_file->dentry = NULL;
+}
+
+/**
+ * aafs_remove_dir - recursively drop a directory entry from the securityfs
+ * @fs_dir: aa_fs_entry (and all child entries) to detach (NOT NULL)
+ */
+static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir)
+{
+ struct aa_fs_entry *fs_file;
+
+ for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
+ if (fs_file->v_type == AA_FS_TYPE_DIR)
+ aafs_remove_dir(fs_file);
+ else
+ aafs_remove_file(fs_file);
+ }
+
+ aafs_remove_file(fs_dir);
+}
+
+/**
+ * aa_destroy_aafs - cleanup and free aafs
+ *
+ * releases dentries allocated by aa_create_aafs
+ */
+void __init aa_destroy_aafs(void)
+{
+ aafs_remove_dir(&aa_fs_entry);
+}
+
+/**
+ * aa_create_aafs - create the apparmor security filesystem
+ *
+ * dentries created here are released by aa_destroy_aafs
+ *
+ * Returns: error on failure
+ */
+static int __init aa_create_aafs(void)
+{
+ int error;
+
+ if (!apparmor_initialized)
+ return 0;
+
+ if (aa_fs_entry.dentry) {
+ AA_ERROR("%s: AppArmor securityfs already exists\n", __func__);
+ return -EEXIST;
+ }
+
+ /* Populate fs tree. */
+ error = aafs_create_dir(&aa_fs_entry, NULL);
+ if (error)
+ goto error;
+
+ error = __aa_fs_namespace_mkdir(root_ns, aa_fs_entry.dentry,
+ "policy");
+ if (error)
+ goto error;
+
+ /* TODO: add support for apparmorfs_null and apparmorfs_mnt */
+
+ /* Report that AppArmor fs is enabled */
+ aa_info_message("AppArmor Filesystem Enabled");
+ return 0;
+
+error:
+ aa_destroy_aafs();
+ AA_ERROR("Error creating AppArmor securityfs\n");
+ return error;
+}
+
+fs_initcall(aa_create_aafs);
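A note on the directory naming above: __aa_fs_profile_mkdir() combines mangle_name() with the namespace's uniq_id, so a profile named "/usr/bin/foo bar" gets a securityfs directory such as usr.bin.foo_bar.<id>. The following is a minimal userspace sketch of the same mangling rules (illustration only, not the in-kernel function; the mangle() helper name is made up here):

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Sketch of the rules in mangle_name() above: strip leading '/' and '.',
 * map '/' to '.', whitespace to '_', and drop any character that is not
 * alphanumeric or in "._-". */
static void mangle(const char *name, char *out)
{
	while (*name == '/' || *name == '.')
		name++;
	for (; *name; name++) {
		if (*name == '/')
			*out++ = '.';
		else if (isspace((unsigned char)*name))
			*out++ = '_';
		else if (isalnum((unsigned char)*name) || strchr("._-", *name))
			*out++ = *name;
	}
	*out = '\0';
}

int main(void)
{
	char buf[128];

	mangle("/usr/bin/foo bar", buf);
	printf("%s\n", buf);	/* prints: usr.bin.foo_bar */
	return 0;
}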
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
new file mode 100644
index 00000000000..89c78658031
--- /dev/null
+++ b/security/apparmor/audit.c
@@ -0,0 +1,209 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor auditing functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/audit.h>
+#include <linux/socket.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/policy.h"
+
+const char *const op_table[] = {
+ "null",
+
+ "sysctl",
+ "capable",
+
+ "unlink",
+ "mkdir",
+ "rmdir",
+ "mknod",
+ "truncate",
+ "link",
+ "symlink",
+ "rename_src",
+ "rename_dest",
+ "chmod",
+ "chown",
+ "getattr",
+ "open",
+
+ "file_perm",
+ "file_lock",
+ "file_mmap",
+ "file_mprotect",
+
+ "create",
+ "post_create",
+ "bind",
+ "connect",
+ "listen",
+ "accept",
+ "sendmsg",
+ "recvmsg",
+ "getsockname",
+ "getpeername",
+ "getsockopt",
+ "setsockopt",
+ "socket_shutdown",
+
+ "ptrace",
+
+ "exec",
+ "change_hat",
+ "change_profile",
+ "change_onexec",
+
+ "setprocattr",
+ "setrlimit",
+
+ "profile_replace",
+ "profile_load",
+ "profile_remove"
+};
+
+const char *const audit_mode_names[] = {
+ "normal",
+ "quiet_denied",
+ "quiet",
+ "noquiet",
+ "all"
+};
+
+static const char *const aa_audit_type[] = {
+ "AUDIT",
+ "ALLOWED",
+ "DENIED",
+ "HINT",
+ "STATUS",
+ "ERROR",
+ "KILLED",
+ "AUTO"
+};
+
+/*
+ * Currently AppArmor auditing is fed straight into the audit framework.
+ *
+ * TODO:
+ * netlink interface for complain mode
+ * user auditing - send user audit messages to the netlink interface
+ * system control of whether user audit messages go to system log
+ */
+
+/**
+ * audit_pre - core AppArmor function.
+ * @ab: audit buffer to fill (NOT NULL)
+ * @ca: audit structure containing data to audit (NOT NULL)
+ *
+ * Record common AppArmor audit data from @ca
+ */
+static void audit_pre(struct audit_buffer *ab, void *ca)
+{
+ struct common_audit_data *sa = ca;
+
+ if (aa_g_audit_header) {
+ audit_log_format(ab, "apparmor=");
+ audit_log_string(ab, aa_audit_type[sa->aad->type]);
+ }
+
+ if (sa->aad->op) {
+ audit_log_format(ab, " operation=");
+ audit_log_string(ab, op_table[sa->aad->op]);
+ }
+
+ if (sa->aad->info) {
+ audit_log_format(ab, " info=");
+ audit_log_string(ab, sa->aad->info);
+ if (sa->aad->error)
+ audit_log_format(ab, " error=%d", sa->aad->error);
+ }
+
+ if (sa->aad->profile) {
+ struct aa_profile *profile = sa->aad->profile;
+ if (profile->ns != root_ns) {
+ audit_log_format(ab, " namespace=");
+ audit_log_untrustedstring(ab, profile->ns->base.hname);
+ }
+ audit_log_format(ab, " profile=");
+ audit_log_untrustedstring(ab, profile->base.hname);
+ }
+
+ if (sa->aad->name) {
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, sa->aad->name);
+ }
+}
+
+/**
+ * aa_audit_msg - Log a message to the audit subsystem
+ * @sa: audit event structure (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ */
+void aa_audit_msg(int type, struct common_audit_data *sa,
+ void (*cb) (struct audit_buffer *, void *))
+{
+ sa->aad->type = type;
+ common_lsm_audit(sa, audit_pre, cb);
+}
+
+/**
+ * aa_audit - Log a profile based audit event to the audit subsystem
+ * @type: audit type for the message
+ * @profile: profile to check against (NOT NULL)
+ * @gfp: allocation flags to use
+ * @sa: audit event (NOT NULL)
+ * @cb: optional callback fn for type specific fields (MAYBE NULL)
+ *
+ * Handle default message switching based on audit mode flags
+ *
+ * Returns: error on failure
+ */
+int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
+ struct common_audit_data *sa,
+ void (*cb) (struct audit_buffer *, void *))
+{
+ BUG_ON(!profile);
+
+ if (type == AUDIT_APPARMOR_AUTO) {
+ if (likely(!sa->aad->error)) {
+ if (AUDIT_MODE(profile) != AUDIT_ALL)
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+ } else if (COMPLAIN_MODE(profile))
+ type = AUDIT_APPARMOR_ALLOWED;
+ else
+ type = AUDIT_APPARMOR_DENIED;
+ }
+ if (AUDIT_MODE(profile) == AUDIT_QUIET ||
+ (type == AUDIT_APPARMOR_DENIED &&
+ AUDIT_MODE(profile) == AUDIT_QUIET))
+ return sa->aad->error;
+
+ if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
+ type = AUDIT_APPARMOR_KILL;
+
+ if (!unconfined(profile))
+ sa->aad->profile = profile;
+
+ aa_audit_msg(type, sa, cb);
+
+ if (sa->aad->type == AUDIT_APPARMOR_KILL)
+ (void)send_sig_info(SIGKILL, NULL,
+ sa->u.tsk ? sa->u.tsk : current);
+
+ if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
+ return complain_error(sa->aad->error);
+
+ return sa->aad->error;
+}
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
new file mode 100644
index 00000000000..1101c6f64bb
--- /dev/null
+++ b/security/apparmor/capability.c
@@ -0,0 +1,143 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor capability mediation functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/gfp.h>
+
+#include "include/apparmor.h"
+#include "include/capability.h"
+#include "include/context.h"
+#include "include/policy.h"
+#include "include/audit.h"
+
+/*
+ * Table of capability names: we generate it from capabilities.h.
+ */
+#include "capability_names.h"
+
+struct aa_fs_entry aa_fs_entry_caps[] = {
+ AA_FS_FILE_STRING("mask", AA_FS_CAPS_MASK),
+ { }
+};
+
+struct audit_cache {
+ struct aa_profile *profile;
+ kernel_cap_t caps;
+};
+
+static DEFINE_PER_CPU(struct audit_cache, audit_cache);
+
+/**
+ * audit_cb - call back for capability components of audit struct
+ * @ab - audit buffer (NOT NULL)
+ * @va - audit struct to audit data from (NOT NULL)
+ */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+ audit_log_format(ab, " capname=");
+ audit_log_untrustedstring(ab, capability_names[sa->u.cap]);
+}
+
+/**
+ * audit_caps - audit a capability
+ * @profile: profile being tested for confinement (NOT NULL)
+ * @cap: capability tested
+ * @error: error code returned by test
+ *
+ * Do auditing of capability and handle audit/complain/kill mode switching
+ * and duplicate message elimination.
+ *
+ * Returns: 0 or sa->error on success, error code on failure
+ */
+static int audit_caps(struct aa_profile *profile, int cap, int error)
+{
+ struct audit_cache *ent;
+ int type = AUDIT_APPARMOR_AUTO;
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_CAP;
+ sa.aad = &aad;
+ sa.u.cap = cap;
+ sa.aad->op = OP_CAPABLE;
+ sa.aad->error = error;
+
+ if (likely(!error)) {
+ /* test if auditing is being forced */
+ if (likely((AUDIT_MODE(profile) != AUDIT_ALL) &&
+ !cap_raised(profile->caps.audit, cap)))
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+ } else if (KILL_MODE(profile) ||
+ cap_raised(profile->caps.kill, cap)) {
+ type = AUDIT_APPARMOR_KILL;
+ } else if (cap_raised(profile->caps.quiet, cap) &&
+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+ AUDIT_MODE(profile) != AUDIT_ALL) {
+ /* quiet auditing */
+ return error;
+ }
+
+ /* Do simple duplicate message elimination */
+ ent = &get_cpu_var(audit_cache);
+ if (profile == ent->profile && cap_raised(ent->caps, cap)) {
+ put_cpu_var(audit_cache);
+ if (COMPLAIN_MODE(profile))
+ return complain_error(error);
+ return error;
+ } else {
+ aa_put_profile(ent->profile);
+ ent->profile = aa_get_profile(profile);
+ cap_raise(ent->caps, cap);
+ }
+ put_cpu_var(audit_cache);
+
+ return aa_audit(type, profile, GFP_ATOMIC, &sa, audit_cb);
+}
+
+/**
+ * profile_capable - test if profile allows use of capability @cap
+ * @profile: profile being enforced (NOT NULL, NOT unconfined)
+ * @cap: capability to test if allowed
+ *
+ * Returns: 0 if allowed else -EPERM
+ */
+static int profile_capable(struct aa_profile *profile, int cap)
+{
+ return cap_raised(profile->caps.allow, cap) ? 0 : -EPERM;
+}
+
+/**
+ * aa_capable - test permission to use capability
+ * @profile: profile being tested against (NOT NULL)
+ * @cap: capability to be tested
+ * @audit: whether an audit record should be generated
+ *
+ * Look up capability in profile capability set.
+ *
+ * Returns: 0 on success, or else an error code.
+ */
+int aa_capable(struct aa_profile *profile, int cap, int audit)
+{
+ int error = profile_capable(profile, cap);
+
+ if (!audit) {
+ if (COMPLAIN_MODE(profile))
+ return complain_error(error);
+ return error;
+ }
+
+ return audit_caps(profile, cap, error);
+}
diff --git a/security/apparmor/context.c b/security/apparmor/context.c
new file mode 100644
index 00000000000..3064c6ced87
--- /dev/null
+++ b/security/apparmor/context.c
@@ -0,0 +1,222 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor functions used to manipulate object security
+ * contexts.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ * AppArmor sets confinement on every task, via the aa_task_cxt and
+ * the aa_task_cxt.profile, both of which are required and are not allowed
+ * to be NULL. The aa_task_cxt is not reference counted and is unique
+ * to each cred (which is reference counted). The profile pointed to by
+ * the task_cxt is reference counted.
+ *
+ * TODO
+ * If a task uses change_hat it currently does not return to the old
+ * cred or task context but instead creates a new one. Ideally the task
+ * should return to the previous cred if it has not been modified.
+ *
+ */
+
+#include "include/context.h"
+#include "include/policy.h"
+
+/**
+ * aa_alloc_task_context - allocate a new task_cxt
+ * @flags: gfp flags for allocation
+ *
+ * Returns: allocated buffer or NULL on failure
+ */
+struct aa_task_cxt *aa_alloc_task_context(gfp_t flags)
+{
+ return kzalloc(sizeof(struct aa_task_cxt), flags);
+}
+
+/**
+ * aa_free_task_context - free a task_cxt
+ * @cxt: task_cxt to free (MAYBE NULL)
+ */
+void aa_free_task_context(struct aa_task_cxt *cxt)
+{
+ if (cxt) {
+ aa_put_profile(cxt->profile);
+ aa_put_profile(cxt->previous);
+ aa_put_profile(cxt->onexec);
+
+ kzfree(cxt);
+ }
+}
+
+/**
+ * aa_dup_task_context - duplicate a task context, incrementing reference counts
+ * @new: a blank task context (NOT NULL)
+ * @old: the task context to copy (NOT NULL)
+ */
+void aa_dup_task_context(struct aa_task_cxt *new, const struct aa_task_cxt *old)
+{
+ *new = *old;
+ aa_get_profile(new->profile);
+ aa_get_profile(new->previous);
+ aa_get_profile(new->onexec);
+}
+
+/**
+ * aa_get_task_profile - Get another task's profile
+ * @task: task to query (NOT NULL)
+ *
+ * Returns: counted reference to @task's profile
+ */
+struct aa_profile *aa_get_task_profile(struct task_struct *task)
+{
+ struct aa_profile *p;
+
+ rcu_read_lock();
+ p = aa_get_profile(__aa_task_profile(task));
+ rcu_read_unlock();
+
+ return p;
+}
+
+/**
+ * aa_replace_current_profile - replace the current task's profile
+ * @profile: new profile (NOT NULL)
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_replace_current_profile(struct aa_profile *profile)
+{
+ struct aa_task_cxt *cxt = current_cxt();
+ struct cred *new;
+ BUG_ON(!profile);
+
+ if (cxt->profile == profile)
+ return 0;
+
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ cxt = cred_cxt(new);
+ if (unconfined(profile) || (cxt->profile->ns != profile->ns))
+ /* if switching to unconfined or a different profile namespace
+ * clear out context state
+ */
+ aa_clear_task_cxt_trans(cxt);
+
+ /* be careful switching cxt->profile, when racing replacement it
+ * is possible that cxt->profile->replacedby->profile is the reference
+ * keeping @profile valid, so make sure to get its reference before
+ * dropping the reference on cxt->profile */
+ aa_get_profile(profile);
+ aa_put_profile(cxt->profile);
+ cxt->profile = profile;
+
+ commit_creds(new);
+ return 0;
+}
+
+/**
+ * aa_set_current_onexec - set the task's change_profile to happen onexec
+ * @profile: system profile to set at exec (MAYBE NULL to clear value)
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_set_current_onexec(struct aa_profile *profile)
+{
+ struct aa_task_cxt *cxt;
+ struct cred *new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ cxt = cred_cxt(new);
+ aa_get_profile(profile);
+ aa_put_profile(cxt->onexec);
+ cxt->onexec = profile;
+
+ commit_creds(new);
+ return 0;
+}
+
+/**
+ * aa_set_current_hat - set the current task's hat
+ * @profile: profile to set as the current hat (NOT NULL)
+ * @token: token value that must be specified to change from the hat
+ *
+ * Do the switch of the task's hat. If the task is currently in a hat
+ * validate the token to match.
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_set_current_hat(struct aa_profile *profile, u64 token)
+{
+ struct aa_task_cxt *cxt;
+ struct cred *new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+ BUG_ON(!profile);
+
+ cxt = cred_cxt(new);
+ if (!cxt->previous) {
+ /* transfer refcount */
+ cxt->previous = cxt->profile;
+ cxt->token = token;
+ } else if (cxt->token == token) {
+ aa_put_profile(cxt->profile);
+ } else {
+ /* previous_profile && cxt->token != token */
+ abort_creds(new);
+ return -EACCES;
+ }
+ cxt->profile = aa_get_newest_profile(profile);
+ /* clear exec on switching context */
+ aa_put_profile(cxt->onexec);
+ cxt->onexec = NULL;
+
+ commit_creds(new);
+ return 0;
+}
+
+/**
+ * aa_restore_previous_profile - exit from hat context restoring the profile
+ * @token: the token that must be matched to exit hat context
+ *
+ * Attempt to return out of a hat to the previous profile. The token
+ * must match the stored token value.
+ *
+ * Returns: 0 or error on failure
+ */
+int aa_restore_previous_profile(u64 token)
+{
+ struct aa_task_cxt *cxt;
+ struct cred *new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ cxt = cred_cxt(new);
+ if (cxt->token != token) {
+ abort_creds(new);
+ return -EACCES;
+ }
+ /* ignore restores when there is no saved profile */
+ if (!cxt->previous) {
+ abort_creds(new);
+ return 0;
+ }
+
+ aa_put_profile(cxt->profile);
+ cxt->profile = aa_get_newest_profile(cxt->previous);
+ BUG_ON(!cxt->profile);
+ /* clear exec && prev information when restoring to previous context */
+ aa_clear_task_cxt_trans(cxt);
+
+ commit_creds(new);
+ return 0;
+}
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
new file mode 100644
index 00000000000..532471d0b3a
--- /dev/null
+++ b/security/apparmor/crypto.c
@@ -0,0 +1,95 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy hashing function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Fns to provide a checksum of policy that has been loaded. This can be
+ * compared to userspace policy compiles to check that the loaded policy is
+ * what it should be.
+ */
+
+#include <crypto/hash.h>
+
+#include "include/apparmor.h"
+#include "include/crypto.h"
+
+static unsigned int apparmor_hash_size;
+
+static struct crypto_shash *apparmor_tfm;
+
+unsigned int aa_hash_size(void)
+{
+ return apparmor_hash_size;
+}
+
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+ size_t len)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(apparmor_tfm)];
+ } desc;
+ int error = -ENOMEM;
+ __le32 le32_version = cpu_to_le32(version);
+
+ if (!apparmor_tfm)
+ return 0;
+
+ profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
+ if (!profile->hash)
+ goto fail;
+
+ desc.shash.tfm = apparmor_tfm;
+ desc.shash.flags = 0;
+
+ error = crypto_shash_init(&desc.shash);
+ if (error)
+ goto fail;
+ error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4);
+ if (error)
+ goto fail;
+ error = crypto_shash_update(&desc.shash, (u8 *) start, len);
+ if (error)
+ goto fail;
+ error = crypto_shash_final(&desc.shash, profile->hash);
+ if (error)
+ goto fail;
+
+ return 0;
+
+fail:
+ kfree(profile->hash);
+ profile->hash = NULL;
+
+ return error;
+}
+
+static int __init init_profile_hash(void)
+{
+ struct crypto_shash *tfm;
+
+ if (!apparmor_initialized)
+ return 0;
+
+ tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ int error = PTR_ERR(tfm);
+ AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
+ return error;
+ }
+ apparmor_tfm = tfm;
+ apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
+
+ aa_info_message("AppArmor sha1 policy hashing enabled");
+
+ return 0;
+}
+
+late_initcall(init_profile_hash);
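
For comparison outside the kernel (illustrative, not part of the patch), the same
digest can be recomputed over the 32-bit little-endian policy version followed by
the raw policy blob, mirroring aa_calc_profile_hash() above. This sketch assumes
OpenSSL's SHA1 API from <openssl/sha.h>; the version and blob are placeholders.

#include <stdint.h>
#include <stdio.h>
#include <openssl/sha.h>

/* sha1(le32(version) || policy_blob), as computed by aa_calc_profile_hash() */
static void policy_hash(uint32_t version, const void *blob, size_t len,
			unsigned char out[SHA_DIGEST_LENGTH])
{
	unsigned char le[4] = {
		version & 0xff, (version >> 8) & 0xff,
		(version >> 16) & 0xff, (version >> 24) & 0xff
	};
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, le, sizeof(le));	/* version, little endian */
	SHA1_Update(&ctx, blob, len);		/* raw policy bytes */
	SHA1_Final(out, &ctx);
}

int main(void)
{
	unsigned char digest[SHA_DIGEST_LENGTH];
	const char blob[] = "example policy bytes";	/* placeholder data */
	int i;

	policy_hash(5, blob, sizeof(blob) - 1, digest);
	for (i = 0; i < SHA_DIGEST_LENGTH; i++)
		printf("%02x", digest[i]);
	printf("\n");
	return 0;
}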
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
new file mode 100644
index 00000000000..452567d3a08
--- /dev/null
+++ b/security/apparmor/domain.c
@@ -0,0 +1,854 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy attachment and domain transitions
+ *
+ * Copyright (C) 2002-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/errno.h>
+#include <linux/fdtable.h>
+#include <linux/file.h>
+#include <linux/mount.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <linux/personality.h>
+
+#include "include/audit.h"
+#include "include/apparmorfs.h"
+#include "include/context.h"
+#include "include/domain.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+/**
+ * aa_free_domain_entries - free entries in a domain table
+ * @domain: the domain table to free (MAYBE NULL)
+ */
+void aa_free_domain_entries(struct aa_domain *domain)
+{
+ int i;
+ if (domain) {
+ if (!domain->table)
+ return;
+
+ for (i = 0; i < domain->size; i++)
+ kzfree(domain->table[i]);
+ kzfree(domain->table);
+ domain->table = NULL;
+ }
+}
+
+/**
+ * may_change_ptraced_domain - check if can change profile on ptraced task
+ * @to_profile: profile to change to (NOT NULL)
+ *
+ * Check if current is ptraced and if so if the tracing task is allowed
+ * to trace the new domain
+ *
+ * Returns: %0 or error if change not allowed
+ */
+static int may_change_ptraced_domain(struct aa_profile *to_profile)
+{
+ struct task_struct *tracer;
+ struct aa_profile *tracerp = NULL;
+ int error = 0;
+
+ rcu_read_lock();
+ tracer = ptrace_parent(current);
+ if (tracer)
+ /* released below */
+ tracerp = aa_get_task_profile(tracer);
+
+ /* not ptraced */
+ if (!tracer || unconfined(tracerp))
+ goto out;
+
+ error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH);
+
+out:
+ rcu_read_unlock();
+ aa_put_profile(tracerp);
+
+ return error;
+}
+
+/**
+ * change_profile_perms - find permissions for change_profile
+ * @profile: the current profile (NOT NULL)
+ * @ns: the namespace being switched to (NOT NULL)
+ * @name: the name of the profile to change to (NOT NULL)
+ * @request: requested perms
+ * @start: state to start matching in
+ *
+ * Returns: permission set
+ */
+static struct file_perms change_profile_perms(struct aa_profile *profile,
+ struct aa_namespace *ns,
+ const char *name, u32 request,
+ unsigned int start)
+{
+ struct file_perms perms;
+ struct path_cond cond = { };
+ unsigned int state;
+
+ if (unconfined(profile)) {
+ perms.allow = AA_MAY_CHANGE_PROFILE | AA_MAY_ONEXEC;
+ perms.audit = perms.quiet = perms.kill = 0;
+ return perms;
+ } else if (!profile->file.dfa) {
+ return nullperms;
+ } else if (ns == profile->ns) {
+ /* try matching against rules without the namespace prepended */
+ aa_str_perms(profile->file.dfa, start, name, &cond, &perms);
+ if (COMBINED_PERM_MASK(perms) & request)
+ return perms;
+ }
+
+ /* try matching with namespace name and then profile */
+ state = aa_dfa_match(profile->file.dfa, start, ns->base.name);
+ state = aa_dfa_match_len(profile->file.dfa, state, ":", 1);
+ aa_str_perms(profile->file.dfa, state, name, &cond, &perms);
+
+ return perms;
+}
+
+/**
+ * __attach_match - find an attachment match
+ * @name: name to match against (NOT NULL)
+ * @head: profile list to walk (NOT NULL)
+ *
+ * Do a linear search on the profiles in the list. There is a matching
+ * preference where an exact match is preferred over a name which uses
+ * expressions to match, and matching expressions with the greatest
+ * xmatch_len are preferred.
+ *
+ * Requires: @head not be shared or have appropriate locks held
+ *
+ * Returns: profile or NULL if no match found
+ */
+static struct aa_profile *__attach_match(const char *name,
+ struct list_head *head)
+{
+ int len = 0;
+ struct aa_profile *profile, *candidate = NULL;
+
+ list_for_each_entry_rcu(profile, head, base.list) {
+ if (profile->flags & PFLAG_NULL)
+ continue;
+ if (profile->xmatch && profile->xmatch_len > len) {
+ unsigned int state = aa_dfa_match(profile->xmatch,
+ DFA_START, name);
+ u32 perm = dfa_user_allow(profile->xmatch, state);
+ /* any accepting state means a valid match. */
+ if (perm & MAY_EXEC) {
+ candidate = profile;
+ len = profile->xmatch_len;
+ }
+ } else if (!strcmp(profile->base.name, name))
+ /* exact non-re match, no more searching required */
+ return profile;
+ }
+
+ return candidate;
+}
+
+/**
+ * find_attach - do attachment search for unconfined processes
+ * @ns: the current namespace (NOT NULL)
+ * @list: list to search (NOT NULL)
+ * @name: the executable name to match against (NOT NULL)
+ *
+ * Returns: profile or NULL if no match found
+ */
+static struct aa_profile *find_attach(struct aa_namespace *ns,
+ struct list_head *list, const char *name)
+{
+ struct aa_profile *profile;
+
+ rcu_read_lock();
+ profile = aa_get_profile(__attach_match(name, list));
+ rcu_read_unlock();
+
+ return profile;
+}
+
+/**
+ * separate_fqname - separate the namespace and profile names
+ * @fqname: the fqname name to split (NOT NULL)
+ * @ns_name: the namespace name if it exists (NOT NULL)
+ *
+ * This is the xtable equivalent routine of aa_split_fqname. It finds the
+ * split in an xtable fqname which contains an embedded \0 instead of a :
+ * if a namespace is specified. This is done so the xtable is constant and
+ * isn't re-split on every lookup.
+ *
+ * Either the profile or namespace name may be optional but if the namespace
+ * is specified the profile name termination must be present. This results
+ * in the following possible encodings:
+ * profile_name\0
+ * :ns_name\0profile_name\0
+ * :ns_name\0\0
+ *
+ * NOTE: the xtable fqname is pre-validated at load time in unpack_trans_table
+ *
+ * Returns: profile name if it is specified else NULL
+ */
+static const char *separate_fqname(const char *fqname, const char **ns_name)
+{
+ const char *name;
+
+ if (fqname[0] == ':') {
+ /* In this case there are guaranteed to be two \0 terminators
+ * in the string. They are verified at load time
+ * by unpack_trans_table
+ */
+ *ns_name = fqname + 1; /* skip : */
+ name = *ns_name + strlen(*ns_name) + 1;
+ if (!*name)
+ name = NULL;
+ } else {
+ *ns_name = NULL;
+ name = fqname;
+ }
+
+ return name;
+}
+
+static const char *next_name(int xtype, const char *name)
+{
+ return NULL;
+}
+
+/**
+ * x_table_lookup - lookup an x transition name via transition table
+ * @profile: current profile (NOT NULL)
+ * @xindex: index into x transition table
+ *
+ * Returns: refcounted profile, or NULL on failure (MAYBE NULL)
+ */
+static struct aa_profile *x_table_lookup(struct aa_profile *profile, u32 xindex)
+{
+ struct aa_profile *new_profile = NULL;
+ struct aa_namespace *ns = profile->ns;
+ u32 xtype = xindex & AA_X_TYPE_MASK;
+ int index = xindex & AA_X_INDEX_MASK;
+ const char *name;
+
+ /* index is guaranteed to be in range, validated at load time */
+ for (name = profile->file.trans.table[index]; !new_profile && name;
+ name = next_name(xtype, name)) {
+ struct aa_namespace *new_ns;
+ const char *xname = NULL;
+
+ new_ns = NULL;
+ if (xindex & AA_X_CHILD) {
+ /* release by caller */
+ new_profile = aa_find_child(profile, name);
+ continue;
+ } else if (*name == ':') {
+ /* switching namespace */
+ const char *ns_name;
+ xname = name = separate_fqname(name, &ns_name);
+ if (!xname)
+ /* no name so use profile name */
+ xname = profile->base.hname;
+ if (*ns_name == '@') {
+ /* TODO: variable support */
+ ;
+ }
+ /* released below */
+ new_ns = aa_find_namespace(ns, ns_name);
+ if (!new_ns)
+ continue;
+ } else if (*name == '@') {
+ /* TODO: variable support */
+ continue;
+ } else {
+ /* basic namespace lookup */
+ xname = name;
+ }
+
+ /* released by caller */
+ new_profile = aa_lookup_profile(new_ns ? new_ns : ns, xname);
+ aa_put_namespace(new_ns);
+ }
+
+ /* released by caller */
+ return new_profile;
+}
+
+/**
+ * x_to_profile - get target profile for a given xindex
+ * @profile: current profile (NOT NULL)
+ * @name: name to lookup (NOT NULL)
+ * @xindex: index into x transition table
+ *
+ * find profile for a transition index
+ *
+ * Returns: refcounted profile or NULL if not found
+ */
+static struct aa_profile *x_to_profile(struct aa_profile *profile,
+ const char *name, u32 xindex)
+{
+ struct aa_profile *new_profile = NULL;
+ struct aa_namespace *ns = profile->ns;
+ u32 xtype = xindex & AA_X_TYPE_MASK;
+
+ switch (xtype) {
+ case AA_X_NONE:
+ /* fail exec unless ix || ux fallback - handled by caller */
+ return NULL;
+ case AA_X_NAME:
+ if (xindex & AA_X_CHILD)
+ /* released by caller */
+ new_profile = find_attach(ns, &profile->base.profiles,
+ name);
+ else
+ /* released by caller */
+ new_profile = find_attach(ns, &ns->base.profiles,
+ name);
+ break;
+ case AA_X_TABLE:
+ /* released by caller */
+ new_profile = x_table_lookup(profile, xindex);
+ break;
+ }
+
+ /* released by caller */
+ return new_profile;
+}
+
+/**
+ * apparmor_bprm_set_creds - set the new creds on the bprm struct
+ * @bprm: binprm for the exec (NOT NULL)
+ *
+ * Returns: %0 or error on failure
+ */
+int apparmor_bprm_set_creds(struct linux_binprm *bprm)
+{
+ struct aa_task_cxt *cxt;
+ struct aa_profile *profile, *new_profile = NULL;
+ struct aa_namespace *ns;
+ char *buffer = NULL;
+ unsigned int state;
+ struct file_perms perms = {};
+ struct path_cond cond = {
+ file_inode(bprm->file)->i_uid,
+ file_inode(bprm->file)->i_mode
+ };
+ const char *name = NULL, *target = NULL, *info = NULL;
+ int error = cap_bprm_set_creds(bprm);
+ if (error)
+ return error;
+
+ if (bprm->cred_prepared)
+ return 0;
+
+ cxt = cred_cxt(bprm->cred);
+ BUG_ON(!cxt);
+
+ profile = aa_get_newest_profile(cxt->profile);
+ /*
+ * get the namespace from the replacement profile as replacement
+ * can change the namespace
+ */
+ ns = profile->ns;
+ state = profile->file.start;
+
+ /* buffer freed below, name is pointer into buffer */
+ error = aa_path_name(&bprm->file->f_path, profile->path_flags, &buffer,
+ &name, &info);
+ if (error) {
+ if (unconfined(profile) ||
+ (profile->flags & PFLAG_IX_ON_NAME_ERROR))
+ error = 0;
+ name = bprm->filename;
+ goto audit;
+ }
+
+ /* Test for onexec first as onexec directives override other
+ * x transitions.
+ */
+ if (unconfined(profile)) {
+ /* unconfined task */
+ if (cxt->onexec)
+ /* change_profile on exec already been granted */
+ new_profile = aa_get_profile(cxt->onexec);
+ else
+ new_profile = find_attach(ns, &ns->base.profiles, name);
+ if (!new_profile)
+ goto cleanup;
+ /*
+ * NOTE: Domain transitions from unconfined are allowed
+ * even when no_new_privs is set because this always results
+ * in a further reduction of permissions.
+ */
+ goto apply;
+ }
+
+ /* find exec permissions for name */
+ state = aa_str_perms(profile->file.dfa, state, name, &cond, &perms);
+ if (cxt->onexec) {
+ struct file_perms cp;
+ info = "change_profile onexec";
+ if (!(perms.allow & AA_MAY_ONEXEC))
+ goto audit;
+
+ /* test if this exec can be paired with change_profile onexec.
+ * onexec permission is linked to exec with a standard pairing
+ * exec\0change_profile
+ */
+ state = aa_dfa_null_transition(profile->file.dfa, state);
+ cp = change_profile_perms(profile, cxt->onexec->ns,
+ cxt->onexec->base.name,
+ AA_MAY_ONEXEC, state);
+
+ if (!(cp.allow & AA_MAY_ONEXEC))
+ goto audit;
+ new_profile = aa_get_newest_profile(cxt->onexec);
+ goto apply;
+ }
+
+ if (perms.allow & MAY_EXEC) {
+ /* exec permissions determine how to transition */
+ new_profile = x_to_profile(profile, name, perms.xindex);
+ if (!new_profile) {
+ if (perms.xindex & AA_X_INHERIT) {
+ /* (p|c|n)ix - don't change profile but do
+ * use the newest version, which was picked
+ * up above when getting profile
+ */
+ info = "ix fallback";
+ new_profile = aa_get_profile(profile);
+ goto x_clear;
+ } else if (perms.xindex & AA_X_UNCONFINED) {
+ new_profile = aa_get_newest_profile(ns->unconfined);
+ info = "ux fallback";
+ } else {
+ error = -ENOENT;
+ info = "profile not found";
+ /* remove MAY_EXEC to audit as failure */
+ perms.allow &= ~MAY_EXEC;
+ }
+ }
+ } else if (COMPLAIN_MODE(profile)) {
+ /* no exec permission - are we in learning mode */
+ new_profile = aa_new_null_profile(profile, 0);
+ if (!new_profile) {
+ error = -ENOMEM;
+ info = "could not create null profile";
+ } else {
+ error = -EACCES;
+ target = new_profile->base.hname;
+ }
+ perms.xindex |= AA_X_UNSAFE;
+ } else
+ /* fail exec */
+ error = -EACCES;
+
+ /*
+ * Policy has specified a domain transition; if no_new_privs is set
+ * then fail the exec.
+ */
+ if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) {
+ aa_put_profile(new_profile);
+ error = -EPERM;
+ goto cleanup;
+ }
+
+ if (!new_profile)
+ goto audit;
+
+ if (bprm->unsafe & LSM_UNSAFE_SHARE) {
+ /* FIXME: currently don't mediate shared state */
+ ;
+ }
+
+ if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
+ error = may_change_ptraced_domain(new_profile);
+ if (error) {
+ aa_put_profile(new_profile);
+ goto audit;
+ }
+ }
+
+ /* Determine if secure exec is needed.
+ * We can be at this point for the following reasons:
+ * 1. unconfined switching to confined
+ * 2. confined switching to different confinement
+ * 3. confined switching to unconfined
+ *
+ * Cases 2 and 3 are marked as requiring secure exec
+ * (unless policy specified "unsafe exec")
+ *
+ * bprm->unsafe is used to cache the AA_X_UNSAFE permission
+ * to avoid having to recompute in secureexec
+ */
+ if (!(perms.xindex & AA_X_UNSAFE)) {
+ AA_DEBUG("scrubbing environment variables for %s profile=%s\n",
+ name, new_profile->base.hname);
+ bprm->unsafe |= AA_SECURE_X_NEEDED;
+ }
+apply:
+ target = new_profile->base.hname;
+ /* when transitioning profiles clear unsafe personality bits */
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+x_clear:
+ aa_put_profile(cxt->profile);
+ /* transfer new profile reference will be released when cxt is freed */
+ cxt->profile = new_profile;
+
+ /* clear out all temporary/transitional state from the context */
+ aa_clear_task_cxt_trans(cxt);
+
+audit:
+ error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC,
+ name, target, cond.uid, info, error);
+
+cleanup:
+ aa_put_profile(profile);
+ kfree(buffer);
+
+ return error;
+}
+
+/**
+ * apparmor_bprm_secureexec - determine if secureexec is needed
+ * @bprm: binprm for exec (NOT NULL)
+ *
+ * Returns: %1 if secureexec is needed else %0
+ */
+int apparmor_bprm_secureexec(struct linux_binprm *bprm)
+{
+ int ret = cap_bprm_secureexec(bprm);
+
+ /* the decision to use secure exec is computed in set_creds
+ * and stored in bprm->unsafe.
+ */
+ if (!ret && (bprm->unsafe & AA_SECURE_X_NEEDED))
+ ret = 1;
+
+ return ret;
+}
+
+/**
+ * apparmor_bprm_committing_creds - do task cleanup on committing new creds
+ * @bprm: binprm for the exec (NOT NULL)
+ */
+void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
+{
+ struct aa_profile *profile = __aa_current_profile();
+ struct aa_task_cxt *new_cxt = cred_cxt(bprm->cred);
+
+ /* bail out if unconfined or not changing profile */
+ if ((new_cxt->profile == profile) ||
+ (unconfined(new_cxt->profile)))
+ return;
+
+ current->pdeath_signal = 0;
+
+ /* reset soft limits and set hard limits for the new profile */
+ __aa_transition_rlimits(profile, new_cxt->profile);
+}
+
+/**
+ * apparmor_bprm_committed_creds - do cleanup after new creds committed
+ * @bprm: binprm for the exec (NOT NULL)
+ */
+void apparmor_bprm_committed_creds(struct linux_binprm *bprm)
+{
+ /* TODO: cleanup signals - ipc mediation */
+ return;
+}
+
+/*
+ * Functions for self directed profile change
+ */
+
+/**
+ * new_compound_name - create an hname with @n2 appended to @n1
+ * @n1: base of hname (NOT NULL)
+ * @n2: name to append (NOT NULL)
+ *
+ * Returns: new name or NULL on error
+ */
+static char *new_compound_name(const char *n1, const char *n2)
+{
+ char *name = kmalloc(strlen(n1) + strlen(n2) + 3, GFP_KERNEL);
+ if (name)
+ sprintf(name, "%s//%s", n1, n2);
+ return name;
+}
+
+/**
+ * aa_change_hat - change hat to/from subprofile
+ * @hats: vector of hat names to try changing into (MAYBE NULL if @count == 0)
+ * @count: number of hat names in @hats
+ * @token: magic value to validate the hat change
+ * @permtest: true if this is just a permission test
+ *
+ * Change to the first profile specified in @hats that exists, and store
+ * the @token in the current task context. If @count == 0 and the
+ * @token matches that stored in the current task context, return to the
+ * top level profile.
+ *
+ * Returns %0 on success, error otherwise.
+ */
+int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
+{
+ const struct cred *cred;
+ struct aa_task_cxt *cxt;
+ struct aa_profile *profile, *previous_profile, *hat = NULL;
+ char *name = NULL;
+ int i;
+ struct file_perms perms = {};
+ const char *target = NULL, *info = NULL;
+ int error = 0;
+
+ /*
+ * Fail explicitly requested domain transitions if no_new_privs.
+ * There is no exception for unconfined as change_hat is not
+ * available.
+ */
+ if (current->no_new_privs)
+ return -EPERM;
+
+ /* released below */
+ cred = get_current_cred();
+ cxt = cred_cxt(cred);
+ profile = aa_cred_profile(cred);
+ previous_profile = cxt->previous;
+
+ if (unconfined(profile)) {
+ info = "unconfined";
+ error = -EPERM;
+ goto audit;
+ }
+
+ if (count) {
+ /* attempting to change into a new hat or switch to a sibling */
+ struct aa_profile *root;
+ if (PROFILE_IS_HAT(profile))
+ root = aa_get_profile_rcu(&profile->parent);
+ else
+ root = aa_get_profile(profile);
+
+ /* find first matching hat */
+ for (i = 0; i < count && !hat; i++)
+ /* released below */
+ hat = aa_find_child(root, hats[i]);
+ if (!hat) {
+ if (!COMPLAIN_MODE(root) || permtest) {
+ if (list_empty(&root->base.profiles))
+ error = -ECHILD;
+ else
+ error = -ENOENT;
+ aa_put_profile(root);
+ goto out;
+ }
+
+ /*
+ * In complain mode and failed to match any hats.
+ * The audit of the failure is based on the first hat
+ * supplied. This is done due to how userspace
+ * interacts with change_hat.
+ *
+ * TODO: Add logging of all failed hats
+ */
+
+ /* freed below */
+ name = new_compound_name(root->base.hname, hats[0]);
+ aa_put_profile(root);
+ target = name;
+ /* released below */
+ hat = aa_new_null_profile(profile, 1);
+ if (!hat) {
+ info = "failed null profile create";
+ error = -ENOMEM;
+ goto audit;
+ }
+ } else {
+ aa_put_profile(root);
+ target = hat->base.hname;
+ if (!PROFILE_IS_HAT(hat)) {
+ info = "target not hat";
+ error = -EPERM;
+ goto audit;
+ }
+ }
+
+ error = may_change_ptraced_domain(hat);
+ if (error) {
+ info = "ptraced";
+ error = -EPERM;
+ goto audit;
+ }
+
+ if (!permtest) {
+ error = aa_set_current_hat(hat, token);
+ if (error == -EACCES)
+ /* kill task in case of brute force attacks */
+ perms.kill = AA_MAY_CHANGEHAT;
+ else if (name && !error)
+ /* reset error for learning of new hats */
+ error = -ENOENT;
+ }
+ } else if (previous_profile) {
+ /* Return to saved profile. Kill task if restore fails
+ * to avoid brute force attacks
+ */
+ target = previous_profile->base.hname;
+ error = aa_restore_previous_profile(token);
+ perms.kill = AA_MAY_CHANGEHAT;
+ } else
+ /* ignore restores when there is no saved profile */
+ goto out;
+
+audit:
+ if (!permtest)
+ error = aa_audit_file(profile, &perms, GFP_KERNEL,
+ OP_CHANGE_HAT, AA_MAY_CHANGEHAT, NULL,
+ target, GLOBAL_ROOT_UID, info, error);
+
+out:
+ aa_put_profile(hat);
+ kfree(name);
+ put_cred(cred);
+
+ return error;
+}
+
+/**
+ * aa_change_profile - perform a one-way profile transition
+ * @ns_name: name of the profile namespace to change to (MAYBE NULL)
+ * @hname: name of profile to change to (MAYBE NULL)
+ * @onexec: whether this transition is to take place immediately or at exec
+ * @permtest: true if this is just a permission test
+ *
+ * Change to the new profile @hname. Unlike with hats, there is no way
+ * to change back. If @hname isn't specified the current profile name is
+ * used.
+ * If @onexec is set the transition is delayed until the next exec.
+ *
+ * Returns %0 on success, error otherwise.
+ */
+int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
+ bool permtest)
+{
+ const struct cred *cred;
+ struct aa_profile *profile, *target = NULL;
+ struct aa_namespace *ns = NULL;
+ struct file_perms perms = {};
+ const char *name = NULL, *info = NULL;
+ int op, error = 0;
+ u32 request;
+
+ if (!hname && !ns_name)
+ return -EINVAL;
+
+ if (onexec) {
+ request = AA_MAY_ONEXEC;
+ op = OP_CHANGE_ONEXEC;
+ } else {
+ request = AA_MAY_CHANGE_PROFILE;
+ op = OP_CHANGE_PROFILE;
+ }
+
+ cred = get_current_cred();
+ profile = aa_cred_profile(cred);
+
+ /*
+ * Fail explicitly requested domain transitions if no_new_privs
+ * and not unconfined.
+ * Domain transitions from unconfined are allowed even when
+ * no_new_privs is set because this aways results in a reduction
+ * of permissions.
+ */
+ if (current->no_new_privs && !unconfined(profile)) {
+ put_cred(cred);
+ return -EPERM;
+ }
+
+ if (ns_name) {
+ /* released below */
+ ns = aa_find_namespace(profile->ns, ns_name);
+ if (!ns) {
+ /* we don't create a new namespace in complain mode */
+ name = ns_name;
+ info = "namespace not found";
+ error = -ENOENT;
+ goto audit;
+ }
+ } else
+ /* released below */
+ ns = aa_get_namespace(profile->ns);
+
+ /* if the name was not specified, use the name of the current profile */
+ if (!hname) {
+ if (unconfined(profile))
+ hname = ns->unconfined->base.hname;
+ else
+ hname = profile->base.hname;
+ }
+
+ perms = change_profile_perms(profile, ns, hname, request,
+ profile->file.start);
+ if (!(perms.allow & request)) {
+ error = -EACCES;
+ goto audit;
+ }
+
+ /* released below */
+ target = aa_lookup_profile(ns, hname);
+ if (!target) {
+ info = "profile not found";
+ error = -ENOENT;
+ if (permtest || !COMPLAIN_MODE(profile))
+ goto audit;
+ /* released below */
+ target = aa_new_null_profile(profile, 0);
+ if (!target) {
+ info = "failed null profile create";
+ error = -ENOMEM;
+ goto audit;
+ }
+ }
+
+ /* check if tracing task is allowed to trace target domain */
+ error = may_change_ptraced_domain(target);
+ if (error) {
+ info = "ptrace prevents transition";
+ goto audit;
+ }
+
+ if (permtest)
+ goto audit;
+
+ if (onexec)
+ error = aa_set_current_onexec(target);
+ else
+ error = aa_replace_current_profile(target);
+
+audit:
+ if (!permtest)
+ error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request,
+ name, hname, GLOBAL_ROOT_UID, info, error);
+
+ aa_put_namespace(ns);
+ aa_put_profile(target);
+ put_cred(cred);
+
+ return error;
+}
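
As a usage note (illustrative, not part of the patch), aa_change_profile() above
backs the userspace change_profile/change_onexec requests made through
/proc/<pid>/attr/current. A minimal sketch assuming the libapparmor prototypes
aa_change_profile() and aa_change_onexec() from <sys/apparmor.h>, with a
hypothetical profile name; aa_change_profile() would do the immediate one-way
switch, while the sketch uses aa_change_onexec() to delay it to the next exec.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/apparmor.h>

int main(void)
{
	/* request the transition at the next exec only (onexec path above) */
	if (aa_change_onexec("sandbox_profile") == -1) {
		perror("aa_change_onexec");
		return EXIT_FAILURE;
	}

	/* the exec'd program runs confined by "sandbox_profile" */
	execl("/usr/bin/id", "id", (char *)NULL);
	perror("execl");
	return EXIT_FAILURE;
}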
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
new file mode 100644
index 00000000000..fdaa50cb187
--- /dev/null
+++ b/security/apparmor/file.c
@@ -0,0 +1,458 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor mediation of files
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/file.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+struct file_perms nullperms;
+
+
+/**
+ * audit_file_mask - convert mask to permission string
+ * @ab: audit buffer to write the permission string to (NOT NULL)
+ * @mask: permission mask to convert
+ */
+static void audit_file_mask(struct audit_buffer *ab, u32 mask)
+{
+ char str[10];
+
+ char *m = str;
+
+ if (mask & AA_EXEC_MMAP)
+ *m++ = 'm';
+ if (mask & (MAY_READ | AA_MAY_META_READ))
+ *m++ = 'r';
+ if (mask & (MAY_WRITE | AA_MAY_META_WRITE | AA_MAY_CHMOD |
+ AA_MAY_CHOWN))
+ *m++ = 'w';
+ else if (mask & MAY_APPEND)
+ *m++ = 'a';
+ if (mask & AA_MAY_CREATE)
+ *m++ = 'c';
+ if (mask & AA_MAY_DELETE)
+ *m++ = 'd';
+ if (mask & AA_MAY_LINK)
+ *m++ = 'l';
+ if (mask & AA_MAY_LOCK)
+ *m++ = 'k';
+ if (mask & MAY_EXEC)
+ *m++ = 'x';
+ *m = '\0';
+
+ audit_log_string(ab, str);
+}
+
+/**
+ * file_audit_cb - call back for file specific audit fields
+ * @ab: audit_buffer (NOT NULL)
+ * @va: audit struct to audit values of (NOT NULL)
+ */
+static void file_audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+ kuid_t fsuid = current_fsuid();
+
+ if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
+ audit_log_format(ab, " requested_mask=");
+ audit_file_mask(ab, sa->aad->fs.request);
+ }
+ if (sa->aad->fs.denied & AA_AUDIT_FILE_MASK) {
+ audit_log_format(ab, " denied_mask=");
+ audit_file_mask(ab, sa->aad->fs.denied);
+ }
+ if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
+ audit_log_format(ab, " fsuid=%d",
+ from_kuid(&init_user_ns, fsuid));
+ audit_log_format(ab, " ouid=%d",
+ from_kuid(&init_user_ns, sa->aad->fs.ouid));
+ }
+
+ if (sa->aad->fs.target) {
+ audit_log_format(ab, " target=");
+ audit_log_untrustedstring(ab, sa->aad->fs.target);
+ }
+}
+
+/**
+ * aa_audit_file - handle the auditing of file operations
+ * @profile: the profile being enforced (NOT NULL)
+ * @perms: the permissions computed for the request (NOT NULL)
+ * @gfp: allocation flags
+ * @op: operation being mediated
+ * @request: permissions requested
+ * @name: name of object being mediated (MAYBE NULL)
+ * @target: name of target (MAYBE NULL)
+ * @ouid: object uid
+ * @info: extra information message (MAYBE NULL)
+ * @error: 0 if operation allowed else failure error code
+ *
+ * Returns: %0 or error on failure
+ */
+int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
+ gfp_t gfp, int op, u32 request, const char *name,
+ const char *target, kuid_t ouid, const char *info, int error)
+{
+ int type = AUDIT_APPARMOR_AUTO;
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.op = op;
+ aad.fs.request = request;
+ aad.name = name;
+ aad.fs.target = target;
+ aad.fs.ouid = ouid;
+ aad.info = info;
+ aad.error = error;
+
+ if (likely(!sa.aad->error)) {
+ u32 mask = perms->audit;
+
+ if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
+ mask = 0xffff;
+
+ /* mask off perms that are not being force audited */
+ sa.aad->fs.request &= mask;
+
+ if (likely(!sa.aad->fs.request))
+ return 0;
+ type = AUDIT_APPARMOR_AUDIT;
+ } else {
+ /* only report permissions that were denied */
+ sa.aad->fs.request = sa.aad->fs.request & ~perms->allow;
+
+ if (sa.aad->fs.request & perms->kill)
+ type = AUDIT_APPARMOR_KILL;
+
+ /* quiet known rejects, assumes quiet and kill do not overlap */
+ if ((sa.aad->fs.request & perms->quiet) &&
+ AUDIT_MODE(profile) != AUDIT_NOQUIET &&
+ AUDIT_MODE(profile) != AUDIT_ALL)
+ sa.aad->fs.request &= ~perms->quiet;
+
+ if (!sa.aad->fs.request)
+ return COMPLAIN_MODE(profile) ? 0 : sa.aad->error;
+ }
+
+ sa.aad->fs.denied = sa.aad->fs.request & ~perms->allow;
+ return aa_audit(type, profile, gfp, &sa, file_audit_cb);
+}
+
+/**
+ * map_old_perms - map old file perms layout to the new layout
+ * @old: permission set in old mapping
+ *
+ * Returns: new permission mapping
+ */
+static u32 map_old_perms(u32 old)
+{
+ u32 new = old & 0xf;
+ if (old & MAY_READ)
+ new |= AA_MAY_META_READ;
+ if (old & MAY_WRITE)
+ new |= AA_MAY_META_WRITE | AA_MAY_CREATE | AA_MAY_DELETE |
+ AA_MAY_CHMOD | AA_MAY_CHOWN;
+ if (old & 0x10)
+ new |= AA_MAY_LINK;
+ /* the old mapping lock and link_subset flags were overlaid and
+ * their use was determined by which part of a pair they were in
+ */
+ if (old & 0x20)
+ new |= AA_MAY_LOCK | AA_LINK_SUBSET;
+ if (old & 0x40) /* AA_EXEC_MMAP */
+ new |= AA_EXEC_MMAP;
+
+ return new;
+}
+
+/**
+ * compute_perms - convert dfa compressed perms to internal perms
+ * @dfa: dfa to compute perms for (NOT NULL)
+ * @state: state in dfa
+ * @cond: conditions to consider (NOT NULL)
+ *
+ * TODO: convert from dfa + state to a permission entry; do the
+ * conversion at load time.
+ *
+ * Returns: computed permission set
+ */
+static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
+ struct path_cond *cond)
+{
+ struct file_perms perms;
+
+ /* FIXME: change over to new dfa format
+ * currently file perms are encoded in the dfa, new format
+ * splits the permissions from the dfa. This mapping can be
+ * done at profile load
+ */
+ perms.kill = 0;
+
+ if (uid_eq(current_fsuid(), cond->uid)) {
+ perms.allow = map_old_perms(dfa_user_allow(dfa, state));
+ perms.audit = map_old_perms(dfa_user_audit(dfa, state));
+ perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
+ perms.xindex = dfa_user_xindex(dfa, state);
+ } else {
+ perms.allow = map_old_perms(dfa_other_allow(dfa, state));
+ perms.audit = map_old_perms(dfa_other_audit(dfa, state));
+ perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
+ perms.xindex = dfa_other_xindex(dfa, state);
+ }
+ perms.allow |= AA_MAY_META_READ;
+
+ /* change_profile wasn't determined by ownership in old mapping */
+ if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
+ perms.allow |= AA_MAY_CHANGE_PROFILE;
+ if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
+ perms.allow |= AA_MAY_ONEXEC;
+
+ return perms;
+}
+
+/**
+ * aa_str_perms - find the permissions that match @name
+ * @dfa: to match against (MAYBE NULL)
+ * @start: state to start matching in
+ * @name: string to match against dfa (NOT NULL)
+ * @cond: conditions to consider for permission set computation (NOT NULL)
+ * @perms: Returns - the permissions found when matching @name
+ *
+ * Returns: the final state in @dfa when beginning at @start and walking @name
+ */
+unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
+ const char *name, struct path_cond *cond,
+ struct file_perms *perms)
+{
+ unsigned int state;
+ if (!dfa) {
+ *perms = nullperms;
+ return DFA_NOMATCH;
+ }
+
+ state = aa_dfa_match(dfa, start, name);
+ *perms = compute_perms(dfa, state, cond);
+
+ return state;
+}
+
+/**
+ * is_deleted - test if a file has been completely unlinked
+ * @dentry: dentry of file to test for deletion (NOT NULL)
+ *
+ * Returns: %1 if deleted else %0
+ */
+static inline bool is_deleted(struct dentry *dentry)
+{
+ if (d_unlinked(dentry) && dentry->d_inode->i_nlink == 0)
+ return 1;
+ return 0;
+}
+
+/**
+ * aa_path_perm - do permissions check & audit for @path
+ * @op: operation being checked
+ * @profile: profile being enforced (NOT NULL)
+ * @path: path to check permissions of (NOT NULL)
+ * @flags: any additional path flags beyond what the profile specifies
+ * @request: requested permissions
+ * @cond: conditional info for this request (NOT NULL)
+ *
+ * Returns: %0 else error if access denied or other error
+ */
+int aa_path_perm(int op, struct aa_profile *profile, struct path *path,
+ int flags, u32 request, struct path_cond *cond)
+{
+ char *buffer = NULL;
+ struct file_perms perms = {};
+ const char *name, *info = NULL;
+ int error;
+
+ flags |= profile->path_flags | (S_ISDIR(cond->mode) ? PATH_IS_DIR : 0);
+ error = aa_path_name(path, flags, &buffer, &name, &info);
+ if (error) {
+ if (error == -ENOENT && is_deleted(path->dentry)) {
+ /* Accesses to open files that have been deleted are
+ * given a pass (implicit delegation)
+ */
+ error = 0;
+ info = NULL;
+ perms.allow = request;
+ }
+ } else {
+ aa_str_perms(profile->file.dfa, profile->file.start, name, cond,
+ &perms);
+ if (request & ~perms.allow)
+ error = -EACCES;
+ }
+ error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request, name,
+ NULL, cond->uid, info, error);
+ kfree(buffer);
+
+ return error;
+}
+
+/**
+ * xindex_is_subset - helper for aa_path_link
+ * @link: link permission set
+ * @target: target permission set
+ *
+ * Test whether the target x permissions are equal to OR a subset of the
+ * link x permissions. This is done as part of the subset test, where a
+ * hardlink must have a subset of the permissions that the target has.
+ *
+ * Returns: %1 if subset else %0
+ */
+static inline bool xindex_is_subset(u32 link, u32 target)
+{
+ if (((link & ~AA_X_UNSAFE) != (target & ~AA_X_UNSAFE)) ||
+ ((link & AA_X_UNSAFE) && !(target & AA_X_UNSAFE)))
+ return 0;
+
+ return 1;
+}
+
+/**
+ * aa_path_link - Handle hard link permission check
+ * @profile: the profile being enforced (NOT NULL)
+ * @old_dentry: the target dentry (NOT NULL)
+ * @new_dir: directory the new link will be created in (NOT NULL)
+ * @new_dentry: the link being created (NOT NULL)
+ *
+ * Handle the permission test for a link & target pair. Permission
+ * is encoded as a pair where the link permission is determined
+ * first, and if allowed, the target is tested. The target test
+ * is done from the point of the link match (not start of DFA)
+ * making the target permission dependent on the link permission match.
+ *
+ * The subset test, if required, forces the permissions granted on the
+ * link to be a subset of the permissions granted on the target.
+ *
+ * Returns: %0 if allowed else error
+ */
+int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
+ struct path *new_dir, struct dentry *new_dentry)
+{
+ struct path link = { new_dir->mnt, new_dentry };
+ struct path target = { new_dir->mnt, old_dentry };
+ struct path_cond cond = {
+ old_dentry->d_inode->i_uid,
+ old_dentry->d_inode->i_mode
+ };
+ char *buffer = NULL, *buffer2 = NULL;
+ const char *lname, *tname = NULL, *info = NULL;
+ struct file_perms lperms, perms;
+ u32 request = AA_MAY_LINK;
+ unsigned int state;
+ int error;
+
+ lperms = nullperms;
+
+ /* buffer freed below, lname is pointer in buffer */
+ error = aa_path_name(&link, profile->path_flags, &buffer, &lname,
+ &info);
+ if (error)
+ goto audit;
+
+ /* buffer2 freed below, tname is pointer in buffer2 */
+ error = aa_path_name(&target, profile->path_flags, &buffer2, &tname,
+ &info);
+ if (error)
+ goto audit;
+
+ error = -EACCES;
+ /* aa_str_perms - handles the case of the dfa being NULL */
+ state = aa_str_perms(profile->file.dfa, profile->file.start, lname,
+ &cond, &lperms);
+
+ if (!(lperms.allow & AA_MAY_LINK))
+ goto audit;
+
+ /* test to see if target can be paired with link */
+ state = aa_dfa_null_transition(profile->file.dfa, state);
+ aa_str_perms(profile->file.dfa, state, tname, &cond, &perms);
+
+ /* the force audit/quiet masks for the link are stored in the second
+ * entry of the link pair.
+ */
+ lperms.audit = perms.audit;
+ lperms.quiet = perms.quiet;
+ lperms.kill = perms.kill;
+
+ if (!(perms.allow & AA_MAY_LINK)) {
+ info = "target restricted";
+ goto audit;
+ }
+
+ /* done if link subset test is not required */
+ if (!(perms.allow & AA_LINK_SUBSET))
+ goto done_tests;
+
+ /* Do the link perm subset test, requiring that the permissions allowed
+ * on the link are a subset of the permissions allowed on the target.
+ */
+ aa_str_perms(profile->file.dfa, profile->file.start, tname, &cond,
+ &perms);
+
+ /* AA_MAY_LINK is not considered in the subset test */
+ request = lperms.allow & ~AA_MAY_LINK;
+ lperms.allow &= perms.allow | AA_MAY_LINK;
+
+ request |= AA_AUDIT_FILE_MASK & (lperms.allow & ~perms.allow);
+ if (request & ~lperms.allow) {
+ goto audit;
+ } else if ((lperms.allow & MAY_EXEC) &&
+ !xindex_is_subset(lperms.xindex, perms.xindex)) {
+ lperms.allow &= ~MAY_EXEC;
+ request |= MAY_EXEC;
+ info = "link not subset of target";
+ goto audit;
+ }
+
+done_tests:
+ error = 0;
+
+audit:
+ error = aa_audit_file(profile, &lperms, GFP_KERNEL, OP_LINK, request,
+ lname, tname, cond.uid, info, error);
+ kfree(buffer);
+ kfree(buffer2);
+
+ return error;
+}
+
+/**
+ * aa_file_perm - do permission revalidation check & audit for @file
+ * @op: operation being checked
+ * @profile: profile being enforced (NOT NULL)
+ * @file: file to revalidate access permissions on (NOT NULL)
+ * @request: requested permissions
+ *
+ * Returns: %0 if access allowed else error
+ */
+int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
+ u32 request)
+{
+ struct path_cond cond = {
+ .uid = file_inode(file)->i_uid,
+ .mode = file_inode(file)->i_mode
+ };
+
+ return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED,
+ request, &cond);
+}
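
To make the old-to-new permission remapping concrete (illustrative only), the
following standalone restatement of map_old_perms() shows how a single old
write bit fans out into the finer-grained create/delete/meta-write/chmod/chown
permissions; the bit values are copied from security/apparmor/include/file.h.

#include <stdio.h>
#include <stdint.h>

#define MAY_EXEC		0x0001
#define MAY_WRITE		0x0002
#define MAY_READ		0x0004
#define MAY_APPEND		0x0008
#define AA_MAY_CREATE		0x0010
#define AA_MAY_DELETE		0x0020
#define AA_MAY_META_WRITE	0x0040
#define AA_MAY_META_READ	0x0080
#define AA_MAY_CHMOD		0x0100
#define AA_MAY_CHOWN		0x0200
#define AA_MAY_LOCK		0x0400
#define AA_EXEC_MMAP		0x0800
#define AA_MAY_LINK		0x1000
#define AA_LINK_SUBSET		AA_MAY_LOCK	/* overlaid */

/* userspace copy of map_old_perms() for experimentation */
static uint32_t map_old_perms(uint32_t old)
{
	uint32_t new = old & 0xf;

	if (old & MAY_READ)
		new |= AA_MAY_META_READ;
	if (old & MAY_WRITE)
		new |= AA_MAY_META_WRITE | AA_MAY_CREATE | AA_MAY_DELETE |
		       AA_MAY_CHMOD | AA_MAY_CHOWN;
	if (old & 0x10)
		new |= AA_MAY_LINK;
	if (old & 0x20)
		new |= AA_MAY_LOCK | AA_LINK_SUBSET;
	if (old & 0x40)
		new |= AA_EXEC_MMAP;
	return new;
}

int main(void)
{
	/* an old-format "rw" entry; prints: rw -> 0x03f6 */
	printf("rw -> 0x%04x\n", map_old_perms(MAY_READ | MAY_WRITE));
	return 0;
}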
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
new file mode 100644
index 00000000000..97130f88838
--- /dev/null
+++ b/security/apparmor/include/apparmor.h
@@ -0,0 +1,120 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor basic global and lib definitions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __APPARMOR_H
+#define __APPARMOR_H
+
+#include <linux/slab.h>
+#include <linux/fs.h>
+
+#include "match.h"
+
+/*
+ * Class of mediation types in the AppArmor policy db
+ */
+#define AA_CLASS_ENTRY 0
+#define AA_CLASS_UNKNOWN 1
+#define AA_CLASS_FILE 2
+#define AA_CLASS_CAP 3
+#define AA_CLASS_NET 4
+#define AA_CLASS_RLIMITS 5
+#define AA_CLASS_DOMAIN 6
+
+#define AA_CLASS_LAST AA_CLASS_DOMAIN
+
+/* Control parameters settable through module/boot flags */
+extern enum audit_mode aa_g_audit;
+extern bool aa_g_audit_header;
+extern bool aa_g_debug;
+extern bool aa_g_lock_policy;
+extern bool aa_g_logsyscall;
+extern bool aa_g_paranoid_load;
+extern unsigned int aa_g_path_max;
+
+/*
+ * DEBUG remains global (no per profile flag) since it is mostly used in sysctl
+ * which is not related to profile accesses.
+ */
+
+#define AA_DEBUG(fmt, args...) \
+ do { \
+ if (aa_g_debug && printk_ratelimit()) \
+ printk(KERN_DEBUG "AppArmor: " fmt, ##args); \
+ } while (0)
+
+#define AA_ERROR(fmt, args...) \
+ do { \
+ if (printk_ratelimit()) \
+ printk(KERN_ERR "AppArmor: " fmt, ##args); \
+ } while (0)
+
+/* Flag indicating whether initialization completed */
+extern int apparmor_initialized __initdata;
+
+/* fn's in lib */
+char *aa_split_fqname(char *args, char **ns_name);
+void aa_info_message(const char *str);
+void *__aa_kvmalloc(size_t size, gfp_t flags);
+
+static inline void *kvmalloc(size_t size)
+{
+ return __aa_kvmalloc(size, 0);
+}
+
+static inline void *kvzalloc(size_t size)
+{
+ return __aa_kvmalloc(size, __GFP_ZERO);
+}
+
+/* returns 0 if kref not incremented */
+static inline int kref_get_not0(struct kref *kref)
+{
+ return atomic_inc_not_zero(&kref->refcount);
+}
+
+/**
+ * aa_strneq - compare null terminated @str to a non null terminated substring
+ * @str: a null terminated string
+ * @sub: a substring, not necessarily null terminated
+ * @len: length of @sub to compare
+ *
+ * The @str string must be fully consumed for this to be considered a match
+ */
+static inline bool aa_strneq(const char *str, const char *sub, int len)
+{
+ return !strncmp(str, sub, len) && !str[len];
+}
+
+/**
+ * aa_dfa_null_transition - step to next state after null character
+ * @dfa: the dfa to match against
+ * @start: the state of the dfa to start matching in
+ *
+ * aa_dfa_null_transition transitions to the next state after a null
+ * character which is not used in standard matching and is only
+ * used to separate pairs.
+ */
+static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
+ unsigned int start)
+{
+ /* the null transition only needs the string's null terminator byte */
+ return aa_dfa_next(dfa, start, 0);
+}
+
+static inline bool mediated_filesystem(struct inode *inode)
+{
+ return !(inode->i_sb->s_flags & MS_NOUSER);
+}
+
+#endif /* __APPARMOR_H */
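
A brief sketch (illustrative; the helper name is made up) of how
aa_dfa_null_transition() above combines with aa_dfa_match() to walk the paired
strings used elsewhere in this patch, e.g. an exec name paired with a
change_profile target in domain.c, or a link name paired with its target in
file.c. It assumes the declarations from this header and include/match.h.

/* hypothetical helper: match "name\0second" against @dfa starting at @start */
static unsigned int match_pair(struct aa_dfa *dfa, unsigned int start,
			       const char *name, const char *second)
{
	unsigned int state;

	state = aa_dfa_match(dfa, start, name);	    /* walk the first string */
	state = aa_dfa_null_transition(dfa, state); /* consume the \0 separator */
	return aa_dfa_match(dfa, state, second);    /* walk the paired string */
}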
diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h
new file mode 100644
index 00000000000..414e56878dd
--- /dev/null
+++ b/security/apparmor/include/apparmorfs.h
@@ -0,0 +1,104 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor filesystem definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_APPARMORFS_H
+#define __AA_APPARMORFS_H
+
+enum aa_fs_type {
+ AA_FS_TYPE_BOOLEAN,
+ AA_FS_TYPE_STRING,
+ AA_FS_TYPE_U64,
+ AA_FS_TYPE_FOPS,
+ AA_FS_TYPE_DIR,
+};
+
+struct aa_fs_entry;
+
+struct aa_fs_entry {
+ const char *name;
+ struct dentry *dentry;
+ umode_t mode;
+ enum aa_fs_type v_type;
+ union {
+ bool boolean;
+ char *string;
+ unsigned long u64;
+ struct aa_fs_entry *files;
+ } v;
+ const struct file_operations *file_ops;
+};
+
+extern const struct file_operations aa_fs_seq_file_ops;
+
+#define AA_FS_FILE_BOOLEAN(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_FS_TYPE_BOOLEAN, .v.boolean = (_value), \
+ .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_STRING(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_FS_TYPE_STRING, .v.string = (_value), \
+ .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_U64(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_FS_TYPE_U64, .v.u64 = (_value), \
+ .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_FOPS(_name, _mode, _fops) \
+ { .name = (_name), .v_type = AA_FS_TYPE_FOPS, \
+ .mode = (_mode), .file_ops = (_fops) }
+#define AA_FS_DIR(_name, _value) \
+ { .name = (_name), .v_type = AA_FS_TYPE_DIR, .v.files = (_value) }
+
+extern void __init aa_destroy_aafs(void);
+
+struct aa_profile;
+struct aa_namespace;
+
+enum aafs_ns_type {
+ AAFS_NS_DIR,
+ AAFS_NS_PROFS,
+ AAFS_NS_NS,
+ AAFS_NS_COUNT,
+ AAFS_NS_MAX_COUNT,
+ AAFS_NS_SIZE,
+ AAFS_NS_MAX_SIZE,
+ AAFS_NS_OWNER,
+ AAFS_NS_SIZEOF,
+};
+
+enum aafs_prof_type {
+ AAFS_PROF_DIR,
+ AAFS_PROF_PROFS,
+ AAFS_PROF_NAME,
+ AAFS_PROF_MODE,
+ AAFS_PROF_ATTACH,
+ AAFS_PROF_HASH,
+ AAFS_PROF_SIZEOF,
+};
+
+#define ns_dir(X) ((X)->dents[AAFS_NS_DIR])
+#define ns_subns_dir(X) ((X)->dents[AAFS_NS_NS])
+#define ns_subprofs_dir(X) ((X)->dents[AAFS_NS_PROFS])
+
+#define prof_dir(X) ((X)->dents[AAFS_PROF_DIR])
+#define prof_child_dir(X) ((X)->dents[AAFS_PROF_PROFS])
+
+void __aa_fs_profile_rmdir(struct aa_profile *profile);
+void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+ struct aa_profile *new);
+int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent);
+void __aa_fs_namespace_rmdir(struct aa_namespace *ns);
+int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent,
+ const char *name);
+
+#endif /* __AA_APPARMORFS_H */
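
For illustration (hypothetical entry names and values, not part of the patch),
the AA_FS_* macros above are meant to be used to declare static securityfs
trees terminated by an empty entry, along the lines of:

static struct aa_fs_entry aa_fs_entry_example_features[] = {
	AA_FS_FILE_BOOLEAN("change_hat", 1),
	AA_FS_FILE_STRING("mask", "create read write exec"),
	AA_FS_FILE_U64("version", 2),
	{ }	/* NULL .name terminates the table */
};

static struct aa_fs_entry aa_fs_entry_example_root[] = {
	AA_FS_DIR("features", aa_fs_entry_example_features),
	{ }
};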
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
new file mode 100644
index 00000000000..ba3dfd17f23
--- /dev/null
+++ b/security/apparmor/include/audit.h
@@ -0,0 +1,147 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor auditing function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_AUDIT_H
+#define __AA_AUDIT_H
+
+#include <linux/audit.h>
+#include <linux/fs.h>
+#include <linux/lsm_audit.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include "file.h"
+
+struct aa_profile;
+
+extern const char *const audit_mode_names[];
+#define AUDIT_MAX_INDEX 5
+enum audit_mode {
+ AUDIT_NORMAL, /* follow normal auditing of accesses */
+ AUDIT_QUIET_DENIED, /* quiet all denied access messages */
+ AUDIT_QUIET, /* quiet all messages */
+ AUDIT_NOQUIET, /* do not quiet audit messages */
+ AUDIT_ALL /* audit all accesses */
+};
+
+enum audit_type {
+ AUDIT_APPARMOR_AUDIT,
+ AUDIT_APPARMOR_ALLOWED,
+ AUDIT_APPARMOR_DENIED,
+ AUDIT_APPARMOR_HINT,
+ AUDIT_APPARMOR_STATUS,
+ AUDIT_APPARMOR_ERROR,
+ AUDIT_APPARMOR_KILL,
+ AUDIT_APPARMOR_AUTO
+};
+
+extern const char *const op_table[];
+enum aa_ops {
+ OP_NULL,
+
+ OP_SYSCTL,
+ OP_CAPABLE,
+
+ OP_UNLINK,
+ OP_MKDIR,
+ OP_RMDIR,
+ OP_MKNOD,
+ OP_TRUNC,
+ OP_LINK,
+ OP_SYMLINK,
+ OP_RENAME_SRC,
+ OP_RENAME_DEST,
+ OP_CHMOD,
+ OP_CHOWN,
+ OP_GETATTR,
+ OP_OPEN,
+
+ OP_FPERM,
+ OP_FLOCK,
+ OP_FMMAP,
+ OP_FMPROT,
+
+ OP_CREATE,
+ OP_POST_CREATE,
+ OP_BIND,
+ OP_CONNECT,
+ OP_LISTEN,
+ OP_ACCEPT,
+ OP_SENDMSG,
+ OP_RECVMSG,
+ OP_GETSOCKNAME,
+ OP_GETPEERNAME,
+ OP_GETSOCKOPT,
+ OP_SETSOCKOPT,
+ OP_SOCK_SHUTDOWN,
+
+ OP_PTRACE,
+
+ OP_EXEC,
+ OP_CHANGE_HAT,
+ OP_CHANGE_PROFILE,
+ OP_CHANGE_ONEXEC,
+
+ OP_SETPROCATTR,
+ OP_SETRLIMIT,
+
+ OP_PROF_REPL,
+ OP_PROF_LOAD,
+ OP_PROF_RM,
+};
+
+
+struct apparmor_audit_data {
+ int error;
+ int op;
+ int type;
+ void *profile;
+ const char *name;
+ const char *info;
+ union {
+ void *target;
+ struct {
+ long pos;
+ void *target;
+ } iface;
+ struct {
+ int rlim;
+ unsigned long max;
+ } rlim;
+ struct {
+ const char *target;
+ u32 request;
+ u32 denied;
+ kuid_t ouid;
+ } fs;
+ };
+};
+
+/* define a short hand for apparmor_audit_data structure */
+#define aad apparmor_audit_data
+
+void aa_audit_msg(int type, struct common_audit_data *sa,
+ void (*cb) (struct audit_buffer *, void *));
+int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
+ struct common_audit_data *sa,
+ void (*cb) (struct audit_buffer *, void *));
+
+static inline int complain_error(int error)
+{
+ if (error == -EPERM || error == -EACCES)
+ return 0;
+ return error;
+}
+
+#endif /* __AA_AUDIT_H */
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
new file mode 100644
index 00000000000..fc3fa381d85
--- /dev/null
+++ b/security/apparmor/include/capability.h
@@ -0,0 +1,48 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor capability mediation definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_CAPABILITY_H
+#define __AA_CAPABILITY_H
+
+#include <linux/sched.h>
+
+#include "apparmorfs.h"
+
+struct aa_profile;
+
+/* aa_caps - confinement data for capabilities
+ * @allow: mask of capabilities that are allowed
+ * @audit: caps that are to be audited
+ * @quiet: caps that should not be audited
+ * @kill: caps that when requested will result in the task being killed
+ * @extended: caps that are subject to finer grained mediation
+ */
+struct aa_caps {
+ kernel_cap_t allow;
+ kernel_cap_t audit;
+ kernel_cap_t quiet;
+ kernel_cap_t kill;
+ kernel_cap_t extended;
+};
+
+extern struct aa_fs_entry aa_fs_entry_caps[];
+
+int aa_capable(struct aa_profile *profile, int cap, int audit);
+
+static inline void aa_free_cap_rules(struct aa_caps *caps)
+{
+ /* NOP */
+}
+
+#endif /* __AA_CAPABILITY_H */
diff --git a/security/apparmor/include/context.h b/security/apparmor/include/context.h
new file mode 100644
index 00000000000..6bf65798e5d
--- /dev/null
+++ b/security/apparmor/include/context.h
@@ -0,0 +1,178 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor contexts used to associate "labels" to objects.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_CONTEXT_H
+#define __AA_CONTEXT_H
+
+#include <linux/cred.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+
+#include "policy.h"
+
+#define cred_cxt(X) (X)->security
+#define current_cxt() cred_cxt(current_cred())
+
+/* struct aa_file_cxt - the AppArmor context the file was opened in
+ * @allow: the permissions the file was opened with
+ *
+ * The file_cxt could currently be directly stored in file->f_security
+ * as the profile reference is now stored in the f_cred. However the
+ * cxt struct will expand in the future so we keep the struct.
+ */
+struct aa_file_cxt {
+ u16 allow;
+};
+
+/**
+ * aa_alloc_file_context - allocate file_cxt
+ * @gfp: gfp flags for allocation
+ *
+ * Returns: file_cxt or NULL on failure
+ */
+static inline struct aa_file_cxt *aa_alloc_file_context(gfp_t gfp)
+{
+ return kzalloc(sizeof(struct aa_file_cxt), gfp);
+}
+
+/**
+ * aa_free_file_context - free a file_cxt
+ * @cxt: file_cxt to free (MAYBE_NULL)
+ */
+static inline void aa_free_file_context(struct aa_file_cxt *cxt)
+{
+ if (cxt)
+ kzfree(cxt);
+}
+
+/**
+ * struct aa_task_cxt - primary label for confined tasks
+ * @profile: the current profile (NOT NULL)
+ * @onexec: profile to transition to on next exec (MAYBE NULL)
+ * @previous: profile the task may return to (MAYBE NULL)
+ * @token: magic value the task must know for returning to @previous
+ *
+ * Contains the task's current profile (which could change due to
+ * change_hat). Plus the hat_magic needed during change_hat.
+ *
+ * TODO: make so a task can be confined by a stack of contexts
+ */
+struct aa_task_cxt {
+ struct aa_profile *profile;
+ struct aa_profile *onexec;
+ struct aa_profile *previous;
+ u64 token;
+};
+
+struct aa_task_cxt *aa_alloc_task_context(gfp_t flags);
+void aa_free_task_context(struct aa_task_cxt *cxt);
+void aa_dup_task_context(struct aa_task_cxt *new,
+ const struct aa_task_cxt *old);
+int aa_replace_current_profile(struct aa_profile *profile);
+int aa_set_current_onexec(struct aa_profile *profile);
+int aa_set_current_hat(struct aa_profile *profile, u64 token);
+int aa_restore_previous_profile(u64 cookie);
+struct aa_profile *aa_get_task_profile(struct task_struct *task);
+
+
+/**
+ * aa_cred_profile - obtain cred's profiles
+ * @cred: cred to obtain profiles from (NOT NULL)
+ *
+ * Returns: confining profile
+ *
+ * does NOT increment reference count
+ */
+static inline struct aa_profile *aa_cred_profile(const struct cred *cred)
+{
+ struct aa_task_cxt *cxt = cred_cxt(cred);
+ BUG_ON(!cxt || !cxt->profile);
+ return cxt->profile;
+}
+
+/**
+ * __aa_task_profile - retrieve another task's profile
+ * @task: task to query (NOT NULL)
+ *
+ * Returns: @task's profile without incrementing its ref count
+ *
+ * If @task != current needs to be called in RCU safe critical section
+ */
+static inline struct aa_profile *__aa_task_profile(struct task_struct *task)
+{
+ return aa_cred_profile(__task_cred(task));
+}
+
+/**
+ * __aa_task_is_confined - determine if @task has any confinement
+ * @task: task to check confinement of (NOT NULL)
+ *
+ * If @task != current needs to be called in RCU safe critical section
+ */
+static inline bool __aa_task_is_confined(struct task_struct *task)
+{
+ return !unconfined(__aa_task_profile(task));
+}
+
+/**
+ * __aa_current_profile - find the current task's confining profile
+ *
+ * Returns: up to date confining profile or the ns unconfined profile (NOT NULL)
+ *
+ * This fn will not update the task's cred to the most up to date version
+ * of the profile so it is safe to call while holding locks.
+ */
+static inline struct aa_profile *__aa_current_profile(void)
+{
+ return aa_cred_profile(current_cred());
+}
+
+/**
+ * aa_current_profile - find the current task's confining profile and do updates
+ *
+ * Returns: up to date confining profile or the ns unconfined profile (NOT NULL)
+ *
+ * This fn will update the task's cred structure if the profile has been
+ * replaced. Not safe to call inside locks.
+ */
+static inline struct aa_profile *aa_current_profile(void)
+{
+ const struct aa_task_cxt *cxt = current_cxt();
+ struct aa_profile *profile;
+ BUG_ON(!cxt || !cxt->profile);
+
+ if (PROFILE_INVALID(cxt->profile)) {
+ profile = aa_get_newest_profile(cxt->profile);
+ aa_replace_current_profile(profile);
+ aa_put_profile(profile);
+ cxt = current_cxt();
+ }
+
+ return cxt->profile;
+}
+
+/**
+ * aa_clear_task_cxt_trans - clear transition tracking info from the cxt
+ * @cxt: task context to clear (NOT NULL)
+ */
+static inline void aa_clear_task_cxt_trans(struct aa_task_cxt *cxt)
+{
+ aa_put_profile(cxt->previous);
+ aa_put_profile(cxt->onexec);
+ cxt->previous = NULL;
+ cxt->onexec = NULL;
+ cxt->token = 0;
+}
+
+#endif /* __AA_CONTEXT_H */
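
As a usage sketch (hypothetical hook and mediation function, not part of the
patch), the accessors above are meant to be used from LSM hooks roughly like
this: borrow the profile from the current cred, skip mediation when
unconfined, and take no extra reference while staying within the hook.

static int example_hook(struct inode *inode)
{
	struct aa_profile *profile = __aa_current_profile();

	/* unconfined tasks are not mediated */
	if (unconfined(profile))
		return 0;

	/* profile is borrowed from the cred: no aa_get_profile()/
	 * aa_put_profile() pair is needed inside the hook */
	return example_mediate(profile, inode);
}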
diff --git a/security/apparmor/include/crypto.h b/security/apparmor/include/crypto.h
new file mode 100644
index 00000000000..dc418e5024d
--- /dev/null
+++ b/security/apparmor/include/crypto.h
@@ -0,0 +1,36 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __APPARMOR_CRYPTO_H
+#define __APPARMOR_CRYPTO_H
+
+#include "policy.h"
+
+#ifdef CONFIG_SECURITY_APPARMOR_HASH
+unsigned int aa_hash_size(void);
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+ size_t len);
+#else
+static inline int aa_calc_profile_hash(struct aa_profile *profile, u32 version,
+ void *start, size_t len)
+{
+ return 0;
+}
+
+static inline unsigned int aa_hash_size(void)
+{
+ return 0;
+}
+#endif
+
+#endif /* __APPARMOR_CRYPTO_H */
diff --git a/security/apparmor/include/domain.h b/security/apparmor/include/domain.h
new file mode 100644
index 00000000000..de04464f0a3
--- /dev/null
+++ b/security/apparmor/include/domain.h
@@ -0,0 +1,36 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security domain transition function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/binfmts.h>
+#include <linux/types.h>
+
+#ifndef __AA_DOMAIN_H
+#define __AA_DOMAIN_H
+
+struct aa_domain {
+ int size;
+ char **table;
+};
+
+int apparmor_bprm_set_creds(struct linux_binprm *bprm);
+int apparmor_bprm_secureexec(struct linux_binprm *bprm);
+void apparmor_bprm_committing_creds(struct linux_binprm *bprm);
+void apparmor_bprm_committed_creds(struct linux_binprm *bprm);
+
+void aa_free_domain_entries(struct aa_domain *domain);
+int aa_change_hat(const char *hats[], int count, u64 token, bool permtest);
+int aa_change_profile(const char *ns_name, const char *name, bool onexec,
+ bool permtest);
+
+#endif /* __AA_DOMAIN_H */
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
new file mode 100644
index 00000000000..2c922b86bd4
--- /dev/null
+++ b/security/apparmor/include/file.h
@@ -0,0 +1,216 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor file mediation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_FILE_H
+#define __AA_FILE_H
+
+#include "domain.h"
+#include "match.h"
+
+struct aa_profile;
+struct path;
+
+/*
+ * We use MAY_EXEC, MAY_WRITE, MAY_READ, MAY_APPEND and the following flags
+ * for profile permissions
+ */
+#define AA_MAY_CREATE 0x0010
+#define AA_MAY_DELETE 0x0020
+#define AA_MAY_META_WRITE 0x0040
+#define AA_MAY_META_READ 0x0080
+
+#define AA_MAY_CHMOD 0x0100
+#define AA_MAY_CHOWN 0x0200
+#define AA_MAY_LOCK 0x0400
+#define AA_EXEC_MMAP 0x0800
+
+#define AA_MAY_LINK 0x1000
+#define AA_LINK_SUBSET AA_MAY_LOCK /* overlaid */
+#define AA_MAY_ONEXEC 0x40000000 /* exec allows onexec */
+#define AA_MAY_CHANGE_PROFILE 0x80000000
+#define AA_MAY_CHANGEHAT 0x80000000 /* ctrl auditing only */
+
+#define AA_AUDIT_FILE_MASK (MAY_READ | MAY_WRITE | MAY_EXEC | MAY_APPEND |\
+ AA_MAY_CREATE | AA_MAY_DELETE | \
+ AA_MAY_META_READ | AA_MAY_META_WRITE | \
+ AA_MAY_CHMOD | AA_MAY_CHOWN | AA_MAY_LOCK | \
+ AA_EXEC_MMAP | AA_MAY_LINK)
+
+/*
+ * The xindex is broken into 3 parts
+ * - index - an index into either the exec name table or the variable table
+ * - exec type - which determines how the executable name and index are used
+ * - flags - which modify how the destination name is applied
+ */
+#define AA_X_INDEX_MASK 0x03ff
+
+#define AA_X_TYPE_MASK 0x0c00
+#define AA_X_TYPE_SHIFT 10
+#define AA_X_NONE 0x0000
+#define AA_X_NAME 0x0400 /* use executable name px */
+#define AA_X_TABLE 0x0800 /* use a specified name ->n# */
+
+#define AA_X_UNSAFE 0x1000
+#define AA_X_CHILD 0x2000 /* make >AA_X_NONE apply to children */
+#define AA_X_INHERIT 0x4000
+#define AA_X_UNCONFINED 0x8000
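
As a quick illustration of the three xindex parts described above, a minimal sketch (the helper name is an assumption; only the masks come from this header) might look like:

static inline void xindex_split(u16 xindex, u16 *index, u16 *type, u16 *flags)
{
	*index = xindex & AA_X_INDEX_MASK;			/* name/variable table index */
	*type = (xindex & AA_X_TYPE_MASK) >> AA_X_TYPE_SHIFT;	/* exec type */
	*flags = xindex & ~(AA_X_INDEX_MASK | AA_X_TYPE_MASK);	/* modifier flags */
}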
+
+/* AA_SECURE_X_NEEDED - is passed in the bprm->unsafe field */
+#define AA_SECURE_X_NEEDED 0x8000
+
+/* need to make conditional which ones are being set */
+struct path_cond {
+ kuid_t uid;
+ umode_t mode;
+};
+
+/* struct file_perms - file permission
+ * @allow: mask of permissions that are allowed
+ * @audit: mask of permissions to force an audit message for
+ * @quiet: mask of permissions to quiet audit messages for
+ * @kill: mask of permissions that when matched will kill the task
+ * @xindex: exec transition index if @allow contains MAY_EXEC
+ *
+ * The @audit and @quiet masks should be mutually exclusive.
+ */
+struct file_perms {
+ u32 allow;
+ u32 audit;
+ u32 quiet;
+ u32 kill;
+ u16 xindex;
+};
+
+extern struct file_perms nullperms;
+
+#define COMBINED_PERM_MASK(X) ((X).allow | (X).audit | (X).quiet | (X).kill)
+
+/* FIXME: split perms from dfa and match this to description
+ * also add delegation info.
+ */
+static inline u16 dfa_map_xindex(u16 mask)
+{
+ u16 old_index = (mask >> 10) & 0xf;
+ u16 index = 0;
+
+ if (mask & 0x100)
+ index |= AA_X_UNSAFE;
+ if (mask & 0x200)
+ index |= AA_X_INHERIT;
+ if (mask & 0x80)
+ index |= AA_X_UNCONFINED;
+
+ if (old_index == 1) {
+ index |= AA_X_UNCONFINED;
+ } else if (old_index == 2) {
+ index |= AA_X_NAME;
+ } else if (old_index == 3) {
+ index |= AA_X_NAME | AA_X_CHILD;
+ } else if (old_index) {
+ index |= AA_X_TABLE;
+ index |= old_index - 4;
+ }
+
+ return index;
+}
+
+/*
+ * map old dfa inline permissions to new format
+ */
+#define dfa_user_allow(dfa, state) (((ACCEPT_TABLE(dfa)[state]) & 0x7f) | \
+ ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
+#define dfa_user_audit(dfa, state) ((ACCEPT_TABLE2(dfa)[state]) & 0x7f)
+#define dfa_user_quiet(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 7) & 0x7f)
+#define dfa_user_xindex(dfa, state) \
+ (dfa_map_xindex(ACCEPT_TABLE(dfa)[state] & 0x3fff))
+
+#define dfa_other_allow(dfa, state) ((((ACCEPT_TABLE(dfa)[state]) >> 14) & \
+ 0x7f) | \
+ ((ACCEPT_TABLE(dfa)[state]) & 0x80000000))
+#define dfa_other_audit(dfa, state) (((ACCEPT_TABLE2(dfa)[state]) >> 14) & 0x7f)
+#define dfa_other_quiet(dfa, state) \
+ ((((ACCEPT_TABLE2(dfa)[state]) >> 7) >> 14) & 0x7f)
+#define dfa_other_xindex(dfa, state) \
+ dfa_map_xindex((ACCEPT_TABLE(dfa)[state] >> 14) & 0x3fff)
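
A minimal sketch of how these accept-table macros could be combined to fill a struct file_perms for the "user" half of an accept entry (the helper name is an assumption, not part of the patch):

static inline struct file_perms map_user_perms(struct aa_dfa *dfa,
					       unsigned int state)
{
	struct file_perms perms = {
		.allow = dfa_user_allow(dfa, state),
		.audit = dfa_user_audit(dfa, state),
		.quiet = dfa_user_quiet(dfa, state),
		.xindex = dfa_user_xindex(dfa, state),
	};	/* .kill is left 0 in this sketch */

	return perms;
}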
+
+int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
+ gfp_t gfp, int op, u32 request, const char *name,
+ const char *target, kuid_t ouid, const char *info, int error);
+
+/**
+ * struct aa_file_rules - components used for file rule permissions
+ * @dfa: dfa to match path names and conditionals against
+ * @perms: permission table indexed by the matched state accept entry of @dfa
+ * @trans: transition table indexed by named x transitions
+ *
+ * File permissions are determined by matching a path against @dfa and
+ * then using the value of the accept entry for the matching state as
+ * an index into @perms. If a named exec transition is required it is
+ * looked up in the transition table.
+ */
+struct aa_file_rules {
+ unsigned int start;
+ struct aa_dfa *dfa;
+ /* struct perms perms; */
+ struct aa_domain trans;
+ /* TODO: add delegate table */
+};
+
+unsigned int aa_str_perms(struct aa_dfa *dfa, unsigned int start,
+ const char *name, struct path_cond *cond,
+ struct file_perms *perms);
+
+int aa_path_perm(int op, struct aa_profile *profile, struct path *path,
+ int flags, u32 request, struct path_cond *cond);
+
+int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
+ struct path *new_dir, struct dentry *new_dentry);
+
+int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
+ u32 request);
+
+static inline void aa_free_file_rules(struct aa_file_rules *rules)
+{
+ aa_put_dfa(rules->dfa);
+ aa_free_domain_entries(&rules->trans);
+}
+
+/**
+ * aa_map_file_perms - map file flags to AppArmor permissions
+ * @file: open file to map flags to AppArmor permissions
+ *
+ * Returns: apparmor permission set for the file
+ */
+static inline u32 aa_map_file_to_perms(struct file *file)
+{
+ int flags = file->f_flags;
+ u32 perms = 0;
+
+ if (file->f_mode & FMODE_WRITE)
+ perms |= MAY_WRITE;
+ if (file->f_mode & FMODE_READ)
+ perms |= MAY_READ;
+
+ if ((flags & O_APPEND) && (perms & MAY_WRITE))
+ perms = (perms & ~MAY_WRITE) | MAY_APPEND;
+ /* trunc implies write permission */
+ if (flags & O_TRUNC)
+ perms |= MAY_WRITE;
+ if (flags & O_CREAT)
+ perms |= AA_MAY_CREATE;
+
+ return perms;
+}
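
As a usage sketch (not part of this header; it mirrors what the LSM open hook later in this patch does, and assumes OP_OPEN from the audit header plus the usual fs headers):

static int example_open_check(struct aa_profile *profile, struct file *file)
{
	struct path_cond cond = { file_inode(file)->i_uid,
				  file_inode(file)->i_mode };

	return aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
			    aa_map_file_to_perms(file), &cond);
}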
+
+#endif /* __AA_FILE_H */
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
new file mode 100644
index 00000000000..288ca76e2fb
--- /dev/null
+++ b/security/apparmor/include/ipc.h
@@ -0,0 +1,28 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor ipc mediation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_IPC_H
+#define __AA_IPC_H
+
+#include <linux/sched.h>
+
+struct aa_profile;
+
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+ unsigned int mode);
+
+int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
+ unsigned int mode);
+
+#endif /* __AA_IPC_H */
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
new file mode 100644
index 00000000000..001c43aa040
--- /dev/null
+++ b/security/apparmor/include/match.h
@@ -0,0 +1,141 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy dfa matching engine definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2012 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_MATCH_H
+#define __AA_MATCH_H
+
+#include <linux/kref.h>
+
+#define DFA_NOMATCH 0
+#define DFA_START 1
+
+
+/**
+ * The format used for transition tables is based on the GNU flex table
+ * file format (--tables-file option; see Table File Format in the flex
+ * info pages and the flex sources for documentation). The magic number
+ * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
+ * new tables have been defined, and the YY_ID_CHK (check) and YY_ID_DEF
+ * (default) tables are used slightly differently (see the apparmor-parser
+ * package).
+ *
+ *
+ * The data in the packed dfa is stored in network byte order, and the tables
+ * are arranged for flexibility. We convert the table data to host native
+ * byte order.
+ *
+ * The dfa begins with a table set header, and is followed by the actual
+ * tables.
+ */
+
+#define YYTH_MAGIC 0x1B5E783D
+
+struct table_set_header {
+ u32 th_magic; /* YYTH_MAGIC */
+ u32 th_hsize;
+ u32 th_ssize;
+ u16 th_flags;
+ char th_version[];
+};
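
For illustration only (the function name is an assumption, and sparse endianness annotations are omitted), the header could be pulled out of a policy blob and byte-swapped like this:

static bool example_unpack_header(const char *blob, struct table_set_header *th)
{
	const struct table_set_header *raw = (const struct table_set_header *)blob;

	th->th_magic = be32_to_cpu(raw->th_magic);
	th->th_hsize = be32_to_cpu(raw->th_hsize);
	th->th_ssize = be32_to_cpu(raw->th_ssize);
	th->th_flags = be16_to_cpu(raw->th_flags);

	return th->th_magic == YYTH_MAGIC;
}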
+
+/* The YYTD_IDs are one less than the flex table mappings. The flex id
+ * has 1 subtracted at table load time, which allows the IDs to be used
+ * directly as indexes.
+ */
+#define YYTD_ID_ACCEPT 0
+#define YYTD_ID_BASE 1
+#define YYTD_ID_CHK 2
+#define YYTD_ID_DEF 3
+#define YYTD_ID_EC 4
+#define YYTD_ID_META 5
+#define YYTD_ID_ACCEPT2 6
+#define YYTD_ID_NXT 7
+#define YYTD_ID_TSIZE 8
+
+#define YYTD_DATA8 1
+#define YYTD_DATA16 2
+#define YYTD_DATA32 4
+#define YYTD_DATA64 8
+
+/* The ACCEPT & ACCEPT2 tables each get 6 dedicated flags; the YYTD_DATAX
+ * values define the first of these flags
+ */
+#define ACCEPT1_FLAGS(X) ((X) & 0x3f)
+#define ACCEPT2_FLAGS(X) ACCEPT1_FLAGS((X) >> YYTD_ID_ACCEPT2)
+#define TO_ACCEPT1_FLAG(X) ACCEPT1_FLAGS(X)
+#define TO_ACCEPT2_FLAG(X) (ACCEPT1_FLAGS(X) << YYTD_ID_ACCEPT2)
+#define DFA_FLAG_VERIFY_STATES 0x1000
+
+struct table_header {
+ u16 td_id;
+ u16 td_flags;
+ u32 td_hilen;
+ u32 td_lolen;
+ char td_data[];
+};
+
+#define DEFAULT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_DEF]->td_data))
+#define BASE_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_BASE]->td_data))
+#define NEXT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_NXT]->td_data))
+#define CHECK_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_CHK]->td_data))
+#define EQUIV_TABLE(DFA) ((u8 *)((DFA)->tables[YYTD_ID_EC]->td_data))
+#define ACCEPT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT]->td_data))
+#define ACCEPT_TABLE2(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT2]->td_data))
+
+struct aa_dfa {
+ struct kref count;
+ u16 flags;
+ struct table_header *tables[YYTD_ID_TSIZE];
+};
+
+#define byte_to_byte(X) (X)
+
+#define UNPACK_ARRAY(TABLE, BLOB, LEN, TYPE, NTOHX) \
+ do { \
+ typeof(LEN) __i; \
+ TYPE *__t = (TYPE *) TABLE; \
+ TYPE *__b = (TYPE *) BLOB; \
+ for (__i = 0; __i < LEN; __i++) { \
+ __t[__i] = NTOHX(__b[__i]); \
+ } \
+ } while (0)
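
An illustrative use of UNPACK_ARRAY (the wrapper name and parameters are assumptions): copying a 16-bit table out of a packed dfa blob while converting each entry from big-endian to host order:

static void example_unpack_u16(struct table_header *table, void *blob, u32 count)
{
	UNPACK_ARRAY(table->td_data, blob, count, u16, be16_to_cpu);
}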
+
+static inline size_t table_size(size_t len, size_t el_size)
+{
+ return ALIGN(sizeof(struct table_header) + len * el_size, 8);
+}
+
+struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags);
+unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
+ const char *str, int len);
+unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
+ const char *str);
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+ const char c);
+
+void aa_dfa_free_kref(struct kref *kref);
+
+/**
+ * aa_put_dfa - put a dfa refcount
+ * @dfa: dfa to put refcount (MAYBE NULL)
+ *
+ * Requires: if @dfa != NULL that a valid refcount be held
+ */
+static inline void aa_put_dfa(struct aa_dfa *dfa)
+{
+ if (dfa)
+ kref_put(&dfa->count, aa_dfa_free_kref);
+}
+
+#endif /* __AA_MATCH_H */
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
new file mode 100644
index 00000000000..286ac75dc88
--- /dev/null
+++ b/security/apparmor/include/path.h
@@ -0,0 +1,32 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor basic path manipulation function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_PATH_H
+#define __AA_PATH_H
+
+
+enum path_flags {
+ PATH_IS_DIR = 0x1, /* path is a directory */
+ PATH_CONNECT_PATH = 0x4, /* connect disconnected paths to / */
+ PATH_CHROOT_REL = 0x8, /* do path lookup relative to chroot */
+ PATH_CHROOT_NSCONNECT = 0x10, /* connect paths that are at ns root */
+
+ PATH_DELEGATE_DELETED = 0x08000, /* delegate deleted files */
+ PATH_MEDIATE_DELETED = 0x10000, /* mediate deleted paths */
+};
+
+int aa_path_name(struct path *path, int flags, char **buffer,
+ const char **name, const char **info);
+
+#endif /* __AA_PATH_H */
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
new file mode 100644
index 00000000000..c28b0f20ab5
--- /dev/null
+++ b/security/apparmor/include/policy.h
@@ -0,0 +1,408 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_POLICY_H
+#define __AA_POLICY_H
+
+#include <linux/capability.h>
+#include <linux/cred.h>
+#include <linux/kref.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/socket.h>
+
+#include "apparmor.h"
+#include "audit.h"
+#include "capability.h"
+#include "domain.h"
+#include "file.h"
+#include "resource.h"
+
+extern const char *const aa_profile_mode_names[];
+#define APPARMOR_MODE_NAMES_MAX_INDEX 4
+
+#define PROFILE_MODE(_profile, _mode) \
+ ((aa_g_profile_mode == (_mode)) || \
+ ((_profile)->mode == (_mode)))
+
+#define COMPLAIN_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_COMPLAIN)
+
+#define KILL_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_KILL)
+
+#define PROFILE_IS_HAT(_profile) ((_profile)->flags & PFLAG_HAT)
+
+#define PROFILE_INVALID(_profile) ((_profile)->flags & PFLAG_INVALID)
+
+#define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2)
+
+/*
+ * FIXME: currently need a clean way to replace and remove profiles as a
+ * set. It should be done at the namespace level.
+ * Either, with a set of profiles loaded at the namespace level or via
+ * a mark and remove marked interface.
+ */
+enum profile_mode {
+ APPARMOR_ENFORCE, /* enforce access rules */
+ APPARMOR_COMPLAIN, /* allow and log access violations */
+ APPARMOR_KILL, /* kill task on access violation */
+ APPARMOR_UNCONFINED, /* profile set to unconfined */
+};
+
+enum profile_flags {
+ PFLAG_HAT = 1, /* profile is a hat */
+ PFLAG_NULL = 4, /* profile is null learning profile */
+ PFLAG_IX_ON_NAME_ERROR = 8, /* fallback to ix on name lookup fail */
+ PFLAG_IMMUTABLE = 0x10, /* don't allow changes/replacement */
+ PFLAG_USER_DEFINED = 0x20, /* user based profile - lower privs */
+ PFLAG_NO_LIST_REF = 0x40, /* list doesn't keep profile ref */
+ PFLAG_OLD_NULL_TRANS = 0x100, /* use // as the null transition */
+ PFLAG_INVALID = 0x200, /* profile replaced/removed */
+ PFLAG_NS_COUNT = 0x400, /* carries NS ref count */
+
+ /* These flags must correspond with PATH_flags */
+ PFLAG_MEDIATE_DELETED = 0x10000, /* mediate instead of delegating deleted files */
+};
+
+struct aa_profile;
+
+/* struct aa_policy - common part of both namespaces and profiles
+ * @name: name of the object
+ * @hname: the hierarchical name
+ * @list: list policy object is on
+ * @profiles: head of the profiles list contained in the object
+ */
+struct aa_policy {
+ char *name;
+ char *hname;
+ struct list_head list;
+ struct list_head profiles;
+};
+
+/* struct aa_ns_acct - accounting of profiles in namespace
+ * @max_size: maximum space allowed for all profiles in namespace
+ * @max_count: maximum number of profiles that can be in this namespace
+ * @size: current size of profiles
+ * @count: current count of profiles (includes null profiles)
+ */
+struct aa_ns_acct {
+ int max_size;
+ int max_count;
+ int size;
+ int count;
+};
+
+/* struct aa_namespace - namespace for a set of profiles
+ * @base: common policy
+ * @parent: parent of namespace
+ * @lock: lock for modifying the object
+ * @acct: accounting for the namespace
+ * @unconfined: special unconfined profile for the namespace
+ * @sub_ns: list of namespaces under the current namespace.
+ * @uniq_null: uniq value used for null learning profiles
+ * @uniq_id: a unique id count for the profiles in the namespace
+ * @dents: dentries for the namespaces file entries in apparmorfs
+ *
+ * An aa_namespace defines the set of profiles that are searched to determine
+ * which profile to attach to a task. Profiles can not be shared between
+ * aa_namespaces and profile names within a namespace are guaranteed to be
+ * unique. When profiles in separate namespaces have the same name they
+ * are NOT considered to be equivalent.
+ *
+ * Namespaces are hierarchical and only namespaces and profiles below the
+ * current namespace are visible.
+ *
+ * Namespace names must be unique and can not contain the characters :/\0
+ *
+ * FIXME TODO: add vserver support of namespaces (can it all be done in
+ * userspace?)
+ */
+struct aa_namespace {
+ struct aa_policy base;
+ struct aa_namespace *parent;
+ struct mutex lock;
+ struct aa_ns_acct acct;
+ struct aa_profile *unconfined;
+ struct list_head sub_ns;
+ atomic_t uniq_null;
+ long uniq_id;
+
+ struct dentry *dents[AAFS_NS_SIZEOF];
+};
+
+/* struct aa_policydb - match engine for a policy
+ * @dfa: dfa pattern match
+ * @start: set of start states for the different classes of data
+ */
+struct aa_policydb {
+ /* Generic policy DFA specific rule types will be subsections of it */
+ struct aa_dfa *dfa;
+ unsigned int start[AA_CLASS_LAST + 1];
+
+};
+
+struct aa_replacedby {
+ struct kref count;
+ struct aa_profile __rcu *profile;
+};
+
+
+/* struct aa_profile - basic confinement data
+ * @base: base components of the profile (name, refcount, lists, lock ...)
+ * @count: reference count of the obj
+ * @rcu: rcu head used when removing from @list
+ * @parent: parent of profile
+ * @ns: namespace the profile is in
+ * @replacedby: is set to the profile that replaced this profile
+ * @rename: optional profile name that this profile renamed
+ * @attach: human readable attachment string
+ * @xmatch: optional extended matching for unconfined executables names
+ * @xmatch_len: xmatch prefix len, used to determine xmatch priority
+ * @audit: the auditing mode of the profile
+ * @mode: the enforcement mode of the profile
+ * @flags: flags controlling profile behavior
+ * @path_flags: flags controlling path generation behavior
+ * @size: the memory consumed by this profiles rules
+ * @policy: general match rules governing policy
+ * @file: The set of rules governing basic file access and domain transitions
+ * @caps: capabilities for the profile
+ * @rlimits: rlimits for the profile
+ *
+ * @dents: dentries for the profiles file entries in apparmorfs
+ * @dirname: name of the profile dir in apparmorfs
+ *
+ * The AppArmor profile contains the basic confinement data. Each profile
+ * has a name, and exists in a namespace. The @attach string and @xmatch are
+ * used to determine profile attachment against unconfined tasks. All other
+ * attachments are determined by profile X transition rules.
+ *
+ * The @replacedby struct is write protected by the profile lock.
+ *
+ * Profiles have a hierarchy where hats and children profiles keep
+ * a reference to their parent.
+ *
+ * Profile names can not begin with a : and can not contain the \0
+ * character. If a profile name begins with / it will be considered when
+ * determining profile attachment on "unconfined" tasks.
+ */
+struct aa_profile {
+ struct aa_policy base;
+ struct kref count;
+ struct rcu_head rcu;
+ struct aa_profile __rcu *parent;
+
+ struct aa_namespace *ns;
+ struct aa_replacedby *replacedby;
+ const char *rename;
+
+ const char *attach;
+ struct aa_dfa *xmatch;
+ int xmatch_len;
+ enum audit_mode audit;
+ long mode;
+ long flags;
+ u32 path_flags;
+ int size;
+
+ struct aa_policydb policy;
+ struct aa_file_rules file;
+ struct aa_caps caps;
+ struct aa_rlimit rlimits;
+
+ unsigned char *hash;
+ char *dirname;
+ struct dentry *dents[AAFS_PROF_SIZEOF];
+};
+
+extern struct aa_namespace *root_ns;
+extern enum profile_mode aa_g_profile_mode;
+
+void aa_add_profile(struct aa_policy *common, struct aa_profile *profile);
+
+bool aa_ns_visible(struct aa_namespace *curr, struct aa_namespace *view);
+const char *aa_ns_name(struct aa_namespace *parent, struct aa_namespace *child);
+int aa_alloc_root_ns(void);
+void aa_free_root_ns(void);
+void aa_free_namespace_kref(struct kref *kref);
+
+struct aa_namespace *aa_find_namespace(struct aa_namespace *root,
+ const char *name);
+
+
+void aa_free_replacedby_kref(struct kref *kref);
+struct aa_profile *aa_alloc_profile(const char *name);
+struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat);
+void aa_free_profile(struct aa_profile *profile);
+void aa_free_profile_kref(struct kref *kref);
+struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name);
+struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *name);
+struct aa_profile *aa_match_profile(struct aa_namespace *ns, const char *name);
+
+ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace);
+ssize_t aa_remove_profiles(char *name, size_t size);
+
+#define PROF_ADD 1
+#define PROF_REPLACE 0
+
+#define unconfined(X) ((X)->mode == APPARMOR_UNCONFINED)
+
+
+static inline struct aa_profile *aa_deref_parent(struct aa_profile *p)
+{
+ return rcu_dereference_protected(p->parent,
+ mutex_is_locked(&p->ns->lock));
+}
+
+/**
+ * aa_get_profile - increment refcount on profile @p
+ * @p: profile (MAYBE NULL)
+ *
+ * Returns: pointer to @p; if @p is NULL, returns NULL
+ * Requires: @p must be held with valid refcount when called
+ */
+static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
+{
+ if (p)
+ kref_get(&(p->count));
+
+ return p;
+}
+
+/**
+ * aa_get_profile_not0 - increment refcount on profile @p found via lookup
+ * @p: profile (MAYBE NULL)
+ *
+ * Returns: pointer to @p; if @p is NULL, returns NULL
+ * Requires: @p must be held with valid refcount when called
+ */
+static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
+{
+ if (p && kref_get_not0(&p->count))
+ return p;
+
+ return NULL;
+}
+
+/**
+ * aa_get_profile_rcu - increment a refcount profile that can be replaced
+ * @p: pointer to profile that can be replaced (NOT NULL)
+ *
+ * Returns: pointer to a refcounted profile, else NULL if there is no profile
+ */
+static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
+{
+ struct aa_profile *c;
+
+ rcu_read_lock();
+ do {
+ c = rcu_dereference(*p);
+ } while (c && !kref_get_not0(&c->count));
+ rcu_read_unlock();
+
+ return c;
+}
+
+/**
+ * aa_get_newest_profile - find the newest version of @p
+ * @p: the profile to check for newer versions of (MAYBE NULL)
+ *
+ * Returns: refcounted newest version of @p, taking into account
+ * replacement, renames and removals; if @p has not been replaced,
+ * a new reference to @p itself is returned.
+ */
+static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
+{
+ if (!p)
+ return NULL;
+
+ if (PROFILE_INVALID(p))
+ return aa_get_profile_rcu(&p->replacedby->profile);
+
+ return aa_get_profile(p);
+}
+
+/**
+ * aa_put_profile - decrement refcount on profile @p
+ * @p: profile (MAYBE NULL)
+ */
+static inline void aa_put_profile(struct aa_profile *p)
+{
+ if (p)
+ kref_put(&p->count, aa_free_profile_kref);
+}
+
+static inline struct aa_replacedby *aa_get_replacedby(struct aa_replacedby *p)
+{
+ if (p)
+ kref_get(&(p->count));
+
+ return p;
+}
+
+static inline void aa_put_replacedby(struct aa_replacedby *p)
+{
+ if (p)
+ kref_put(&p->count, aa_free_replacedby_kref);
+}
+
+/* requires profile list write lock held */
+static inline void __aa_update_replacedby(struct aa_profile *orig,
+ struct aa_profile *new)
+{
+ struct aa_profile *tmp;
+ tmp = rcu_dereference_protected(orig->replacedby->profile,
+ mutex_is_locked(&orig->ns->lock));
+ rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
+ orig->flags |= PFLAG_INVALID;
+ aa_put_profile(tmp);
+}
+
+/**
+ * aa_get_namespace - increment references count on @ns
+ * @ns: namespace to increment reference count of (MAYBE NULL)
+ *
+ * Returns: pointer to @ns; if @ns is NULL, returns NULL
+ * Requires: @ns must be held with valid refcount when called
+ */
+static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns)
+{
+ if (ns)
+ aa_get_profile(ns->unconfined);
+
+ return ns;
+}
+
+/**
+ * aa_put_namespace - decrement refcount on @ns
+ * @ns: namespace to put reference of
+ *
+ * Decrement reference count of @ns and if no longer in use free it
+ */
+static inline void aa_put_namespace(struct aa_namespace *ns)
+{
+ if (ns)
+ aa_put_profile(ns->unconfined);
+}
+
+static inline int AUDIT_MODE(struct aa_profile *profile)
+{
+ if (aa_g_audit != AUDIT_NORMAL)
+ return aa_g_audit;
+
+ return profile->audit;
+}
+
+bool aa_may_manage_policy(int op);
+
+#endif /* __AA_POLICY_H */
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
new file mode 100644
index 00000000000..c214fb88b1b
--- /dev/null
+++ b/security/apparmor/include/policy_unpack.h
@@ -0,0 +1,39 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __POLICY_INTERFACE_H
+#define __POLICY_INTERFACE_H
+
+#include <linux/list.h>
+
+struct aa_load_ent {
+ struct list_head list;
+ struct aa_profile *new;
+ struct aa_profile *old;
+ struct aa_profile *rename;
+};
+
+void aa_load_ent_free(struct aa_load_ent *ent);
+struct aa_load_ent *aa_load_ent_alloc(void);
+
+#define PACKED_FLAG_HAT 1
+
+#define PACKED_MODE_ENFORCE 0
+#define PACKED_MODE_COMPLAIN 1
+#define PACKED_MODE_KILL 2
+#define PACKED_MODE_UNCONFINED 3
+
+int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns);
+
+#endif /* __POLICY_INTERFACE_H */
diff --git a/security/apparmor/include/procattr.h b/security/apparmor/include/procattr.h
new file mode 100644
index 00000000000..6bd5f33d953
--- /dev/null
+++ b/security/apparmor/include/procattr.h
@@ -0,0 +1,25 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /proc/<pid>/attr/ interface function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_PROCATTR_H
+#define __AA_PROCATTR_H
+
+#define AA_DO_TEST 1
+#define AA_ONEXEC 1
+
+int aa_getprocattr(struct aa_profile *profile, char **string);
+int aa_setprocattr_changehat(char *args, size_t size, int test);
+int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test);
+
+#endif /* __AA_PROCATTR_H */
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
new file mode 100644
index 00000000000..d3f4cf02795
--- /dev/null
+++ b/security/apparmor/include/resource.h
@@ -0,0 +1,50 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor resource limits function definitions.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_RESOURCE_H
+#define __AA_RESOURCE_H
+
+#include <linux/resource.h>
+#include <linux/sched.h>
+
+#include "apparmorfs.h"
+
+struct aa_profile;
+
+/* struct aa_rlimit - rlimit settings for the profile
+ * @mask: which hard limits to set
+ * @limits: rlimit values that override task limits
+ *
+ * AppArmor rlimits are used to set confined task rlimits. Only the
+ * limits specified in @mask will be controlled by apparmor.
+ */
+struct aa_rlimit {
+ unsigned int mask;
+ struct rlimit limits[RLIM_NLIMITS];
+};
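
A minimal sketch, assuming @mask is a bitmask indexed by resource number as the description above suggests (the helper name is made up):

static inline bool example_rlimit_mediated(struct aa_rlimit *rlims,
					   unsigned int resource)
{
	return rlims->mask & (1 << resource);
}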
+
+extern struct aa_fs_entry aa_fs_entry_rlimit[];
+
+int aa_map_resource(int resource);
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
+ unsigned int resource, struct rlimit *new_rlim);
+
+void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new);
+
+static inline void aa_free_rlimit_rules(struct aa_rlimit *rlims)
+{
+ /* NOP */
+}
+
+#endif /* __AA_RESOURCE_H */
diff --git a/security/apparmor/include/sid.h b/security/apparmor/include/sid.h
new file mode 100644
index 00000000000..513ca0e4896
--- /dev/null
+++ b/security/apparmor/include/sid.h
@@ -0,0 +1,26 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security identifier (sid) definitions
+ *
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __AA_SID_H
+#define __AA_SID_H
+
+#include <linux/types.h>
+
+/* sid value that will not be allocated */
+#define AA_SID_INVALID 0
+#define AA_SID_ALLOC AA_SID_INVALID
+
+u32 aa_alloc_sid(void);
+void aa_free_sid(u32 sid);
+
+#endif /* __AA_SID_H */
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
new file mode 100644
index 00000000000..777ac1c4725
--- /dev/null
+++ b/security/apparmor/ipc.c
@@ -0,0 +1,111 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor ipc mediation
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/gfp.h>
+#include <linux/ptrace.h>
+
+#include "include/audit.h"
+#include "include/capability.h"
+#include "include/context.h"
+#include "include/policy.h"
+#include "include/ipc.h"
+
+/* call back to audit ptrace fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+ audit_log_format(ab, " target=");
+ audit_log_untrustedstring(ab, sa->aad->target);
+}
+
+/**
+ * aa_audit_ptrace - do auditing for ptrace
+ * @profile: profile being enforced (NOT NULL)
+ * @target: profile being traced (NOT NULL)
+ * @error: error condition
+ *
+ * Returns: %0 or error code
+ */
+static int aa_audit_ptrace(struct aa_profile *profile,
+ struct aa_profile *target, int error)
+{
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.op = OP_PTRACE;
+ aad.target = target;
+ aad.error = error;
+
+ return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_ATOMIC, &sa,
+ audit_cb);
+}
+
+/**
+ * aa_may_ptrace - test if tracer task can trace the tracee
+ * @tracer: profile of the task doing the tracing (NOT NULL)
+ * @tracee: profile of the task to be traced
+ * @mode: whether PTRACE_MODE_READ or PTRACE_MODE_ATTACH
+ *
+ * Returns: %0 else error code if permission denied or error
+ */
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+ unsigned int mode)
+{
+ /* TODO: currently only based on capability, not extended ptrace
+ * rules,
+ * Test mode for PTRACE_MODE_READ || PTRACE_MODE_ATTACH
+ */
+
+ if (unconfined(tracer) || tracer == tracee)
+ return 0;
+ /* log this capability request */
+ return aa_capable(tracer, CAP_SYS_PTRACE, 1);
+}
+
+/**
+ * aa_ptrace - do ptrace permission check and auditing
+ * @tracer: task doing the tracing (NOT NULL)
+ * @tracee: task being traced (NOT NULL)
+ * @mode: ptrace mode, either PTRACE_MODE_READ or PTRACE_MODE_ATTACH
+ *
+ * Returns: %0 else error code if permission denied or error
+ */
+int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
+ unsigned int mode)
+{
+ /*
+ * tracer can ptrace tracee when
+ * - tracer is unconfined ||
+ * - tracer is in complain mode
+ * - tracer has rules allowing it to trace tracee currently this is:
+ * - confined by the same profile ||
+ * - tracer profile has CAP_SYS_PTRACE
+ */
+
+ struct aa_profile *tracer_p = aa_get_task_profile(tracer);
+ int error = 0;
+
+ if (!unconfined(tracer_p)) {
+ struct aa_profile *tracee_p = aa_get_task_profile(tracee);
+
+ error = aa_may_ptrace(tracer_p, tracee_p, mode);
+ error = aa_audit_ptrace(tracer_p, tracee_p, error);
+
+ aa_put_profile(tracee_p);
+ }
+ aa_put_profile(tracer_p);
+
+ return error;
+}
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
new file mode 100644
index 00000000000..c1827e06845
--- /dev/null
+++ b/security/apparmor/lib.c
@@ -0,0 +1,106 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains basic common functions used in AppArmor
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+
+#include "include/audit.h"
+#include "include/apparmor.h"
+
+
+/**
+ * aa_split_fqname - split a fqname into a profile and namespace name
+ * @fqname: a fully qualified name in namespace profile format (NOT NULL)
+ * @ns_name: pointer to portion of the string containing the ns name (NOT NULL)
+ *
+ * Returns: profile name or NULL if one is not specified
+ *
+ * Split a namespace name from a profile name (see policy.c for naming
+ * description). If a portion of the name is missing it returns NULL for
+ * that portion.
+ *
+ * NOTE: may modify the @fqname string. The pointers returned point
+ * into the @fqname string.
+ */
+char *aa_split_fqname(char *fqname, char **ns_name)
+{
+ char *name = strim(fqname);
+
+ *ns_name = NULL;
+ if (name[0] == ':') {
+ char *split = strchr(&name[1], ':');
+ *ns_name = skip_spaces(&name[1]);
+ if (split) {
+ /* overwrite ':' with \0 */
+ *split++ = 0;
+ if (strncmp(split, "//", 2) == 0)
+ split += 2;
+ name = skip_spaces(split);
+ } else
+ /* a ns name without a following profile is allowed */
+ name = NULL;
+ }
+ if (name && *name == 0)
+ name = NULL;
+
+ return name;
+}
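
A usage sketch (the buffer contents and names here are examples only, not from the patch):

static void example_split_fqname(void)
{
	char buf[] = ":ns1://some_profile";
	char *ns_name, *name;

	name = aa_split_fqname(buf, &ns_name);
	/* now ns_name == "ns1" and name == "some_profile";
	 * a bare ":ns1" would leave name == NULL */
}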
+
+/**
+ * aa_info_message - log a non-profile-related status message
+ * @str: message to log
+ */
+void aa_info_message(const char *str)
+{
+ if (audit_enabled) {
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.info = str;
+ aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
+ }
+ printk(KERN_INFO "AppArmor: %s\n", str);
+}
+
+/**
+ * __aa_kvmalloc - do allocation preferring kmalloc but falling back to vmalloc
+ * @size: how many bytes of memory are required
+ * @flags: the type of memory to allocate (see kmalloc).
+ *
+ * Return: allocated buffer or NULL if failed
+ *
+ * It is possible that the policy being loaded from userspace is larger than
+ * what can be allocated by kmalloc; in those cases fall back to vmalloc.
+ */
+void *__aa_kvmalloc(size_t size, gfp_t flags)
+{
+ void *buffer = NULL;
+
+ if (size == 0)
+ return NULL;
+
+ /* do not attempt kmalloc if we need more than 16 pages at once */
+ if (size <= (16*PAGE_SIZE))
+ buffer = kmalloc(size, flags | GFP_NOIO | __GFP_NOWARN);
+ if (!buffer) {
+ if (flags & __GFP_ZERO)
+ buffer = vzalloc(size);
+ else
+ buffer = vmalloc(size);
+ }
+ return buffer;
+}
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
new file mode 100644
index 00000000000..99810009333
--- /dev/null
+++ b/security/apparmor/lsm.c
@@ -0,0 +1,949 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor LSM hooks.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/security.h>
+#include <linux/moduleparam.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/ptrace.h>
+#include <linux/ctype.h>
+#include <linux/sysctl.h>
+#include <linux/audit.h>
+#include <linux/user_namespace.h>
+#include <net/sock.h>
+
+#include "include/apparmor.h"
+#include "include/apparmorfs.h"
+#include "include/audit.h"
+#include "include/capability.h"
+#include "include/context.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/procattr.h"
+
+/* Flag indicating whether initialization completed */
+int apparmor_initialized __initdata;
+
+/*
+ * LSM hook functions
+ */
+
+/*
+ * free the associated aa_task_cxt and put its profiles
+ */
+static void apparmor_cred_free(struct cred *cred)
+{
+ aa_free_task_context(cred_cxt(cred));
+ cred_cxt(cred) = NULL;
+}
+
+/*
+ * allocate the apparmor part of blank credentials
+ */
+static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
+{
+ /* freed by apparmor_cred_free */
+ struct aa_task_cxt *cxt = aa_alloc_task_context(gfp);
+ if (!cxt)
+ return -ENOMEM;
+
+ cred_cxt(cred) = cxt;
+ return 0;
+}
+
+/*
+ * prepare new aa_task_cxt for modification by prepare_cred block
+ */
+static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
+ gfp_t gfp)
+{
+ /* freed by apparmor_cred_free */
+ struct aa_task_cxt *cxt = aa_alloc_task_context(gfp);
+ if (!cxt)
+ return -ENOMEM;
+
+ aa_dup_task_context(cxt, cred_cxt(old));
+ cred_cxt(new) = cxt;
+ return 0;
+}
+
+/*
+ * transfer the apparmor data to a blank set of creds
+ */
+static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
+{
+ const struct aa_task_cxt *old_cxt = cred_cxt(old);
+ struct aa_task_cxt *new_cxt = cred_cxt(new);
+
+ aa_dup_task_context(new_cxt, old_cxt);
+}
+
+static int apparmor_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ int error = cap_ptrace_access_check(child, mode);
+ if (error)
+ return error;
+
+ return aa_ptrace(current, child, mode);
+}
+
+static int apparmor_ptrace_traceme(struct task_struct *parent)
+{
+ int error = cap_ptrace_traceme(parent);
+ if (error)
+ return error;
+
+ return aa_ptrace(parent, current, PTRACE_MODE_ATTACH);
+}
+
+/* Derived from security/commoncap.c:cap_capget */
+static int apparmor_capget(struct task_struct *target, kernel_cap_t *effective,
+ kernel_cap_t *inheritable, kernel_cap_t *permitted)
+{
+ struct aa_profile *profile;
+ const struct cred *cred;
+
+ rcu_read_lock();
+ cred = __task_cred(target);
+ profile = aa_cred_profile(cred);
+
+ *effective = cred->cap_effective;
+ *inheritable = cred->cap_inheritable;
+ *permitted = cred->cap_permitted;
+
+ if (!unconfined(profile) && !COMPLAIN_MODE(profile)) {
+ *effective = cap_intersect(*effective, profile->caps.allow);
+ *permitted = cap_intersect(*permitted, profile->caps.allow);
+ }
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
+ int cap, int audit)
+{
+ struct aa_profile *profile;
+ /* cap_capable returns 0 on success, else -EPERM */
+ int error = cap_capable(cred, ns, cap, audit);
+ if (!error) {
+ profile = aa_cred_profile(cred);
+ if (!unconfined(profile))
+ error = aa_capable(profile, cap, audit);
+ }
+ return error;
+}
+
+/**
+ * common_perm - basic common permission check wrapper fn for paths
+ * @op: operation being checked
+ * @path: path to check permission of (NOT NULL)
+ * @mask: requested permissions mask
+ * @cond: conditional info for the permission request (NOT NULL)
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm(int op, struct path *path, u32 mask,
+ struct path_cond *cond)
+{
+ struct aa_profile *profile;
+ int error = 0;
+
+ profile = __aa_current_profile();
+ if (!unconfined(profile))
+ error = aa_path_perm(op, profile, path, 0, mask, cond);
+
+ return error;
+}
+
+/**
+ * common_perm_dir_dentry - common permission wrapper when path is dir, dentry
+ * @op: operation being checked
+ * @dir: directory of the dentry (NOT NULL)
+ * @dentry: dentry to check (NOT NULL)
+ * @mask: requested permissions mask
+ * @cond: conditional info for the permission request (NOT NULL)
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm_dir_dentry(int op, struct path *dir,
+ struct dentry *dentry, u32 mask,
+ struct path_cond *cond)
+{
+ struct path path = { dir->mnt, dentry };
+
+ return common_perm(op, &path, mask, cond);
+}
+
+/**
+ * common_perm_mnt_dentry - common permission wrapper when mnt, dentry
+ * @op: operation being checked
+ * @mnt: mount point of dentry (NOT NULL)
+ * @dentry: dentry to check (NOT NULL)
+ * @mask: requested permissions mask
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm_mnt_dentry(int op, struct vfsmount *mnt,
+ struct dentry *dentry, u32 mask)
+{
+ struct path path = { mnt, dentry };
+ struct path_cond cond = { dentry->d_inode->i_uid,
+ dentry->d_inode->i_mode
+ };
+
+ return common_perm(op, &path, mask, &cond);
+}
+
+/**
+ * common_perm_rm - common permission wrapper for operations doing rm
+ * @op: operation being checked
+ * @dir: directory that the dentry is in (NOT NULL)
+ * @dentry: dentry being rm'd (NOT NULL)
+ * @mask: requested permission mask
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm_rm(int op, struct path *dir,
+ struct dentry *dentry, u32 mask)
+{
+ struct inode *inode = dentry->d_inode;
+ struct path_cond cond = { };
+
+ if (!inode || !dir->mnt || !mediated_filesystem(inode))
+ return 0;
+
+ cond.uid = inode->i_uid;
+ cond.mode = inode->i_mode;
+
+ return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
+}
+
+/**
+ * common_perm_create - common permission wrapper for operations doing create
+ * @op: operation being checked
+ * @dir: directory that dentry will be created in (NOT NULL)
+ * @dentry: dentry to create (NOT NULL)
+ * @mask: request permission mask
+ * @mode: created file mode
+ *
+ * Returns: %0 else error code if error or permission denied
+ */
+static int common_perm_create(int op, struct path *dir, struct dentry *dentry,
+ u32 mask, umode_t mode)
+{
+ struct path_cond cond = { current_fsuid(), mode };
+
+ if (!dir->mnt || !mediated_filesystem(dir->dentry->d_inode))
+ return 0;
+
+ return common_perm_dir_dentry(op, dir, dentry, mask, &cond);
+}
+
+static int apparmor_path_unlink(struct path *dir, struct dentry *dentry)
+{
+ return common_perm_rm(OP_UNLINK, dir, dentry, AA_MAY_DELETE);
+}
+
+static int apparmor_path_mkdir(struct path *dir, struct dentry *dentry,
+ umode_t mode)
+{
+ return common_perm_create(OP_MKDIR, dir, dentry, AA_MAY_CREATE,
+ S_IFDIR);
+}
+
+static int apparmor_path_rmdir(struct path *dir, struct dentry *dentry)
+{
+ return common_perm_rm(OP_RMDIR, dir, dentry, AA_MAY_DELETE);
+}
+
+static int apparmor_path_mknod(struct path *dir, struct dentry *dentry,
+ umode_t mode, unsigned int dev)
+{
+ return common_perm_create(OP_MKNOD, dir, dentry, AA_MAY_CREATE, mode);
+}
+
+static int apparmor_path_truncate(struct path *path)
+{
+ struct path_cond cond = { path->dentry->d_inode->i_uid,
+ path->dentry->d_inode->i_mode
+ };
+
+ if (!path->mnt || !mediated_filesystem(path->dentry->d_inode))
+ return 0;
+
+ return common_perm(OP_TRUNC, path, MAY_WRITE | AA_MAY_META_WRITE,
+ &cond);
+}
+
+static int apparmor_path_symlink(struct path *dir, struct dentry *dentry,
+ const char *old_name)
+{
+ return common_perm_create(OP_SYMLINK, dir, dentry, AA_MAY_CREATE,
+ S_IFLNK);
+}
+
+static int apparmor_path_link(struct dentry *old_dentry, struct path *new_dir,
+ struct dentry *new_dentry)
+{
+ struct aa_profile *profile;
+ int error = 0;
+
+ if (!mediated_filesystem(old_dentry->d_inode))
+ return 0;
+
+ profile = aa_current_profile();
+ if (!unconfined(profile))
+ error = aa_path_link(profile, old_dentry, new_dir, new_dentry);
+ return error;
+}
+
+static int apparmor_path_rename(struct path *old_dir, struct dentry *old_dentry,
+ struct path *new_dir, struct dentry *new_dentry)
+{
+ struct aa_profile *profile;
+ int error = 0;
+
+ if (!mediated_filesystem(old_dentry->d_inode))
+ return 0;
+
+ profile = aa_current_profile();
+ if (!unconfined(profile)) {
+ struct path old_path = { old_dir->mnt, old_dentry };
+ struct path new_path = { new_dir->mnt, new_dentry };
+ struct path_cond cond = { old_dentry->d_inode->i_uid,
+ old_dentry->d_inode->i_mode
+ };
+
+ error = aa_path_perm(OP_RENAME_SRC, profile, &old_path, 0,
+ MAY_READ | AA_MAY_META_READ | MAY_WRITE |
+ AA_MAY_META_WRITE | AA_MAY_DELETE,
+ &cond);
+ if (!error)
+ error = aa_path_perm(OP_RENAME_DEST, profile, &new_path,
+ 0, MAY_WRITE | AA_MAY_META_WRITE |
+ AA_MAY_CREATE, &cond);
+
+ }
+ return error;
+}
+
+static int apparmor_path_chmod(struct path *path, umode_t mode)
+{
+ if (!mediated_filesystem(path->dentry->d_inode))
+ return 0;
+
+ return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
+}
+
+static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+{
+ struct path_cond cond = { path->dentry->d_inode->i_uid,
+ path->dentry->d_inode->i_mode
+ };
+
+ if (!mediated_filesystem(path->dentry->d_inode))
+ return 0;
+
+ return common_perm(OP_CHOWN, path, AA_MAY_CHOWN, &cond);
+}
+
+static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+{
+ if (!mediated_filesystem(dentry->d_inode))
+ return 0;
+
+ return common_perm_mnt_dentry(OP_GETATTR, mnt, dentry,
+ AA_MAY_META_READ);
+}
+
+static int apparmor_file_open(struct file *file, const struct cred *cred)
+{
+ struct aa_file_cxt *fcxt = file->f_security;
+ struct aa_profile *profile;
+ int error = 0;
+
+ if (!mediated_filesystem(file_inode(file)))
+ return 0;
+
+ /* If in exec, permission is handled by bprm hooks.
+ * Cache permissions granted by the previous exec check, with
+ * implicit read and executable mmap which are required to
+ * actually execute the image.
+ */
+ if (current->in_execve) {
+ fcxt->allow = MAY_EXEC | MAY_READ | AA_EXEC_MMAP;
+ return 0;
+ }
+
+ profile = aa_cred_profile(cred);
+ if (!unconfined(profile)) {
+ struct inode *inode = file_inode(file);
+ struct path_cond cond = { inode->i_uid, inode->i_mode };
+
+ error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
+ aa_map_file_to_perms(file), &cond);
+ /* todo cache full allowed permissions set and state */
+ fcxt->allow = aa_map_file_to_perms(file);
+ }
+
+ return error;
+}
+
+static int apparmor_file_alloc_security(struct file *file)
+{
+ /* freed by apparmor_file_free_security */
+ file->f_security = aa_alloc_file_context(GFP_KERNEL);
+ if (!file->f_security)
+ return -ENOMEM;
+ return 0;
+
+}
+
+static void apparmor_file_free_security(struct file *file)
+{
+ struct aa_file_cxt *cxt = file->f_security;
+
+ aa_free_file_context(cxt);
+}
+
+static int common_file_perm(int op, struct file *file, u32 mask)
+{
+ struct aa_file_cxt *fcxt = file->f_security;
+ struct aa_profile *profile, *fprofile = aa_cred_profile(file->f_cred);
+ int error = 0;
+
+ BUG_ON(!fprofile);
+
+ if (!file->f_path.mnt ||
+ !mediated_filesystem(file_inode(file)))
+ return 0;
+
+ profile = __aa_current_profile();
+
+ /* revalidate access, if task is unconfined, or the cached cred
+ * doesn't match or if the request is for more permissions than
+ * was granted.
+ *
+ * Note: the test for !unconfined(fprofile) is to handle file
+ * delegation from unconfined tasks
+ */
+ if (!unconfined(profile) && !unconfined(fprofile) &&
+ ((fprofile != profile) || (mask & ~fcxt->allow)))
+ error = aa_file_perm(op, profile, file, mask);
+
+ return error;
+}
+
+static int apparmor_file_permission(struct file *file, int mask)
+{
+ return common_file_perm(OP_FPERM, file, mask);
+}
+
+static int apparmor_file_lock(struct file *file, unsigned int cmd)
+{
+ u32 mask = AA_MAY_LOCK;
+
+ if (cmd == F_WRLCK)
+ mask |= MAY_WRITE;
+
+ return common_file_perm(OP_FLOCK, file, mask);
+}
+
+static int common_mmap(int op, struct file *file, unsigned long prot,
+ unsigned long flags)
+{
+ int mask = 0;
+
+ if (!file || !file->f_security)
+ return 0;
+
+ if (prot & PROT_READ)
+ mask |= MAY_READ;
+ /*
+ * Private mappings don't require write perms since they don't
+ * write back to the files
+ */
+ if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE))
+ mask |= MAY_WRITE;
+ if (prot & PROT_EXEC)
+ mask |= AA_EXEC_MMAP;
+
+ return common_file_perm(op, file, mask);
+}
+
+static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
+ return common_mmap(OP_FMMAP, file, prot, flags);
+}
+
+static int apparmor_file_mprotect(struct vm_area_struct *vma,
+ unsigned long reqprot, unsigned long prot)
+{
+ return common_mmap(OP_FMPROT, vma->vm_file, prot,
+ !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
+}
+
+static int apparmor_getprocattr(struct task_struct *task, char *name,
+ char **value)
+{
+ int error = -ENOENT;
+ /* released below */
+ const struct cred *cred = get_task_cred(task);
+ struct aa_task_cxt *cxt = cred_cxt(cred);
+ struct aa_profile *profile = NULL;
+
+ if (strcmp(name, "current") == 0)
+ profile = aa_get_newest_profile(cxt->profile);
+ else if (strcmp(name, "prev") == 0 && cxt->previous)
+ profile = aa_get_newest_profile(cxt->previous);
+ else if (strcmp(name, "exec") == 0 && cxt->onexec)
+ profile = aa_get_newest_profile(cxt->onexec);
+ else
+ error = -EINVAL;
+
+ if (profile)
+ error = aa_getprocattr(profile, value);
+
+ aa_put_profile(profile);
+ put_cred(cred);
+
+ return error;
+}
+
+static int apparmor_setprocattr(struct task_struct *task, char *name,
+ void *value, size_t size)
+{
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
+ char *command, *args = value;
+ size_t arg_size;
+ int error;
+
+ if (size == 0)
+ return -EINVAL;
+ /* args points to a PAGE_SIZE buffer; AppArmor requires that the
+ * buffer either be null terminated or have size <= PAGE_SIZE - 1,
+ * so that AppArmor can null terminate it
+ */
+ if (args[size - 1] != '\0') {
+ if (size == PAGE_SIZE)
+ return -EINVAL;
+ args[size] = '\0';
+ }
+
+ /* task can only write its own attributes */
+ if (current != task)
+ return -EACCES;
+
+ args = value;
+ args = strim(args);
+ command = strsep(&args, " ");
+ if (!args)
+ return -EINVAL;
+ args = skip_spaces(args);
+ if (!*args)
+ return -EINVAL;
+
+ arg_size = size - (args - (char *) value);
+ if (strcmp(name, "current") == 0) {
+ if (strcmp(command, "changehat") == 0) {
+ error = aa_setprocattr_changehat(args, arg_size,
+ !AA_DO_TEST);
+ } else if (strcmp(command, "permhat") == 0) {
+ error = aa_setprocattr_changehat(args, arg_size,
+ AA_DO_TEST);
+ } else if (strcmp(command, "changeprofile") == 0) {
+ error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
+ !AA_DO_TEST);
+ } else if (strcmp(command, "permprofile") == 0) {
+ error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
+ AA_DO_TEST);
+ } else
+ goto fail;
+ } else if (strcmp(name, "exec") == 0) {
+ if (strcmp(command, "exec") == 0)
+ error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
+ !AA_DO_TEST);
+ else
+ goto fail;
+ } else
+ /* only support the "current" and "exec" process attributes */
+ return -EINVAL;
+
+ if (!error)
+ error = size;
+ return error;
+
+fail:
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.profile = aa_current_profile();
+ aad.op = OP_SETPROCATTR;
+ aad.info = name;
+ aad.error = -EINVAL;
+ aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
+ return -EINVAL;
+}
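
For context, a hedged userspace illustration (not part of the patch; the profile name is an example) of how the command strings parsed above reach this hook via /proc/<pid>/attr/current:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

/* write "changeprofile <profile>" to the current task's attr file */
static int request_change_profile(const char *profile)
{
	char buf[128];
	int fd, len, ret = 0;

	len = snprintf(buf, sizeof(buf), "changeprofile %s", profile);
	if (len < 0 || len >= (int)sizeof(buf))
		return -1;

	fd = open("/proc/self/attr/current", O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, buf, len) != len)
		ret = -1;
	close(fd);
	return ret;
}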
+
+static int apparmor_task_setrlimit(struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim)
+{
+ struct aa_profile *profile = __aa_current_profile();
+ int error = 0;
+
+ if (!unconfined(profile))
+ error = aa_task_setrlimit(profile, task, resource, new_rlim);
+
+ return error;
+}
+
+static struct security_operations apparmor_ops = {
+ .name = "apparmor",
+
+ .ptrace_access_check = apparmor_ptrace_access_check,
+ .ptrace_traceme = apparmor_ptrace_traceme,
+ .capget = apparmor_capget,
+ .capable = apparmor_capable,
+
+ .path_link = apparmor_path_link,
+ .path_unlink = apparmor_path_unlink,
+ .path_symlink = apparmor_path_symlink,
+ .path_mkdir = apparmor_path_mkdir,
+ .path_rmdir = apparmor_path_rmdir,
+ .path_mknod = apparmor_path_mknod,
+ .path_rename = apparmor_path_rename,
+ .path_chmod = apparmor_path_chmod,
+ .path_chown = apparmor_path_chown,
+ .path_truncate = apparmor_path_truncate,
+ .inode_getattr = apparmor_inode_getattr,
+
+ .file_open = apparmor_file_open,
+ .file_permission = apparmor_file_permission,
+ .file_alloc_security = apparmor_file_alloc_security,
+ .file_free_security = apparmor_file_free_security,
+ .mmap_file = apparmor_mmap_file,
+ .mmap_addr = cap_mmap_addr,
+ .file_mprotect = apparmor_file_mprotect,
+ .file_lock = apparmor_file_lock,
+
+ .getprocattr = apparmor_getprocattr,
+ .setprocattr = apparmor_setprocattr,
+
+ .cred_alloc_blank = apparmor_cred_alloc_blank,
+ .cred_free = apparmor_cred_free,
+ .cred_prepare = apparmor_cred_prepare,
+ .cred_transfer = apparmor_cred_transfer,
+
+ .bprm_set_creds = apparmor_bprm_set_creds,
+ .bprm_committing_creds = apparmor_bprm_committing_creds,
+ .bprm_committed_creds = apparmor_bprm_committed_creds,
+ .bprm_secureexec = apparmor_bprm_secureexec,
+
+ .task_setrlimit = apparmor_task_setrlimit,
+};
+
+/*
+ * AppArmor sysfs module parameters
+ */
+
+static int param_set_aabool(const char *val, const struct kernel_param *kp);
+static int param_get_aabool(char *buffer, const struct kernel_param *kp);
+#define param_check_aabool param_check_bool
+static struct kernel_param_ops param_ops_aabool = {
+ .flags = KERNEL_PARAM_FL_NOARG,
+ .set = param_set_aabool,
+ .get = param_get_aabool
+};
+
+static int param_set_aauint(const char *val, const struct kernel_param *kp);
+static int param_get_aauint(char *buffer, const struct kernel_param *kp);
+#define param_check_aauint param_check_uint
+static struct kernel_param_ops param_ops_aauint = {
+ .set = param_set_aauint,
+ .get = param_get_aauint
+};
+
+static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp);
+static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
+#define param_check_aalockpolicy param_check_bool
+static struct kernel_param_ops param_ops_aalockpolicy = {
+ .flags = KERNEL_PARAM_FL_NOARG,
+ .set = param_set_aalockpolicy,
+ .get = param_get_aalockpolicy
+};
+
+static int param_set_audit(const char *val, struct kernel_param *kp);
+static int param_get_audit(char *buffer, struct kernel_param *kp);
+
+static int param_set_mode(const char *val, struct kernel_param *kp);
+static int param_get_mode(char *buffer, struct kernel_param *kp);
+
+/* Flag values, also controllable via /sys/module/apparmor/parameters
+ * We define special types as we want to do additional mediation.
+ */
+
+/* AppArmor global enforcement switch - complain, enforce, kill */
+enum profile_mode aa_g_profile_mode = APPARMOR_ENFORCE;
+module_param_call(mode, param_set_mode, param_get_mode,
+ &aa_g_profile_mode, S_IRUSR | S_IWUSR);
+
+/* Debug mode */
+bool aa_g_debug;
+module_param_named(debug, aa_g_debug, aabool, S_IRUSR | S_IWUSR);
+
+/* Audit mode */
+enum audit_mode aa_g_audit;
+module_param_call(audit, param_set_audit, param_get_audit,
+ &aa_g_audit, S_IRUSR | S_IWUSR);
+
+/* Determines if audit header is included in audited messages. This
+ * provides more context if the audit daemon is not running
+ */
+bool aa_g_audit_header = 1;
+module_param_named(audit_header, aa_g_audit_header, aabool,
+ S_IRUSR | S_IWUSR);
+
+/* lock out loading/removal of policy
+ * TODO: add in at boot loading of policy, which is the only way to
+ * load policy, if lock_policy is set
+ */
+bool aa_g_lock_policy;
+module_param_named(lock_policy, aa_g_lock_policy, aalockpolicy,
+ S_IRUSR | S_IWUSR);
+
+/* Syscall logging mode */
+bool aa_g_logsyscall;
+module_param_named(logsyscall, aa_g_logsyscall, aabool, S_IRUSR | S_IWUSR);
+
+/* Maximum pathname length before accesses will start getting rejected */
+unsigned int aa_g_path_max = 2 * PATH_MAX;
+module_param_named(path_max, aa_g_path_max, aauint, S_IRUSR | S_IWUSR);
+
+/* Determines how paranoid loading of policy is and how much verification
+ * on the loaded policy is done.
+ */
+bool aa_g_paranoid_load = 1;
+module_param_named(paranoid_load, aa_g_paranoid_load, aabool,
+ S_IRUSR | S_IWUSR);
+
+/* Boot time disable flag */
+static bool apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE;
+module_param_named(enabled, apparmor_enabled, bool, S_IRUGO);
+
+static int __init apparmor_enabled_setup(char *str)
+{
+ unsigned long enabled;
+ int error = kstrtoul(str, 0, &enabled);
+ if (!error)
+ apparmor_enabled = enabled ? 1 : 0;
+ return 1;
+}
+
+__setup("apparmor=", apparmor_enabled_setup);
+
+/* set global flag turning off the ability to load policy */
+static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp)
+{
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+ if (aa_g_lock_policy)
+ return -EACCES;
+ return param_set_bool(val, kp);
+}
+
+static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp)
+{
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+ return param_get_bool(buffer, kp);
+}
+
+static int param_set_aabool(const char *val, const struct kernel_param *kp)
+{
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+ return param_set_bool(val, kp);
+}
+
+static int param_get_aabool(char *buffer, const struct kernel_param *kp)
+{
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+ return param_get_bool(buffer, kp);
+}
+
+static int param_set_aauint(const char *val, const struct kernel_param *kp)
+{
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+ return param_set_uint(val, kp);
+}
+
+static int param_get_aauint(char *buffer, const struct kernel_param *kp)
+{
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+ return param_get_uint(buffer, kp);
+}
+
+static int param_get_audit(char *buffer, struct kernel_param *kp)
+{
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+
+ return sprintf(buffer, "%s", audit_mode_names[aa_g_audit]);
+}
+
+static int param_set_audit(const char *val, struct kernel_param *kp)
+{
+ int i;
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ for (i = 0; i < AUDIT_MAX_INDEX; i++) {
+ if (strcmp(val, audit_mode_names[i]) == 0) {
+ aa_g_audit = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static int param_get_mode(char *buffer, struct kernel_param *kp)
+{
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+
+ return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
+}
+
+static int param_set_mode(const char *val, struct kernel_param *kp)
+{
+ int i;
+ if (!capable(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (!apparmor_enabled)
+ return -EINVAL;
+
+ if (!val)
+ return -EINVAL;
+
+ for (i = 0; i < APPARMOR_MODE_NAMES_MAX_INDEX; i++) {
+ if (strcmp(val, aa_profile_mode_names[i]) == 0) {
+ aa_g_profile_mode = i;
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * AppArmor init functions
+ */
+
+/**
+ * set_init_cxt - set a task context and profile on the first task.
+ *
+ * TODO: allow setting a profile other than unconfined
+ */
+static int __init set_init_cxt(void)
+{
+ struct cred *cred = (struct cred *)current->real_cred;
+ struct aa_task_cxt *cxt;
+
+ cxt = aa_alloc_task_context(GFP_KERNEL);
+ if (!cxt)
+ return -ENOMEM;
+
+ cxt->profile = aa_get_profile(root_ns->unconfined);
+ cred_cxt(cred) = cxt;
+
+ return 0;
+}
+
+static int __init apparmor_init(void)
+{
+ int error;
+
+ if (!apparmor_enabled || !security_module_enable(&apparmor_ops)) {
+ aa_info_message("AppArmor disabled by boot time parameter");
+ apparmor_enabled = 0;
+ return 0;
+ }
+
+ error = aa_alloc_root_ns();
+ if (error) {
+ AA_ERROR("Unable to allocate default profile namespace\n");
+ goto alloc_out;
+ }
+
+ error = set_init_cxt();
+ if (error) {
+ AA_ERROR("Failed to set context on init task\n");
+ goto register_security_out;
+ }
+
+ error = register_security(&apparmor_ops);
+ if (error) {
+ struct cred *cred = (struct cred *)current->real_cred;
+ aa_free_task_context(cred_cxt(cred));
+ cred_cxt(cred) = NULL;
+ AA_ERROR("Unable to register AppArmor\n");
+ goto register_security_out;
+ }
+
+ /* Report that AppArmor successfully initialized */
+ apparmor_initialized = 1;
+ if (aa_g_profile_mode == APPARMOR_COMPLAIN)
+ aa_info_message("AppArmor initialized: complain mode enabled");
+ else if (aa_g_profile_mode == APPARMOR_KILL)
+ aa_info_message("AppArmor initialized: kill mode enabled");
+ else
+ aa_info_message("AppArmor initialized");
+
+ return error;
+
+register_security_out:
+ aa_free_root_ns();
+
+alloc_out:
+ aa_destroy_aafs();
+
+ apparmor_enabled = 0;
+ return error;
+}
+
+security_initcall(apparmor_init);
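
The aabool/aauint/aalockpolicy parameter types above all follow the same pattern: wrap the stock param_set_*/param_get_* helpers with a capable(CAP_MAC_ADMIN) check so only a MAC administrator can read or write the knobs under /sys/module/apparmor/parameters. A minimal self-contained sketch of that pattern for a hypothetical out-of-tree module (the demo_* names are illustrative and not part of AppArmor):

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/stat.h>

/* hypothetical flag exposed under /sys/module/<module>/parameters/demo_flag */
static bool demo_flag;

/* gate writes and reads on CAP_MAC_ADMIN, like param_set_aabool() above */
static int demo_set_bool(const char *val, const struct kernel_param *kp)
{
	if (!capable(CAP_MAC_ADMIN))
		return -EPERM;
	return param_set_bool(val, kp);
}

static int demo_get_bool(char *buffer, const struct kernel_param *kp)
{
	if (!capable(CAP_MAC_ADMIN))
		return -EPERM;
	return param_get_bool(buffer, kp);
}

static struct kernel_param_ops demo_bool_ops = {
	.flags = KERNEL_PARAM_FL_NOARG,	/* "demo_flag" alone means true */
	.set = demo_set_bool,
	.get = demo_get_bool,
};

/* equivalent in spirit to the module_param_named(..., aabool, ...) uses above */
module_param_cb(demo_flag, &demo_bool_ops, &demo_flag, S_IRUSR | S_IWUSR);
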
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
new file mode 100644
index 00000000000..727eb4200d5
--- /dev/null
+++ b/security/apparmor/match.c
@@ -0,0 +1,428 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor dfa based regular expression matching engine
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2012 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/err.h>
+#include <linux/kref.h>
+
+#include "include/apparmor.h"
+#include "include/match.h"
+
+#define base_idx(X) ((X) & 0xffffff)
+
+/**
+ * unpack_table - unpack a dfa table (one of accept, default, base, next, check)
+ * @blob: data to unpack (NOT NULL)
+ * @bsize: size of blob
+ *
+ * Returns: pointer to table else NULL on failure
+ *
+ * NOTE: must be freed by kvfree (not kfree)
+ */
+static struct table_header *unpack_table(char *blob, size_t bsize)
+{
+ struct table_header *table = NULL;
+ struct table_header th;
+ size_t tsize;
+
+ if (bsize < sizeof(struct table_header))
+ goto out;
+
+ /* loaded td_id's start at 1, subtract 1 now to avoid doing
+ * it every time we use td_id as an index
+ */
+ th.td_id = be16_to_cpu(*(u16 *) (blob)) - 1;
+ th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
+ th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
+ blob += sizeof(struct table_header);
+
+ if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
+ th.td_flags == YYTD_DATA8))
+ goto out;
+
+ tsize = table_size(th.td_lolen, th.td_flags);
+ if (bsize < tsize)
+ goto out;
+
+ table = kvzalloc(tsize);
+ if (table) {
+ *table = th;
+ if (th.td_flags == YYTD_DATA8)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u8, byte_to_byte);
+ else if (th.td_flags == YYTD_DATA16)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u16, be16_to_cpu);
+ else if (th.td_flags == YYTD_DATA32)
+ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
+ u32, be32_to_cpu);
+ else
+ goto fail;
+ }
+
+out:
+ /* if table was vmalloced make sure the page tables are synced
+ * before it is used, as it goes live to all cpus.
+ */
+ if (is_vmalloc_addr(table))
+ vm_unmap_aliases();
+ return table;
+fail:
+ kvfree(table);
+ return NULL;
+}
+
+/**
+ * verify_dfa - verify that transitions and states in the tables are in bounds.
+ * @dfa: dfa to test (NOT NULL)
+ * @flags: flags controlling what type of accept table are acceptable
+ *
+ * Assumes dfa has gone through the first pass verification done by unpacking
+ * NOTE: this does not validate accept table values
+ *
+ * Returns: %0 else error code on failure to verify
+ */
+static int verify_dfa(struct aa_dfa *dfa, int flags)
+{
+ size_t i, state_count, trans_count;
+ int error = -EPROTO;
+
+ /* check that required tables exist */
+ if (!(dfa->tables[YYTD_ID_DEF] &&
+ dfa->tables[YYTD_ID_BASE] &&
+ dfa->tables[YYTD_ID_NXT] && dfa->tables[YYTD_ID_CHK]))
+ goto out;
+
+ /* accept.size == default.size == base.size */
+ state_count = dfa->tables[YYTD_ID_BASE]->td_lolen;
+ if (ACCEPT1_FLAGS(flags)) {
+ if (!dfa->tables[YYTD_ID_ACCEPT])
+ goto out;
+ if (state_count != dfa->tables[YYTD_ID_ACCEPT]->td_lolen)
+ goto out;
+ }
+ if (ACCEPT2_FLAGS(flags)) {
+ if (!dfa->tables[YYTD_ID_ACCEPT2])
+ goto out;
+ if (state_count != dfa->tables[YYTD_ID_ACCEPT2]->td_lolen)
+ goto out;
+ }
+ if (state_count != dfa->tables[YYTD_ID_DEF]->td_lolen)
+ goto out;
+
+ /* next.size == chk.size */
+ trans_count = dfa->tables[YYTD_ID_NXT]->td_lolen;
+ if (trans_count != dfa->tables[YYTD_ID_CHK]->td_lolen)
+ goto out;
+
+ /* if equivalence classes then its table size must be 256 */
+ if (dfa->tables[YYTD_ID_EC] &&
+ dfa->tables[YYTD_ID_EC]->td_lolen != 256)
+ goto out;
+
+ if (flags & DFA_FLAG_VERIFY_STATES) {
+ for (i = 0; i < state_count; i++) {
+ if (DEFAULT_TABLE(dfa)[i] >= state_count)
+ goto out;
+ if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) {
+ printk(KERN_ERR "AppArmor DFA next/check upper "
+ "bounds error\n");
+ goto out;
+ }
+ }
+
+ for (i = 0; i < trans_count; i++) {
+ if (NEXT_TABLE(dfa)[i] >= state_count)
+ goto out;
+ if (CHECK_TABLE(dfa)[i] >= state_count)
+ goto out;
+ }
+ }
+
+ error = 0;
+out:
+ return error;
+}
+
+/**
+ * dfa_free - free a dfa allocated by aa_dfa_unpack
+ * @dfa: the dfa to free (MAYBE NULL)
+ *
+ * Requires: reference count to dfa == 0
+ */
+static void dfa_free(struct aa_dfa *dfa)
+{
+ if (dfa) {
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dfa->tables); i++) {
+ kvfree(dfa->tables[i]);
+ dfa->tables[i] = NULL;
+ }
+ kfree(dfa);
+ }
+}
+
+/**
+ * aa_dfa_free_kref - free aa_dfa by kref (called by aa_put_dfa)
+ * @kref: kref callback for freeing of a dfa (NOT NULL)
+ */
+void aa_dfa_free_kref(struct kref *kref)
+{
+ struct aa_dfa *dfa = container_of(kref, struct aa_dfa, count);
+ dfa_free(dfa);
+}
+
+/**
+ * aa_dfa_unpack - unpack the binary tables of a serialized dfa
+ * @blob: aligned serialized stream of data to unpack (NOT NULL)
+ * @size: size of data to unpack
+ * @flags: flags controlling what type of accept tables are acceptable
+ *
+ * Unpack a dfa that has been serialized. To find information on the dfa
+ * format look in Documentation/security/apparmor.txt
+ * Assumes the dfa @blob stream has been aligned on an 8 byte boundary
+ *
+ * Returns: an unpacked dfa ready for matching or ERR_PTR on failure
+ */
+struct aa_dfa *aa_dfa_unpack(void *blob, size_t size, int flags)
+{
+ int hsize;
+ int error = -ENOMEM;
+ char *data = blob;
+ struct table_header *table = NULL;
+ struct aa_dfa *dfa = kzalloc(sizeof(struct aa_dfa), GFP_KERNEL);
+ if (!dfa)
+ goto fail;
+
+ kref_init(&dfa->count);
+
+ error = -EPROTO;
+
+ /* get dfa table set header */
+ if (size < sizeof(struct table_set_header))
+ goto fail;
+
+ if (ntohl(*(u32 *) data) != YYTH_MAGIC)
+ goto fail;
+
+ hsize = ntohl(*(u32 *) (data + 4));
+ if (size < hsize)
+ goto fail;
+
+ dfa->flags = ntohs(*(u16 *) (data + 12));
+ data += hsize;
+ size -= hsize;
+
+ while (size > 0) {
+ table = unpack_table(data, size);
+ if (!table)
+ goto fail;
+
+ switch (table->td_id) {
+ case YYTD_ID_ACCEPT:
+ if (!(table->td_flags & ACCEPT1_FLAGS(flags)))
+ goto fail;
+ break;
+ case YYTD_ID_ACCEPT2:
+ if (!(table->td_flags & ACCEPT2_FLAGS(flags)))
+ goto fail;
+ break;
+ case YYTD_ID_BASE:
+ if (table->td_flags != YYTD_DATA32)
+ goto fail;
+ break;
+ case YYTD_ID_DEF:
+ case YYTD_ID_NXT:
+ case YYTD_ID_CHK:
+ if (table->td_flags != YYTD_DATA16)
+ goto fail;
+ break;
+ case YYTD_ID_EC:
+ if (table->td_flags != YYTD_DATA8)
+ goto fail;
+ break;
+ default:
+ goto fail;
+ }
+ /* check for duplicate table entry */
+ if (dfa->tables[table->td_id])
+ goto fail;
+ dfa->tables[table->td_id] = table;
+ data += table_size(table->td_lolen, table->td_flags);
+ size -= table_size(table->td_lolen, table->td_flags);
+ table = NULL;
+ }
+
+ error = verify_dfa(dfa, flags);
+ if (error)
+ goto fail;
+
+ return dfa;
+
+fail:
+ kvfree(table);
+ dfa_free(dfa);
+ return ERR_PTR(error);
+}
+
+/**
+ * aa_dfa_match_len - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the string of bytes to match against the dfa (NOT NULL)
+ * @len: length of the string of bytes to match
+ *
+ * aa_dfa_match_len will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * This function will happily match against the 0 byte and only finishes
+ * when @len input is consumed.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
+ const char *str, int len)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start, pos;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+ for (; len; len--) {
+ pos = base_idx(base[state]) + equiv[(u8) *str++];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+ } else {
+ /* default is direct to next state */
+ for (; len; len--) {
+ pos = base_idx(base[state]) + (u8) *str++;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+ }
+
+ return state;
+}
+
+/**
+ * aa_dfa_match - traverse @dfa to find state @str stops at
+ * @dfa: the dfa to match @str against (NOT NULL)
+ * @start: the state of the dfa to start matching in
+ * @str: the null terminated string of bytes to match against the dfa (NOT NULL)
+ *
+ * aa_dfa_match will match @str against the dfa and return the state it
+ * finished matching in. The final state can be used to look up the accepting
+ * label, or as the start state of a continuing match.
+ *
+ * Returns: final state reached after input is consumed
+ */
+unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
+ const char *str)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start, pos;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+ while (*str) {
+ pos = base_idx(base[state]) + equiv[(u8) *str++];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+ } else {
+ /* default is direct to next state */
+ while (*str) {
+ pos = base_idx(base[state]) + (u8) *str++;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+ }
+
+ return state;
+}
+
+/**
+ * aa_dfa_next - step one character to the next state in the dfa
+ * @dfa: the dfa to traverse (NOT NULL)
+ * @state: the state to start in
+ * @c: the input character to transition on
+ *
+ * aa_dfa_next will step through the dfa by one input character @c
+ *
+ * Returns: state reached after input @c
+ */
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+ const char c)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int pos;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+
+ pos = base_idx(base[state]) + equiv[(u8) c];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ } else {
+ /* default is direct to next state */
+ pos = base_idx(base[state]) + (u8) c;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+
+ return state;
+}
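
All three matchers above use the same comb-compressed transition lookup: compute pos = base_idx(base[state]) + input, take next[pos] when check[pos] confirms the slot is owned by the current state, and otherwise fall back to def[state]. A standalone userspace sketch of that single step with a tiny hand-built table (the table contents are illustrative, not a real AppArmor DFA):

#include <stdio.h>
#include <stdint.h>

/* one comb-compressed transition, mirroring aa_dfa_next() without the
 * equivalence-class table */
static unsigned int dfa_step(const uint16_t *def, const uint32_t *base,
			     const uint16_t *next, const uint16_t *check,
			     unsigned int state, unsigned char c)
{
	unsigned int pos = (base[state] & 0xffffff) + c;	/* base_idx() */

	return check[pos] == state ? next[pos] : def[state];
}

int main(void)
{
	/* toy 3-state machine: state 1 -> state 2 on 'a'; any other input
	 * falls back to the default (state 2 resets to 1, state 1 stays) */
	uint16_t def[] = { 0, 1, 1 };
	uint32_t base[] = { 0, 0, 0 };
	static uint16_t next[257], check[257];
	unsigned int state = 1;
	const char *s;

	next['a'] = 2;		/* slot base[1] + 'a' is owned by state 1 */
	check['a'] = 1;

	for (s = "aba"; *s; s++) {
		state = dfa_step(def, base, next, check, state,
				 (unsigned char)*s);
		printf("after '%c': state %u\n", *s, state);
	}
	return 0;		/* prints 2, 1, 2 */
}
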
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
new file mode 100644
index 00000000000..35b394a75d7
--- /dev/null
+++ b/security/apparmor/path.c
@@ -0,0 +1,236 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor function for pathnames
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/magic.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/nsproxy.h>
+#include <linux/path.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/fs_struct.h>
+
+#include "include/apparmor.h"
+#include "include/path.h"
+#include "include/policy.h"
+
+
+/* modified from dcache.c */
+static int prepend(char **buffer, int buflen, const char *str, int namelen)
+{
+ buflen -= namelen;
+ if (buflen < 0)
+ return -ENAMETOOLONG;
+ *buffer -= namelen;
+ memcpy(*buffer, str, namelen);
+ return 0;
+}
+
+#define CHROOT_NSCONNECT (PATH_CHROOT_REL | PATH_CHROOT_NSCONNECT)
+
+/**
+ * d_namespace_path - lookup a name associated with a given path
+ * @path: path to lookup (NOT NULL)
+ * @buf: buffer to store path to (NOT NULL)
+ * @buflen: length of @buf
+ * @name: Returns - pointer to start of path name within @buf (NOT NULL)
+ * @flags: flags controlling path lookup
+ *
+ * Handle path name lookup.
+ *
+ * Returns: %0 else error code if path lookup fails
+ * When there is no error the path name is returned in @name, which
+ * points to a position in @buf
+ */
+static int d_namespace_path(struct path *path, char *buf, int buflen,
+ char **name, int flags)
+{
+ char *res;
+ int error = 0;
+ int connected = 1;
+
+ if (path->mnt->mnt_flags & MNT_INTERNAL) {
+ /* it's not mounted anywhere */
+ res = dentry_path(path->dentry, buf, buflen);
+ *name = res;
+ if (IS_ERR(res)) {
+ *name = buf;
+ return PTR_ERR(res);
+ }
+ if (path->dentry->d_sb->s_magic == PROC_SUPER_MAGIC &&
+ strncmp(*name, "/sys/", 5) == 0) {
+ /* TODO: convert over to using a per namespace
+ * control instead of hard coded /proc
+ */
+ return prepend(name, *name - buf, "/proc", 5);
+ }
+ return 0;
+ }
+
+ /* resolve paths relative to chroot? */
+ if (flags & PATH_CHROOT_REL) {
+ struct path root;
+ get_fs_root(current->fs, &root);
+ res = __d_path(path, &root, buf, buflen);
+ path_put(&root);
+ } else {
+ res = d_absolute_path(path, buf, buflen);
+ if (!our_mnt(path->mnt))
+ connected = 0;
+ }
+
+ /* handle error conditions - and still allow a partial path to
+ * be returned.
+ */
+ if (!res || IS_ERR(res)) {
+ if (PTR_ERR(res) == -ENAMETOOLONG)
+ return -ENAMETOOLONG;
+ connected = 0;
+ res = dentry_path_raw(path->dentry, buf, buflen);
+ if (IS_ERR(res)) {
+ error = PTR_ERR(res);
+ *name = buf;
+ goto out;
+ }
+ } else if (!our_mnt(path->mnt))
+ connected = 0;
+
+ *name = res;
+
+ /* Handle two cases:
+ * 1. A deleted dentry && profile is not allowing mediation of deleted
+ * 2. On some filesystems, newly allocated dentries appear to the
+ * security_path hooks as a deleted dentry except without an inode
+ * allocated.
+ */
+ if (d_unlinked(path->dentry) && path->dentry->d_inode &&
+ !(flags & PATH_MEDIATE_DELETED)) {
+ error = -ENOENT;
+ goto out;
+ }
+
+ /* If the path is not connected to the expected root,
+ * check if it is a sysctl and handle specially else remove any
+ * leading / that __d_path may have returned.
+ * Unless we are specifically directed to connect the path,
+ * OR we are in a chroot doing chroot relative paths and the path
+ * resolves to the namespace root (it would be connected outside
+ * of the chroot) and we are specifically directed to connect
+ * paths to the namespace root.
+ */
+ if (!connected) {
+ if (!(flags & PATH_CONNECT_PATH) &&
+ !(((flags & CHROOT_NSCONNECT) == CHROOT_NSCONNECT) &&
+ our_mnt(path->mnt))) {
+ /* disconnected path, don't return pathname starting
+ * with '/'
+ */
+ error = -EACCES;
+ if (*res == '/')
+ *name = res + 1;
+ }
+ }
+
+out:
+ return error;
+}
+
+/**
+ * get_name_to_buffer - get the pathname into a buffer, appending a trailing / for dirs
+ * @path: path to get name for (NOT NULL)
+ * @flags: flags controlling path lookup
+ * @buffer: buffer to put name in (NOT NULL)
+ * @size: size of buffer
+ * @name: Returns - contains position of path name in @buffer (NOT NULL)
+ * @info: Returns - information on why the lookup failed (MAYBE NULL)
+ *
+ * Returns: %0 else error on failure
+ */
+static int get_name_to_buffer(struct path *path, int flags, char *buffer,
+ int size, char **name, const char **info)
+{
+ int adjust = (flags & PATH_IS_DIR) ? 1 : 0;
+ int error = d_namespace_path(path, buffer, size - adjust, name, flags);
+
+ if (!error && (flags & PATH_IS_DIR) && (*name)[1] != '\0')
+ /*
+ * Append "/" to the pathname. The root directory is a special
+ * case; it already ends in slash.
+ */
+ strcpy(&buffer[size - 2], "/");
+
+ if (info && error) {
+ if (error == -ENOENT)
+ *info = "Failed name lookup - deleted entry";
+ else if (error == -EACCES)
+ *info = "Failed name lookup - disconnected path";
+ else if (error == -ENAMETOOLONG)
+ *info = "Failed name lookup - name too long";
+ else
+ *info = "Failed name lookup";
+ }
+
+ return error;
+}
+
+/**
+ * aa_path_name - compute the pathname of a file
+ * @path: path of the file (NOT NULL)
+ * @flags: flags controlling path name generation
+ * @buffer: Returns - buffer allocated by aa_path_name(), freed by the caller (NOT NULL)
+ * @name: Returns - the generated path name if !error (NOT NULL)
+ * @info: Returns - information on why the path lookup failed (MAYBE NULL)
+ *
+ * @name is a pointer to the beginning of the pathname (which usually differs
+ * from the beginning of the buffer), or NULL. If there is an error @name
+ * may contain a partial or invalid name that can be used for audit purposes,
+ * but it can not be used for mediation.
+ *
+ * We need PATH_IS_DIR to indicate whether the file is a directory or not
+ * because the file may not yet exist, and so we cannot check the inode's
+ * file type.
+ *
+ * Returns: %0 else error code if the name could not be retrieved
+ */
+int aa_path_name(struct path *path, int flags, char **buffer, const char **name,
+ const char **info)
+{
+ char *buf, *str = NULL;
+ int size = 256;
+ int error;
+
+ *name = NULL;
+ *buffer = NULL;
+ for (;;) {
+ /* freed by caller */
+ buf = kmalloc(size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ error = get_name_to_buffer(path, flags, buf, size, &str, info);
+ if (error != -ENAMETOOLONG)
+ break;
+
+ kfree(buf);
+ size <<= 1;
+ if (size > aa_g_path_max)
+ return -ENAMETOOLONG;
+ *info = NULL;
+ }
+ *buffer = buf;
+ *name = str;
+
+ return error;
+}
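
aa_path_name() above copes with long paths by retrying the lookup with a geometrically growing buffer, starting at 256 bytes and giving up once the size exceeds aa_g_path_max. The same shape works for any name-producing routine that signals truncation; a sketch under that assumption, where the callback is a hypothetical stand-in for get_name_to_buffer():

#include <errno.h>
#include <stdlib.h>

/* hypothetical producer: fills buf, or returns -ENAMETOOLONG when size is
 * too small */
typedef int (*name_fn)(char *buf, size_t size, void *cookie);

/* retry @fn with a doubling buffer, the way aa_path_name() retries
 * get_name_to_buffer(); on success the caller owns *bufp and must free it */
static int name_with_growing_buffer(name_fn fn, void *cookie, size_t max,
				    char **bufp)
{
	size_t size = 256;

	*bufp = NULL;
	for (;;) {
		char *buf = malloc(size);
		int error;

		if (!buf)
			return -ENOMEM;

		error = fn(buf, size, cookie);
		if (error != -ENAMETOOLONG) {
			*bufp = buf;	/* freed by the caller */
			return error;
		}

		free(buf);
		size <<= 1;
		if (size > max)
			return -ENAMETOOLONG;
	}
}
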
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
new file mode 100644
index 00000000000..705c2879d3a
--- /dev/null
+++ b/security/apparmor/policy.c
@@ -0,0 +1,1301 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy manipulation functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ * AppArmor policy is based around profiles, which contain the rules a
+ * task is confined by. Every task in the system has a profile attached
+ * to it determined either by matching "unconfined" tasks against the
+ * visible set of profiles or by following a profile's attachment rules.
+ *
+ * Each profile exists in a profile namespace which is a container of
+ * visible profiles. Each namespace contains a special "unconfined" profile,
+ * which doesn't enforce any confinement on a task beyond DAC.
+ *
+ * Namespace and profile names can be written together in either
+ * of two syntaxes.
+ * :namespace:profile - used by kernel interfaces for easy detection
+ * namespace://profile - used by policy
+ *
+ * Profile names can not start with : or @ or ^ and may not contain \0
+ *
+ * Reserved profile names
+ * unconfined - special automatically generated unconfined profile
+ * inherit - special name to indicate profile inheritance
+ * null-XXXX-YYYY - special automatically generated learning profiles
+ *
+ * Namespace names may not start with / or @ and may not contain \0 or :
+ * Reserved namespace names
+ * user-XXXX - user defined profiles
+ *
+ * a // in a profile or namespace name indicates a hierarchical name with the
+ * name before the // being the parent and the name after the child.
+ *
+ * Profile and namespace hierarchies serve two different but similar purposes.
+ * The namespace contains the set of visible profiles that are considered
+ * for attachment. The hierarchy of namespaces allows for virtualizing
+ * the namespace so that for example a chroot can have its own set of profiles
+ * which may define some local user namespaces.
+ * The profile hierarchy serves two distinct purposes,
+ * - it allows for sub profiles or hats, which allows an application to run
+ *   subprograms under its own profile with different restrictions than
+ *   itself, instead of having the subprogram use the system profile.
+ *   eg. if a mail program starts an editor, the policy might make the
+ *   restrictions on the editor tighter than those on the mail program,
+ *   and definitely different than general editor restrictions
+ * - it allows for a binary hierarchy of profiles, so that execution history
+ *   is preserved. This feature isn't exploited by the AppArmor reference
+ *   policy but is allowed. NOTE: this is currently suboptimal because
+ *   profile aliasing is not yet implemented, so a profile for each level
+ *   must be defined.
+ * eg. /bin/bash///bin/ls as a name would indicate /bin/ls was started
+ * from /bin/bash
+ *
+ * A profile or namespace name that can contain one or more // separators
+ * is referred to as an hname (hierarchical).
+ * eg. /bin/bash//bin/ls
+ *
+ * An fqname is a name that may contain both namespace and profile hnames.
+ * eg. :ns:/bin/bash//bin/ls
+ *
+ * NOTES:
+ * - locking of profile lists is currently fairly coarse. All profile
+ * lists within a namespace use the namespace lock.
+ * FIXME: move profile lists to using rcu_lists
+ */
+
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/string.h>
+
+#include "include/apparmor.h"
+#include "include/capability.h"
+#include "include/context.h"
+#include "include/file.h"
+#include "include/ipc.h"
+#include "include/match.h"
+#include "include/path.h"
+#include "include/policy.h"
+#include "include/policy_unpack.h"
+#include "include/resource.h"
+
+
+/* root profile namespace */
+struct aa_namespace *root_ns;
+
+const char *const aa_profile_mode_names[] = {
+ "enforce",
+ "complain",
+ "kill",
+ "unconfined",
+};
+
+/**
+ * hname_tail - find the last component of an hname
+ * @hname: hname to find the base profile name component of (NOT NULL)
+ *
+ * Returns: the tail (base profile name) name component of an hname
+ */
+static const char *hname_tail(const char *hname)
+{
+ char *split;
+ hname = strim((char *)hname);
+ for (split = strstr(hname, "//"); split; split = strstr(hname, "//"))
+ hname = split + 2;
+
+ return hname;
+}
+
+/**
+ * policy_init - initialize a policy structure
+ * @policy: policy to initialize (NOT NULL)
+ * @prefix: prefix name if any is required. (MAYBE NULL)
+ * @name: name of the policy, init will make a copy of it (NOT NULL)
+ *
+ * Note: this fn creates a copy of strings passed in
+ *
+ * Returns: true if policy init successful
+ */
+static bool policy_init(struct aa_policy *policy, const char *prefix,
+ const char *name)
+{
+ /* freed by policy_free */
+ if (prefix) {
+ policy->hname = kmalloc(strlen(prefix) + strlen(name) + 3,
+ GFP_KERNEL);
+ if (policy->hname)
+ sprintf(policy->hname, "%s//%s", prefix, name);
+ } else
+ policy->hname = kstrdup(name, GFP_KERNEL);
+ if (!policy->hname)
+ return 0;
+ /* base.name is a substring of fqname */
+ policy->name = (char *)hname_tail(policy->hname);
+ INIT_LIST_HEAD(&policy->list);
+ INIT_LIST_HEAD(&policy->profiles);
+
+ return 1;
+}
+
+/**
+ * policy_destroy - free the elements referenced by @policy
+ * @policy: policy that is to have its elements freed (NOT NULL)
+ */
+static void policy_destroy(struct aa_policy *policy)
+{
+ /* still contains profiles -- invalid */
+ if (on_list_rcu(&policy->profiles)) {
+ AA_ERROR("%s: internal error, "
+ "policy '%s' still contains profiles\n",
+ __func__, policy->name);
+ BUG();
+ }
+ if (on_list_rcu(&policy->list)) {
+ AA_ERROR("%s: internal error, policy '%s' still on list\n",
+ __func__, policy->name);
+ BUG();
+ }
+
+ /* don't free name as its a subset of hname */
+ kzfree(policy->hname);
+}
+
+/**
+ * __policy_find - find a policy by @name on a policy list
+ * @head: list to search (NOT NULL)
+ * @name: name to search for (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy that matches @name or NULL if not found
+ */
+static struct aa_policy *__policy_find(struct list_head *head, const char *name)
+{
+ struct aa_policy *policy;
+
+ list_for_each_entry_rcu(policy, head, list) {
+ if (!strcmp(policy->name, name))
+ return policy;
+ }
+ return NULL;
+}
+
+/**
+ * __policy_strn_find - find a policy whose name matches @len chars of @str
+ * @head: list to search (NOT NULL)
+ * @str: string to search for (NOT NULL)
+ * @len: length of match required
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy that matches @str or NULL if not found
+ *
+ * if @len == strlen(@str) then this is equivalent to __policy_find,
+ * otherwise it allows searching for a policy by a partial match of name
+ */
+static struct aa_policy *__policy_strn_find(struct list_head *head,
+ const char *str, int len)
+{
+ struct aa_policy *policy;
+
+ list_for_each_entry_rcu(policy, head, list) {
+ if (aa_strneq(policy->name, str, len))
+ return policy;
+ }
+
+ return NULL;
+}
+
+/*
+ * Routines for AppArmor namespaces
+ */
+
+static const char *hidden_ns_name = "---";
+/**
+ * aa_ns_visible - test if @view is visible from @curr
+ * @curr: namespace to treat as the parent (NOT NULL)
+ * @view: namespace to test if visible from @curr (NOT NULL)
+ *
+ * Returns: true if @view is visible from @curr else false
+ */
+bool aa_ns_visible(struct aa_namespace *curr, struct aa_namespace *view)
+{
+ if (curr == view)
+ return true;
+
+ for ( ; view; view = view->parent) {
+ if (view->parent == curr)
+ return true;
+ }
+ return false;
+}
+
+/**
+ * aa_ns_name - Find the ns name to display for @view from @curr
+ * @curr: current namespace (NOT NULL)
+ * @view: namespace attempting to view (NOT NULL)
+ *
+ * Returns: name of @view visible from @curr
+ */
+const char *aa_ns_name(struct aa_namespace *curr, struct aa_namespace *view)
+{
+ /* if view == curr then the namespace name isn't displayed */
+ if (curr == view)
+ return "";
+
+ if (aa_ns_visible(curr, view)) {
+ /* at this point if a ns is visible it is in a view ns
+ * thus the curr ns.hname is a prefix of its name.
+ * Only output the virtualized portion of the name
+ * Add + 2 to skip over // separating curr hname prefix
+ * from the visible tail of the views hname
+ */
+ return view->base.hname + strlen(curr->base.hname) + 2;
+ } else
+ return hidden_ns_name;
+}
+
+/**
+ * alloc_namespace - allocate, initialize and return a new namespace
+ * @prefix: parent namespace name (MAYBE NULL)
+ * @name: a preallocated name (NOT NULL)
+ *
+ * Returns: refcounted namespace or NULL on failure.
+ */
+static struct aa_namespace *alloc_namespace(const char *prefix,
+ const char *name)
+{
+ struct aa_namespace *ns;
+
+ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
+ AA_DEBUG("%s(%p)\n", __func__, ns);
+ if (!ns)
+ return NULL;
+ if (!policy_init(&ns->base, prefix, name))
+ goto fail_ns;
+
+ INIT_LIST_HEAD(&ns->sub_ns);
+ mutex_init(&ns->lock);
+
+ /* released by free_namespace */
+ ns->unconfined = aa_alloc_profile("unconfined");
+ if (!ns->unconfined)
+ goto fail_unconfined;
+
+ ns->unconfined->flags = PFLAG_IX_ON_NAME_ERROR |
+ PFLAG_IMMUTABLE | PFLAG_NS_COUNT;
+ ns->unconfined->mode = APPARMOR_UNCONFINED;
+
+ /* ns and ns->unconfined share ns->unconfined refcount */
+ ns->unconfined->ns = ns;
+
+ atomic_set(&ns->uniq_null, 0);
+
+ return ns;
+
+fail_unconfined:
+ kzfree(ns->base.hname);
+fail_ns:
+ kzfree(ns);
+ return NULL;
+}
+
+/**
+ * free_namespace - free a profile namespace
+ * @ns: the namespace to free (MAYBE NULL)
+ *
+ * Requires: All references to the namespace must have been put, if the
+ * namespace was referenced by a profile confining a task.
+ */
+static void free_namespace(struct aa_namespace *ns)
+{
+ if (!ns)
+ return;
+
+ policy_destroy(&ns->base);
+ aa_put_namespace(ns->parent);
+
+ ns->unconfined->ns = NULL;
+ aa_free_profile(ns->unconfined);
+ kzfree(ns);
+}
+
+/**
+ * __aa_find_namespace - find a namespace on a list by @name
+ * @head: list to search for namespace on (NOT NULL)
+ * @name: name of namespace to look for (NOT NULL)
+ *
+ * Returns: unrefcounted namespace
+ *
+ * Requires: rcu_read_lock be held
+ */
+static struct aa_namespace *__aa_find_namespace(struct list_head *head,
+ const char *name)
+{
+ return (struct aa_namespace *)__policy_find(head, name);
+}
+
+/**
+ * aa_find_namespace - look up a profile namespace on the namespace list
+ * @root: namespace to search in (NOT NULL)
+ * @name: name of namespace to find (NOT NULL)
+ *
+ * Returns: a refcounted namespace on the list, or NULL if no namespace
+ * called @name exists.
+ *
+ * refcount released by caller
+ */
+struct aa_namespace *aa_find_namespace(struct aa_namespace *root,
+ const char *name)
+{
+ struct aa_namespace *ns = NULL;
+
+ rcu_read_lock();
+ ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name));
+ rcu_read_unlock();
+
+ return ns;
+}
+
+/**
+ * aa_prepare_namespace - find an existing or create a new namespace of @name
+ * @name: the namespace to find or add (MAYBE NULL)
+ *
+ * Returns: refcounted namespace or NULL if failed to create one
+ */
+static struct aa_namespace *aa_prepare_namespace(const char *name)
+{
+ struct aa_namespace *ns, *root;
+
+ root = aa_current_profile()->ns;
+
+ mutex_lock(&root->lock);
+
+ /* if name isn't specified the profile is loaded to the current ns */
+ if (!name) {
+ /* released by caller */
+ ns = aa_get_namespace(root);
+ goto out;
+ }
+
+ /* try and find the specified ns and if it doesn't exist create it */
+ /* released by caller */
+ ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name));
+ if (!ns) {
+ ns = alloc_namespace(root->base.hname, name);
+ if (!ns)
+ goto out;
+ if (__aa_fs_namespace_mkdir(ns, ns_subns_dir(root), name)) {
+ AA_ERROR("Failed to create interface for ns %s\n",
+ ns->base.name);
+ free_namespace(ns);
+ ns = NULL;
+ goto out;
+ }
+ ns->parent = aa_get_namespace(root);
+ list_add_rcu(&ns->base.list, &root->sub_ns);
+ /* add list ref */
+ aa_get_namespace(ns);
+ }
+out:
+ mutex_unlock(&root->lock);
+
+ /* return ref */
+ return ns;
+}
+
+/**
+ * __list_add_profile - add a profile to a list
+ * @list: list to add it to (NOT NULL)
+ * @profile: the profile to add (NOT NULL)
+ *
+ * refcount @profile, should be put by __list_remove_profile
+ *
+ * Requires: namespace lock be held, or list not be shared
+ */
+static void __list_add_profile(struct list_head *list,
+ struct aa_profile *profile)
+{
+ list_add_rcu(&profile->base.list, list);
+ /* get list reference */
+ aa_get_profile(profile);
+}
+
+/**
+ * __list_remove_profile - remove a profile from the list it is on
+ * @profile: the profile to remove (NOT NULL)
+ *
+ * Remove a profile from the list. Warning: generally removal should
+ * be done with __replace_profile as most profile removals are
+ * replacements to the unconfined profile.
+ *
+ * put @profile list refcount
+ *
+ * Requires: namespace lock be held, or list not have been live
+ */
+static void __list_remove_profile(struct aa_profile *profile)
+{
+ list_del_rcu(&profile->base.list);
+ aa_put_profile(profile);
+}
+
+static void __profile_list_release(struct list_head *head);
+
+/**
+ * __remove_profile - remove old profile, and children
+ * @profile: profile to be replaced (NOT NULL)
+ *
+ * Requires: namespace list lock be held, or list not be shared
+ */
+static void __remove_profile(struct aa_profile *profile)
+{
+ /* release any children lists first */
+ __profile_list_release(&profile->base.profiles);
+ /* released by free_profile */
+ __aa_update_replacedby(profile, profile->ns->unconfined);
+ __aa_fs_profile_rmdir(profile);
+ __list_remove_profile(profile);
+}
+
+/**
+ * __profile_list_release - remove all profiles on the list and put refs
+ * @head: list of profiles (NOT NULL)
+ *
+ * Requires: namespace lock be held
+ */
+static void __profile_list_release(struct list_head *head)
+{
+ struct aa_profile *profile, *tmp;
+ list_for_each_entry_safe(profile, tmp, head, base.list)
+ __remove_profile(profile);
+}
+
+static void __ns_list_release(struct list_head *head);
+
+/**
+ * destroy_namespace - remove everything contained by @ns
+ * @ns: namespace to have its contents removed (NOT NULL)
+ */
+static void destroy_namespace(struct aa_namespace *ns)
+{
+ if (!ns)
+ return;
+
+ mutex_lock(&ns->lock);
+ /* release all profiles in this namespace */
+ __profile_list_release(&ns->base.profiles);
+
+ /* release all sub namespaces */
+ __ns_list_release(&ns->sub_ns);
+
+ if (ns->parent)
+ __aa_update_replacedby(ns->unconfined, ns->parent->unconfined);
+ __aa_fs_namespace_rmdir(ns);
+ mutex_unlock(&ns->lock);
+}
+
+/**
+ * __remove_namespace - remove a namespace and all its children
+ * @ns: namespace to be removed (NOT NULL)
+ *
+ * Requires: ns->parent->lock be held and ns removed from parent.
+ */
+static void __remove_namespace(struct aa_namespace *ns)
+{
+ /* remove ns from namespace list */
+ list_del_rcu(&ns->base.list);
+ destroy_namespace(ns);
+ aa_put_namespace(ns);
+}
+
+/**
+ * __ns_list_release - remove all profile namespaces on the list and put refs
+ * @head: list of profile namespaces (NOT NULL)
+ *
+ * Requires: namespace lock be held
+ */
+static void __ns_list_release(struct list_head *head)
+{
+ struct aa_namespace *ns, *tmp;
+ list_for_each_entry_safe(ns, tmp, head, base.list)
+ __remove_namespace(ns);
+
+}
+
+/**
+ * aa_alloc_root_ns - allocate the root profile namespace
+ *
+ * Returns: %0 on success else error
+ *
+ */
+int __init aa_alloc_root_ns(void)
+{
+ /* released by aa_free_root_ns - used as list ref */
+ root_ns = alloc_namespace(NULL, "root");
+ if (!root_ns)
+ return -ENOMEM;
+
+ return 0;
+}
+
+/**
+ * aa_free_root_ns - free the root profile namespace
+ */
+void __init aa_free_root_ns(void)
+{
+ struct aa_namespace *ns = root_ns;
+ root_ns = NULL;
+
+ destroy_namespace(ns);
+ aa_put_namespace(ns);
+}
+
+
+static void free_replacedby(struct aa_replacedby *r)
+{
+ if (r) {
+ /* r->profile will not be updated any more as r is dead */
+ aa_put_profile(rcu_dereference_protected(r->profile, true));
+ kzfree(r);
+ }
+}
+
+
+void aa_free_replacedby_kref(struct kref *kref)
+{
+ struct aa_replacedby *r = container_of(kref, struct aa_replacedby,
+ count);
+ free_replacedby(r);
+}
+
+/**
+ * aa_free_profile - free a profile
+ * @profile: the profile to free (MAYBE NULL)
+ *
+ * Free a profile, its hats and null_profile. All references to the profile,
+ * its hats and null_profile must have been put.
+ *
+ * If the profile was referenced from a task context, free_profile() will
+ * be called from an rcu callback routine, so we must not sleep here.
+ */
+void aa_free_profile(struct aa_profile *profile)
+{
+ AA_DEBUG("%s(%p)\n", __func__, profile);
+
+ if (!profile)
+ return;
+
+ /* free children profiles */
+ policy_destroy(&profile->base);
+ aa_put_profile(rcu_access_pointer(profile->parent));
+
+ aa_put_namespace(profile->ns);
+ kzfree(profile->rename);
+
+ aa_free_file_rules(&profile->file);
+ aa_free_cap_rules(&profile->caps);
+ aa_free_rlimit_rules(&profile->rlimits);
+
+ kzfree(profile->dirname);
+ aa_put_dfa(profile->xmatch);
+ aa_put_dfa(profile->policy.dfa);
+ aa_put_replacedby(profile->replacedby);
+
+ kzfree(profile->hash);
+ kzfree(profile);
+}
+
+/**
+ * aa_free_profile_rcu - free aa_profile by rcu (called by aa_free_profile_kref)
+ * @head: rcu_head callback for freeing of a profile (NOT NULL)
+ */
+static void aa_free_profile_rcu(struct rcu_head *head)
+{
+ struct aa_profile *p = container_of(head, struct aa_profile, rcu);
+ if (p->flags & PFLAG_NS_COUNT)
+ free_namespace(p->ns);
+ else
+ aa_free_profile(p);
+}
+
+/**
+ * aa_free_profile_kref - free aa_profile by kref (called by aa_put_profile)
+ * @kref: kref callback for freeing of a profile (NOT NULL)
+ */
+void aa_free_profile_kref(struct kref *kref)
+{
+ struct aa_profile *p = container_of(kref, struct aa_profile, count);
+ call_rcu(&p->rcu, aa_free_profile_rcu);
+}
+
+/**
+ * aa_alloc_profile - allocate, initialize and return a new profile
+ * @hname: name of the profile (NOT NULL)
+ *
+ * Returns: refcounted profile or NULL on failure
+ */
+struct aa_profile *aa_alloc_profile(const char *hname)
+{
+ struct aa_profile *profile;
+
+ /* freed by free_profile - usually through aa_put_profile */
+ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
+ if (!profile)
+ return NULL;
+
+ profile->replacedby = kzalloc(sizeof(struct aa_replacedby), GFP_KERNEL);
+ if (!profile->replacedby)
+ goto fail;
+ kref_init(&profile->replacedby->count);
+
+ if (!policy_init(&profile->base, NULL, hname))
+ goto fail;
+ kref_init(&profile->count);
+
+ /* refcount released by caller */
+ return profile;
+
+fail:
+ kzfree(profile->replacedby);
+ kzfree(profile);
+
+ return NULL;
+}
+
+/**
+ * aa_new_null_profile - create a new null-X learning profile
+ * @parent: profile that caused this profile to be created (NOT NULL)
+ * @hat: true if the null- learning profile is a hat
+ *
+ * Create a null- complain mode profile used in learning mode. The name of
+ * the profile is unique and follows the format of parent//null-<uniq>.
+ *
+ * null profiles are added to the profile list but the list does not
+ * hold a count on them so that they are automatically released when
+ * not in use.
+ *
+ * Returns: new refcounted profile else NULL on failure
+ */
+struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat)
+{
+ struct aa_profile *profile = NULL;
+ char *name;
+ int uniq = atomic_inc_return(&parent->ns->uniq_null);
+
+ /* freed below */
+ name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, GFP_KERNEL);
+ if (!name)
+ goto fail;
+ sprintf(name, "%s//null-%x", parent->base.hname, uniq);
+
+ profile = aa_alloc_profile(name);
+ kfree(name);
+ if (!profile)
+ goto fail;
+
+ profile->mode = APPARMOR_COMPLAIN;
+ profile->flags = PFLAG_NULL;
+ if (hat)
+ profile->flags |= PFLAG_HAT;
+
+ /* released on free_profile */
+ rcu_assign_pointer(profile->parent, aa_get_profile(parent));
+ profile->ns = aa_get_namespace(parent->ns);
+
+ mutex_lock(&profile->ns->lock);
+ __list_add_profile(&parent->base.profiles, profile);
+ mutex_unlock(&profile->ns->lock);
+
+ /* refcount released by caller */
+ return profile;
+
+fail:
+ return NULL;
+}
+
+/* TODO: profile accounting - setup in remove */
+
+/**
+ * __find_child - find a profile on @head list with a name matching @name
+ * @head: list to search (NOT NULL)
+ * @name: name of profile (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile ptr, or NULL if not found
+ */
+static struct aa_profile *__find_child(struct list_head *head, const char *name)
+{
+ return (struct aa_profile *)__policy_find(head, name);
+}
+
+/**
+ * __strn_find_child - find a profile on @head list using substring of @name
+ * @head: list to search (NOT NULL)
+ * @name: name of profile (NOT NULL)
+ * @len: length of @name substring to match
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile ptr, or NULL if not found
+ */
+static struct aa_profile *__strn_find_child(struct list_head *head,
+ const char *name, int len)
+{
+ return (struct aa_profile *)__policy_strn_find(head, name, len);
+}
+
+/**
+ * aa_find_child - find a profile by @name in @parent
+ * @parent: profile to search (NOT NULL)
+ * @name: profile name to search for (NOT NULL)
+ *
+ * Returns: a refcounted profile or NULL if not found
+ */
+struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
+{
+ struct aa_profile *profile;
+
+ rcu_read_lock();
+ profile = aa_get_profile(__find_child(&parent->base.profiles, name));
+ rcu_read_unlock();
+
+ /* refcount released by caller */
+ return profile;
+}
+
+/**
+ * __lookup_parent - lookup the parent of a profile of name @hname
+ * @ns: namespace to lookup profile in (NOT NULL)
+ * @hname: hierarchical profile name to find parent of (NOT NULL)
+ *
+ * Looks up the parent of a fully qualified profile name; the profile
+ * that matches hname does not need to exist, in general this
+ * is used to load a new profile.
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted policy or NULL if not found
+ */
+static struct aa_policy *__lookup_parent(struct aa_namespace *ns,
+ const char *hname)
+{
+ struct aa_policy *policy;
+ struct aa_profile *profile = NULL;
+ char *split;
+
+ policy = &ns->base;
+
+ for (split = strstr(hname, "//"); split;) {
+ profile = __strn_find_child(&policy->profiles, hname,
+ split - hname);
+ if (!profile)
+ return NULL;
+ policy = &profile->base;
+ hname = split + 2;
+ split = strstr(hname, "//");
+ }
+ if (!profile)
+ return &ns->base;
+ return &profile->base;
+}
+
+/**
+ * __lookup_profile - lookup the profile matching @hname
+ * @base: base list to start looking up profile name from (NOT NULL)
+ * @hname: hierarchical profile name (NOT NULL)
+ *
+ * Requires: rcu_read_lock be held
+ *
+ * Returns: unrefcounted profile pointer or NULL if not found
+ *
+ * Do a relative name lookup, recursing through profile tree.
+ */
+static struct aa_profile *__lookup_profile(struct aa_policy *base,
+ const char *hname)
+{
+ struct aa_profile *profile = NULL;
+ char *split;
+
+ for (split = strstr(hname, "//"); split;) {
+ profile = __strn_find_child(&base->profiles, hname,
+ split - hname);
+ if (!profile)
+ return NULL;
+
+ base = &profile->base;
+ hname = split + 2;
+ split = strstr(hname, "//");
+ }
+
+ profile = __find_child(&base->profiles, hname);
+
+ return profile;
+}
+
+/**
+ * aa_lookup_profile - find a profile by its full or partial name
+ * @ns: the namespace to start from (NOT NULL)
+ * @hname: name to do lookup on. Does not contain namespace prefix (NOT NULL)
+ *
+ * Returns: refcounted profile or NULL if not found
+ */
+struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *hname)
+{
+ struct aa_profile *profile;
+
+ rcu_read_lock();
+ do {
+ profile = __lookup_profile(&ns->base, hname);
+ } while (profile && !aa_get_profile_not0(profile));
+ rcu_read_unlock();
+
+ /* the unconfined profile is not in the regular profile list */
+ if (!profile && strcmp(hname, "unconfined") == 0)
+ profile = aa_get_newest_profile(ns->unconfined);
+
+ /* refcount released by caller */
+ return profile;
+}
+
+/**
+ * replacement_allowed - test to see if replacement is allowed
+ * @profile: profile to test if it can be replaced (MAYBE NULL)
+ * @noreplace: true if replacement shouldn't be allowed but addition is okay
+ * @info: Returns - info about why replacement failed (NOT NULL)
+ *
+ * Returns: %0 if replacement allowed else error code
+ */
+static int replacement_allowed(struct aa_profile *profile, int noreplace,
+ const char **info)
+{
+ if (profile) {
+ if (profile->flags & PFLAG_IMMUTABLE) {
+ *info = "cannot replace immutable profile";
+ return -EPERM;
+ } else if (noreplace) {
+ *info = "profile already exists";
+ return -EEXIST;
+ }
+ }
+ return 0;
+}
+
+/**
+ * audit_policy - Do auditing of policy changes
+ * @op: policy operation being performed
+ * @gfp: memory allocation flags
+ * @name: name of profile being manipulated (NOT NULL)
+ * @info: any extra information to be audited (MAYBE NULL)
+ * @error: error code
+ *
+ * Returns: the error to be returned after audit is done
+ */
+static int audit_policy(int op, gfp_t gfp, const char *name, const char *info,
+ int error)
+{
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.op = op;
+ aad.name = name;
+ aad.info = info;
+ aad.error = error;
+
+ return aa_audit(AUDIT_APPARMOR_STATUS, __aa_current_profile(), gfp,
+ &sa, NULL);
+}
+
+/**
+ * aa_may_manage_policy - can the current task manage policy
+ * @op: the policy manipulation operation being done
+ *
+ * Returns: true if the task is allowed to manipulate policy
+ */
+bool aa_may_manage_policy(int op)
+{
+ /* check if loading policy is locked out */
+ if (aa_g_lock_policy) {
+ audit_policy(op, GFP_KERNEL, NULL, "policy_locked", -EACCES);
+ return 0;
+ }
+
+ if (!capable(CAP_MAC_ADMIN)) {
+ audit_policy(op, GFP_KERNEL, NULL, "not policy admin", -EACCES);
+ return 0;
+ }
+
+ return 1;
+}
+
+static struct aa_profile *__list_lookup_parent(struct list_head *lh,
+ struct aa_profile *profile)
+{
+ const char *base = hname_tail(profile->base.hname);
+ long len = base - profile->base.hname;
+ struct aa_load_ent *ent;
+
+ /* parent won't have trailing // so remove from len */
+ if (len <= 2)
+ return NULL;
+ len -= 2;
+
+ list_for_each_entry(ent, lh, list) {
+ if (ent->new == profile)
+ continue;
+ if (strncmp(ent->new->base.hname, profile->base.hname, len) ==
+ 0 && ent->new->base.hname[len] == 0)
+ return ent->new;
+ }
+
+ return NULL;
+}
+
+/**
+ * __replace_profile - replace @old with @new on a list
+ * @old: profile to be replaced (NOT NULL)
+ * @new: profile to replace @old with (NOT NULL)
+ * @share_replacedby: transfer @old->replacedby to @new
+ *
+ * Will duplicate and refcount elements that @new inherits from @old
+ * and will inherit @old children.
+ *
+ * refcount @new for list, put @old list refcount
+ *
+ * Requires: namespace list lock be held, or list not be shared
+ */
+static void __replace_profile(struct aa_profile *old, struct aa_profile *new,
+ bool share_replacedby)
+{
+ struct aa_profile *child, *tmp;
+
+ if (!list_empty(&old->base.profiles)) {
+ LIST_HEAD(lh);
+ list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu);
+
+ list_for_each_entry_safe(child, tmp, &lh, base.list) {
+ struct aa_profile *p;
+
+ list_del_init(&child->base.list);
+ p = __find_child(&new->base.profiles, child->base.name);
+ if (p) {
+ /* @p replaces @child */
+ __replace_profile(child, p, share_replacedby);
+ continue;
+ }
+
+ /* inherit @child and its children */
+ /* TODO: update hname of inherited children */
+ /* list refcount transferred to @new */
+ p = aa_deref_parent(child);
+ rcu_assign_pointer(child->parent, aa_get_profile(new));
+ list_add_rcu(&child->base.list, &new->base.profiles);
+ aa_put_profile(p);
+ }
+ }
+
+ if (!rcu_access_pointer(new->parent)) {
+ struct aa_profile *parent = aa_deref_parent(old);
+ rcu_assign_pointer(new->parent, aa_get_profile(parent));
+ }
+ __aa_update_replacedby(old, new);
+ if (share_replacedby) {
+ aa_put_replacedby(new->replacedby);
+ new->replacedby = aa_get_replacedby(old->replacedby);
+ } else if (!rcu_access_pointer(new->replacedby->profile))
+ /* aafs interface uses replacedby */
+ rcu_assign_pointer(new->replacedby->profile,
+ aa_get_profile(new));
+ __aa_fs_profile_migrate_dents(old, new);
+
+ if (list_empty(&new->base.list)) {
+ /* new is not on a list already */
+ list_replace_rcu(&old->base.list, &new->base.list);
+ aa_get_profile(new);
+ aa_put_profile(old);
+ } else
+ __list_remove_profile(old);
+}
+
+/**
+ * __lookup_replace - lookup replacement information for a profile
+ * @ns: namespace the lookup occurs in
+ * @hname: name of profile to lookup
+ * @noreplace: true if not replacing an existing profile
+ * @p: Returns - profile to be replaced (no ref)
+ * @info: Returns - info string on why lookup failed
+ *
+ * Returns: %0 on success, with *@p set to the profile to replace (no ref),
+ *          else error code on failure
+ */
+static int __lookup_replace(struct aa_namespace *ns, const char *hname,
+ bool noreplace, struct aa_profile **p,
+ const char **info)
+{
+ *p = aa_get_profile(__lookup_profile(&ns->base, hname));
+ if (*p) {
+ int error = replacement_allowed(*p, noreplace, info);
+ if (error) {
+ *info = "profile can not be replaced";
+ return error;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * aa_replace_profiles - replace profile(s) on the profile list
+ * @udata: serialized data stream (NOT NULL)
+ * @size: size of the serialized data stream
+ * @noreplace: true if only doing addition, no replacement allowed
+ *
+ * unpack and replace a profile on the profile list, and update any uses of
+ * that profile by an aa_task_cxt. If the profile does not exist on the list
+ * it is added.
+ *
+ * Returns: size of data consumed else error code on failure.
+ */
+ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
+{
+ const char *ns_name, *name = NULL, *info = NULL;
+ struct aa_namespace *ns = NULL;
+ struct aa_load_ent *ent, *tmp;
+ int op = OP_PROF_REPL;
+ ssize_t error;
+ LIST_HEAD(lh);
+
+ /* released below */
+ error = aa_unpack(udata, size, &lh, &ns_name);
+ if (error)
+ goto out;
+
+ /* released below */
+ ns = aa_prepare_namespace(ns_name);
+ if (!ns) {
+ info = "failed to prepare namespace";
+ error = -ENOMEM;
+ name = ns_name;
+ goto fail;
+ }
+
+ mutex_lock(&ns->lock);
+ /* setup parent and ns info */
+ list_for_each_entry(ent, &lh, list) {
+ struct aa_policy *policy;
+
+ name = ent->new->base.hname;
+ error = __lookup_replace(ns, ent->new->base.hname, noreplace,
+ &ent->old, &info);
+ if (error)
+ goto fail_lock;
+
+ if (ent->new->rename) {
+ error = __lookup_replace(ns, ent->new->rename,
+ noreplace, &ent->rename,
+ &info);
+ if (error)
+ goto fail_lock;
+ }
+
+ /* released when @new is freed */
+ ent->new->ns = aa_get_namespace(ns);
+
+ if (ent->old || ent->rename)
+ continue;
+
+ /* no ref on policy only use inside lock */
+ policy = __lookup_parent(ns, ent->new->base.hname);
+ if (!policy) {
+ struct aa_profile *p;
+ p = __list_lookup_parent(&lh, ent->new);
+ if (!p) {
+ error = -ENOENT;
+ info = "parent does not exist";
+ name = ent->new->base.hname;
+ goto fail_lock;
+ }
+ rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+ } else if (policy != &ns->base) {
+ /* released on profile replacement or free_profile */
+ struct aa_profile *p = (struct aa_profile *) policy;
+ rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+ }
+ }
+
+ /* create new fs entries for introspection if needed */
+ list_for_each_entry(ent, &lh, list) {
+ if (ent->old) {
+ /* inherit old interface files */
+
+ /* if (ent->rename)
+ TODO: support rename */
+ /* } else if (ent->rename) {
+ TODO: support rename */
+ } else {
+ struct dentry *parent;
+ if (rcu_access_pointer(ent->new->parent)) {
+ struct aa_profile *p;
+ p = aa_deref_parent(ent->new);
+ parent = prof_child_dir(p);
+ } else
+ parent = ns_subprofs_dir(ent->new->ns);
+ error = __aa_fs_profile_mkdir(ent->new, parent);
+ }
+
+ if (error) {
+ info = "failed to create ";
+ goto fail_lock;
+ }
+ }
+
+ /* Done with checks that may fail - do actual replacement */
+ list_for_each_entry_safe(ent, tmp, &lh, list) {
+ list_del_init(&ent->list);
+ op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
+
+ audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error);
+
+ if (ent->old) {
+ __replace_profile(ent->old, ent->new, 1);
+ if (ent->rename) {
+ /* aafs interface uses replacedby */
+ struct aa_replacedby *r = ent->new->replacedby;
+ rcu_assign_pointer(r->profile,
+ aa_get_profile(ent->new));
+ __replace_profile(ent->rename, ent->new, 0);
+ }
+ } else if (ent->rename) {
+ /* aafs interface uses replacedby */
+ rcu_assign_pointer(ent->new->replacedby->profile,
+ aa_get_profile(ent->new));
+ __replace_profile(ent->rename, ent->new, 0);
+ } else if (ent->new->parent) {
+ struct aa_profile *parent, *newest;
+ parent = aa_deref_parent(ent->new);
+ newest = aa_get_newest_profile(parent);
+
+ /* parent replaced in this atomic set? */
+ if (newest != parent) {
+ aa_get_profile(newest);
+ aa_put_profile(parent);
+ rcu_assign_pointer(ent->new->parent, newest);
+ } else
+ aa_put_profile(newest);
+ /* aafs interface uses replacedby */
+ rcu_assign_pointer(ent->new->replacedby->profile,
+ aa_get_profile(ent->new));
+ __list_add_profile(&parent->base.profiles, ent->new);
+ } else {
+ /* aafs interface uses replacedby */
+ rcu_assign_pointer(ent->new->replacedby->profile,
+ aa_get_profile(ent->new));
+ __list_add_profile(&ns->base.profiles, ent->new);
+ }
+ aa_load_ent_free(ent);
+ }
+ mutex_unlock(&ns->lock);
+
+out:
+ aa_put_namespace(ns);
+
+ if (error)
+ return error;
+ return size;
+
+fail_lock:
+ mutex_unlock(&ns->lock);
+fail:
+ error = audit_policy(op, GFP_KERNEL, name, info, error);
+
+ list_for_each_entry_safe(ent, tmp, &lh, list) {
+ list_del_init(&ent->list);
+ aa_load_ent_free(ent);
+ }
+
+ goto out;
+}
+
+/**
+ * aa_remove_profiles - remove profile(s) from the system
+ * @fqname: name of the profile or namespace to remove (NOT NULL)
+ * @size: size of the name
+ *
+ * Remove a profile or sub-namespace from the current namespace, so that
+ * it can no longer be found, and mark it as replaced by unconfined
+ *
+ * NOTE: removing confinement does not restore rlimits to preconfinement values
+ *
+ * Returns: size of data consumed else error code on failure
+ */
+ssize_t aa_remove_profiles(char *fqname, size_t size)
+{
+ struct aa_namespace *root, *ns = NULL;
+ struct aa_profile *profile = NULL;
+ const char *name = fqname, *info = NULL;
+ ssize_t error = 0;
+
+ if (*fqname == 0) {
+ info = "no profile specified";
+ error = -ENOENT;
+ goto fail;
+ }
+
+ root = aa_current_profile()->ns;
+
+ if (fqname[0] == ':') {
+ char *ns_name;
+ name = aa_split_fqname(fqname, &ns_name);
+ /* released below */
+ ns = aa_find_namespace(root, ns_name);
+ if (!ns) {
+ info = "namespace does not exist";
+ error = -ENOENT;
+ goto fail;
+ }
+ } else
+ /* released below */
+ ns = aa_get_namespace(root);
+
+ if (!name) {
+ /* remove namespace - can only happen if fqname[0] == ':' */
+ mutex_lock(&ns->parent->lock);
+ __remove_namespace(ns);
+ mutex_unlock(&ns->parent->lock);
+ } else {
+ /* remove profile */
+ mutex_lock(&ns->lock);
+ profile = aa_get_profile(__lookup_profile(&ns->base, name));
+ if (!profile) {
+ error = -ENOENT;
+ info = "profile does not exist";
+ goto fail_ns_lock;
+ }
+ name = profile->base.hname;
+ __remove_profile(profile);
+ mutex_unlock(&ns->lock);
+ }
+
+ /* don't fail removal if audit fails */
+ (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
+ aa_put_namespace(ns);
+ aa_put_profile(profile);
+ return size;
+
+fail_ns_lock:
+ mutex_unlock(&ns->lock);
+ aa_put_namespace(ns);
+
+fail:
+ (void) audit_policy(OP_PROF_RM, GFP_KERNEL, name, info, error);
+ return error;
+}
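For reference, a small sketch of the fqname forms the function above accepts. The names are invented examples, and the ":ns:name" split is assumed to behave as aa_split_fqname() does; this is illustrative only, not part of the patch.

/* Illustrative only: example arguments to aa_remove_profiles() */
#include <stdio.h>

int main(void)
{
	static const char * const examples[] = {
		"/usr/sbin/nscd",		/* profile in the current namespace   */
		":child:",			/* the sub-namespace "child" itself    */
		":child:/usr/bin/man",		/* a profile inside namespace "child" */
	};

	for (unsigned i = 0; i < 3; i++)
		printf("%s\n", examples[i]);
	return 0;
}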
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
new file mode 100644
index 00000000000..a689f10930b
--- /dev/null
+++ b/security/apparmor/policy_unpack.c
@@ -0,0 +1,805 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor functions for unpacking policy loaded from
+ * userspace.
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * AppArmor uses a serialized binary format for loading policy. To find
+ * policy format documentation look in Documentation/security/apparmor.txt
+ * All policy is validated before it is used.
+ */
+
+#include <asm/unaligned.h>
+#include <linux/ctype.h>
+#include <linux/errno.h>
+
+#include "include/apparmor.h"
+#include "include/audit.h"
+#include "include/context.h"
+#include "include/crypto.h"
+#include "include/match.h"
+#include "include/policy.h"
+#include "include/policy_unpack.h"
+
+/*
+ * The AppArmor interface treats data as a type byte followed by the
+ * actual data.  The interface has the notion of a named entry
+ * which has a name (AA_NAME typecode followed by name string) followed by
+ * the entry's typecode and data.  Named types allow for optional
+ * elements and extensions to be added and tested for without breaking
+ * backwards compatibility.
+ */
+
+enum aa_code {
+ AA_U8,
+ AA_U16,
+ AA_U32,
+ AA_U64,
+ AA_NAME, /* same as string except it is items name */
+ AA_STRING,
+ AA_BLOB,
+ AA_STRUCT,
+ AA_STRUCTEND,
+ AA_LIST,
+ AA_LISTEND,
+ AA_ARRAY,
+ AA_ARRAYEND,
+};
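To make the layout described above concrete, here is a minimal userspace sketch. It is not part of the patch; it assumes the numeric codes implied by the enum order above (AA_U32 == 2, AA_NAME == 4) and a little-endian host, matching the le16/le32 handling in the unpack helpers below.

/* Illustrative only: AA_NAME tag + u16 length + NUL-terminated tag string,
 * then AA_U32 + 4-byte value, as unpack_nameX()/unpack_u32() expect. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[32];
	const char *tag = "version";
	uint16_t tlen = strlen(tag) + 1;	/* tag chunk includes its NUL */
	uint32_t val = 5;
	size_t off = 0;

	buf[off++] = 4;				/* AA_NAME (5th enumerator)  */
	memcpy(buf + off, &tlen, 2); off += 2;	/* u16 chunk length (LE host assumed) */
	memcpy(buf + off, tag, tlen); off += tlen;
	buf[off++] = 2;				/* AA_U32 (3rd enumerator)   */
	memcpy(buf + off, &val, 4); off += 4;	/* u32 payload               */

	printf("named u32 \"%s\"=%u encodes to %zu bytes\n",
	       tag, (unsigned)val, off);
	return 0;
}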
+
+/*
+ * aa_ext tracks the read head of the buffer containing the serialized profile. The
+ * data is copied into a kernel buffer in apparmorfs and then handed off to
+ * the unpack routines.
+ */
+struct aa_ext {
+ void *start;
+ void *end;
+ void *pos; /* pointer to current position in the buffer */
+ u32 version;
+};
+
+/* audit callback for unpack fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+ if (sa->aad->iface.target) {
+ struct aa_profile *name = sa->aad->iface.target;
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, name->base.hname);
+ }
+ if (sa->aad->iface.pos)
+ audit_log_format(ab, " offset=%ld", sa->aad->iface.pos);
+}
+
+/**
+ * audit_iface - do audit message for policy unpacking/load/replace/remove
+ * @new: profile if it has been allocated (MAYBE NULL)
+ * @name: name of the profile being manipulated (MAYBE NULL)
+ * @info: any extra info about the failure (MAYBE NULL)
+ * @e: buffer position info
+ * @error: error code
+ *
+ * Returns: %0 or error
+ */
+static int audit_iface(struct aa_profile *new, const char *name,
+ const char *info, struct aa_ext *e, int error)
+{
+ struct aa_profile *profile = __aa_current_profile();
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ if (e)
+ aad.iface.pos = e->pos - e->start;
+ aad.iface.target = new;
+ aad.name = name;
+ aad.info = info;
+ aad.error = error;
+
+ return aa_audit(AUDIT_APPARMOR_STATUS, profile, GFP_KERNEL, &sa,
+ audit_cb);
+}
+
+/* test if read will be in packed data bounds */
+static bool inbounds(struct aa_ext *e, size_t size)
+{
+ return (size <= e->end - e->pos);
+}
+
+/**
+ * unpack_u16_chunk - test and do bounds checking for a u16 size based chunk
+ * @e: serialized data read head (NOT NULL)
+ * @chunk: start address for chunk of data (NOT NULL)
+ *
+ * Returns: the size of chunk found with the read head at the end of the chunk.
+ */
+static size_t unpack_u16_chunk(struct aa_ext *e, char **chunk)
+{
+ size_t size = 0;
+
+ if (!inbounds(e, sizeof(u16)))
+ return 0;
+ size = le16_to_cpu(get_unaligned((u16 *) e->pos));
+ e->pos += sizeof(u16);
+ if (!inbounds(e, size))
+ return 0;
+ *chunk = e->pos;
+ e->pos += size;
+ return size;
+}
+
+/* unpack control byte */
+static bool unpack_X(struct aa_ext *e, enum aa_code code)
+{
+ if (!inbounds(e, 1))
+ return 0;
+ if (*(u8 *) e->pos != code)
+ return 0;
+ e->pos++;
+ return 1;
+}
+
+/**
+ * unpack_nameX - check if the next element is of type X with a name of @name
+ * @e: serialized data extent information (NOT NULL)
+ * @code: type code
+ * @name: name to match to the serialized element. (MAYBE NULL)
+ *
+ * check that the next serialized data element is of type X and has a tag
+ * name @name. If @name is specified then there must be a matching
+ * name element in the stream. If @name is NULL any name element will be
+ * skipped and only the typecode will be tested.
+ *
+ * Returns: 1 on success (both type code and name tests match) and the read
+ *          head is advanced past the headers
+ *          0 if either match fails and the read head does not move
+ */
+static bool unpack_nameX(struct aa_ext *e, enum aa_code code, const char *name)
+{
+ /*
+ * May need to reset pos if name or type doesn't match
+ */
+ void *pos = e->pos;
+ /*
+ * Check for presence of a tagname, and if present name size
+ * AA_NAME tag value is a u16.
+ */
+ if (unpack_X(e, AA_NAME)) {
+ char *tag = NULL;
+ size_t size = unpack_u16_chunk(e, &tag);
+ /* if a name is specified it must match. otherwise skip tag */
+ if (name && (!size || strcmp(name, tag)))
+ goto fail;
+ } else if (name) {
+ /* if a name is specified and there is no name tag fail */
+ goto fail;
+ }
+
+ /* now check if type code matches */
+ if (unpack_X(e, code))
+ return 1;
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+static bool unpack_u32(struct aa_ext *e, u32 *data, const char *name)
+{
+ if (unpack_nameX(e, AA_U32, name)) {
+ if (!inbounds(e, sizeof(u32)))
+ return 0;
+ if (data)
+ *data = le32_to_cpu(get_unaligned((u32 *) e->pos));
+ e->pos += sizeof(u32);
+ return 1;
+ }
+ return 0;
+}
+
+static bool unpack_u64(struct aa_ext *e, u64 *data, const char *name)
+{
+ if (unpack_nameX(e, AA_U64, name)) {
+ if (!inbounds(e, sizeof(u64)))
+ return 0;
+ if (data)
+ *data = le64_to_cpu(get_unaligned((u64 *) e->pos));
+ e->pos += sizeof(u64);
+ return 1;
+ }
+ return 0;
+}
+
+static size_t unpack_array(struct aa_ext *e, const char *name)
+{
+ if (unpack_nameX(e, AA_ARRAY, name)) {
+ int size;
+ if (!inbounds(e, sizeof(u16)))
+ return 0;
+ size = (int)le16_to_cpu(get_unaligned((u16 *) e->pos));
+ e->pos += sizeof(u16);
+ return size;
+ }
+ return 0;
+}
+
+static size_t unpack_blob(struct aa_ext *e, char **blob, const char *name)
+{
+ if (unpack_nameX(e, AA_BLOB, name)) {
+ u32 size;
+ if (!inbounds(e, sizeof(u32)))
+ return 0;
+ size = le32_to_cpu(get_unaligned((u32 *) e->pos));
+ e->pos += sizeof(u32);
+ if (inbounds(e, (size_t) size)) {
+ *blob = e->pos;
+ e->pos += size;
+ return size;
+ }
+ }
+ return 0;
+}
+
+static int unpack_str(struct aa_ext *e, const char **string, const char *name)
+{
+ char *src_str;
+ size_t size = 0;
+ void *pos = e->pos;
+ *string = NULL;
+ if (unpack_nameX(e, AA_STRING, name)) {
+ size = unpack_u16_chunk(e, &src_str);
+ if (size) {
+ /* strings are null terminated, length is size - 1 */
+ if (src_str[size - 1] != 0)
+ goto fail;
+ *string = src_str;
+ }
+ }
+ return size;
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
+{
+ const char *tmp;
+ void *pos = e->pos;
+ int res = unpack_str(e, &tmp, name);
+ *string = NULL;
+
+ if (!res)
+ return 0;
+
+ *string = kmemdup(tmp, res, GFP_KERNEL);
+ if (!*string) {
+ e->pos = pos;
+ return 0;
+ }
+
+ return res;
+}
+
+#define DFA_VALID_PERM_MASK 0xffffffff
+#define DFA_VALID_PERM2_MASK 0xffffffff
+
+/**
+ * verify_accept - verify the accept tables of a dfa
+ * @dfa: dfa to verify accept tables of (NOT NULL)
+ * @flags: flags governing dfa
+ *
+ * Returns: 1 if valid accept tables else 0 if error
+ */
+static bool verify_accept(struct aa_dfa *dfa, int flags)
+{
+ int i;
+
+ /* verify accept permissions */
+ for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
+ int mode = ACCEPT_TABLE(dfa)[i];
+
+ if (mode & ~DFA_VALID_PERM_MASK)
+ return 0;
+
+ if (ACCEPT_TABLE2(dfa)[i] & ~DFA_VALID_PERM2_MASK)
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * unpack_dfa - unpack a file rule dfa
+ * @e: serialized data extent information (NOT NULL)
+ *
+ * returns dfa or ERR_PTR or NULL if no dfa
+ */
+static struct aa_dfa *unpack_dfa(struct aa_ext *e)
+{
+ char *blob = NULL;
+ size_t size;
+ struct aa_dfa *dfa = NULL;
+
+ size = unpack_blob(e, &blob, "aadfa");
+ if (size) {
+ /*
+ * The dfa is aligned within the blob to 8 bytes
+ * from the beginning of the stream; compute the
+ * alignment adjustment needed by the dfa unpack code.
+ */
+ size_t sz = blob - (char *) e->start -
+ ((e->pos - e->start) & 7);
+ size_t pad = ALIGN(sz, 8) - sz;
+ int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
+ TO_ACCEPT2_FLAG(YYTD_DATA32);
+
+
+ if (aa_g_paranoid_load)
+ flags |= DFA_FLAG_VERIFY_STATES;
+
+ dfa = aa_dfa_unpack(blob + pad, size - pad, flags);
+
+ if (IS_ERR(dfa))
+ return dfa;
+
+ if (!verify_accept(dfa, flags))
+ goto fail;
+ }
+
+ return dfa;
+
+fail:
+ aa_put_dfa(dfa);
+ return ERR_PTR(-EPROTO);
+}
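The 8-byte padding arithmetic above can be checked in isolation. Below is a minimal userspace sketch with an invented offset and ALIGN() open-coded; it is illustrative only.

/* Illustrative only: bytes to skip so the dfa starts 8-byte aligned. */
#include <stdio.h>

#define ALIGN8(x) (((x) + 7UL) & ~7UL)

int main(void)
{
	unsigned long sz = 38;			/* adjusted blob offset, as computed in unpack_dfa() */
	unsigned long pad = ALIGN8(sz) - sz;	/* padding consumed before the dfa */

	printf("sz=%lu pad=%lu -> dfa begins at %lu\n", sz, pad, sz + pad);
	return 0;
}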
+
+/**
+ * unpack_trans_table - unpack a profile transition table
+ * @e: serialized data extent information (NOT NULL)
+ * @profile: profile to add the accept table to (NOT NULL)
+ *
+ * Returns: 1 if table successfully unpacked
+ */
+static bool unpack_trans_table(struct aa_ext *e, struct aa_profile *profile)
+{
+ void *pos = e->pos;
+
+ /* exec table is optional */
+ if (unpack_nameX(e, AA_STRUCT, "xtable")) {
+ int i, size;
+
+ size = unpack_array(e, NULL);
+ /* currently 4 exec bits and entries 0-3 are reserved iupcx */
+ if (size > 16 - 4)
+ goto fail;
+ profile->file.trans.table = kzalloc(sizeof(char *) * size,
+ GFP_KERNEL);
+ if (!profile->file.trans.table)
+ goto fail;
+
+ profile->file.trans.size = size;
+ for (i = 0; i < size; i++) {
+ char *str;
+ int c, j, size2 = unpack_strdup(e, &str, NULL);
+ /* unpack_strdup verifies that the last character is
+ * a null termination byte.
+ */
+ if (!size2)
+ goto fail;
+ profile->file.trans.table[i] = str;
+ /* verify that name doesn't start with space */
+ if (isspace(*str))
+ goto fail;
+
+ /* count # of internal \0 */
+ for (c = j = 0; j < size2 - 2; j++) {
+ if (!str[j])
+ c++;
+ }
+ if (*str == ':') {
+ /* beginning with : requires an embedded \0;
+ * verify that exactly 1 internal \0 exists,
+ * the trailing \0 was already verified by unpack_strdup
+ */
+ if (c != 1)
+ goto fail;
+ /* first character after : must be valid */
+ if (!str[1])
+ goto fail;
+ } else if (c)
+ /* fail - all other cases with embedded \0 */
+ goto fail;
+ }
+ if (!unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+ return 1;
+
+fail:
+ aa_free_domain_entries(&profile->file.trans);
+ e->pos = pos;
+ return 0;
+}
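A userspace sketch of the embedded-\0 rule enforced by the loop above. The entries are made-up examples; the loop bounds mirror the kernel code, with sizes including the trailing \0 as unpack_strdup() reports them.

/* Illustrative only: ":ns\0profile" needs exactly one internal NUL,
 * a plain target name must have none. */
#include <stdio.h>

static int internal_nuls(const char *s, int size)
{
	int c = 0, j;

	for (j = 0; j < size - 2; j++)		/* same bounds as the kernel loop */
		if (!s[j])
			c++;
	return c;
}

int main(void)
{
	const char ns_entry[] = ":ns\0profile";		/* valid: one internal NUL  */
	const char plain[] = "/bin/foo";		/* valid: no internal NUL   */

	printf("%d %d\n", internal_nuls(ns_entry, sizeof(ns_entry)),
	       internal_nuls(plain, sizeof(plain)));
	return 0;
}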
+
+static bool unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
+{
+ void *pos = e->pos;
+
+ /* rlimits are optional */
+ if (unpack_nameX(e, AA_STRUCT, "rlimits")) {
+ int i, size;
+ u32 tmp = 0;
+ if (!unpack_u32(e, &tmp, NULL))
+ goto fail;
+ profile->rlimits.mask = tmp;
+
+ size = unpack_array(e, NULL);
+ if (size > RLIM_NLIMITS)
+ goto fail;
+ for (i = 0; i < size; i++) {
+ u64 tmp2 = 0;
+ int a = aa_map_resource(i);
+ if (!unpack_u64(e, &tmp2, NULL))
+ goto fail;
+ profile->rlimits.limits[a].rlim_max = tmp2;
+ }
+ if (!unpack_nameX(e, AA_ARRAYEND, NULL))
+ goto fail;
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+ return 1;
+
+fail:
+ e->pos = pos;
+ return 0;
+}
+
+/**
+ * unpack_profile - unpack a serialized profile
+ * @e: serialized data extent information (NOT NULL)
+ *
+ * NOTE: unpack profile sets audit struct if there is a failure
+ */
+static struct aa_profile *unpack_profile(struct aa_ext *e)
+{
+ struct aa_profile *profile = NULL;
+ const char *name = NULL;
+ int i, error = -EPROTO;
+ kernel_cap_t tmpcap;
+ u32 tmp;
+
+ /* check that we have the right struct being passed */
+ if (!unpack_nameX(e, AA_STRUCT, "profile"))
+ goto fail;
+ if (!unpack_str(e, &name, NULL))
+ goto fail;
+
+ profile = aa_alloc_profile(name);
+ if (!profile)
+ return ERR_PTR(-ENOMEM);
+
+ /* profile renaming is optional */
+ (void) unpack_str(e, &profile->rename, "rename");
+
+ /* attachment string is optional */
+ (void) unpack_str(e, &profile->attach, "attach");
+
+ /* xmatch is optional and may be NULL */
+ profile->xmatch = unpack_dfa(e);
+ if (IS_ERR(profile->xmatch)) {
+ error = PTR_ERR(profile->xmatch);
+ profile->xmatch = NULL;
+ goto fail;
+ }
+ /* xmatch_len is not optional if xmatch is set */
+ if (profile->xmatch) {
+ if (!unpack_u32(e, &tmp, NULL))
+ goto fail;
+ profile->xmatch_len = tmp;
+ }
+
+ /* per profile debug flags (complain, audit) */
+ if (!unpack_nameX(e, AA_STRUCT, "flags"))
+ goto fail;
+ if (!unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp & PACKED_FLAG_HAT)
+ profile->flags |= PFLAG_HAT;
+ if (!unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp == PACKED_MODE_COMPLAIN)
+ profile->mode = APPARMOR_COMPLAIN;
+ else if (tmp == PACKED_MODE_KILL)
+ profile->mode = APPARMOR_KILL;
+ else if (tmp == PACKED_MODE_UNCONFINED)
+ profile->mode = APPARMOR_UNCONFINED;
+ if (!unpack_u32(e, &tmp, NULL))
+ goto fail;
+ if (tmp)
+ profile->audit = AUDIT_ALL;
+
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+
+ /* path_flags is optional */
+ if (unpack_u32(e, &profile->path_flags, "path_flags"))
+ profile->path_flags |= profile->flags & PFLAG_MEDIATE_DELETED;
+ else
+ /* set a default value if path_flags field is not present */
+ profile->path_flags = PFLAG_MEDIATE_DELETED;
+
+ if (!unpack_u32(e, &(profile->caps.allow.cap[0]), NULL))
+ goto fail;
+ if (!unpack_u32(e, &(profile->caps.audit.cap[0]), NULL))
+ goto fail;
+ if (!unpack_u32(e, &(profile->caps.quiet.cap[0]), NULL))
+ goto fail;
+ if (!unpack_u32(e, &tmpcap.cap[0], NULL))
+ goto fail;
+
+ if (unpack_nameX(e, AA_STRUCT, "caps64")) {
+ /* optional upper half of 64 bit caps */
+ if (!unpack_u32(e, &(profile->caps.allow.cap[1]), NULL))
+ goto fail;
+ if (!unpack_u32(e, &(profile->caps.audit.cap[1]), NULL))
+ goto fail;
+ if (!unpack_u32(e, &(profile->caps.quiet.cap[1]), NULL))
+ goto fail;
+ if (!unpack_u32(e, &(tmpcap.cap[1]), NULL))
+ goto fail;
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ if (unpack_nameX(e, AA_STRUCT, "capsx")) {
+ /* optional extended caps mediation mask */
+ if (!unpack_u32(e, &(profile->caps.extended.cap[0]), NULL))
+ goto fail;
+ if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
+ goto fail;
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ if (!unpack_rlimits(e, profile))
+ goto fail;
+
+ if (unpack_nameX(e, AA_STRUCT, "policydb")) {
+ /* generic policy dfa - optional and may be NULL */
+ profile->policy.dfa = unpack_dfa(e);
+ if (IS_ERR(profile->policy.dfa)) {
+ error = PTR_ERR(profile->policy.dfa);
+ profile->policy.dfa = NULL;
+ goto fail;
+ }
+ if (!unpack_u32(e, &profile->policy.start[0], "start"))
+ /* default start state */
+ profile->policy.start[0] = DFA_START;
+ /* setup class index */
+ for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
+ profile->policy.start[i] =
+ aa_dfa_next(profile->policy.dfa,
+ profile->policy.start[0],
+ i);
+ }
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
+ /* get file rules */
+ profile->file.dfa = unpack_dfa(e);
+ if (IS_ERR(profile->file.dfa)) {
+ error = PTR_ERR(profile->file.dfa);
+ profile->file.dfa = NULL;
+ goto fail;
+ }
+
+ if (!unpack_u32(e, &profile->file.start, "dfa_start"))
+ /* default start state */
+ profile->file.start = DFA_START;
+
+ if (!unpack_trans_table(e, profile))
+ goto fail;
+
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+
+ return profile;
+
+fail:
+ if (profile)
+ name = NULL;
+ else if (!name)
+ name = "unknown";
+ audit_iface(profile, name, "failed to unpack profile", e, error);
+ aa_free_profile(profile);
+
+ return ERR_PTR(error);
+}
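For orientation, a summary of the element order unpack_profile() consumes, derived from the code above; optional sections are in parentheses.

/*
 * Illustrative summary of the serialized profile layout, as read above:
 *
 *   "profile" struct: name, (rename), (attach), (xmatch dfa + xmatch_len),
 *   "flags" struct (hat flag, mode, audit), (path_flags),
 *   4 x u32 capability sets, ("caps64"), ("capsx"), ("rlimits"),
 *   ("policydb" dfa + start), file dfa, (dfa_start), ("xtable")
 */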
+
+/**
+ * verify_header - unpack serialized stream header
+ * @e: serialized data read head (NOT NULL)
+ * @required: whether the header is required or optional
+ * @ns: Returns - namespace if one is specified else NULL (NOT NULL)
+ *
+ * Returns: error or 0 if header is good
+ */
+static int verify_header(struct aa_ext *e, int required, const char **ns)
+{
+ int error = -EPROTONOSUPPORT;
+ const char *name = NULL;
+ *ns = NULL;
+
+ /* get the interface version */
+ if (!unpack_u32(e, &e->version, "version")) {
+ if (required) {
+ audit_iface(NULL, NULL, "invalid profile format", e,
+ error);
+ return error;
+ }
+
+ /* check that the interface version is currently supported */
+ if (e->version != 5) {
+ audit_iface(NULL, NULL, "unsupported interface version",
+ e, error);
+ return error;
+ }
+ }
+
+
+ /* read the namespace if present */
+ if (unpack_str(e, &name, "namespace")) {
+ if (*ns && strcmp(*ns, name))
+ audit_iface(NULL, NULL, "invalid ns change", e, error);
+ else if (!*ns)
+ *ns = name;
+ }
+
+ return 0;
+}
+
+static bool verify_xindex(int xindex, int table_size)
+{
+ int index, xtype;
+ xtype = xindex & AA_X_TYPE_MASK;
+ index = xindex & AA_X_INDEX_MASK;
+ if (xtype == AA_X_TABLE && index > table_size)
+ return 0;
+ return 1;
+}
+
+/* verify dfa xindexes are in range of transition tables */
+static bool verify_dfa_xindex(struct aa_dfa *dfa, int table_size)
+{
+ int i;
+ for (i = 0; i < dfa->tables[YYTD_ID_ACCEPT]->td_lolen; i++) {
+ if (!verify_xindex(dfa_user_xindex(dfa, i), table_size))
+ return 0;
+ if (!verify_xindex(dfa_other_xindex(dfa, i), table_size))
+ return 0;
+ }
+ return 1;
+}
+
+/**
+ * verify_profile - Do post unpack analysis to verify profile consistency
+ * @profile: profile to verify (NOT NULL)
+ *
+ * Returns: 0 if passes verification else error
+ */
+static int verify_profile(struct aa_profile *profile)
+{
+ if (aa_g_paranoid_load) {
+ if (profile->file.dfa &&
+ !verify_dfa_xindex(profile->file.dfa,
+ profile->file.trans.size)) {
+ audit_iface(profile, NULL, "Invalid named transition",
+ NULL, -EPROTO);
+ return -EPROTO;
+ }
+ }
+
+ return 0;
+}
+
+void aa_load_ent_free(struct aa_load_ent *ent)
+{
+ if (ent) {
+ aa_put_profile(ent->rename);
+ aa_put_profile(ent->old);
+ aa_put_profile(ent->new);
+ kzfree(ent);
+ }
+}
+
+struct aa_load_ent *aa_load_ent_alloc(void)
+{
+ struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+ if (ent)
+ INIT_LIST_HEAD(&ent->list);
+ return ent;
+}
+
+/**
+ * aa_unpack - unpack packed binary profile(s) data loaded from user space
+ * @udata: user data copied to kmem (NOT NULL)
+ * @size: the size of the user data
+ * @lh: list to place unpacked profiles in an aa_repl_ws
+ * @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
+ *
+ * Unpack user data and return refcounted allocated profile(s) stored in
+ * @lh in order of discovery, with the list chain stored in base.list,
+ * or an error
+ *
+ * Returns: 0 with profile(s) on @lh else error code if unpacking fails
+ */
+int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns)
+{
+ struct aa_load_ent *tmp, *ent;
+ struct aa_profile *profile = NULL;
+ int error;
+ struct aa_ext e = {
+ .start = udata,
+ .end = udata + size,
+ .pos = udata,
+ };
+
+ *ns = NULL;
+ while (e.pos < e.end) {
+ void *start;
+ error = verify_header(&e, e.pos == e.start, ns);
+ if (error)
+ goto fail;
+
+ start = e.pos;
+ profile = unpack_profile(&e);
+ if (IS_ERR(profile)) {
+ error = PTR_ERR(profile);
+ goto fail;
+ }
+
+ error = verify_profile(profile);
+ if (error)
+ goto fail_profile;
+
+ error = aa_calc_profile_hash(profile, e.version, start,
+ e.pos - start);
+ if (error)
+ goto fail_profile;
+
+ ent = aa_load_ent_alloc();
+ if (!ent) {
+ error = -ENOMEM;
+ goto fail_profile;
+ }
+
+ ent->new = profile;
+ list_add_tail(&ent->list, lh);
+ }
+
+ return 0;
+
+fail_profile:
+ aa_put_profile(profile);
+
+fail:
+ list_for_each_entry_safe(ent, tmp, lh, list) {
+ list_del_init(&ent->list);
+ aa_load_ent_free(ent);
+ }
+
+ return error;
+}
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
new file mode 100644
index 00000000000..b125acc9aa2
--- /dev/null
+++ b/security/apparmor/procattr.c
@@ -0,0 +1,165 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor /proc/<pid>/attr/ interface functions
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include "include/apparmor.h"
+#include "include/context.h"
+#include "include/policy.h"
+#include "include/domain.h"
+#include "include/procattr.h"
+
+
+/**
+ * aa_getprocattr - Return the profile information for @profile
+ * @profile: the profile to print information about (NOT NULL)
+ * @string: Returns - string containing the profile info (NOT NULL)
+ *
+ * Creates a string containing the namespace_name://profile_name for
+ * @profile.
+ *
+ * Requires: profile != NULL
+ *
+ * Returns: size of string placed in @string else error code on failure
+ */
+int aa_getprocattr(struct aa_profile *profile, char **string)
+{
+ char *str;
+ int len = 0, mode_len = 0, ns_len = 0, name_len;
+ const char *mode_str = aa_profile_mode_names[profile->mode];
+ const char *ns_name = NULL;
+ struct aa_namespace *ns = profile->ns;
+ struct aa_namespace *current_ns = __aa_current_profile()->ns;
+ char *s;
+
+ if (!aa_ns_visible(current_ns, ns))
+ return -EACCES;
+
+ ns_name = aa_ns_name(current_ns, ns);
+ ns_len = strlen(ns_name);
+
+ /* if the visible ns_name length is > 0, increase size for the ':' and '://' separators */
+ if (ns_len)
+ ns_len += 4;
+
+ /* unconfined profiles don't have a mode string appended */
+ if (!unconfined(profile))
+ mode_len = strlen(mode_str) + 3; /* + 3 for _() */
+
+ name_len = strlen(profile->base.hname);
+ len = mode_len + ns_len + name_len + 1; /* + 1 for \n */
+ s = str = kmalloc(len + 1, GFP_KERNEL); /* + 1 \0 */
+ if (!str)
+ return -ENOMEM;
+
+ if (ns_len) {
+ /* skip over prefix current_ns->base.hname and separating // */
+ sprintf(s, ":%s://", ns_name);
+ s += ns_len;
+ }
+ if (unconfined(profile))
+ /* mode string not being appended */
+ sprintf(s, "%s\n", profile->base.hname);
+ else
+ sprintf(s, "%s (%s)\n", profile->base.hname, mode_str);
+ *string = str;
+
+ /* NOTE: len does not include the trailing \0, which is not saved as part of the file */
+ return len;
+}
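A sketch of the strings the function above builds, i.e. what a read of /proc/<pid>/attr/current would return. The profile name, namespace name, and the "enforce" mode string are invented examples; this is illustrative only.

/* Illustrative only: possible aa_getprocattr() output formats. */
#include <stdio.h>

int main(void)
{
	/* profile in the caller's own namespace, enforcing mode */
	printf("%s (%s)\n", "/usr/sbin/nscd", "enforce");
	/* profile in a visible child namespace named "child" */
	printf(":%s://%s (%s)\n", "child", "/usr/sbin/nscd", "enforce");
	/* unconfined: no mode string is appended */
	printf("%s\n", "unconfined");
	return 0;
}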
+
+/**
+ * split_token_from_name - separate a string of form <token>^<name>
+ * @op: operation being checked
+ * @args: string to parse (NOT NULL)
+ * @token: stores returned parsed token value (NOT NULL)
+ *
+ * Returns: start position of name after token, NULL if no name follows, else ERR_PTR on failure
+ */
+static char *split_token_from_name(int op, char *args, u64 * token)
+{
+ char *name;
+
+ *token = simple_strtoull(args, &name, 16);
+ if ((name == args) || *name != '^') {
+ AA_ERROR("%s: Invalid input '%s'", op_table[op], args);
+ return ERR_PTR(-EINVAL);
+ }
+
+ name++; /* skip ^ */
+ if (!*name)
+ name = NULL;
+ return name;
+}
+
+/**
+ * aa_setprocattr_changehat - handle procattr interface to change_hat
+ * @args: args received from writing to /proc/<pid>/attr/current (NOT NULL)
+ * @size: size of the args
+ * @test: true if this is a test of change_hat permissions
+ *
+ * Returns: %0 or error code if change_hat fails
+ */
+int aa_setprocattr_changehat(char *args, size_t size, int test)
+{
+ char *hat;
+ u64 token;
+ const char *hats[16]; /* current hard limit on # of names */
+ int count = 0;
+
+ hat = split_token_from_name(OP_CHANGE_HAT, args, &token);
+ if (IS_ERR(hat))
+ return PTR_ERR(hat);
+
+ if (!hat && !token) {
+ AA_ERROR("change_hat: Invalid input, NULL hat and NULL magic");
+ return -EINVAL;
+ }
+
+ if (hat) {
+ /* set up hat name vector, args guaranteed null terminated
+ * at args[size] by setprocattr.
+ *
+ * If there are multiple hat names in the buffer each is
+ * separated by a \0, i.e. userspace writes them pre-tokenized
+ */
+ char *end = args + size;
+ for (count = 0; (hat < end) && count < 16; ++count) {
+ char *next = hat + strlen(hat) + 1;
+ hats[count] = hat;
+ hat = next;
+ }
+ }
+
+ AA_DEBUG("%s: Magic 0x%llx Hat '%s'\n",
+ __func__, token, hat ? hat : NULL);
+
+ return aa_change_hat(hats, count, token, test);
+}
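A userspace sketch of the argument layout parsed above: a hex token, '^', then one or more \0-separated hat names. The token and hat names are invented; this only illustrates the buffer walk, not the kernel interface itself.

/* Illustrative only: walking a change_hat argument buffer. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* as handed to aa_setprocattr_changehat(): token^hat1\0hat2 */
	const char args[] = "deadbeef^hat_one\0hat_two";
	size_t size = sizeof(args) - 1;		/* args[size] is the terminating NUL */
	const char *hat = strchr(args, '^') + 1;
	const char *end = args + size;
	int count = 0;

	while (hat < end && count < 16) {	/* same 16-name cap as above */
		printf("hat[%d] = %s\n", count++, hat);
		hat += strlen(hat) + 1;
	}
	return 0;
}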
+
+/**
+ * aa_setprocattr_changeprofile - handle procattr interface to changeprofile
+ * @fqname: args received from writing to /proc/<pid>/attr/current (NOT NULL)
+ * @onexec: true if change_profile should be delayed until exec
+ * @test: true if this is a test of change_profile permissions
+ *
+ * Returns: %0 or error code if change_profile fails
+ */
+int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test)
+{
+ char *name, *ns_name;
+
+ name = aa_split_fqname(fqname, &ns_name);
+ return aa_change_profile(ns_name, name, onexec, test);
+}
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
new file mode 100644
index 00000000000..748bf0ca6c9
--- /dev/null
+++ b/security/apparmor/resource.c
@@ -0,0 +1,154 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor resource mediation and attachment
+ *
+ * Copyright (C) 1998-2008 Novell/SUSE
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#include <linux/audit.h>
+
+#include "include/audit.h"
+#include "include/context.h"
+#include "include/resource.h"
+#include "include/policy.h"
+
+/*
+ * Table of rlimit names: we generate it from resource.h.
+ */
+#include "rlim_names.h"
+
+struct aa_fs_entry aa_fs_entry_rlimit[] = {
+ AA_FS_FILE_STRING("mask", AA_FS_RLIMIT_MASK),
+ { }
+};
+
+/* audit callback for resource specific fields */
+static void audit_cb(struct audit_buffer *ab, void *va)
+{
+ struct common_audit_data *sa = va;
+
+ audit_log_format(ab, " rlimit=%s value=%lu",
+ rlim_names[sa->aad->rlim.rlim], sa->aad->rlim.max);
+}
+
+/**
+ * audit_resource - audit setting resource limit
+ * @profile: profile being enforced (NOT NULL)
+ * @resource: the rlimit being audited
+ * @value: value being set
+ * @error: error value
+ *
+ * Returns: 0 or sa->error else other error code on failure
+ */
+static int audit_resource(struct aa_profile *profile, unsigned int resource,
+ unsigned long value, int error)
+{
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
+
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.op = OP_SETRLIMIT;
+ aad.rlim.rlim = resource;
+ aad.rlim.max = value;
+ aad.error = error;
+ return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_KERNEL, &sa,
+ audit_cb);
+}
+
+/**
+ * aa_map_resource - map compiled policy resource to internal #
+ * @resource: flattened policy resource number
+ *
+ * Returns: resource # for the current architecture.
+ *
+ * rlimit resource can vary based on architecture, map the compiled policy
+ * resource # to the internal representation for the architecture.
+ */
+int aa_map_resource(int resource)
+{
+ return rlim_map[resource];
+}
+
+/**
+ * aa_task_setrlimit - test permission to set an rlimit
+ * @profile: profile confining the task (NOT NULL)
+ * @task: task the resource is being set on
+ * @resource: the resource being set
+ * @new_rlim: the new resource limit (NOT NULL)
+ *
+ * Control raising the process's hard limit.
+ *
+ * Returns: 0 or error code if setting resource failed
+ */
+int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
+ unsigned int resource, struct rlimit *new_rlim)
+{
+ struct aa_profile *task_profile;
+ int error = 0;
+
+ rcu_read_lock();
+ task_profile = aa_get_profile(aa_cred_profile(__task_cred(task)));
+ rcu_read_unlock();
+
+ /* TODO: extend resource control to handle other (non current)
+ * profiles. AppArmor rules currently have the implicit assumption
+ * that the task is setting the resource of a task confined with
+ * the same profile.
+ */
+ if (profile != task_profile ||
+ (profile->rlimits.mask & (1 << resource) &&
+ new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
+ error = -EACCES;
+
+ aa_put_profile(task_profile);
+
+ return audit_resource(profile, resource, new_rlim->rlim_max, error);
+}
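The permission check above reduces to a small predicate; a userspace sketch with invented numbers follows (the resource number 7 simply stands in for an rlimit the profile controls).

/* Illustrative only: the deny condition used by aa_task_setrlimit(). */
#include <stdio.h>

int main(void)
{
	unsigned int resource = 7;			/* some controlled rlimit   */
	unsigned int mask = 1u << 7;			/* profile controls it      */
	unsigned long profile_max = 1024, requested = 4096;
	int denied = (mask & (1u << resource)) && requested > profile_max;

	printf("%s\n", denied ? "-EACCES" : "allowed");
	return 0;
}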
+
+/**
+ * __aa_transition_rlimits - apply new profile rlimits
+ * @old: old profile on task (NOT NULL)
+ * @new: new profile with rlimits to apply (NOT NULL)
+ */
+void __aa_transition_rlimits(struct aa_profile *old, struct aa_profile *new)
+{
+ unsigned int mask = 0;
+ struct rlimit *rlim, *initrlim;
+ int i;
+
+ /* for any rlimits the profile controlled, reset the soft limit
+ * to the lesser of the task's hard limit and the init task's soft limit
+ */
+ if (old->rlimits.mask) {
+ for (i = 0, mask = 1; i < RLIM_NLIMITS; i++, mask <<= 1) {
+ if (old->rlimits.mask & mask) {
+ rlim = current->signal->rlim + i;
+ initrlim = init_task.signal->rlim + i;
+ rlim->rlim_cur = min(rlim->rlim_max,
+ initrlim->rlim_cur);
+ }
+ }
+ }
+
+ /* set any new hard limits as dictated by the new profile */
+ if (!new->rlimits.mask)
+ return;
+ for (i = 0, mask = 1; i < RLIM_NLIMITS; i++, mask <<= 1) {
+ if (!(new->rlimits.mask & mask))
+ continue;
+
+ rlim = current->signal->rlim + i;
+ rlim->rlim_max = min(rlim->rlim_max,
+ new->rlimits.limits[i].rlim_max);
+ /* soft limit should not exceed hard limit */
+ rlim->rlim_cur = min(rlim->rlim_cur, rlim->rlim_max);
+ }
+}
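A userspace sketch of the mask walk used above, with invented limits. Bit i of the mask means the profile controls rlimit i; only the hard-limit clamping step of the transition is modelled here.

/* Illustrative only: clamp hard limits to the new profile's rlim_max. */
#include <stdio.h>

int main(void)
{
	unsigned long hard[3] = { 4096, 1024, 800 };	/* task hard limits        */
	unsigned long prof[3] = { 2048, 0, 0 };		/* new profile rlim_max    */
	unsigned int new_mask = 1u << 0;		/* profile controls #0 only */
	unsigned int i, mask;

	for (i = 0, mask = 1; i < 3; i++, mask <<= 1) {
		if (!(new_mask & mask))
			continue;
		if (prof[i] < hard[i])
			hard[i] = prof[i];		/* min(hard, profile max)  */
	}
	printf("%lu %lu %lu\n", hard[0], hard[1], hard[2]);
	return 0;
}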
diff --git a/security/apparmor/sid.c b/security/apparmor/sid.c
new file mode 100644
index 00000000000..f0b34f76ebe
--- /dev/null
+++ b/security/apparmor/sid.c
@@ -0,0 +1,55 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor security identifier (sid) manipulation fns
+ *
+ * Copyright 2009-2010 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ *
+ * AppArmor allocates a unique sid for every profile loaded. If a profile
+ * is replaced it receives the sid of the profile it is replacing.
+ *
+ * The sid value of 0 is invalid.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+#include "include/sid.h"
+
+/* global counter from which sids are allocated */
+static u32 global_sid;
+static DEFINE_SPINLOCK(sid_lock);
+
+/* TODO FIXME: add sid to profile mapping, and sid recycling */
+
+/**
+ * aa_alloc_sid - allocate a new sid for a profile
+ */
+u32 aa_alloc_sid(void)
+{
+ u32 sid;
+
+ /*
+ * TODO FIXME: sid recycling - part of profile mapping table
+ */
+ spin_lock(&sid_lock);
+ sid = (++global_sid);
+ spin_unlock(&sid_lock);
+ return sid;
+}
+
+/**
+ * aa_free_sid - free a sid
+ * @sid: sid to free
+ */
+void aa_free_sid(u32 sid)
+{
+ ; /* NOP ATM */
+}
diff --git a/security/capability.c b/security/capability.c
index 5c700e1a4fd..e76373de312 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -12,12 +12,7 @@
#include <linux/security.h>
-static int cap_acct(struct file *file)
-{
- return 0;
-}
-
-static int cap_sysctl(ctl_table *table, int op)
+static int cap_syslog(int type)
{
return 0;
}
@@ -32,7 +27,7 @@ static int cap_quota_on(struct dentry *dentry)
return 0;
}
-static int cap_bprm_check_security (struct linux_binprm *bprm)
+static int cap_bprm_check_security(struct linux_binprm *bprm)
{
return 0;
}
@@ -59,28 +54,28 @@ static int cap_sb_copy_data(char *orig, char *copy)
return 0;
}
-static int cap_sb_kern_mount(struct super_block *sb, int flags, void *data)
+static int cap_sb_remount(struct super_block *sb, void *data)
{
return 0;
}
-static int cap_sb_show_options(struct seq_file *m, struct super_block *sb)
+static int cap_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
return 0;
}
-static int cap_sb_statfs(struct dentry *dentry)
+static int cap_sb_show_options(struct seq_file *m, struct super_block *sb)
{
return 0;
}
-static int cap_sb_mount(char *dev_name, struct path *path, char *type,
- unsigned long flags, void *data)
+static int cap_sb_statfs(struct dentry *dentry)
{
return 0;
}
-static int cap_sb_check_sb(struct vfsmount *mnt, struct path *path)
+static int cap_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
return 0;
}
@@ -90,43 +85,26 @@ static int cap_sb_umount(struct vfsmount *mnt, int flags)
return 0;
}
-static void cap_sb_umount_close(struct vfsmount *mnt)
-{
-}
-
-static void cap_sb_umount_busy(struct vfsmount *mnt)
-{
-}
-
-static void cap_sb_post_remount(struct vfsmount *mnt, unsigned long flags,
- void *data)
-{
-}
-
-static void cap_sb_post_addmount(struct vfsmount *mnt, struct path *path)
-{
-}
-
static int cap_sb_pivotroot(struct path *old_path, struct path *new_path)
{
return 0;
}
-static void cap_sb_post_pivotroot(struct path *old_path, struct path *new_path)
-{
-}
-
static int cap_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts)
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+
{
if (unlikely(opts->num_mnt_opts))
return -EOPNOTSUPP;
return 0;
}
-static void cap_sb_clone_mnt_opts(const struct super_block *oldsb,
+static int cap_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{
+ return 0;
}
static int cap_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
@@ -134,6 +112,13 @@ static int cap_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
return 0;
}
+static int cap_dentry_init_security(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+
static int cap_inode_alloc_security(struct inode *inode)
{
return 0;
@@ -144,13 +129,14 @@ static void cap_inode_free_security(struct inode *inode)
}
static int cap_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value, size_t *len)
+ const struct qstr *qstr, const char **name,
+ void **value, size_t *len)
{
return -EOPNOTSUPP;
}
static int cap_inode_create(struct inode *inode, struct dentry *dentry,
- int mask)
+ umode_t mask)
{
return 0;
}
@@ -173,7 +159,7 @@ static int cap_inode_symlink(struct inode *inode, struct dentry *dentry,
}
static int cap_inode_mkdir(struct inode *inode, struct dentry *dentry,
- int mask)
+ umode_t mask)
{
return 0;
}
@@ -184,7 +170,7 @@ static int cap_inode_rmdir(struct inode *inode, struct dentry *dentry)
}
static int cap_inode_mknod(struct inode *inode, struct dentry *dentry,
- int mode, dev_t dev)
+ umode_t mode, dev_t dev)
{
return 0;
}
@@ -221,10 +207,6 @@ static int cap_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
return 0;
}
-static void cap_inode_delete(struct inode *ino)
-{
-}
-
static void cap_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
@@ -264,13 +246,13 @@ static void cap_inode_getsecid(const struct inode *inode, u32 *secid)
}
#ifdef CONFIG_SECURITY_PATH
-static int cap_path_mknod(struct path *dir, struct dentry *dentry, int mode,
+static int cap_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev)
{
return 0;
}
-static int cap_path_mkdir(struct path *dir, struct dentry *dentry, int mode)
+static int cap_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
{
return 0;
}
@@ -303,19 +285,17 @@ static int cap_path_rename(struct path *old_path, struct dentry *old_dentry,
return 0;
}
-static int cap_path_truncate(struct path *path, loff_t length,
- unsigned int time_attrs)
+static int cap_path_truncate(struct path *path)
{
return 0;
}
-static int cap_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode)
+static int cap_path_chmod(struct path *path, umode_t mode)
{
return 0;
}
-static int cap_path_chown(struct path *path, uid_t uid, gid_t gid)
+static int cap_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
return 0;
}
@@ -379,7 +359,7 @@ static int cap_file_receive(struct file *file)
return 0;
}
-static int cap_dentry_open(struct file *file, const struct cred *cred)
+static int cap_file_open(struct file *file, const struct cred *cred)
{
return 0;
}
@@ -389,6 +369,10 @@ static int cap_task_create(unsigned long clone_flags)
return 0;
}
+static void cap_task_free(struct task_struct *task)
+{
+}
+
static int cap_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
return 0;
@@ -403,10 +387,6 @@ static int cap_cred_prepare(struct cred *new, const struct cred *old, gfp_t gfp)
return 0;
}
-static void cap_cred_commit(struct cred *new, const struct cred *old)
-{
-}
-
static void cap_cred_transfer(struct cred *new, const struct cred *old)
{
}
@@ -426,12 +406,7 @@ static int cap_kernel_module_request(char *kmod_name)
return 0;
}
-static int cap_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
-{
- return 0;
-}
-
-static int cap_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags)
+static int cap_kernel_module_from_file(struct file *file)
{
return 0;
}
@@ -456,17 +431,13 @@ static void cap_task_getsecid(struct task_struct *p, u32 *secid)
*secid = 0;
}
-static int cap_task_setgroups(struct group_info *group_info)
-{
- return 0;
-}
-
static int cap_task_getioprio(struct task_struct *p)
{
return 0;
}
-static int cap_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+static int cap_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
{
return 0;
}
@@ -597,7 +568,7 @@ static int cap_sem_semop(struct sem_array *sma, struct sembuf *sops,
}
#ifdef CONFIG_SECURITY_NETWORK
-static int cap_unix_stream_connect(struct socket *sock, struct socket *other,
+static int cap_unix_stream_connect(struct sock *sock, struct sock *other,
struct sock *newsk)
{
return 0;
@@ -731,31 +702,63 @@ static void cap_inet_conn_established(struct sock *sk, struct sk_buff *skb)
{
}
+static int cap_secmark_relabel_packet(u32 secid)
+{
+ return 0;
+}
+
+static void cap_secmark_refcount_inc(void)
+{
+}
+static void cap_secmark_refcount_dec(void)
+{
+}
static void cap_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
{
}
+static int cap_tun_dev_alloc_security(void **security)
+{
+ return 0;
+}
+
+static void cap_tun_dev_free_security(void *security)
+{
+}
+
static int cap_tun_dev_create(void)
{
return 0;
}
-static void cap_tun_dev_post_create(struct sock *sk)
+static int cap_tun_dev_attach_queue(void *security)
{
+ return 0;
+}
+
+static int cap_tun_dev_attach(struct sock *sk, void *security)
+{
+ return 0;
}
-static int cap_tun_dev_attach(struct sock *sk)
+static int cap_tun_dev_open(void *security)
{
return 0;
}
+
+static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+}
+
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx)
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
return 0;
}
@@ -775,9 +778,15 @@ static int cap_xfrm_policy_delete_security(struct xfrm_sec_ctx *ctx)
return 0;
}
-static int cap_xfrm_state_alloc_security(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx,
- u32 secid)
+static int cap_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+{
+ return 0;
+}
+
+static int cap_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid)
{
return 0;
}
@@ -798,7 +807,7 @@ static int cap_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 sk_sid, u8 dir)
static int cap_xfrm_state_pol_flow_match(struct xfrm_state *x,
struct xfrm_policy *xp,
- struct flowi *fl)
+ const struct flowi *fl)
{
return 1;
}
@@ -824,6 +833,11 @@ static int cap_setprocattr(struct task_struct *p, char *name, void *value,
return -EINVAL;
}
+static int cap_ismaclabel(const char *name)
+{
+ return 0;
+}
+
static int cap_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return -EOPNOTSUPP;
@@ -831,7 +845,8 @@ static int cap_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
static int cap_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
- return -EOPNOTSUPP;
+ *secid = 0;
+ return 0;
}
static void cap_release_secctx(char *secdata, u32 seclen)
@@ -850,7 +865,7 @@ static int cap_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
static int cap_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
- return 0;
+ return -EOPNOTSUPP;
}
#ifdef CONFIG_KEYS
static int cap_key_alloc(struct key *key, const struct cred *cred,
@@ -864,7 +879,7 @@ static void cap_key_free(struct key *key)
}
static int cap_key_permission(key_ref_t key_ref, const struct cred *cred,
- key_perm_t perm)
+ unsigned perm)
{
return 0;
}
@@ -875,13 +890,6 @@ static int cap_key_getsecurity(struct key *key, char **_buffer)
return 0;
}
-static int cap_key_session_to_parent(const struct cred *cred,
- const struct cred *parent_cred,
- struct key *key)
-{
- return 0;
-}
-
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
@@ -906,10 +914,6 @@ static void cap_audit_rule_free(void *lsmrule)
}
#endif /* CONFIG_AUDIT */
-struct security_operations default_security_ops = {
- .name = "default",
-};
-
#define set_to_cap_if_null(ops, function) \
do { \
if (!ops->function) { \
@@ -919,17 +923,15 @@ struct security_operations default_security_ops = {
} \
} while (0)
-void security_fixup_ops(struct security_operations *ops)
+void __init security_fixup_ops(struct security_operations *ops)
{
set_to_cap_if_null(ops, ptrace_access_check);
set_to_cap_if_null(ops, ptrace_traceme);
set_to_cap_if_null(ops, capget);
set_to_cap_if_null(ops, capset);
- set_to_cap_if_null(ops, acct);
set_to_cap_if_null(ops, capable);
set_to_cap_if_null(ops, quotactl);
set_to_cap_if_null(ops, quota_on);
- set_to_cap_if_null(ops, sysctl);
set_to_cap_if_null(ops, syslog);
set_to_cap_if_null(ops, settime);
set_to_cap_if_null(ops, vm_enough_memory);
@@ -941,21 +943,17 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, sb_alloc_security);
set_to_cap_if_null(ops, sb_free_security);
set_to_cap_if_null(ops, sb_copy_data);
+ set_to_cap_if_null(ops, sb_remount);
set_to_cap_if_null(ops, sb_kern_mount);
set_to_cap_if_null(ops, sb_show_options);
set_to_cap_if_null(ops, sb_statfs);
set_to_cap_if_null(ops, sb_mount);
- set_to_cap_if_null(ops, sb_check_sb);
set_to_cap_if_null(ops, sb_umount);
- set_to_cap_if_null(ops, sb_umount_close);
- set_to_cap_if_null(ops, sb_umount_busy);
- set_to_cap_if_null(ops, sb_post_remount);
- set_to_cap_if_null(ops, sb_post_addmount);
set_to_cap_if_null(ops, sb_pivotroot);
- set_to_cap_if_null(ops, sb_post_pivotroot);
set_to_cap_if_null(ops, sb_set_mnt_opts);
set_to_cap_if_null(ops, sb_clone_mnt_opts);
set_to_cap_if_null(ops, sb_parse_opts_str);
+ set_to_cap_if_null(ops, dentry_init_security);
set_to_cap_if_null(ops, inode_alloc_security);
set_to_cap_if_null(ops, inode_free_security);
set_to_cap_if_null(ops, inode_init_security);
@@ -972,7 +970,6 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, inode_permission);
set_to_cap_if_null(ops, inode_setattr);
set_to_cap_if_null(ops, inode_getattr);
- set_to_cap_if_null(ops, inode_delete);
set_to_cap_if_null(ops, inode_setxattr);
set_to_cap_if_null(ops, inode_post_setxattr);
set_to_cap_if_null(ops, inode_getxattr);
@@ -1001,31 +998,30 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, file_alloc_security);
set_to_cap_if_null(ops, file_free_security);
set_to_cap_if_null(ops, file_ioctl);
- set_to_cap_if_null(ops, file_mmap);
+ set_to_cap_if_null(ops, mmap_addr);
+ set_to_cap_if_null(ops, mmap_file);
set_to_cap_if_null(ops, file_mprotect);
set_to_cap_if_null(ops, file_lock);
set_to_cap_if_null(ops, file_fcntl);
set_to_cap_if_null(ops, file_set_fowner);
set_to_cap_if_null(ops, file_send_sigiotask);
set_to_cap_if_null(ops, file_receive);
- set_to_cap_if_null(ops, dentry_open);
+ set_to_cap_if_null(ops, file_open);
set_to_cap_if_null(ops, task_create);
+ set_to_cap_if_null(ops, task_free);
set_to_cap_if_null(ops, cred_alloc_blank);
set_to_cap_if_null(ops, cred_free);
set_to_cap_if_null(ops, cred_prepare);
- set_to_cap_if_null(ops, cred_commit);
set_to_cap_if_null(ops, cred_transfer);
set_to_cap_if_null(ops, kernel_act_as);
set_to_cap_if_null(ops, kernel_create_files_as);
set_to_cap_if_null(ops, kernel_module_request);
- set_to_cap_if_null(ops, task_setuid);
+ set_to_cap_if_null(ops, kernel_module_from_file);
set_to_cap_if_null(ops, task_fix_setuid);
- set_to_cap_if_null(ops, task_setgid);
set_to_cap_if_null(ops, task_setpgid);
set_to_cap_if_null(ops, task_getpgid);
set_to_cap_if_null(ops, task_getsid);
set_to_cap_if_null(ops, task_getsecid);
- set_to_cap_if_null(ops, task_setgroups);
set_to_cap_if_null(ops, task_setnice);
set_to_cap_if_null(ops, task_setioprio);
set_to_cap_if_null(ops, task_getioprio);
@@ -1058,10 +1054,10 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, sem_semctl);
set_to_cap_if_null(ops, sem_semop);
set_to_cap_if_null(ops, netlink_send);
- set_to_cap_if_null(ops, netlink_recv);
set_to_cap_if_null(ops, d_instantiate);
set_to_cap_if_null(ops, getprocattr);
set_to_cap_if_null(ops, setprocattr);
+ set_to_cap_if_null(ops, ismaclabel);
set_to_cap_if_null(ops, secid_to_secctx);
set_to_cap_if_null(ops, secctx_to_secid);
set_to_cap_if_null(ops, release_secctx);
@@ -1095,17 +1091,25 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, inet_conn_request);
set_to_cap_if_null(ops, inet_csk_clone);
set_to_cap_if_null(ops, inet_conn_established);
+ set_to_cap_if_null(ops, secmark_relabel_packet);
+ set_to_cap_if_null(ops, secmark_refcount_inc);
+ set_to_cap_if_null(ops, secmark_refcount_dec);
set_to_cap_if_null(ops, req_classify_flow);
+ set_to_cap_if_null(ops, tun_dev_alloc_security);
+ set_to_cap_if_null(ops, tun_dev_free_security);
set_to_cap_if_null(ops, tun_dev_create);
- set_to_cap_if_null(ops, tun_dev_post_create);
+ set_to_cap_if_null(ops, tun_dev_open);
+ set_to_cap_if_null(ops, tun_dev_attach_queue);
set_to_cap_if_null(ops, tun_dev_attach);
+ set_to_cap_if_null(ops, skb_owned_by);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
set_to_cap_if_null(ops, xfrm_policy_alloc_security);
set_to_cap_if_null(ops, xfrm_policy_clone_security);
set_to_cap_if_null(ops, xfrm_policy_free_security);
set_to_cap_if_null(ops, xfrm_policy_delete_security);
- set_to_cap_if_null(ops, xfrm_state_alloc_security);
+ set_to_cap_if_null(ops, xfrm_state_alloc);
+ set_to_cap_if_null(ops, xfrm_state_alloc_acquire);
set_to_cap_if_null(ops, xfrm_state_free_security);
set_to_cap_if_null(ops, xfrm_state_delete_security);
set_to_cap_if_null(ops, xfrm_policy_lookup);
@@ -1117,7 +1121,6 @@ void security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, key_free);
set_to_cap_if_null(ops, key_permission);
set_to_cap_if_null(ops, key_getsecurity);
- set_to_cap_if_null(ops, key_session_to_parent);
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
set_to_cap_if_null(ops, audit_rule_init);
diff --git a/security/commoncap.c b/security/commoncap.c
index f800fdb3de9..b9d613e0ef1 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -27,6 +27,9 @@
#include <linux/sched.h>
#include <linux/prctl.h>
#include <linux/securebits.h>
+#include <linux/user_namespace.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
/*
* If a non-root user executes a setuid-root binary in
@@ -39,7 +42,7 @@
*
* Warn if that happens, once per boot.
*/
-static void warn_setuid_and_fcaps_mixed(char *fname)
+static void warn_setuid_and_fcaps_mixed(const char *fname)
{
static int warned;
if (!warned) {
@@ -52,22 +55,13 @@ static void warn_setuid_and_fcaps_mixed(char *fname)
int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
{
- NETLINK_CB(skb).eff_cap = current_cap();
return 0;
}
-int cap_netlink_recv(struct sk_buff *skb, int cap)
-{
- if (!cap_raised(NETLINK_CB(skb).eff_cap, cap))
- return -EPERM;
- return 0;
-}
-EXPORT_SYMBOL(cap_netlink_recv);
-
/**
* cap_capable - Determine whether a task has a particular effective capability
- * @tsk: The task to query
* @cred: The credentials to use
+ * @ns: The user namespace in which we need the capability
* @cap: The capability to check for
* @audit: Whether to write an audit message or not
*
@@ -79,10 +73,39 @@ EXPORT_SYMBOL(cap_netlink_recv);
* cap_has_capability() returns 0 when a task has a capability, but the
* kernel's capable() and has_capability() returns 1 for this case.
*/
-int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap,
- int audit)
+int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
+ int cap, int audit)
{
- return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+ struct user_namespace *ns = targ_ns;
+
+ /* See if cred has the capability in the target user namespace
+ * by examining the target user namespace and all of the target
+ * user namespace's parents.
+ */
+ for (;;) {
+ /* Do we have the necessary capabilities? */
+ if (ns == cred->user_ns)
+ return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
+
+ /* Have we tried all of the parent namespaces? */
+ if (ns == &init_user_ns)
+ return -EPERM;
+
+ /*
+ * The owner of the user namespace (who lives in its
+ * parent user namespace) has all caps over it.
+ */
+ if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
+ return 0;
+
+ /*
+ * If you have a capability in a parent user ns, then you have
+ * it over all children user namespaces as well.
+ */
+ ns = ns->parent;
+ }
+
+ /* We never get here */
}
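A toy userspace model of the namespace walk above. Here struct ns stands in for user_namespace and the has_cap flag for cap_raised(), so this only illustrates the decision order; it is not kernel code.

/* Illustrative only: parent walk for a capability check. */
#include <stdbool.h>
#include <stdio.h>

struct ns { struct ns *parent; int owner; };

static int toy_capable(struct ns *cred_ns, int cred_euid, bool has_cap,
		       struct ns *targ, struct ns *init_ns)
{
	for (struct ns *ns = targ; ; ns = ns->parent) {
		if (ns == cred_ns)
			return has_cap ? 0 : -1;	/* check effective caps  */
		if (ns == init_ns)
			return -1;			/* ran out of parents    */
		if (ns->parent == cred_ns && ns->owner == cred_euid)
			return 0;			/* owner of the child ns */
	}
}

int main(void)
{
	struct ns init_ns = { .parent = &init_ns, .owner = 0 };
	struct ns child = { .parent = &init_ns, .owner = 1000 };

	/* uid 1000 created "child": capable of everything inside it */
	printf("%d\n", toy_capable(&init_ns, 1000, false, &child, &init_ns));
	/* but not in the initial namespace unless the cap is raised */
	printf("%d\n", toy_capable(&init_ns, 1000, false, &init_ns, &init_ns));
	return 0;
}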
/**
@@ -93,7 +116,7 @@ int cap_capable(struct task_struct *tsk, const struct cred *cred, int cap,
* Determine whether the current process may set the system clock and timezone
* information, returning 0 if permission granted, -ve if denied.
*/
-int cap_settime(struct timespec *ts, struct timezone *tz)
+int cap_settime(const struct timespec *ts, const struct timezone *tz)
{
if (!capable(CAP_SYS_TIME))
return -EPERM;
@@ -106,18 +129,30 @@ int cap_settime(struct timespec *ts, struct timezone *tz)
* @child: The process to be accessed
* @mode: The mode of attachment.
*
+ * If we are in the same or an ancestor user_ns and have all the target
+ * task's capabilities, then ptrace access is allowed.
+ * If we have the ptrace capability to the target user_ns, then ptrace
+ * access is allowed.
+ * Else denied.
+ *
* Determine whether a process may access another, returning 0 if permission
* granted, -ve if denied.
*/
int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
int ret = 0;
+ const struct cred *cred, *child_cred;
rcu_read_lock();
- if (!cap_issubset(__task_cred(child)->cap_permitted,
- current_cred()->cap_permitted) &&
- !capable(CAP_SYS_PTRACE))
- ret = -EPERM;
+ cred = current_cred();
+ child_cred = __task_cred(child);
+ if (cred->user_ns == child_cred->user_ns &&
+ cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
+ goto out;
+ if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
+ goto out;
+ ret = -EPERM;
+out:
rcu_read_unlock();
return ret;
}
@@ -126,18 +161,30 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
* cap_ptrace_traceme - Determine whether another process may trace the current
* @parent: The task proposed to be the tracer
*
+ * If parent is in the same or an ancestor user_ns and has all current's
+ * capabilities, then ptrace access is allowed.
+ * If parent has the ptrace capability to current's user_ns, then ptrace
+ * access is allowed.
+ * Else denied.
+ *
* Determine whether the nominated task is permitted to trace the current
* process, returning 0 if permission is granted, -ve if denied.
*/
int cap_ptrace_traceme(struct task_struct *parent)
{
int ret = 0;
+ const struct cred *cred, *child_cred;
rcu_read_lock();
- if (!cap_issubset(current_cred()->cap_permitted,
- __task_cred(parent)->cap_permitted) &&
- !has_capability(parent, CAP_SYS_PTRACE))
- ret = -EPERM;
+ cred = __task_cred(parent);
+ child_cred = current_cred();
+ if (cred->user_ns == child_cred->user_ns &&
+ cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
+ goto out;
+ if (has_ns_capability(parent, child_cred->user_ns, CAP_SYS_PTRACE))
+ goto out;
+ ret = -EPERM;
+out:
rcu_read_unlock();
return ret;
}
@@ -177,8 +224,8 @@ static inline int cap_inh_is_capped(void)
/* they are so limited unless the current task has the CAP_SETPCAP
* capability
*/
- if (cap_capable(current, current_cred(), CAP_SETPCAP,
- SECURITY_CAP_AUDIT) == 0)
+ if (cap_capable(current_cred(), current_cred()->user_ns,
+ CAP_SETPCAP, SECURITY_CAP_AUDIT) == 0)
return 0;
return 1;
}
@@ -286,7 +333,8 @@ int cap_inode_killpriv(struct dentry *dentry)
*/
static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
struct linux_binprm *bprm,
- bool *effective)
+ bool *effective,
+ bool *has_cap)
{
struct cred *new = bprm->cred;
unsigned i;
@@ -295,6 +343,9 @@ static inline int bprm_caps_from_vfs_caps(struct cpu_vfs_cap_data *caps,
if (caps->magic_etc & VFS_CAP_FLAGS_EFFECTIVE)
*effective = true;
+ if (caps->magic_etc & VFS_CAP_REVISION_MASK)
+ *has_cap = true;
+
CAP_FOR_EACH_U32(i) {
__u32 permitted = caps->permitted.cap[i];
__u32 inheritable = caps->inheritable.cap[i];
@@ -378,7 +429,7 @@ int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data
* its xattrs and, if present, apply them to the proposed credentials being
* constructed by execve().
*/
-static int get_file_caps(struct linux_binprm *bprm, bool *effective)
+static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_cap)
{
struct dentry *dentry;
int rc = 0;
@@ -389,7 +440,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective)
if (!file_caps_enabled)
return 0;
- if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)
+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
return 0;
dentry = dget(bprm->file->f_dentry);
@@ -404,7 +455,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective)
goto out;
}
- rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective);
+ rc = bprm_caps_from_vfs_caps(&vcaps, bprm, effective, has_cap);
if (rc == -EINVAL)
printk(KERN_NOTICE "%s: cap_from_disk returned %d for %s\n",
__func__, rc, bprm->filename);
@@ -429,21 +480,24 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
{
const struct cred *old = current_cred();
struct cred *new = bprm->cred;
- bool effective;
+ bool effective, has_cap = false;
int ret;
+ kuid_t root_uid;
effective = false;
- ret = get_file_caps(bprm, &effective);
+ ret = get_file_caps(bprm, &effective, &has_cap);
if (ret < 0)
return ret;
+ root_uid = make_kuid(new->user_ns, 0);
+
if (!issecure(SECURE_NOROOT)) {
/*
* If the legacy file capability is set, then don't set privs
* for a setuid root binary run by a non-root user. Do set it
* for a root user just to cause least surprise to an admin.
*/
- if (effective && new->uid != 0 && new->euid == 0) {
+ if (has_cap && !uid_eq(new->uid, root_uid) && uid_eq(new->euid, root_uid)) {
warn_setuid_and_fcaps_mixed(bprm->filename);
goto skip;
}
@@ -454,25 +508,33 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
*
* If only the real uid is 0, we do not set the effective bit.
*/
- if (new->euid == 0 || new->uid == 0) {
+ if (uid_eq(new->euid, root_uid) || uid_eq(new->uid, root_uid)) {
/* pP' = (cap_bset & ~0) | (pI & ~0) */
new->cap_permitted = cap_combine(old->cap_bset,
old->cap_inheritable);
}
- if (new->euid == 0)
+ if (uid_eq(new->euid, root_uid))
effective = true;
}
skip:
+ /* if we have fs caps, clear dangerous personality flags */
+ if (!cap_issubset(new->cap_permitted, old->cap_permitted))
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+
/* Don't let someone trace a set[ug]id/setpcap binary with the revised
- * credentials unless they have the appropriate permit
+ * credentials unless they have the appropriate permit.
+ *
+ * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
*/
- if ((new->euid != old->uid ||
- new->egid != old->gid ||
+ if ((!uid_eq(new->euid, old->uid) ||
+ !gid_eq(new->egid, old->gid) ||
!cap_issubset(new->cap_permitted, old->cap_permitted)) &&
bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
/* downgrade; they get no more than they had, and maybe less */
- if (!capable(CAP_SETUID)) {
+ if (!capable(CAP_SETUID) ||
+ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
new->euid = new->uid;
new->egid = new->gid;
}
@@ -483,15 +545,10 @@ skip:
new->suid = new->fsuid = new->euid;
new->sgid = new->fsgid = new->egid;
- /* For init, we want to retain the capabilities set in the initial
- * task. Thus we skip the usual capability rules
- */
- if (!is_global_init(current)) {
- if (effective)
- new->cap_effective = new->cap_permitted;
- else
- cap_clear(new->cap_effective);
- }
+ if (effective)
+ new->cap_effective = new->cap_permitted;
+ else
+ cap_clear(new->cap_effective);
bprm->cap_effective = effective;
/*
@@ -508,7 +565,7 @@ skip:
*/
if (!cap_isclear(new->cap_effective)) {
if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
- new->euid != 0 || new->uid != 0 ||
+ !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
issecure(SECURE_NOROOT)) {
ret = audit_log_bprm_fcaps(bprm, new, old);
if (ret < 0)
@@ -533,16 +590,17 @@ skip:
int cap_bprm_secureexec(struct linux_binprm *bprm)
{
const struct cred *cred = current_cred();
+ kuid_t root_uid = make_kuid(cred->user_ns, 0);
- if (cred->uid != 0) {
+ if (!uid_eq(cred->uid, root_uid)) {
if (bprm->cap_effective)
return 1;
if (!cap_isclear(cred->cap_permitted))
return 1;
}
- return (cred->euid != cred->uid ||
- cred->egid != cred->gid);
+ return (!uid_eq(cred->euid, cred->uid) ||
+ !gid_eq(cred->egid, cred->gid));
}
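A small sketch of how the secureexec decision surfaces to userspace, assuming glibc's getauxval(): when this hook reports a privilege-gaining exec, AT_SECURE is set to 1 in the auxiliary vector and the dynamic loader enters secure mode.

    /* print the AT_SECURE flag derived from the bprm_secureexec hook */
    #include <sys/auxv.h>
    #include <stdio.h>

    int main(void)
    {
            printf("AT_SECURE = %lu\n", getauxval(AT_SECURE));
            return 0;
    }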
/**
@@ -569,7 +627,7 @@ int cap_inode_setxattr(struct dentry *dentry, const char *name,
}
if (!strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1) &&
+ sizeof(XATTR_SECURITY_PREFIX) - 1) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
@@ -595,7 +653,7 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name)
}
if (!strncmp(name, XATTR_SECURITY_PREFIX,
- sizeof(XATTR_SECURITY_PREFIX) - 1) &&
+ sizeof(XATTR_SECURITY_PREFIX) - 1) &&
!capable(CAP_SYS_ADMIN))
return -EPERM;
return 0;
@@ -632,15 +690,21 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name)
*/
static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
{
- if ((old->uid == 0 || old->euid == 0 || old->suid == 0) &&
- (new->uid != 0 && new->euid != 0 && new->suid != 0) &&
+ kuid_t root_uid = make_kuid(old->user_ns, 0);
+
+ if ((uid_eq(old->uid, root_uid) ||
+ uid_eq(old->euid, root_uid) ||
+ uid_eq(old->suid, root_uid)) &&
+ (!uid_eq(new->uid, root_uid) &&
+ !uid_eq(new->euid, root_uid) &&
+ !uid_eq(new->suid, root_uid)) &&
!issecure(SECURE_KEEP_CAPS)) {
cap_clear(new->cap_permitted);
cap_clear(new->cap_effective);
}
- if (old->euid == 0 && new->euid != 0)
+ if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
cap_clear(new->cap_effective);
- if (old->euid != 0 && new->euid == 0)
+ if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid))
new->cap_effective = new->cap_permitted;
}
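A hedged userspace sketch of the SECURE_KEEP_CAPS escape hatch referenced above: setting PR_SET_KEEPCAPS before switching away from root preserves the permitted set, while the effective set is still cleared by the euid rule in cap_emulate_setxuid(). The uid 1000 is a placeholder.

    /* retain permitted capabilities across a uid switch */
    #include <sys/prctl.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
            if (prctl(PR_SET_KEEPCAPS, 1L, 0L, 0L, 0L) == -1)
                    perror("prctl");
            if (setuid(1000) == -1)
                    perror("setuid");
            /* permitted set survives; effective set was cleared because
             * euid moved away from root */
            printf("uid now %d\n", (int)getuid());
            return 0;
    }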
@@ -673,11 +737,12 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
* if not, we might be a bit too harsh here.
*/
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
- if (old->fsuid == 0 && new->fsuid != 0)
+ kuid_t root_uid = make_kuid(old->user_ns, 0);
+ if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid))
new->cap_effective =
cap_drop_fs_set(new->cap_effective);
- if (old->fsuid != 0 && new->fsuid == 0)
+ if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid))
new->cap_effective =
cap_raise_fs_set(new->cap_effective,
new->cap_permitted);
@@ -703,29 +768,26 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
*/
static int cap_safe_nice(struct task_struct *p)
{
- int is_subset;
+ int is_subset, ret = 0;
rcu_read_lock();
is_subset = cap_issubset(__task_cred(p)->cap_permitted,
current_cred()->cap_permitted);
+ if (!is_subset && !ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
+ ret = -EPERM;
rcu_read_unlock();
- if (!is_subset && !capable(CAP_SYS_NICE))
- return -EPERM;
- return 0;
+ return ret;
}
/**
* cap_task_setscheduler - Determine if scheduler policy change is permitted
* @p: The task to affect
- * @policy: The policy to effect
- * @lp: The parameters to the scheduling policy
*
* Determine if the requested scheduler policy change is permitted for the
* specified task, returning 0 if permission is granted, -ve if denied.
*/
-int cap_task_setscheduler(struct task_struct *p, int policy,
- struct sched_param *lp)
+int cap_task_setscheduler(struct task_struct *p)
{
return cap_safe_nice(p);
}
@@ -762,7 +824,7 @@ int cap_task_setnice(struct task_struct *p, int nice)
*/
static long cap_prctl_drop(struct cred *new, unsigned long cap)
{
- if (!capable(CAP_SETPCAP))
+ if (!ns_capable(current_user_ns(), CAP_SETPCAP))
return -EPERM;
if (!cap_valid(cap))
return -EINVAL;
@@ -832,7 +894,8 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
& (new->securebits ^ arg2)) /*[1]*/
|| ((new->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
- || (cap_capable(current, current_cred(), CAP_SETPCAP,
+ || (cap_capable(current_cred(),
+ current_cred()->user_ns, CAP_SETPCAP,
SECURITY_CAP_AUDIT) != 0) /*[4]*/
/*
* [1] no changing of bits that are locked
@@ -886,20 +949,6 @@ error:
}
/**
- * cap_syslog - Determine whether syslog function is permitted
- * @type: Function requested
- *
- * Determine whether the current process is permitted to use a particular
- * syslog function, returning 0 if permission is granted, -ve if not.
- */
-int cap_syslog(int type)
-{
- if ((type != 3 && type != 10) && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- return 0;
-}
-
-/**
* cap_vm_enough_memory - Determine whether a new virtual mapping is permitted
* @mm: The VM space in which the new mapping is to be made
* @pages: The size of the mapping
@@ -911,34 +960,27 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages)
{
int cap_sys_admin = 0;
- if (cap_capable(current, current_cred(), CAP_SYS_ADMIN,
+ if (cap_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN,
SECURITY_CAP_NOAUDIT) == 0)
cap_sys_admin = 1;
return __vm_enough_memory(mm, pages, cap_sys_admin);
}
/*
- * cap_file_mmap - check if able to map given addr
- * @file: unused
- * @reqprot: unused
- * @prot: unused
- * @flags: unused
+ * cap_mmap_addr - check if able to map given addr
* @addr: address attempting to be mapped
- * @addr_only: unused
*
- * If the process is attempting to map memory below mmap_min_addr they need
+ * If a process is attempting to map memory below dac_mmap_min_addr, it needs
* CAP_SYS_RAWIO. The other parameters to this function are unused by the
* capability security module. Returns 0 if this mapping should be allowed
* -EPERM if not.
*/
-int cap_file_mmap(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags,
- unsigned long addr, unsigned long addr_only)
+int cap_mmap_addr(unsigned long addr)
{
int ret = 0;
if (addr < dac_mmap_min_addr) {
- ret = cap_capable(current, current_cred(), CAP_SYS_RAWIO,
+ ret = cap_capable(current_cred(), &init_user_ns, CAP_SYS_RAWIO,
SECURITY_CAP_AUDIT);
/* set PF_SUPERPRIV if it turns out we allow the low mmap */
if (ret == 0)
@@ -946,3 +988,9 @@ int cap_file_mmap(struct file *file, unsigned long reqprot,
}
return ret;
}
+
+int cap_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
+ return 0;
+}
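A short demonstration of the check cap_mmap_addr() performs, assuming the default vm.mmap_min_addr of 65536: a fixed mapping below that address fails with EPERM unless the caller has CAP_SYS_RAWIO.

    /* attempt a mapping below mmap_min_addr */
    #include <sys/mman.h>
    #include <stdio.h>

    int main(void)
    {
            void *p = mmap((void *)0x1000, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

            if (p == MAP_FAILED)
                    perror("mmap below mmap_min_addr");     /* expect EPERM */
            else
                    munmap(p, 4096);
            return 0;
    }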
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 6cf8fd2b79e..d9d69e6930e 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -10,6 +10,7 @@
#include <linux/list.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
+#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/mutex.h>
@@ -24,13 +25,19 @@
static DEFINE_MUTEX(devcgroup_mutex);
+enum devcg_behavior {
+ DEVCG_DEFAULT_NONE,
+ DEVCG_DEFAULT_ALLOW,
+ DEVCG_DEFAULT_DENY,
+};
+
/*
- * whitelist locking rules:
+ * exception list locking rules:
* hold devcgroup_mutex for update/read.
* hold rcu_read_lock() for read.
*/
-struct dev_whitelist_item {
+struct dev_exception_item {
u32 major, minor;
short type;
short access;
@@ -40,45 +47,31 @@ struct dev_whitelist_item {
struct dev_cgroup {
struct cgroup_subsys_state css;
- struct list_head whitelist;
+ struct list_head exceptions;
+ enum devcg_behavior behavior;
};
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
- return container_of(s, struct dev_cgroup, css);
-}
-
-static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
-{
- return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
+ return s ? container_of(s, struct dev_cgroup, css) : NULL;
}
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
- return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
-}
-
-struct cgroup_subsys devices_subsys;
-
-static int devcgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *new_cgroup, struct task_struct *task,
- bool threadgroup)
-{
- if (current != task && !capable(CAP_SYS_ADMIN))
- return -EPERM;
-
- return 0;
+ return css_to_devcgroup(task_css(task, devices_cgrp_id));
}
/*
* called under devcgroup_mutex
*/
-static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
+static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
- struct dev_whitelist_item *wh, *tmp, *new;
+ struct dev_exception_item *ex, *tmp, *new;
- list_for_each_entry(wh, orig, list) {
- new = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
+ lockdep_assert_held(&devcgroup_mutex);
+
+ list_for_each_entry(ex, orig, list) {
+ new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
if (!new)
goto free_and_exit;
list_add_tail(&new->list, dest);
@@ -87,132 +80,157 @@ static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
return 0;
free_and_exit:
- list_for_each_entry_safe(wh, tmp, dest, list) {
- list_del(&wh->list);
- kfree(wh);
+ list_for_each_entry_safe(ex, tmp, dest, list) {
+ list_del(&ex->list);
+ kfree(ex);
}
return -ENOMEM;
}
-/* Stupid prototype - don't bother combining existing entries */
/*
* called under devcgroup_mutex
*/
-static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
- struct dev_whitelist_item *wh)
+static int dev_exception_add(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *ex)
{
- struct dev_whitelist_item *whcopy, *walk;
+ struct dev_exception_item *excopy, *walk;
+
+ lockdep_assert_held(&devcgroup_mutex);
- whcopy = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
- if (!whcopy)
+ excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
+ if (!excopy)
return -ENOMEM;
- list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
- if (walk->type != wh->type)
+ list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
+ if (walk->type != ex->type)
continue;
- if (walk->major != wh->major)
+ if (walk->major != ex->major)
continue;
- if (walk->minor != wh->minor)
+ if (walk->minor != ex->minor)
continue;
- walk->access |= wh->access;
- kfree(whcopy);
- whcopy = NULL;
+ walk->access |= ex->access;
+ kfree(excopy);
+ excopy = NULL;
}
- if (whcopy != NULL)
- list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
+ if (excopy != NULL)
+ list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
return 0;
}
-static void whitelist_item_free(struct rcu_head *rcu)
-{
- struct dev_whitelist_item *item;
-
- item = container_of(rcu, struct dev_whitelist_item, rcu);
- kfree(item);
-}
-
/*
* called under devcgroup_mutex
*/
-static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
- struct dev_whitelist_item *wh)
+static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *ex)
{
- struct dev_whitelist_item *walk, *tmp;
+ struct dev_exception_item *walk, *tmp;
- list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
- if (walk->type == DEV_ALL)
- goto remove;
- if (walk->type != wh->type)
+ lockdep_assert_held(&devcgroup_mutex);
+
+ list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
+ if (walk->type != ex->type)
continue;
- if (walk->major != ~0 && walk->major != wh->major)
+ if (walk->major != ex->major)
continue;
- if (walk->minor != ~0 && walk->minor != wh->minor)
+ if (walk->minor != ex->minor)
continue;
-remove:
- walk->access &= ~wh->access;
+ walk->access &= ~ex->access;
if (!walk->access) {
list_del_rcu(&walk->list);
- call_rcu(&walk->rcu, whitelist_item_free);
+ kfree_rcu(walk, rcu);
}
}
}
+static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
+{
+ struct dev_exception_item *ex, *tmp;
+
+ list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
+ list_del_rcu(&ex->list);
+ kfree_rcu(ex, rcu);
+ }
+}
+
+/**
+ * dev_exception_clean - frees all entries of the exception list
+ * @dev_cgroup: dev_cgroup with the exception list to be cleaned
+ *
+ * called under devcgroup_mutex
+ */
+static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
+{
+ lockdep_assert_held(&devcgroup_mutex);
+
+ __dev_exception_clean(dev_cgroup);
+}
+
+static inline bool is_devcg_online(const struct dev_cgroup *devcg)
+{
+ return (devcg->behavior != DEVCG_DEFAULT_NONE);
+}
+
+/**
+ * devcgroup_online - initializes devcgroup's behavior and exceptions based on
+ * parent's
+ * @css: css getting online
+ * returns 0 in case of success, error code otherwise
+ */
+static int devcgroup_online(struct cgroup_subsys_state *css)
+{
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+ struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
+ int ret = 0;
+
+ mutex_lock(&devcgroup_mutex);
+
+ if (parent_dev_cgroup == NULL)
+ dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ else {
+ ret = dev_exceptions_copy(&dev_cgroup->exceptions,
+ &parent_dev_cgroup->exceptions);
+ if (!ret)
+ dev_cgroup->behavior = parent_dev_cgroup->behavior;
+ }
+ mutex_unlock(&devcgroup_mutex);
+
+ return ret;
+}
+
+static void devcgroup_offline(struct cgroup_subsys_state *css)
+{
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+
+ mutex_lock(&devcgroup_mutex);
+ dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
+ mutex_unlock(&devcgroup_mutex);
+}
+
/*
* called from kernel/cgroup.c with cgroup_lock() held.
*/
-static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
- struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
- struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
- struct cgroup *parent_cgroup;
- int ret;
+ struct dev_cgroup *dev_cgroup;
dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
if (!dev_cgroup)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&dev_cgroup->whitelist);
- parent_cgroup = cgroup->parent;
-
- if (parent_cgroup == NULL) {
- struct dev_whitelist_item *wh;
- wh = kmalloc(sizeof(*wh), GFP_KERNEL);
- if (!wh) {
- kfree(dev_cgroup);
- return ERR_PTR(-ENOMEM);
- }
- wh->minor = wh->major = ~0;
- wh->type = DEV_ALL;
- wh->access = ACC_MASK;
- list_add(&wh->list, &dev_cgroup->whitelist);
- } else {
- parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
- mutex_lock(&devcgroup_mutex);
- ret = dev_whitelist_copy(&dev_cgroup->whitelist,
- &parent_dev_cgroup->whitelist);
- mutex_unlock(&devcgroup_mutex);
- if (ret) {
- kfree(dev_cgroup);
- return ERR_PTR(ret);
- }
- }
+ INIT_LIST_HEAD(&dev_cgroup->exceptions);
+ dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
return &dev_cgroup->css;
}
-static void devcgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cgroup)
+static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
- struct dev_cgroup *dev_cgroup;
- struct dev_whitelist_item *wh, *tmp;
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
- dev_cgroup = cgroup_to_devcgroup(cgroup);
- list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
- list_del(&wh->list);
- kfree(wh);
- }
+ __dev_exception_clean(dev_cgroup);
kfree(dev_cgroup);
}
@@ -254,112 +272,384 @@ static void set_majmin(char *str, unsigned m)
sprintf(str, "%u", m);
}
-static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
- struct seq_file *m)
+static int devcgroup_seq_show(struct seq_file *m, void *v)
{
- struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
- struct dev_whitelist_item *wh;
+ struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
+ struct dev_exception_item *ex;
char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
rcu_read_lock();
- list_for_each_entry_rcu(wh, &devcgroup->whitelist, list) {
- set_access(acc, wh->access);
- set_majmin(maj, wh->major);
- set_majmin(min, wh->minor);
- seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
+ /*
+ * To preserve compatibility:
+ * - Only show the "all devices" when the default policy is to allow
+ * - List the exceptions in case the default policy is to deny
+ * This way, the file remains as a "whitelist of devices"
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ set_access(acc, ACC_MASK);
+ set_majmin(maj, ~0);
+ set_majmin(min, ~0);
+ seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
maj, min, acc);
+ } else {
+ list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
+ set_access(acc, ex->access);
+ set_majmin(maj, ex->major);
+ set_majmin(min, ex->minor);
+ seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
+ maj, min, acc);
+ }
}
rcu_read_unlock();
return 0;
}
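For reference, a minimal reader of the file this function backs; the v1 cgroup mount point and the "demo" group are placeholders, not paths defined by this patch.

    /* dump the whitelist-style view produced by devcgroup_seq_show(),
     * e.g. "a *:* rwm" when the behavior is DEVCG_DEFAULT_ALLOW */
    #include <stdio.h>

    int main(void)
    {
            char line[128];
            FILE *f = fopen("/sys/fs/cgroup/devices/demo/devices.list", "r");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            while (fgets(line, sizeof(line), f))
                    fputs(line, stdout);
            fclose(f);
            return 0;
    }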
-/*
- * may_access_whitelist:
- * does the access granted to dev_cgroup c contain the access
- * requested in whitelist item refwh.
- * return 1 if yes, 0 if no.
- * call with devcgroup_mutex held
+/**
+ * match_exception - iterates the exception list trying to find a complete match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of provided parameters.
+ *
+ * Return: true in case it matches an exception completely
*/
-static int may_access_whitelist(struct dev_cgroup *c,
- struct dev_whitelist_item *refwh)
+static bool match_exception(struct list_head *exceptions, short type,
+ u32 major, u32 minor, short access)
{
- struct dev_whitelist_item *whitem;
+ struct dev_exception_item *ex;
- list_for_each_entry(whitem, &c->whitelist, list) {
- if (whitem->type & DEV_ALL)
- return 1;
- if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
+ list_for_each_entry_rcu(ex, exceptions, list) {
+ if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
continue;
- if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR))
+ if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
continue;
- if (whitem->major != ~0 && whitem->major != refwh->major)
+ if (ex->major != ~0 && ex->major != major)
continue;
- if (whitem->minor != ~0 && whitem->minor != refwh->minor)
+ if (ex->minor != ~0 && ex->minor != minor)
continue;
- if (refwh->access & (~whitem->access))
+ /* provided access cannot have more than the exception rule */
+ if (access & (~ex->access))
continue;
- return 1;
+ return true;
}
- return 0;
+ return false;
+}
+
+/**
+ * match_exception_partial - iterates the exception list trying to find a partial match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden by
+ * any entry in the exception list.
+ *
+ * Return: true in case the provided range partially matches an exception
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+ u32 major, u32 minor, short access)
+{
+ struct dev_exception_item *ex;
+
+ list_for_each_entry_rcu(ex, exceptions, list) {
+ if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+ continue;
+ if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+ continue;
+ /*
+ * We must be sure that both the exception and the provided
+ * range aren't masking all devices
+ */
+ if (ex->major != ~0 && major != ~0 && ex->major != major)
+ continue;
+ if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
+ continue;
+ /*
+ * For the provided range not to match this exception, none of
+ * its access bits may overlap the exception's access bits
+ */
+ if (!(access & ex->access))
+ continue;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *refex,
+ enum devcg_behavior behavior)
+{
+ bool match = false;
+
+ rcu_lockdep_assert(rcu_read_lock_held() ||
+ lockdep_is_held(&devcgroup_mutex),
+ "device_cgroup:verify_new_ex called without proper synchronization");
+
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ if (behavior == DEVCG_DEFAULT_ALLOW) {
+ /*
+ * new exception in the child doesn't matter, only
+ * adding extra restrictions
+ */
+ return true;
+ } else {
+ /*
+ * new exception in the child will add more devices
+ * that can be accessed, so it can't match any of the
+ * parent's exceptions, even slightly
+ */
+ match = match_exception_partial(&dev_cgroup->exceptions,
+ refex->type,
+ refex->major,
+ refex->minor,
+ refex->access);
+
+ if (match)
+ return false;
+ return true;
+ }
+ } else {
+ /*
+ * Only behavior == DEVCG_DEFAULT_DENY allowed here, therefore
+ * the new exception will add access to more devices and must
+ * be contained completely in a parent's exception to be
+ * allowed
+ */
+ match = match_exception(&dev_cgroup->exceptions, refex->type,
+ refex->major, refex->minor,
+ refex->access);
+
+ if (match)
+ /* parent has an exception that matches the proposed */
+ return true;
+ else
+ return false;
+ }
+ return false;
}
/*
* parent_has_perm:
- * when adding a new allow rule to a device whitelist, the rule
+ * when adding a new allow rule to a device exception list, the rule
* must be allowed in the parent device
*/
static int parent_has_perm(struct dev_cgroup *childcg,
- struct dev_whitelist_item *wh)
+ struct dev_exception_item *ex)
{
- struct cgroup *pcg = childcg->css.cgroup->parent;
- struct dev_cgroup *parent;
+ struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
- if (!pcg)
+ if (!parent)
return 1;
- parent = cgroup_to_devcgroup(pcg);
- return may_access_whitelist(parent, wh);
+ return verify_new_ex(parent, ex, childcg->behavior);
+}
+
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in cgroups with default ALLOW policy, it must
+ * be checked if removing it will give the child cgroup more access than the
+ * parent.
+ *
+ * Return: true if it's ok to remove exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+ struct dev_exception_item *ex)
+{
+ struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
+
+ if (!parent)
+ return true;
+
+ /* It's always allowed to remove access to devices */
+ if (childcg->behavior == DEVCG_DEFAULT_DENY)
+ return true;
+
+ /*
+ * Make sure we're not removing part of, or a whole, exception that
+ * exists in the parent cgroup
+ */
+ return !match_exception_partial(&parent->exceptions, ex->type,
+ ex->major, ex->minor, ex->access);
+}
+
+/**
+ * may_allow_all - checks if it's possible to change the behavior to
+ * allow based on parent's rules.
+ * @parent: device cgroup's parent
+ * returns: != 0 in case it's allowed, 0 otherwise
+ */
+static inline int may_allow_all(struct dev_cgroup *parent)
+{
+ if (!parent)
+ return 1;
+ return parent->behavior == DEVCG_DEFAULT_ALLOW;
+}
+
+/**
+ * revalidate_active_exceptions - walks through the active exception list and
+ * revalidates the exceptions based on parent's
+ * behavior and exceptions. The exceptions that
+ * are no longer valid will be removed.
+ * Called with devcgroup_mutex held.
+ * @devcg: cgroup which exceptions will be checked
+ *
+ * This is one of the three key functions for hierarchy implementation.
+ * This function is responsible for re-evaluating all the cgroup's active
+ * exceptions due to a parent's exception change.
+ * Refer to Documentation/cgroups/devices.txt for more details.
+ */
+static void revalidate_active_exceptions(struct dev_cgroup *devcg)
+{
+ struct dev_exception_item *ex;
+ struct list_head *this, *tmp;
+
+ list_for_each_safe(this, tmp, &devcg->exceptions) {
+ ex = container_of(this, struct dev_exception_item, list);
+ if (!parent_has_perm(devcg, ex))
+ dev_exception_rm(devcg, ex);
+ }
+}
+
+/**
+ * propagate_exception - propagates a new exception to the children
+ * @devcg_root: device cgroup that added a new exception
+ * @ex: new exception to be propagated
+ *
+ * returns: 0 in case of success, != 0 in case of error
+ */
+static int propagate_exception(struct dev_cgroup *devcg_root,
+ struct dev_exception_item *ex)
+{
+ struct cgroup_subsys_state *pos;
+ int rc = 0;
+
+ rcu_read_lock();
+
+ css_for_each_descendant_pre(pos, &devcg_root->css) {
+ struct dev_cgroup *devcg = css_to_devcgroup(pos);
+
+ /*
+ * Because devcgroup_mutex is held, no devcg will become
+ * online or offline during the tree walk (see on/offline
+ * methods), and online ones are safe to access outside RCU
+ * read lock without bumping refcnt.
+ */
+ if (pos == &devcg_root->css || !is_devcg_online(devcg))
+ continue;
+
+ rcu_read_unlock();
+
+ /*
+ * in case both root's behavior and devcg is allow, a new
+ * restriction means adding to the exception list
+ */
+ if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
+ devcg->behavior == DEVCG_DEFAULT_ALLOW) {
+ rc = dev_exception_add(devcg, ex);
+ if (rc)
+ break;
+ } else {
+ /*
+ * in the other possible cases:
+ * root's behavior: allow, devcg's: deny
+ * root's behavior: deny, devcg's: deny
+ * the exception will be removed
+ */
+ dev_exception_rm(devcg, ex);
+ }
+ revalidate_active_exceptions(devcg);
+
+ rcu_read_lock();
+ }
+
+ rcu_read_unlock();
+ return rc;
}
/*
- * Modify the whitelist using allow/deny rules.
+ * Modify the exception list using allow/deny rules.
* CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
* so we can give a container CAP_MKNOD to let it create devices but not
- * modify the whitelist.
+ * modify the exception list.
* It seems likely we'll want to add a CAP_CONTAINER capability to allow
* us to also grant CAP_SYS_ADMIN to containers without giving away the
- * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN
+ * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
*
* Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting
* new access is only allowed if you're in the top-level cgroup, or your
* parent cgroup has the access you're asking for.
*/
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
- int filetype, const char *buffer)
+ int filetype, char *buffer)
{
const char *b;
- char *endp;
- int count;
- struct dev_whitelist_item wh;
+ char temp[12]; /* 11 + 1 characters needed for a u32 */
+ int count, rc = 0;
+ struct dev_exception_item ex;
+ struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- memset(&wh, 0, sizeof(wh));
+ memset(&ex, 0, sizeof(ex));
b = buffer;
switch (*b) {
case 'a':
- wh.type = DEV_ALL;
- wh.access = ACC_MASK;
- wh.major = ~0;
- wh.minor = ~0;
- goto handle;
+ switch (filetype) {
+ case DEVCG_ALLOW:
+ if (css_has_online_children(&devcgroup->css))
+ return -EINVAL;
+
+ if (!may_allow_all(parent))
+ return -EPERM;
+ dev_exception_clean(devcgroup);
+ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ if (!parent)
+ break;
+
+ rc = dev_exceptions_copy(&devcgroup->exceptions,
+ &parent->exceptions);
+ if (rc)
+ return rc;
+ break;
+ case DEVCG_DENY:
+ if (css_has_online_children(&devcgroup->css))
+ return -EINVAL;
+
+ dev_exception_clean(devcgroup);
+ devcgroup->behavior = DEVCG_DEFAULT_DENY;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
case 'b':
- wh.type = DEV_BLOCK;
+ ex.type = DEV_BLOCK;
break;
case 'c':
- wh.type = DEV_CHAR;
+ ex.type = DEV_CHAR;
break;
default:
return -EINVAL;
@@ -369,11 +659,19 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
return -EINVAL;
b++;
if (*b == '*') {
- wh.major = ~0;
+ ex.major = ~0;
b++;
} else if (isdigit(*b)) {
- wh.major = simple_strtoul(b, &endp, 10);
- b = endp;
+ memset(temp, 0, sizeof(temp));
+ for (count = 0; count < sizeof(temp) - 1; count++) {
+ temp[count] = *b;
+ b++;
+ if (!isdigit(*b))
+ break;
+ }
+ rc = kstrtou32(temp, 10, &ex.major);
+ if (rc)
+ return -EINVAL;
} else {
return -EINVAL;
}
@@ -383,11 +681,19 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
/* read minor */
if (*b == '*') {
- wh.minor = ~0;
+ ex.minor = ~0;
b++;
} else if (isdigit(*b)) {
- wh.minor = simple_strtoul(b, &endp, 10);
- b = endp;
+ memset(temp, 0, sizeof(temp));
+ for (count = 0; count < sizeof(temp) - 1; count++) {
+ temp[count] = *b;
+ b++;
+ if (!isdigit(*b))
+ break;
+ }
+ rc = kstrtou32(temp, 10, &ex.minor);
+ if (rc)
+ return -EINVAL;
} else {
return -EINVAL;
}
@@ -396,13 +702,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
for (b++, count = 0; count < 3; count++, b++) {
switch (*b) {
case 'r':
- wh.access |= ACC_READ;
+ ex.access |= ACC_READ;
break;
case 'w':
- wh.access |= ACC_WRITE;
+ ex.access |= ACC_WRITE;
break;
case 'm':
- wh.access |= ACC_MKNOD;
+ ex.access |= ACC_MKNOD;
break;
case '\n':
case '\0':
@@ -413,140 +719,150 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
}
}
-handle:
switch (filetype) {
case DEVCG_ALLOW:
- if (!parent_has_perm(devcgroup, &wh))
+ /*
+ * If the default policy is to allow by default, try to remove
+ * a matching exception instead. And be silent about it: we
+ * don't want to break compatibility
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ /* Check if the parent allows removing it first */
+ if (!parent_allows_removal(devcgroup, &ex))
+ return -EPERM;
+ dev_exception_rm(devcgroup, &ex);
+ break;
+ }
+
+ if (!parent_has_perm(devcgroup, &ex))
return -EPERM;
- return dev_whitelist_add(devcgroup, &wh);
+ rc = dev_exception_add(devcgroup, &ex);
+ break;
case DEVCG_DENY:
- dev_whitelist_rm(devcgroup, &wh);
+ /*
+ * If the default policy is to deny by default, try to remove
+ * a matching exception instead. And be silent about it: we
+ * don't want to break compatibility
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
+ dev_exception_rm(devcgroup, &ex);
+ else
+ rc = dev_exception_add(devcgroup, &ex);
+
+ if (rc)
+ break;
+ /* we only propagate new restrictions */
+ rc = propagate_exception(devcgroup, &ex);
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+ return rc;
}
-static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
{
int retval;
mutex_lock(&devcgroup_mutex);
- retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
- cft->private, buffer);
+ retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
+ of_cft(of)->private, strstrip(buf));
mutex_unlock(&devcgroup_mutex);
- return retval;
+ return retval ?: nbytes;
}
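An example of feeding the "type major:minor access" syntax parsed above into devices.allow; the cgroup path is a placeholder and assumes a v1 devices hierarchy is mounted.

    /* grant read/write/mknod on the null character device 1:3 */
    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/sys/fs/cgroup/devices/demo/devices.allow", "w");

            if (!f) {
                    perror("fopen");
                    return 1;
            }
            fputs("c 1:3 rwm\n", f);   /* handled by devcgroup_update_access() */
            fclose(f);
            return 0;
    }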
static struct cftype dev_cgroup_files[] = {
{
.name = "allow",
- .write_string = devcgroup_access_write,
+ .write = devcgroup_access_write,
.private = DEVCG_ALLOW,
},
{
.name = "deny",
- .write_string = devcgroup_access_write,
+ .write = devcgroup_access_write,
.private = DEVCG_DENY,
},
{
.name = "list",
- .read_seq_string = devcgroup_seq_read,
+ .seq_show = devcgroup_seq_show,
.private = DEVCG_LIST,
},
+ { } /* terminate */
};
-static int devcgroup_populate(struct cgroup_subsys *ss,
- struct cgroup *cgroup)
-{
- return cgroup_add_files(cgroup, ss, dev_cgroup_files,
- ARRAY_SIZE(dev_cgroup_files));
-}
-
-struct cgroup_subsys devices_subsys = {
- .name = "devices",
- .can_attach = devcgroup_can_attach,
- .create = devcgroup_create,
- .destroy = devcgroup_destroy,
- .populate = devcgroup_populate,
- .subsys_id = devices_subsys_id,
+struct cgroup_subsys devices_cgrp_subsys = {
+ .css_alloc = devcgroup_css_alloc,
+ .css_free = devcgroup_css_free,
+ .css_online = devcgroup_online,
+ .css_offline = devcgroup_offline,
+ .base_cftypes = dev_cgroup_files,
};
-int devcgroup_inode_permission(struct inode *inode, int mask)
+/**
+ * __devcgroup_check_permission - checks if an inode operation is permitted
+ * @dev_cgroup: the dev cgroup to be tested against
+ * @type: device type
+ * @major: device major number
+ * @minor: device minor number
+ * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
+ *
+ * returns 0 on success, -EPERM in case the operation is not permitted
+ */
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
{
struct dev_cgroup *dev_cgroup;
- struct dev_whitelist_item *wh;
-
- dev_t device = inode->i_rdev;
- if (!device)
- return 0;
- if (!S_ISBLK(inode->i_mode) && !S_ISCHR(inode->i_mode))
- return 0;
+ bool rc;
rcu_read_lock();
-
dev_cgroup = task_devcgroup(current);
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+ /* Can't match any of the exceptions, even partially */
+ rc = !match_exception_partial(&dev_cgroup->exceptions,
+ type, major, minor, access);
+ else
+ /* Need to match completely one exception to be allowed */
+ rc = match_exception(&dev_cgroup->exceptions, type, major,
+ minor, access);
+ rcu_read_unlock();
- list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
- if (wh->type & DEV_ALL)
- goto found;
- if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
- continue;
- if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
- continue;
- if (wh->major != ~0 && wh->major != imajor(inode))
- continue;
- if (wh->minor != ~0 && wh->minor != iminor(inode))
- continue;
-
- if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
- continue;
- if ((mask & MAY_READ) && !(wh->access & ACC_READ))
- continue;
-found:
- rcu_read_unlock();
- return 0;
- }
+ if (!rc)
+ return -EPERM;
- rcu_read_unlock();
+ return 0;
+}
- return -EPERM;
+int __devcgroup_inode_permission(struct inode *inode, int mask)
+{
+ short type, access = 0;
+
+ if (S_ISBLK(inode->i_mode))
+ type = DEV_BLOCK;
+ if (S_ISCHR(inode->i_mode))
+ type = DEV_CHAR;
+ if (mask & MAY_WRITE)
+ access |= ACC_WRITE;
+ if (mask & MAY_READ)
+ access |= ACC_READ;
+
+ return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
+ access);
}
int devcgroup_inode_mknod(int mode, dev_t dev)
{
- struct dev_cgroup *dev_cgroup;
- struct dev_whitelist_item *wh;
+ short type;
if (!S_ISBLK(mode) && !S_ISCHR(mode))
return 0;
- rcu_read_lock();
-
- dev_cgroup = task_devcgroup(current);
-
- list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
- if (wh->type & DEV_ALL)
- goto found;
- if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
- continue;
- if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
- continue;
- if (wh->major != ~0 && wh->major != MAJOR(dev))
- continue;
- if (wh->minor != ~0 && wh->minor != MINOR(dev))
- continue;
-
- if (!(wh->access & ACC_MKNOD))
- continue;
-found:
- rcu_read_unlock();
- return 0;
- }
+ if (S_ISBLK(mode))
+ type = DEV_BLOCK;
+ else
+ type = DEV_CHAR;
- rcu_read_unlock();
+ return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
+ ACC_MKNOD);
- return -EPERM;
}
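A quick way to observe this hook, assuming the calling task sits in a devices cgroup whose policy denies mknod of 1:3: the syscall below then fails with EPERM.

    /* mknod of a char device passes through devcgroup_inode_mknod() */
    #include <sys/stat.h>
    #include <sys/sysmacros.h>
    #include <stdio.h>

    int main(void)
    {
            if (mknod("/tmp/null-copy", S_IFCHR | 0600, makedev(1, 3)) == -1)
                    perror("mknod");        /* EPERM when the cgroup denies it */
            return 0;
    }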
diff --git a/security/inode.c b/security/inode.c
index f7496c6a022..43ce6e19015 100644
--- a/security/inode.c
+++ b/security/inode.c
@@ -25,98 +25,6 @@
static struct vfsmount *mount;
static int mount_count;
-/*
- * TODO:
- * I think I can get rid of these default_file_ops, but not quite sure...
- */
-static ssize_t default_read_file(struct file *file, char __user *buf,
- size_t count, loff_t *ppos)
-{
- return 0;
-}
-
-static ssize_t default_write_file(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return count;
-}
-
-static int default_open(struct inode *inode, struct file *file)
-{
- if (inode->i_private)
- file->private_data = inode->i_private;
-
- return 0;
-}
-
-static const struct file_operations default_file_ops = {
- .read = default_read_file,
- .write = default_write_file,
- .open = default_open,
-};
-
-static struct inode *get_inode(struct super_block *sb, int mode, dev_t dev)
-{
- struct inode *inode = new_inode(sb);
-
- if (inode) {
- inode->i_mode = mode;
- inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
- switch (mode & S_IFMT) {
- default:
- init_special_inode(inode, mode, dev);
- break;
- case S_IFREG:
- inode->i_fop = &default_file_ops;
- break;
- case S_IFDIR:
- inode->i_op = &simple_dir_inode_operations;
- inode->i_fop = &simple_dir_operations;
-
- /* directory inodes start off with i_nlink == 2 (for "." entry) */
- inc_nlink(inode);
- break;
- }
- }
- return inode;
-}
-
-/* SMP-safe */
-static int mknod(struct inode *dir, struct dentry *dentry,
- int mode, dev_t dev)
-{
- struct inode *inode;
- int error = -EPERM;
-
- if (dentry->d_inode)
- return -EEXIST;
-
- inode = get_inode(dir->i_sb, mode, dev);
- if (inode) {
- d_instantiate(dentry, inode);
- dget(dentry);
- error = 0;
- }
- return error;
-}
-
-static int mkdir(struct inode *dir, struct dentry *dentry, int mode)
-{
- int res;
-
- mode = (mode & (S_IRWXUGO | S_ISVTX)) | S_IFDIR;
- res = mknod(dir, dentry, mode, 0);
- if (!res)
- inc_nlink(dir);
- return res;
-}
-
-static int create(struct inode *dir, struct dentry *dentry, int mode)
-{
- mode = (mode & S_IALLUGO) | S_IFREG;
- return mknod(dir, dentry, mode, 0);
-}
-
static inline int positive(struct dentry *dentry)
{
return dentry->d_inode && !d_unhashed(dentry);
@@ -129,57 +37,20 @@ static int fill_super(struct super_block *sb, void *data, int silent)
return simple_fill_super(sb, SECURITYFS_MAGIC, files);
}
-static int get_sb(struct file_system_type *fs_type,
+static struct dentry *get_sb(struct file_system_type *fs_type,
int flags, const char *dev_name,
- void *data, struct vfsmount *mnt)
+ void *data)
{
- return get_sb_single(fs_type, flags, data, fill_super, mnt);
+ return mount_single(fs_type, flags, data, fill_super);
}
static struct file_system_type fs_type = {
.owner = THIS_MODULE,
.name = "securityfs",
- .get_sb = get_sb,
+ .mount = get_sb,
.kill_sb = kill_litter_super,
};
-static int create_by_name(const char *name, mode_t mode,
- struct dentry *parent,
- struct dentry **dentry)
-{
- int error = 0;
-
- *dentry = NULL;
-
- /* If the parent is not specified, we create it in the root.
- * We need the root dentry to do this, which is in the super
- * block. A pointer to that is in the struct vfsmount that we
- * have around.
- */
- if (!parent ) {
- if (mount && mount->mnt_sb) {
- parent = mount->mnt_sb->s_root;
- }
- }
- if (!parent) {
- pr_debug("securityfs: Ah! can not find a parent!\n");
- return -EFAULT;
- }
-
- mutex_lock(&parent->d_inode->i_mutex);
- *dentry = lookup_one_len(name, parent, strlen(name));
- if (!IS_ERR(dentry)) {
- if ((mode & S_IFMT) == S_IFDIR)
- error = mkdir(parent->d_inode, *dentry, mode);
- else
- error = create(parent->d_inode, *dentry, mode);
- } else
- error = PTR_ERR(dentry);
- mutex_unlock(&parent->d_inode->i_mutex);
-
- return error;
-}
-
/**
* securityfs_create_file - create a file in the securityfs filesystem
*
@@ -208,35 +79,70 @@ static int create_by_name(const char *name, mode_t mode,
* If securityfs is not enabled in the kernel, the value %-ENODEV is
* returned.
*/
-struct dentry *securityfs_create_file(const char *name, mode_t mode,
+struct dentry *securityfs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops)
{
- struct dentry *dentry = NULL;
+ struct dentry *dentry;
+ int is_dir = S_ISDIR(mode);
+ struct inode *dir, *inode;
int error;
+ if (!is_dir) {
+ BUG_ON(!fops);
+ mode = (mode & S_IALLUGO) | S_IFREG;
+ }
+
pr_debug("securityfs: creating file '%s'\n",name);
error = simple_pin_fs(&fs_type, &mount, &mount_count);
- if (error) {
- dentry = ERR_PTR(error);
- goto exit;
+ if (error)
+ return ERR_PTR(error);
+
+ if (!parent)
+ parent = mount->mnt_root;
+
+ dir = parent->d_inode;
+
+ mutex_lock(&dir->i_mutex);
+ dentry = lookup_one_len(name, parent, strlen(name));
+ if (IS_ERR(dentry))
+ goto out;
+
+ if (dentry->d_inode) {
+ error = -EEXIST;
+ goto out1;
}
- error = create_by_name(name, mode, parent, &dentry);
- if (error) {
- dentry = ERR_PTR(error);
- simple_release_fs(&mount, &mount_count);
- goto exit;
+ inode = new_inode(dir->i_sb);
+ if (!inode) {
+ error = -ENOMEM;
+ goto out1;
}
- if (dentry->d_inode) {
- if (fops)
- dentry->d_inode->i_fop = fops;
- if (data)
- dentry->d_inode->i_private = data;
+ inode->i_ino = get_next_ino();
+ inode->i_mode = mode;
+ inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_private = data;
+ if (is_dir) {
+ inode->i_op = &simple_dir_inode_operations;
+ inode->i_fop = &simple_dir_operations;
+ inc_nlink(inode);
+ inc_nlink(dir);
+ } else {
+ inode->i_fop = fops;
}
-exit:
+ d_instantiate(dentry, inode);
+ dget(dentry);
+ mutex_unlock(&dir->i_mutex);
+ return dentry;
+
+out1:
+ dput(dentry);
+ dentry = ERR_PTR(error);
+out:
+ mutex_unlock(&dir->i_mutex);
+ simple_release_fs(&mount, &mount_count);
return dentry;
}
EXPORT_SYMBOL_GPL(securityfs_create_file);
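A hedged in-kernel sketch of a securityfs consumer built on the API above; the "demo" directory and "status" file are illustrative names, not part of this patch.

    #include <linux/module.h>
    #include <linux/security.h>
    #include <linux/fs.h>

    static struct dentry *demo_dir, *demo_file;

    static ssize_t demo_read(struct file *file, char __user *buf,
                             size_t count, loff_t *ppos)
    {
            static const char msg[] = "hello from securityfs\n";

            return simple_read_from_buffer(buf, count, ppos, msg,
                                           sizeof(msg) - 1);
    }

    static const struct file_operations demo_fops = {
            .read = demo_read,
    };

    static int __init demo_init(void)
    {
            demo_dir = securityfs_create_dir("demo", NULL);
            if (IS_ERR(demo_dir))
                    return PTR_ERR(demo_dir);

            demo_file = securityfs_create_file("status", 0444, demo_dir,
                                               NULL, &demo_fops);
            if (IS_ERR(demo_file)) {
                    securityfs_remove(demo_dir);
                    return PTR_ERR(demo_file);
            }
            return 0;
    }

    static void __exit demo_exit(void)
    {
            securityfs_remove(demo_file);
            securityfs_remove(demo_dir);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");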
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
new file mode 100644
index 00000000000..245c6d92065
--- /dev/null
+++ b/security/integrity/Kconfig
@@ -0,0 +1,48 @@
+#
+config INTEGRITY
+ def_bool y
+ depends on IMA || EVM
+
+config INTEGRITY_SIGNATURE
+ boolean "Digital signature verification using multiple keyrings"
+ depends on INTEGRITY && KEYS
+ default n
+ select SIGNATURE
+ help
+ This option enables digital signature verification support
+ using multiple keyrings. It defines separate keyrings for each
+ of the different use cases - evm, ima, and modules.
+ Different keyrings improve search performance and also allow
+ certain keyrings to be "locked" to prevent adding new keys.
+ This is useful for the evm and module keyrings, whose keys are
+ usually only added from the initramfs.
+
+config INTEGRITY_AUDIT
+ bool "Enables integrity auditing support "
+ depends on INTEGRITY && AUDIT
+ default y
+ help
+ In addition to enabling integrity auditing support, this
+ option adds a kernel parameter 'integrity_audit', which
+ controls the level of integrity auditing messages.
+ 0 - basic integrity auditing messages (default)
+ 1 - additional integrity auditing messages
+
+ Additional informational integrity auditing messages would
+ be enabled by specifying 'integrity_audit=1' on the kernel
+ command line.
+
+config INTEGRITY_ASYMMETRIC_KEYS
+ boolean "Enable asymmetric keys support"
+ depends on INTEGRITY_SIGNATURE
+ default n
+ select ASYMMETRIC_KEY_TYPE
+ select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ select PUBLIC_KEY_ALGO_RSA
+ select X509_CERTIFICATE_PARSER
+ help
+ This option enables digital signature verification using
+ asymmetric keys.
+
+source security/integrity/ima/Kconfig
+source security/integrity/evm/Kconfig
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
new file mode 100644
index 00000000000..0793f4811cb
--- /dev/null
+++ b/security/integrity/Makefile
@@ -0,0 +1,15 @@
+#
+# Makefile for caching inode integrity data (iint)
+#
+
+obj-$(CONFIG_INTEGRITY) += integrity.o
+obj-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o
+obj-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
+obj-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
+
+integrity-y := iint.o
+
+subdir-$(CONFIG_IMA) += ima
+obj-$(CONFIG_IMA) += ima/
+subdir-$(CONFIG_EVM) += evm
+obj-$(CONFIG_EVM) += evm/
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
new file mode 100644
index 00000000000..b4af4ebc5be
--- /dev/null
+++ b/security/integrity/digsig.c
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2011 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/rbtree.h>
+#include <linux/key-type.h>
+#include <linux/digsig.h>
+
+#include "integrity.h"
+
+static struct key *keyring[INTEGRITY_KEYRING_MAX];
+
+static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
+ "_evm",
+ "_module",
+ "_ima",
+};
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ const char *digest, int digestlen)
+{
+ if (id >= INTEGRITY_KEYRING_MAX)
+ return -EINVAL;
+
+ if (!keyring[id]) {
+ keyring[id] =
+ request_key(&key_type_keyring, keyring_name[id], NULL);
+ if (IS_ERR(keyring[id])) {
+ int err = PTR_ERR(keyring[id]);
+ pr_err("no %s keyring: %d\n", keyring_name[id], err);
+ keyring[id] = NULL;
+ return err;
+ }
+ }
+
+ switch (sig[1]) {
+ case 1:
+ /* v1 API expects a signature without the xattr type */
+ return digsig_verify(keyring[id], sig + 1, siglen - 1,
+ digest, digestlen);
+ case 2:
+ return asymmetric_verify(keyring[id], sig, siglen,
+ digest, digestlen);
+ }
+
+ return -EOPNOTSUPP;
+}
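A userspace sketch, assuming libkeyutils, of how the "_ima" keyring that this function requests on first use is typically created during early boot; the user keyring is just a convenient example target.

    /* create an empty "_ima" keyring for integrity_digsig_verify() */
    #include <keyutils.h>
    #include <stdio.h>

    int main(void)
    {
            key_serial_t kr = add_key("keyring", "_ima", NULL, 0,
                                      KEY_SPEC_USER_KEYRING);

            if (kr < 0) {
                    perror("add_key");
                    return 1;
            }
            printf("_ima keyring: %d\n", kr);
            return 0;
    }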
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
new file mode 100644
index 00000000000..9eae4809006
--- /dev/null
+++ b/security/integrity/digsig_asymmetric.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/key-type.h>
+#include <crypto/public_key.h>
+#include <keys/asymmetric-type.h>
+
+#include "integrity.h"
+
+/*
+ * Request an asymmetric key.
+ */
+static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
+{
+ struct key *key;
+ char name[12];
+
+ sprintf(name, "id:%x", keyid);
+
+ pr_debug("key search: \"%s\"\n", name);
+
+ if (keyring) {
+ /* search in specific keyring */
+ key_ref_t kref;
+ kref = keyring_search(make_key_ref(keyring, 1),
+ &key_type_asymmetric, name);
+ if (IS_ERR(kref))
+ key = ERR_CAST(kref);
+ else
+ key = key_ref_to_ptr(kref);
+ } else {
+ key = request_key(&key_type_asymmetric, name, NULL);
+ }
+
+ if (IS_ERR(key)) {
+ pr_warn("Request for unknown key '%s' err %ld\n",
+ name, PTR_ERR(key));
+ switch (PTR_ERR(key)) {
+ /* Hide some search errors */
+ case -EACCES:
+ case -ENOTDIR:
+ case -EAGAIN:
+ return ERR_PTR(-ENOKEY);
+ default:
+ return key;
+ }
+ }
+
+ pr_debug("%s() = 0 [%x]\n", __func__, key_serial(key));
+
+ return key;
+}
+
+int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen)
+{
+ struct public_key_signature pks;
+ struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig;
+ struct key *key;
+ int ret = -ENOMEM;
+
+ if (siglen <= sizeof(*hdr))
+ return -EBADMSG;
+
+ siglen -= sizeof(*hdr);
+
+ if (siglen != __be16_to_cpu(hdr->sig_size))
+ return -EBADMSG;
+
+ if (hdr->hash_algo >= PKEY_HASH__LAST)
+ return -ENOPKG;
+
+ key = request_asymmetric_key(keyring, __be32_to_cpu(hdr->keyid));
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ memset(&pks, 0, sizeof(pks));
+
+ pks.pkey_hash_algo = hdr->hash_algo;
+ pks.digest = (u8 *)data;
+ pks.digest_size = datalen;
+ pks.nr_mpi = 1;
+ pks.rsa.s = mpi_read_raw_data(hdr->sig, siglen);
+
+ if (pks.rsa.s)
+ ret = verify_signature(key, &pks);
+
+ mpi_free(pks.rsa.s);
+ key_put(key);
+ pr_debug("%s() = %d\n", __func__, ret);
+ return ret;
+}
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
new file mode 100644
index 00000000000..d606f3d12d6
--- /dev/null
+++ b/security/integrity/evm/Kconfig
@@ -0,0 +1,52 @@
+config EVM
+ boolean "EVM support"
+ depends on SECURITY
+ select KEYS
+ select ENCRYPTED_KEYS
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ default n
+ help
+ EVM protects a file's security extended attributes against
+ integrity attacks.
+
+ If you are unsure how to answer this question, answer N.
+
+if EVM
+
+menu "EVM options"
+
+config EVM_ATTR_FSUUID
+ bool "FSUUID (version 2)"
+ default y
+ depends on EVM
+ help
+ Include filesystem UUID for HMAC calculation.
+
+ The default value is 'selected', which is the former version 2.
+ If 'not selected', it is the former version 1.
+
+ WARNING: changing the HMAC calculation method or adding
+ additional info to the calculation requires existing EVM
+ labeled file systems to be relabeled.
+
+config EVM_EXTRA_SMACK_XATTRS
+ bool "Additional SMACK xattrs"
+ depends on EVM && SECURITY_SMACK
+ default n
+ help
+ Include additional SMACK xattrs for HMAC calculation.
+
+ In addition to the original security xattrs (e.g. security.selinux,
+ security.SMACK64, security.capability, and security.ima) included
+ in the HMAC calculation, enabling this option includes newly defined
+ Smack xattrs: security.SMACK64EXEC, security.SMACK64TRANSMUTE and
+ security.SMACK64MMAP.
+
+ WARNING: changing the HMAC calculation method or adding
+ additional info to the calculation requires existing EVM
+ labeled file systems to be relabeled.
+
+endmenu
+
+endif
diff --git a/security/integrity/evm/Makefile b/security/integrity/evm/Makefile
new file mode 100644
index 00000000000..7393c415a06
--- /dev/null
+++ b/security/integrity/evm/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for building the Extended Verification Module(EVM)
+#
+obj-$(CONFIG_EVM) += evm.o
+
+evm-y := evm_main.o evm_crypto.o evm_secfs.o
+evm-$(CONFIG_FS_POSIX_ACL) += evm_posix_acl.o
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
new file mode 100644
index 00000000000..88bfe77efa1
--- /dev/null
+++ b/security/integrity/evm/evm.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm.h
+ *
+ */
+
+#ifndef __INTEGRITY_EVM_H
+#define __INTEGRITY_EVM_H
+
+#include <linux/xattr.h>
+#include <linux/security.h>
+
+#include "../integrity.h"
+
+extern int evm_initialized;
+extern char *evm_hmac;
+extern char *evm_hash;
+
+#define EVM_ATTR_FSUUID 0x0001
+
+extern int evm_hmac_attrs;
+
+extern struct crypto_shash *hmac_tfm;
+extern struct crypto_shash *hash_tfm;
+
+/* List of EVM protected security xattrs */
+extern char *evm_config_xattrnames[];
+
+int evm_init_key(void);
+int evm_update_evmxattr(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len);
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char *digest);
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char *digest);
+int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+ char *hmac_val);
+int evm_init_secfs(void);
+
+#endif
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
new file mode 100644
index 00000000000..5e9687f02e1
--- /dev/null
+++ b/security/integrity/evm/evm_crypto.c
@@ -0,0 +1,262 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_crypto.c
+ * Using root's kernel master key (kmk), calculate the HMAC
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/xattr.h>
+#include <keys/encrypted-type.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+#define EVMKEY "evm-key"
+#define MAX_KEY_SIZE 128
+static unsigned char evmkey[MAX_KEY_SIZE];
+static int evmkey_len = MAX_KEY_SIZE;
+
+struct crypto_shash *hmac_tfm;
+struct crypto_shash *hash_tfm;
+
+static DEFINE_MUTEX(mutex);
+
+static struct shash_desc *init_desc(char type)
+{
+ long rc;
+ char *algo;
+ struct crypto_shash **tfm;
+ struct shash_desc *desc;
+
+ if (type == EVM_XATTR_HMAC) {
+ tfm = &hmac_tfm;
+ algo = evm_hmac;
+ } else {
+ tfm = &hash_tfm;
+ algo = evm_hash;
+ }
+
+ if (*tfm == NULL) {
+ mutex_lock(&mutex);
+ if (*tfm)
+ goto out;
+ *tfm = crypto_alloc_shash(algo, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(*tfm)) {
+ rc = PTR_ERR(*tfm);
+ pr_err("Can not allocate %s (reason: %ld)\n", algo, rc);
+ *tfm = NULL;
+ mutex_unlock(&mutex);
+ return ERR_PTR(rc);
+ }
+ if (type == EVM_XATTR_HMAC) {
+ rc = crypto_shash_setkey(*tfm, evmkey, evmkey_len);
+ if (rc) {
+ crypto_free_shash(*tfm);
+ *tfm = NULL;
+ mutex_unlock(&mutex);
+ return ERR_PTR(rc);
+ }
+ }
+out:
+ mutex_unlock(&mutex);
+ }
+
+ desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(*tfm),
+ GFP_KERNEL);
+ if (!desc)
+ return ERR_PTR(-ENOMEM);
+
+ desc->tfm = *tfm;
+ desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
+
+ rc = crypto_shash_init(desc);
+ if (rc) {
+ kfree(desc);
+ return ERR_PTR(rc);
+ }
+ return desc;
+}
+
+/* Protect against 'cutting & pasting' security.evm xattr, include inode
+ * specific info.
+ *
+ * (Additional directory/file metadata needs to be added for more complete
+ * protection.)
+ */
+static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
+ char *digest)
+{
+ struct h_misc {
+ unsigned long ino;
+ __u32 generation;
+ uid_t uid;
+ gid_t gid;
+ umode_t mode;
+ } hmac_misc;
+
+ memset(&hmac_misc, 0, sizeof(hmac_misc));
+ hmac_misc.ino = inode->i_ino;
+ hmac_misc.generation = inode->i_generation;
+ hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
+ hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
+ hmac_misc.mode = inode->i_mode;
+ crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
+ if (evm_hmac_attrs & EVM_ATTR_FSUUID)
+ crypto_shash_update(desc, inode->i_sb->s_uuid,
+ sizeof(inode->i_sb->s_uuid));
+ crypto_shash_final(desc, digest);
+}
+
+/*
+ * Calculate the HMAC value across the set of protected security xattrs.
+ *
+ * Instead of retrieving the requested xattr, for performance, calculate
+ * the hmac using the requested xattr value. Don't alloc/free memory for
+ * each xattr, but attempt to re-use the previously allocated memory.
+ */
+static int evm_calc_hmac_or_hash(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len,
+ char type, char *digest)
+{
+ struct inode *inode = dentry->d_inode;
+ struct shash_desc *desc;
+ char **xattrname;
+ size_t xattr_size = 0;
+ char *xattr_value = NULL;
+ int error;
+ int size;
+
+ if (!inode->i_op->getxattr)
+ return -EOPNOTSUPP;
+ desc = init_desc(type);
+ if (IS_ERR(desc))
+ return PTR_ERR(desc);
+
+ error = -ENODATA;
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ if ((req_xattr_name && req_xattr_value)
+ && !strcmp(*xattrname, req_xattr_name)) {
+ error = 0;
+ crypto_shash_update(desc, (const u8 *)req_xattr_value,
+ req_xattr_value_len);
+ continue;
+ }
+ size = vfs_getxattr_alloc(dentry, *xattrname,
+ &xattr_value, xattr_size, GFP_NOFS);
+ if (size == -ENOMEM) {
+ error = -ENOMEM;
+ goto out;
+ }
+ if (size < 0)
+ continue;
+
+ error = 0;
+ xattr_size = size;
+ crypto_shash_update(desc, (const u8 *)xattr_value, xattr_size);
+ }
+ hmac_add_misc(desc, inode, digest);
+
+out:
+ kfree(xattr_value);
+ kfree(desc);
+ return error;
+}
+
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ char *digest)
+{
+ return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+ req_xattr_value_len, EVM_XATTR_HMAC, digest);
+}
+
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value, size_t req_xattr_value_len,
+ char *digest)
+{
+ return evm_calc_hmac_or_hash(dentry, req_xattr_name, req_xattr_value,
+ req_xattr_value_len, IMA_XATTR_DIGEST, digest);
+}
+
+/*
+ * Calculate the hmac and update security.evm xattr
+ *
+ * Expects to be called with i_mutex locked.
+ */
+int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
+ const char *xattr_value, size_t xattr_value_len)
+{
+ struct inode *inode = dentry->d_inode;
+ struct evm_ima_xattr_data xattr_data;
+ int rc = 0;
+
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, xattr_data.digest);
+ if (rc == 0) {
+ xattr_data.type = EVM_XATTR_HMAC;
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
+ &xattr_data,
+ sizeof(xattr_data), 0);
+ } else if (rc == -ENODATA && inode->i_op->removexattr) {
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+ }
+ return rc;
+}
+
+int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
+ char *hmac_val)
+{
+ struct shash_desc *desc;
+
+ desc = init_desc(EVM_XATTR_HMAC);
+ if (IS_ERR(desc)) {
+ pr_info("init_desc failed\n");
+ return PTR_ERR(desc);
+ }
+
+ crypto_shash_update(desc, lsm_xattr->value, lsm_xattr->value_len);
+ hmac_add_misc(desc, inode, hmac_val);
+ kfree(desc);
+ return 0;
+}
+
+/*
+ * Get the key from the TPM for the SHA1-HMAC
+ */
+int evm_init_key(void)
+{
+ struct key *evm_key;
+ struct encrypted_key_payload *ekp;
+ int rc = 0;
+
+ evm_key = request_key(&key_type_encrypted, EVMKEY, NULL);
+ if (IS_ERR(evm_key))
+ return -ENOENT;
+
+ down_read(&evm_key->sem);
+ ekp = evm_key->payload.data;
+ if (ekp->decrypted_datalen > MAX_KEY_SIZE) {
+ rc = -EINVAL;
+ goto out;
+ }
+ memcpy(evmkey, ekp->decrypted_data, ekp->decrypted_datalen);
+out:
+ /* burn the original key contents */
+ memset(ekp->decrypted_data, 0, ekp->decrypted_datalen);
+ up_read(&evm_key->sem);
+ key_put(evm_key);
+ return rc;
+}
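
A note on the pattern above: init_desc() allocates the hmac(sha1)/sha1 transform lazily, re-checking under the mutex so only the first caller pays for crypto_alloc_shash() and the HMAC key setup, while every call still allocates its own shash_desc. Below is a minimal, self-contained sketch of that same crypto_shash calling convention; it is illustrative only, not part of the patch, and the key and data values are made up.

/* Illustrative only -- a tiny module showing the crypto_shash convention
 * init_desc() relies on: allocate the transform once, set the HMAC key,
 * then run init/update/final through a heap-allocated shash_desc.
 * The key and data below are made-up test values.
 */
#include <linux/module.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <crypto/hash.h>

static int __init shash_demo_init(void)
{
	struct crypto_shash *tfm;
	struct shash_desc *desc;
	static const u8 key[] = "demo-key";
	static const u8 data[] = "demo-data";
	u8 digest[20];	/* hmac(sha1) output size */
	int rc;

	tfm = crypto_alloc_shash("hmac(sha1)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	rc = crypto_shash_setkey(tfm, key, sizeof(key) - 1);
	if (rc)
		goto out_free_tfm;

	desc = kmalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL);
	if (!desc) {
		rc = -ENOMEM;
		goto out_free_tfm;
	}
	desc->tfm = tfm;
	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;	/* as in init_desc() above */

	rc = crypto_shash_init(desc);
	if (!rc)
		rc = crypto_shash_update(desc, data, sizeof(data) - 1);
	if (!rc)
		rc = crypto_shash_final(desc, digest);
	if (!rc)
		print_hex_dump(KERN_INFO, "hmac: ", DUMP_PREFIX_NONE, 16, 1,
			       digest, sizeof(digest), false);

	kfree(desc);
out_free_tfm:
	crypto_free_shash(tfm);
	return rc;
}

static void __exit shash_demo_exit(void) { }

module_init(shash_demo_init);
module_exit(shash_demo_exit);
MODULE_LICENSE("GPL");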
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
new file mode 100644
index 00000000000..3bcb80df4d0
--- /dev/null
+++ b/security/integrity/evm/evm_main.c
@@ -0,0 +1,485 @@
+/*
+ * Copyright (C) 2005-2010 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Kylene Hall <kjhall@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_main.c
+ * implements evm_inode_setxattr, evm_inode_post_setxattr,
+ * evm_inode_removexattr, and evm_verifyxattr
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/crypto.h>
+#include <linux/audit.h>
+#include <linux/xattr.h>
+#include <linux/integrity.h>
+#include <linux/evm.h>
+#include <crypto/hash.h>
+#include "evm.h"
+
+int evm_initialized;
+
+static char *integrity_status_msg[] = {
+ "pass", "fail", "no_label", "no_xattrs", "unknown"
+};
+char *evm_hmac = "hmac(sha1)";
+char *evm_hash = "sha1";
+int evm_hmac_attrs;
+
+char *evm_config_xattrnames[] = {
+#ifdef CONFIG_SECURITY_SELINUX
+ XATTR_NAME_SELINUX,
+#endif
+#ifdef CONFIG_SECURITY_SMACK
+ XATTR_NAME_SMACK,
+#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
+ XATTR_NAME_SMACKEXEC,
+ XATTR_NAME_SMACKTRANSMUTE,
+ XATTR_NAME_SMACKMMAP,
+#endif
+#endif
+#ifdef CONFIG_IMA_APPRAISE
+ XATTR_NAME_IMA,
+#endif
+ XATTR_NAME_CAPS,
+ NULL
+};
+
+static int evm_fixmode;
+static int __init evm_set_fixmode(char *str)
+{
+ if (strncmp(str, "fix", 3) == 0)
+ evm_fixmode = 1;
+ return 0;
+}
+__setup("evm=", evm_set_fixmode);
+
+static void __init evm_init_config(void)
+{
+#ifdef CONFIG_EVM_ATTR_FSUUID
+ evm_hmac_attrs |= EVM_ATTR_FSUUID;
+#endif
+ pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs);
+}
+
+static int evm_find_protected_xattrs(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ char **xattr;
+ int error;
+ int count = 0;
+
+ if (!inode->i_op->getxattr)
+ return -EOPNOTSUPP;
+
+ for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
+ error = inode->i_op->getxattr(dentry, *xattr, NULL, 0);
+ if (error < 0) {
+ if (error == -ENODATA)
+ continue;
+ return error;
+ }
+ count++;
+ }
+
+ return count;
+}
+
+/*
+ * evm_verify_hmac - calculate and compare the HMAC with the EVM xattr
+ *
+ * Compute the HMAC on the dentry's protected set of extended attributes
+ * and compare it against the stored security.evm xattr.
+ *
+ * For performance:
+ * - use the previously retrieved xattr value and length to calculate the
+ *   HMAC.
+ * - cache the verification result in the iint, when available.
+ *
+ * Returns integrity status
+ */
+static enum integrity_status evm_verify_hmac(struct dentry *dentry,
+ const char *xattr_name,
+ char *xattr_value,
+ size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ struct evm_ima_xattr_data *xattr_data = NULL;
+ struct evm_ima_xattr_data calc;
+ enum integrity_status evm_status = INTEGRITY_PASS;
+ int rc, xattr_len;
+
+ if (iint && iint->evm_status == INTEGRITY_PASS)
+ return iint->evm_status;
+
+ /* if the cached status is not INTEGRITY_PASS, re-verify; a previous
+  * attempt may have failed with -ENOMEM */
+
+ /* first need to know the sig type */
+ rc = vfs_getxattr_alloc(dentry, XATTR_NAME_EVM, (char **)&xattr_data, 0,
+ GFP_NOFS);
+ if (rc <= 0) {
+ if (rc == 0)
+ evm_status = INTEGRITY_FAIL; /* empty */
+ else if (rc == -ENODATA) {
+ rc = evm_find_protected_xattrs(dentry);
+ if (rc > 0)
+ evm_status = INTEGRITY_NOLABEL;
+ else if (rc == 0)
+ evm_status = INTEGRITY_NOXATTRS; /* new file */
+ }
+ goto out;
+ }
+
+ xattr_len = rc;
+
+ /* check value type */
+ switch (xattr_data->type) {
+ case EVM_XATTR_HMAC:
+ rc = evm_calc_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, calc.digest);
+ if (rc)
+ break;
+ rc = memcmp(xattr_data->digest, calc.digest,
+ sizeof(calc.digest));
+ if (rc)
+ rc = -EINVAL;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+ rc = evm_calc_hash(dentry, xattr_name, xattr_value,
+ xattr_value_len, calc.digest);
+ if (rc)
+ break;
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
+ (const char *)xattr_data, xattr_len,
+ calc.digest, sizeof(calc.digest));
+ if (!rc) {
+ /* we probably want to replace rsa with hmac here */
+ evm_update_evmxattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ }
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+
+ if (rc)
+ evm_status = (rc == -ENODATA) ?
+ INTEGRITY_NOXATTRS : INTEGRITY_FAIL;
+out:
+ if (iint)
+ iint->evm_status = evm_status;
+ kfree(xattr_data);
+ return evm_status;
+}
+
+static int evm_protected_xattr(const char *req_xattr_name)
+{
+ char **xattrname;
+ int namelen;
+ int found = 0;
+
+ namelen = strlen(req_xattr_name);
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++) {
+ if ((strlen(*xattrname) == namelen)
+ && (strncmp(req_xattr_name, *xattrname, namelen) == 0)) {
+ found = 1;
+ break;
+ }
+ if (strncmp(req_xattr_name,
+ *xattrname + XATTR_SECURITY_PREFIX_LEN,
+ strlen(req_xattr_name)) == 0) {
+ found = 1;
+ break;
+ }
+ }
+ return found;
+}
+
+/**
+ * evm_verifyxattr - verify the integrity of the requested xattr
+ * @dentry: object of the verify xattr
+ * @xattr_name: requested xattr
+ * @xattr_value: requested xattr value
+ * @xattr_value_len: requested xattr value length
+ *
+ * Calculate the HMAC for the given dentry and verify it against the stored
+ * security.evm xattr. For performance, use the xattr value and length
+ * previously retrieved to calculate the HMAC.
+ *
+ * Returns the xattr integrity status.
+ *
+ * This function requires the caller to lock the inode's i_mutex before it
+ * is executed.
+ */
+enum integrity_status evm_verifyxattr(struct dentry *dentry,
+ const char *xattr_name,
+ void *xattr_value, size_t xattr_value_len,
+ struct integrity_iint_cache *iint)
+{
+ if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ return INTEGRITY_UNKNOWN;
+
+ if (!iint) {
+ iint = integrity_iint_find(dentry->d_inode);
+ if (!iint)
+ return INTEGRITY_UNKNOWN;
+ }
+ return evm_verify_hmac(dentry, xattr_name, xattr_value,
+ xattr_value_len, iint);
+}
+EXPORT_SYMBOL_GPL(evm_verifyxattr);
+
+/*
+ * evm_verify_current_integrity - verify the dentry's metadata integrity
+ * @dentry: pointer to the affected dentry
+ *
+ * Verify and return the dentry's metadata integrity status. Verification
+ * is skipped before EVM is initialized, for non-regular files, and in
+ * 'fix' mode.
+ */
+static enum integrity_status evm_verify_current_integrity(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!evm_initialized || !S_ISREG(inode->i_mode) || evm_fixmode)
+ return 0;
+ return evm_verify_hmac(dentry, NULL, NULL, 0, NULL);
+}
+
+/*
+ * evm_protect_xattr - protect the EVM extended attribute
+ *
+ * Prevent security.evm from being modified or removed without the
+ * necessary permissions or when the existing value is invalid.
+ *
+ * The posix xattr acls are 'system' prefixed, which normally would not
+ * affect security.evm.  An interesting side effect of writing posix xattr
+ * acls is that they modify the i_mode, which is included in security.evm.
+ * For posix xattr acls only, permit security.evm, even if it currently
+ * doesn't exist, to be updated.
+ */
+static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ enum integrity_status evm_status;
+
+ if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ } else if (!evm_protected_xattr(xattr_name)) {
+ if (!posix_xattr_acl(xattr_name))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ goto out;
+ }
+ evm_status = evm_verify_current_integrity(dentry);
+out:
+ if (evm_status != INTEGRITY_PASS)
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
+ dentry->d_name.name, "appraise_metadata",
+ integrity_status_msg[evm_status],
+ -EPERM, 0);
+ return evm_status == INTEGRITY_PASS ? 0 : -EPERM;
+}
+
+/**
+ * evm_inode_setxattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Before allowing the 'security.evm' protected xattr to be updated,
+ * verify the existing value is valid. As only the kernel should have
+ * access to the EVM encrypted key needed to calculate the HMAC, prevent
+ * userspace from writing the HMAC value.  Writing 'security.evm' requires
+ * CAP_SYS_ADMIN privileges.
+ */
+int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+ if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
+ && (xattr_data->type == EVM_XATTR_HMAC))
+ return -EPERM;
+ return evm_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+}
+
+/**
+ * evm_inode_removexattr - protect the EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Removing 'security.evm' requires CAP_SYS_ADMIN privileges and that
+ * the current value is valid.
+ */
+int evm_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ return evm_protect_xattr(dentry, xattr_name, NULL, 0);
+}
+
+/**
+ * evm_inode_post_setxattr - update 'security.evm' to reflect the changes
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ * @xattr_value: pointer to the new extended attribute value
+ * @xattr_value_len: pointer to the new extended attribute value length
+ *
+ * Update the HMAC stored in 'security.evm' to reflect the change.
+ *
+ * No need to take the i_mutex lock here, as this function is called from
+ * __vfs_setxattr_noperm(). The caller of which has taken the inode's
+ * i_mutex lock.
+ */
+void evm_inode_post_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (!evm_initialized || (!evm_protected_xattr(xattr_name)
+ && !posix_xattr_acl(xattr_name)))
+ return;
+
+ evm_update_evmxattr(dentry, xattr_name, xattr_value, xattr_value_len);
+ return;
+}
+
+/**
+ * evm_inode_post_removexattr - update 'security.evm' after removing the xattr
+ * @dentry: pointer to the affected dentry
+ * @xattr_name: pointer to the affected extended attribute name
+ *
+ * Update the HMAC stored in 'security.evm' to reflect removal of the xattr.
+ */
+void evm_inode_post_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!evm_initialized || !evm_protected_xattr(xattr_name))
+ return;
+
+ mutex_lock(&inode->i_mutex);
+ evm_update_evmxattr(dentry, xattr_name, NULL, 0);
+ mutex_unlock(&inode->i_mutex);
+ return;
+}
+
+/**
+ * evm_inode_setattr - prevent updating an invalid EVM extended attribute
+ * @dentry: pointer to the affected dentry
+ */
+int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
+{
+ unsigned int ia_valid = attr->ia_valid;
+ enum integrity_status evm_status;
+
+ if (!(ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID)))
+ return 0;
+ evm_status = evm_verify_current_integrity(dentry);
+ if ((evm_status == INTEGRITY_PASS) ||
+ (evm_status == INTEGRITY_NOXATTRS))
+ return 0;
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
+ dentry->d_name.name, "appraise_metadata",
+ integrity_status_msg[evm_status], -EPERM, 0);
+ return -EPERM;
+}
+
+/**
+ * evm_inode_post_setattr - update 'security.evm' after modifying metadata
+ * @dentry: pointer to the affected dentry
+ * @ia_valid: bitmask of the attributes being changed (ATTR_MODE/UID/GID)
+ *
+ * For now, update the HMAC stored in 'security.evm' to reflect UID/GID
+ * changes.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void evm_inode_post_setattr(struct dentry *dentry, int ia_valid)
+{
+ if (!evm_initialized)
+ return;
+
+ if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+ evm_update_evmxattr(dentry, NULL, NULL, 0);
+ return;
+}
+
+/*
+ * evm_inode_init_security - initializes security.evm
+ */
+int evm_inode_init_security(struct inode *inode,
+ const struct xattr *lsm_xattr,
+ struct xattr *evm_xattr)
+{
+ struct evm_ima_xattr_data *xattr_data;
+ int rc;
+
+ if (!evm_initialized || !evm_protected_xattr(lsm_xattr->name))
+ return 0;
+
+ xattr_data = kzalloc(sizeof(*xattr_data), GFP_NOFS);
+ if (!xattr_data)
+ return -ENOMEM;
+
+ xattr_data->type = EVM_XATTR_HMAC;
+ rc = evm_init_hmac(inode, lsm_xattr, xattr_data->digest);
+ if (rc < 0)
+ goto out;
+
+ evm_xattr->value = xattr_data;
+ evm_xattr->value_len = sizeof(*xattr_data);
+ evm_xattr->name = XATTR_EVM_SUFFIX;
+ return 0;
+out:
+ kfree(xattr_data);
+ return rc;
+}
+EXPORT_SYMBOL_GPL(evm_inode_init_security);
+
+static int __init init_evm(void)
+{
+ int error;
+
+ evm_init_config();
+
+ error = evm_init_secfs();
+ if (error < 0) {
+ pr_info("Error registering secfs\n");
+ goto err;
+ }
+
+ return 0;
+err:
+ return error;
+}
+
+/*
+ * evm_display_config - list the EVM protected security extended attributes
+ */
+static int __init evm_display_config(void)
+{
+ char **xattrname;
+
+ for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++)
+ pr_info("%s\n", *xattrname);
+ return 0;
+}
+
+pure_initcall(evm_display_config);
+late_initcall(init_evm);
+
+MODULE_DESCRIPTION("Extended Verification Module");
+MODULE_LICENSE("GPL");
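
evm_verifyxattr() above is the interface other subsystems use to validate one of the protected xattrs they have just read; passing a NULL iint lets it look up the cached status itself. The sketch below shows that expected call pattern; the helper name is invented, and IMA's appraisal code later in this series is the real in-tree caller.

/* Illustrative only -- fetch a protected xattr, then hand the same value
 * and length to evm_verifyxattr() so the HMAC/hash is computed over the
 * buffer the caller already has. The function name is made up.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/xattr.h>
#include <linux/integrity.h>
#include <linux/evm.h>

static enum integrity_status check_ima_xattr(struct dentry *dentry)
{
	enum integrity_status status = INTEGRITY_UNKNOWN;
	char *value = NULL;
	int len;

	len = vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, &value, 0, GFP_NOFS);
	if (len > 0)
		/* NULL iint: evm_verifyxattr() looks up the cache itself */
		status = evm_verifyxattr(dentry, XATTR_NAME_IMA, value, len,
					 NULL);
	kfree(value);
	return status;
}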
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
new file mode 100644
index 00000000000..46408b9e62e
--- /dev/null
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+
+int posix_xattr_acl(const char *xattr)
+{
+ int xattr_len = strlen(xattr);
+
+ if ((strlen(XATTR_NAME_POSIX_ACL_ACCESS) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_ACCESS, xattr, xattr_len) == 0))
+ return 1;
+ if ((strlen(XATTR_NAME_POSIX_ACL_DEFAULT) == xattr_len)
+ && (strncmp(XATTR_NAME_POSIX_ACL_DEFAULT, xattr, xattr_len) == 0))
+ return 1;
+ return 0;
+}
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
new file mode 100644
index 00000000000..cf12a04717d
--- /dev/null
+++ b/security/integrity/evm/evm_secfs.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * File: evm_secfs.c
+ * - Used to signal when key is on keyring
+ * - Get the key and enable EVM
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include "evm.h"
+
+static struct dentry *evm_init_tpm;
+
+/**
+ * evm_read_key - read() for <securityfs>/evm
+ *
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t evm_read_key(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", evm_initialized);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * evm_write_key - write() for <securityfs>/evm
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Used to signal that key is on the kernel key ring.
+ * - get the integrity hmac key from the kernel key ring
+ * - create list of hmac protected extended attributes
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t evm_write_key(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ int i, error;
+
+ if (!capable(CAP_SYS_ADMIN) || evm_initialized)
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if ((sscanf(temp, "%d", &i) != 1) || (i != 1))
+ return -EINVAL;
+
+ error = evm_init_key();
+ if (!error) {
+ evm_initialized = 1;
+ pr_info("initialized\n");
+ } else
+ pr_err("initialization failed\n");
+ return count;
+}
+
+static const struct file_operations evm_key_ops = {
+ .read = evm_read_key,
+ .write = evm_write_key,
+};
+
+int __init evm_init_secfs(void)
+{
+ int error = 0;
+
+ evm_init_tpm = securityfs_create_file("evm", S_IRUSR | S_IRGRP,
+ NULL, NULL, &evm_key_ops);
+ if (!evm_init_tpm || IS_ERR(evm_init_tpm))
+ error = -EFAULT;
+ return error;
+}
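
evm_write_key() above accepts a single '1' from a CAP_SYS_ADMIN process, after which the 'evm-key' encrypted key is read off the kernel keyring and EVM is enabled for good. The rough userspace sketch below shows only that final enable step; it assumes the key has already been loaded and that securityfs is mounted at /sys/kernel/security.

/* Illustrative only -- userspace side of the enable handshake, assuming
 * the 'evm-key' encrypted key is already on the kernel keyring and
 * securityfs is mounted at /sys/kernel/security.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/security/evm", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* evm_write_key() parses this, calls evm_init_key(), sets evm_initialized */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}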
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
new file mode 100644
index 00000000000..a521edf4cbd
--- /dev/null
+++ b/security/integrity/iint.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2008 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: iint.c
+ * - implements the integrity hooks: integrity_inode_alloc,
+ * integrity_inode_free
+ * - caches integrity information associated with an inode
+ * using an rbtree.
+ */
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/rbtree.h>
+#include "integrity.h"
+
+static struct rb_root integrity_iint_tree = RB_ROOT;
+static DEFINE_RWLOCK(integrity_iint_lock);
+static struct kmem_cache *iint_cache __read_mostly;
+
+int iint_initialized;
+
+/*
+ * __integrity_iint_find - return the iint associated with an inode
+ */
+static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+ struct rb_node *n = integrity_iint_tree.rb_node;
+
+ while (n) {
+ iint = rb_entry(n, struct integrity_iint_cache, rb_node);
+
+ if (inode < iint->inode)
+ n = n->rb_left;
+ else if (inode > iint->inode)
+ n = n->rb_right;
+ else
+ break;
+ }
+ if (!n)
+ return NULL;
+
+ return iint;
+}
+
+/*
+ * integrity_iint_find - return the iint associated with an inode
+ */
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return NULL;
+
+ read_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ read_unlock(&integrity_iint_lock);
+
+ return iint;
+}
+
+static void iint_free(struct integrity_iint_cache *iint)
+{
+ kfree(iint->ima_hash);
+ iint->ima_hash = NULL;
+ iint->version = 0;
+ iint->flags = 0UL;
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_module_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+ kmem_cache_free(iint_cache, iint);
+}
+
+/**
+ * integrity_inode_get - find or allocate an iint associated with an inode
+ * @inode: pointer to the inode
+ * @return: allocated iint
+ *
+ * Caller must lock i_mutex
+ */
+struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
+{
+ struct rb_node **p;
+ struct rb_node *node, *parent = NULL;
+ struct integrity_iint_cache *iint, *test_iint;
+
+ iint = integrity_iint_find(inode);
+ if (iint)
+ return iint;
+
+ iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+ if (!iint)
+ return NULL;
+
+ write_lock(&integrity_iint_lock);
+
+ p = &integrity_iint_tree.rb_node;
+ while (*p) {
+ parent = *p;
+ test_iint = rb_entry(parent, struct integrity_iint_cache,
+ rb_node);
+ if (inode < test_iint->inode)
+ p = &(*p)->rb_left;
+ else
+ p = &(*p)->rb_right;
+ }
+
+ iint->inode = inode;
+ node = &iint->rb_node;
+ inode->i_flags |= S_IMA;
+ rb_link_node(node, parent, p);
+ rb_insert_color(node, &integrity_iint_tree);
+
+ write_unlock(&integrity_iint_lock);
+ return iint;
+}
+
+/**
+ * integrity_inode_free - called on security_inode_free
+ * @inode: pointer to the inode
+ *
+ * Free the integrity information (iint) associated with an inode.
+ */
+void integrity_inode_free(struct inode *inode)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!IS_IMA(inode))
+ return;
+
+ write_lock(&integrity_iint_lock);
+ iint = __integrity_iint_find(inode);
+ rb_erase(&iint->rb_node, &integrity_iint_tree);
+ write_unlock(&integrity_iint_lock);
+
+ iint_free(iint);
+}
+
+static void init_once(void *foo)
+{
+ struct integrity_iint_cache *iint = foo;
+
+ memset(iint, 0, sizeof(*iint));
+ iint->version = 0;
+ iint->flags = 0UL;
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_module_status = INTEGRITY_UNKNOWN;
+ iint->evm_status = INTEGRITY_UNKNOWN;
+}
+
+static int __init integrity_iintcache_init(void)
+{
+ iint_cache =
+ kmem_cache_create("iint_cache", sizeof(struct integrity_iint_cache),
+ 0, SLAB_PANIC, init_once);
+ iint_initialized = 1;
+ return 0;
+}
+security_initcall(integrity_iintcache_init);
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 3d7846de806..81a27971d88 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -2,15 +2,17 @@
#
config IMA
bool "Integrity Measurement Architecture(IMA)"
- depends on ACPI
depends on SECURITY
+ select INTEGRITY
select SECURITYFS
select CRYPTO
select CRYPTO_HMAC
select CRYPTO_MD5
select CRYPTO_SHA1
- select TCG_TPM
- select TCG_TIS
+ select CRYPTO_HASH_INFO
+ select TCG_TPM if HAS_IOMEM && !UML
+ select TCG_TIS if TCG_TPM && X86
+ select TCG_IBMVTPM if TCG_TPM && PPC64
help
The Trusted Computing Group(TCG) runtime Integrity
Measurement Architecture(IMA) maintains a list of hash
@@ -37,20 +39,87 @@ config IMA_MEASURE_PCR_IDX
that IMA uses to maintain the integrity aggregate of the
measurement list. If unsure, use the default 10.
-config IMA_AUDIT
- bool
- depends on IMA
- default y
- help
- This option adds a kernel parameter 'ima_audit', which
- allows informational auditing messages to be enabled
- at boot. If this option is selected, informational integrity
- auditing messages can be enabled with 'ima_audit=1' on
- the kernel command line.
-
config IMA_LSM_RULES
bool
depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
default y
help
Disabling this option will disregard LSM based policy rules.
+
+choice
+ prompt "Default template"
+ default IMA_NG_TEMPLATE
+ depends on IMA
+ help
+ Select the default IMA measurement template.
+
+ The original 'ima' measurement list template contains a
+ hash, defined as 20 bytes, and a null terminated pathname,
+ limited to 255 characters. The 'ima-ng' measurement list
+ template permits both larger hash digests and longer
+ pathnames.
+
+ config IMA_TEMPLATE
+ bool "ima"
+ config IMA_NG_TEMPLATE
+ bool "ima-ng (default)"
+ config IMA_SIG_TEMPLATE
+ bool "ima-sig"
+endchoice
+
+config IMA_DEFAULT_TEMPLATE
+ string
+ depends on IMA
+ default "ima" if IMA_TEMPLATE
+ default "ima-ng" if IMA_NG_TEMPLATE
+ default "ima-sig" if IMA_SIG_TEMPLATE
+
+choice
+ prompt "Default integrity hash algorithm"
+ default IMA_DEFAULT_HASH_SHA1
+ depends on IMA
+ help
+ Select the default hash algorithm used for the measurement
+ list, integrity appraisal and audit log. The compiled-in default
+ hash algorithm can be overridden with the 'ima_hash=' kernel
+ command line option.
+
+ config IMA_DEFAULT_HASH_SHA1
+ bool "SHA1 (default)"
+ depends on CRYPTO_SHA1
+
+ config IMA_DEFAULT_HASH_SHA256
+ bool "SHA256"
+ depends on CRYPTO_SHA256 && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_SHA512
+ bool "SHA512"
+ depends on CRYPTO_SHA512 && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_WP512
+ bool "WP512"
+ depends on CRYPTO_WP512 && !IMA_TEMPLATE
+endchoice
+
+config IMA_DEFAULT_HASH
+ string
+ depends on IMA
+ default "sha1" if IMA_DEFAULT_HASH_SHA1
+ default "sha256" if IMA_DEFAULT_HASH_SHA256
+ default "sha512" if IMA_DEFAULT_HASH_SHA512
+ default "wp512" if IMA_DEFAULT_HASH_WP512
+
+config IMA_APPRAISE
+ bool "Appraise integrity measurements"
+ depends on IMA
+ default n
+ help
+ This option enables local measurement integrity appraisal.
+ It requires the system to be labeled with a security extended
+ attribute containing the file hash measurement. To protect
+ the security extended attributes from offline attack, enable
+ and configure EVM.
+
+ For more information on integrity appraisal refer to:
+ <http://linux-ima.sourceforge.net>
+ If unsure, say N.
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 787c4cb916c..d79263d2fdb 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o ima_iint.o ima_audit.o
+ ima_policy.o ima_template.o ima_template_lib.o
+ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index c41afe6639a..f79fa8be203 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -24,32 +24,61 @@
#include <linux/tpm.h>
#include <linux/audit.h>
-enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII };
+#include "../integrity.h"
+
+enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
+ IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
/* digest size for IMA, fits SHA1 or MD5 */
-#define IMA_DIGEST_SIZE 20
+#define IMA_DIGEST_SIZE SHA1_DIGEST_SIZE
#define IMA_EVENT_NAME_LEN_MAX 255
#define IMA_HASH_BITS 9
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
+#define IMA_TEMPLATE_NUM_FIELDS_MAX 15
+
+#define IMA_TEMPLATE_IMA_NAME "ima"
+#define IMA_TEMPLATE_IMA_FMT "d|n"
+
/* set during initialization */
extern int ima_initialized;
extern int ima_used_chip;
-extern char *ima_hash;
+extern int ima_hash_algo;
+extern int ima_appraise;
+
+/* IMA template field data definition */
+struct ima_field_data {
+ u8 *data;
+ u32 len;
+};
-/* IMA inode template definition */
-struct ima_template_data {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */
- char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */
+/* IMA template field definition */
+struct ima_template_field {
+ const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
+ int (*field_init) (struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data);
+ void (*field_show) (struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+};
+
+/* IMA template descriptor definition */
+struct ima_template_desc {
+ char *name;
+ char *fmt;
+ int num_fields;
+ struct ima_template_field **fields;
};
struct ima_template_entry {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
- const char *template_name;
- int template_len;
- struct ima_template_data template;
+ u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
+ struct ima_template_desc *template_desc; /* template descriptor */
+ u32 template_data_len;
+ struct ima_field_data template_data[0]; /* template related data */
};
struct ima_queue_entry {
@@ -59,24 +88,29 @@ struct ima_queue_entry {
};
extern struct list_head ima_measurements; /* list of all measurements */
-/* declarations */
-void integrity_audit_msg(int audit_msgno, struct inode *inode,
- const unsigned char *fname, const char *op,
- const char *cause, int result, int info);
-
/* Internal IMA function definitions */
-void ima_iintcache_init(void);
int ima_init(void);
void ima_cleanup(void);
int ima_fs_init(void);
void ima_fs_cleanup(void);
+int ima_inode_alloc(struct inode *inode);
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode);
-int ima_calc_hash(struct file *file, char *digest);
-int ima_calc_template_hash(int template_len, void *template, char *digest);
-int ima_calc_boot_aggregate(char *digest);
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
+ const char *op, struct inode *inode,
+ const unsigned char *filename);
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash);
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
+void ima_add_violation(struct file *file, const unsigned char *filename,
const char *op, const char *cause);
+int ima_init_crypto(void);
+void ima_putc(struct seq_file *m, void *data, int datalen);
+void ima_print_digest(struct seq_file *m, u8 *digest, int size);
+struct ima_template_desc *ima_template_desc_current(void);
+int ima_init_template(void);
+
/*
* used to protect h_table and sha_table
@@ -95,50 +129,105 @@ static inline unsigned long ima_hash_key(u8 *digest)
return hash_long(*digest, IMA_HASH_BITS);
}
-/* iint cache flags */
-#define IMA_MEASURED 1
-
-/* integrity data associated with an inode */
-struct ima_iint_cache {
- u64 version; /* track inode changes */
- unsigned long flags;
- u8 digest[IMA_DIGEST_SIZE];
- struct mutex mutex; /* protects: version, flags, digest */
- long readcount; /* measured files readcount */
- long writecount; /* measured files writecount */
- long opencount; /* opens reference count */
- struct kref refcount; /* ima_iint_cache reference count */
- struct rcu_head rcu;
-};
-
/* LIM API function definitions */
-int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode,
- int mask, int function);
-int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file);
-void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
+int ima_get_action(struct inode *inode, int mask, int function);
+int ima_must_measure(struct inode *inode, int mask, int function);
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file,
+ struct evm_ima_xattr_data **xattr_value,
+ int *xattr_len);
+void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
+void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename);
+int ima_alloc_init_template(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_template_entry **entry);
int ima_store_template(struct ima_template_entry *entry, int violation,
- struct inode *inode);
-void ima_template_show(struct seq_file *m, void *e,
- enum ima_show_type show);
+ struct inode *inode, const unsigned char *filename);
+void ima_free_template_entry(struct ima_template_entry *entry);
+const char *ima_d_path(struct path *path, char **pathbuf);
-/* radix tree calls to lookup, insert, delete
+/* rbtree calls to lookup, insert, delete
* integrity data associated with an inode.
*/
-struct ima_iint_cache *ima_iint_insert(struct inode *inode);
-struct ima_iint_cache *ima_iint_find_get(struct inode *inode);
-void iint_free(struct kref *kref);
-void iint_rcu_free(struct rcu_head *rcu);
+struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
/* IMA policy related functions */
-enum ima_hooks { PATH_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+enum ima_hooks { FILE_CHECK = 1, MMAP_CHECK, BPRM_CHECK, MODULE_CHECK, POST_SETATTR };
-int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
+int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+ int flags);
void ima_init_policy(void);
void ima_update_policy(void);
-int ima_parse_add_rule(char *);
+ssize_t ima_parse_add_rule(char *);
void ima_delete_rules(void);
+/* Appraise integrity measurements */
+#define IMA_APPRAISE_ENFORCE 0x01
+#define IMA_APPRAISE_FIX 0x02
+#define IMA_APPRAISE_MODULES 0x04
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
+int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+ int func);
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_digest_data *hash);
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value);
+
+#else
+static inline int ima_appraise_measurement(int func,
+ struct integrity_iint_cache *iint,
+ struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ return INTEGRITY_UNKNOWN;
+}
+
+static inline int ima_must_appraise(struct inode *inode, int mask,
+ enum ima_hooks func)
+{
+ return 0;
+}
+
+static inline void ima_update_xattr(struct integrity_iint_cache *iint,
+ struct file *file)
+{
+}
+
+static inline enum integrity_status ima_get_cache_status(struct integrity_iint_cache
+ *iint, int func)
+{
+ return INTEGRITY_UNKNOWN;
+}
+
+static inline void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
+ int xattr_len,
+ struct ima_digest_data *hash)
+{
+}
+
+static inline int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ return 0;
+}
+
+#endif
+
/* LSM based policy rules require audit */
#ifdef CONFIG_IMA_LSM_RULES
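
The new struct ima_template_field and struct ima_template_desc declared above are the extension point for the 'ima-ng' and 'ima-sig' formats: each field supplies a field_init() callback to collect its data and a field_show() callback to print it. The sketch below shows what a hypothetical extra field could look like against those callback signatures; the 'demo' id and both helpers are made up, and the stock field definitions actually live in ima_template_lib.c inside security/integrity/ima/.

/* Illustrative only -- a hypothetical template field wired up against the
 * callbacks declared above. It would be selected by listing its id in a
 * template format string; the real fields are defined in ima_template_lib.c.
 */
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/seq_file.h>
#include "ima.h"

static int demo_field_init(struct integrity_iint_cache *iint,
			   struct file *file, const unsigned char *filename,
			   struct evm_ima_xattr_data *xattr_value,
			   int xattr_len, struct ima_field_data *field_data)
{
	u32 len = strlen(filename) + 1;

	/* stash a NUL-terminated copy of the filename as the field's data */
	field_data->data = kmemdup(filename, len, GFP_KERNEL);
	if (!field_data->data)
		return -ENOMEM;
	field_data->len = len;
	return 0;
}

static void demo_field_show(struct seq_file *m, enum ima_show_type show,
			    struct ima_field_data *field_data)
{
	seq_printf(m, "%s", field_data->data);
}

static struct ima_template_field demo_field = {
	.field_id = "demo",
	.field_init = demo_field_init,
	.field_show = demo_field_show,
};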
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 3cd58b60afd..d9cd5ce14d2 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -9,13 +9,68 @@
* License.
*
* File: ima_api.c
- * Implements must_measure, collect_measurement, store_measurement,
- * and store_template.
+ * Implements must_appraise_or_measure, collect_measurement,
+ * appraise_measurement, store_measurement and store_template.
*/
#include <linux/module.h>
-
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+#include <crypto/hash_info.h>
#include "ima.h"
-static const char *IMA_TEMPLATE_NAME = "ima";
+
+/*
+ * ima_free_template_entry - free an existing template entry
+ */
+void ima_free_template_entry(struct ima_template_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < entry->template_desc->num_fields; i++)
+ kfree(entry->template_data[i].data);
+
+ kfree(entry);
+}
+
+/*
+ * ima_alloc_init_template - create and initialize a new template entry
+ */
+int ima_alloc_init_template(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_template_entry **entry)
+{
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i, result = 0;
+
+ *entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
+ sizeof(struct ima_field_data), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ struct ima_template_field *field = template_desc->fields[i];
+ u32 len;
+
+ result = field->field_init(iint, file, filename,
+ xattr_value, xattr_len,
+ &((*entry)->template_data[i]));
+ if (result != 0)
+ goto out;
+
+ len = (*entry)->template_data[i].len;
+ (*entry)->template_data_len += sizeof(len);
+ (*entry)->template_data_len += len;
+ }
+ return 0;
+out:
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ return result;
+}
/*
* ima_store_template - store ima template measurements
@@ -34,28 +89,35 @@ static const char *IMA_TEMPLATE_NAME = "ima";
* Returns 0 on success, error code otherwise
*/
int ima_store_template(struct ima_template_entry *entry,
- int violation, struct inode *inode)
+ int violation, struct inode *inode,
+ const unsigned char *filename)
{
- const char *op = "add_template_measure";
- const char *audit_cause = "hashing_error";
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "hashing_error";
+ char *template_name = entry->template_desc->name;
int result;
-
- memset(entry->digest, 0, sizeof(entry->digest));
- entry->template_name = IMA_TEMPLATE_NAME;
- entry->template_len = sizeof(entry->template);
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
if (!violation) {
- result = ima_calc_template_hash(entry->template_len,
- &entry->template,
- entry->digest);
+ int num_fields = entry->template_desc->num_fields;
+
+ /* this function uses default algo */
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_field_array_hash(&entry->template_data[0],
+ entry->template_desc,
+ num_fields, &hash.hdr);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template_name, op,
+ template_name, op,
audit_cause, result, 0);
return result;
}
+ memcpy(entry->digest, hash.hdr.digest, hash.hdr.length);
}
- result = ima_add_template_entry(entry, violation, op, inode);
+ result = ima_add_template_entry(entry, violation, op, inode, filename);
return result;
}
@@ -66,60 +128,60 @@ int ima_store_template(struct ima_template_entry *entry,
* By extending the PCR with 0xFF's instead of with zeroes, the PCR
* value is invalidated.
*/
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
+void ima_add_violation(struct file *file, const unsigned char *filename,
const char *op, const char *cause)
{
struct ima_template_entry *entry;
+ struct inode *inode = file_inode(file);
int violation = 1;
int result;
/* can overflow, only indicator */
atomic_long_inc(&ima_htable.violations);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ result = ima_alloc_init_template(NULL, file, filename,
+ NULL, 0, &entry);
+ if (result < 0) {
result = -ENOMEM;
goto err_out;
}
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
- result = ima_store_template(entry, violation, inode);
+ result = ima_store_template(entry, violation, inode, filename);
if (result < 0)
- kfree(entry);
+ ima_free_template_entry(entry);
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, cause, result, 0);
}
/**
- * ima_must_measure - measure decision based on policy.
+ * ima_get_action - appraise & measure decision based on policy.
* @inode: pointer to inode to measure
* @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (PATH_CHECK, BPRM_CHECK, FILE_MMAP)
+ * @function: calling function (FILE_CHECK, BPRM_CHECK, MMAP_CHECK, MODULE_CHECK)
*
* The policy is defined in terms of keypairs:
- * subj=, obj=, type=, func=, mask=, fsmagic=
+ * subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
- * func: PATH_CHECK | BPRM_CHECK | FILE_MMAP
- * mask: contains the permission mask
+ * func: FILE_CHECK | BPRM_CHECK | MMAP_CHECK | MODULE_CHECK
+ * mask: contains the permission mask
* fsmagic: hex value
*
- * Must be called with iint->mutex held.
+ * Returns IMA_MEASURE, IMA_APPRAISE mask.
*
- * Return 0 to measure. Return 1 if already measured.
- * For matching a DONT_MEASURE policy, no policy, or other
- * error, return an error code.
-*/
-int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode,
- int mask, int function)
+ */
+int ima_get_action(struct inode *inode, int mask, int function)
{
- int must_measure;
+ int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
- if (iint->flags & IMA_MEASURED)
- return 1;
+ if (!ima_appraise)
+ flags &= ~IMA_APPRAISE;
- must_measure = ima_match_policy(inode, function, mask);
- return must_measure ? 0 : -EACCES;
+ return ima_match_policy(inode, function, mask, flags);
+}
+
+int ima_must_measure(struct inode *inode, int mask, int function)
+{
+ return ima_match_policy(inode, function, mask, IMA_MEASURE);
}
/*
@@ -132,18 +194,57 @@ int ima_must_measure(struct ima_iint_cache *iint, struct inode *inode,
*
* Return 0 on success, error code otherwise
*/
-int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
+int ima_collect_measurement(struct integrity_iint_cache *iint,
+ struct file *file,
+ struct evm_ima_xattr_data **xattr_value,
+ int *xattr_len)
{
- int result = -EEXIST;
+ const char *audit_cause = "failed";
+ struct inode *inode = file_inode(file);
+ const char *filename = file->f_dentry->d_name.name;
+ int result = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+
+ if (xattr_value)
+ *xattr_len = ima_read_xattr(file->f_dentry, xattr_value);
- if (!(iint->flags & IMA_MEASURED)) {
- u64 i_version = file->f_dentry->d_inode->i_version;
+ if (!(iint->flags & IMA_COLLECTED)) {
+ u64 i_version = file_inode(file)->i_version;
- memset(iint->digest, 0, IMA_DIGEST_SIZE);
- result = ima_calc_hash(file, iint->digest);
- if (!result)
- iint->version = i_version;
+ if (file->f_flags & O_DIRECT) {
+ audit_cause = "failed(directio)";
+ result = -EACCES;
+ goto out;
+ }
+
+ /* use default hash algorithm */
+ hash.hdr.algo = ima_hash_algo;
+
+ if (xattr_value)
+ ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr);
+
+ result = ima_calc_file_hash(file, &hash.hdr);
+ if (!result) {
+ int length = sizeof(hash.hdr) + hash.hdr.length;
+ void *tmpbuf = krealloc(iint->ima_hash, length,
+ GFP_NOFS);
+ if (tmpbuf) {
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
+ iint->flags |= IMA_COLLECTED;
+ } else
+ result = -ENOMEM;
+ }
}
+out:
+ if (result)
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data", audit_cause,
+ result, 0);
return result;
}
@@ -155,36 +256,89 @@ int ima_collect_measurement(struct ima_iint_cache *iint, struct file *file)
*
* We only get here if the inode has not already been measured,
* but the measurement could already exist:
- * - multiple copies of the same file on either the same or
+ * - multiple copies of the same file on either the same or
* different filesystems.
* - the inode was previously flushed as well as the iint info,
* containing the hashing info.
*
* Must be called with iint->mutex held.
*/
-void ima_store_measurement(struct ima_iint_cache *iint, struct file *file,
- const unsigned char *filename)
+void ima_store_measurement(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
{
- const char *op = "add_template_measure";
- const char *audit_cause = "ENOMEM";
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "ENOMEM";
int result = -ENOMEM;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ima_template_entry *entry;
int violation = 0;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ if (iint->flags & IMA_MEASURED)
+ return;
+
+ result = ima_alloc_init_template(iint, file, filename,
+ xattr_value, xattr_len, &entry);
+ if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, 0);
return;
}
- memset(&entry->template, 0, sizeof(entry->template));
- memcpy(entry->template.digest, iint->digest, IMA_DIGEST_SIZE);
- strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
- result = ima_store_template(entry, violation, inode);
- if (!result)
+ result = ima_store_template(entry, violation, inode, filename);
+ if (!result || result == -EEXIST)
iint->flags |= IMA_MEASURED;
- else
- kfree(entry);
+ if (result < 0)
+ ima_free_template_entry(entry);
+}
+
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+ const unsigned char *filename)
+{
+ struct audit_buffer *ab;
+ char hash[(iint->ima_hash->length * 2) + 1];
+ const char *algo_name = hash_algo_name[iint->ima_hash->algo];
+ char algo_hash[sizeof(hash) + strlen(algo_name) + 2];
+ int i;
+
+ if (iint->flags & IMA_AUDITED)
+ return;
+
+ for (i = 0; i < iint->ima_hash->length; i++)
+ hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
+ hash[i * 2] = '\0';
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_INTEGRITY_RULE);
+ if (!ab)
+ return;
+
+ audit_log_format(ab, "file=");
+ audit_log_untrustedstring(ab, filename);
+ audit_log_format(ab, " hash=");
+ snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash);
+ audit_log_untrustedstring(ab, algo_hash);
+
+ audit_log_task_info(ab, current);
+ audit_log_end(ab);
+
+ iint->flags |= IMA_AUDITED;
+}
+
+const char *ima_d_path(struct path *path, char **pathbuf)
+{
+ char *pathname = NULL;
+
+ /* We will allow 11 spaces for ' (deleted)' to be appended */
+ *pathbuf = kmalloc(PATH_MAX + 11, GFP_KERNEL);
+ if (*pathbuf) {
+ pathname = d_path(path, *pathbuf, PATH_MAX + 11);
+ if (IS_ERR(pathname)) {
+ kfree(*pathbuf);
+ *pathbuf = NULL;
+ pathname = NULL;
+ }
+ }
+ return pathname ?: (const char *)path->dentry->d_name.name;
}
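
ima_store_template() and ima_collect_measurement() above both use an anonymous on-stack struct that places a digest buffer directly after struct ima_digest_data, so a single &hash.hdr pointer can describe a variable-length digest without a separate allocation. A minimal sketch of the same idiom, assuming only the declarations from ima.h shown earlier, is below; the helper name is made up.

/* Illustrative only -- ima_digest_data ends in a flexible digest[] member,
 * so callers reserve headroom for the largest digest they expect and pass
 * &hash.hdr around.
 */
#include <linux/fs.h>
#include "ima.h"

static int demo_hash_file(struct file *file)
{
	struct {
		struct ima_digest_data hdr;
		char digest[IMA_MAX_DIGEST_SIZE];
	} hash;

	hash.hdr.algo = ima_hash_algo;	/* compiled-in default algorithm */
	return ima_calc_file_hash(file, &hash.hdr);
}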
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
new file mode 100644
index 00000000000..d3113d4aaa3
--- /dev/null
+++ b/security/integrity/ima/ima_appraise.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/magic.h>
+#include <linux/ima.h>
+#include <linux/evm.h>
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+
+static int __init default_appraise_setup(char *str)
+{
+ if (strncmp(str, "off", 3) == 0)
+ ima_appraise = 0;
+ else if (strncmp(str, "fix", 3) == 0)
+ ima_appraise = IMA_APPRAISE_FIX;
+ return 1;
+}
+
+__setup("ima_appraise=", default_appraise_setup);
+
+/*
+ * ima_must_appraise - set appraise flag
+ *
+ * Return 1 to appraise
+ */
+int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
+{
+ if (!ima_appraise)
+ return 0;
+
+ return ima_match_policy(inode, func, mask, IMA_APPRAISE);
+}
+
+static int ima_fix_xattr(struct dentry *dentry,
+ struct integrity_iint_cache *iint)
+{
+ int rc, offset;
+ u8 algo = iint->ima_hash->algo;
+
+ if (algo <= HASH_ALGO_SHA1) {
+ offset = 1;
+ iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
+ } else {
+ offset = 0;
+ iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
+ iint->ima_hash->xattr.ng.algo = algo;
+ }
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
+ &iint->ima_hash->xattr.data[offset],
+ (sizeof(iint->ima_hash->xattr) - offset) +
+ iint->ima_hash->length, 0);
+ return rc;
+}
+
+/* Return specific func appraised cached result */
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+ int func)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ return iint->ima_mmap_status;
+ case BPRM_CHECK:
+ return iint->ima_bprm_status;
+ case MODULE_CHECK:
+ return iint->ima_module_status;
+ case FILE_CHECK:
+ default:
+ return iint->ima_file_status;
+ }
+}
+
+static void ima_set_cache_status(struct integrity_iint_cache *iint,
+ int func, enum integrity_status status)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ iint->ima_mmap_status = status;
+ break;
+ case BPRM_CHECK:
+ iint->ima_bprm_status = status;
+ break;
+ case MODULE_CHECK:
+ iint->ima_module_status = status;
+ break;
+ case FILE_CHECK:
+ default:
+ iint->ima_file_status = status;
+ break;
+ }
+}
+
+static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
+ break;
+ case BPRM_CHECK:
+ iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED);
+ break;
+ case MODULE_CHECK:
+ iint->flags |= (IMA_MODULE_APPRAISED | IMA_APPRAISED);
+ break;
+ case FILE_CHECK:
+ default:
+ iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED);
+ break;
+ }
+}
+
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_digest_data *hash)
+{
+ struct signature_v2_hdr *sig;
+
+ if (!xattr_value || xattr_len < 2)
+ return;
+
+ switch (xattr_value->type) {
+ case EVM_IMA_XATTR_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 2 || xattr_len <= sizeof(*sig))
+ return;
+ hash->algo = sig->hash_algo;
+ break;
+ case IMA_XATTR_DIGEST_NG:
+ hash->algo = xattr_value->digest[0];
+ break;
+ case IMA_XATTR_DIGEST:
+ /* this is for backward compatibility */
+ if (xattr_len == 21) {
+ unsigned int zero = 0;
+ if (!memcmp(&xattr_value->digest[16], &zero, 4))
+ hash->algo = HASH_ALGO_MD5;
+ else
+ hash->algo = HASH_ALGO_SHA1;
+ } else if (xattr_len == 17)
+ hash->algo = HASH_ALGO_MD5;
+ break;
+ }
+}
+
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!inode->i_op->getxattr)
+ return 0;
+
+ return vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
+ 0, GFP_NOFS);
+}
+
+/*
+ * ima_appraise_measurement - appraise file measurement
+ *
+ * Call evm_verifyxattr() to verify the integrity of 'security.ima'.
+ * Assuming success, compare the xattr hash with the collected measurement.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ static const char op[] = "appraise_data";
+ char *cause = "unknown";
+ struct dentry *dentry = file->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ enum integrity_status status = INTEGRITY_UNKNOWN;
+ int rc = xattr_len, hash_start = 0;
+
+ if (!ima_appraise)
+ return 0;
+ if (!inode->i_op->getxattr)
+ return INTEGRITY_UNKNOWN;
+
+ if (rc <= 0) {
+ if (rc && rc != -ENODATA)
+ goto out;
+
+ cause = "missing-hash";
+ status =
+ (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL;
+ goto out;
+ }
+
+ status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
+ if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
+ if ((status == INTEGRITY_NOLABEL)
+ || (status == INTEGRITY_NOXATTRS))
+ cause = "missing-HMAC";
+ else if (status == INTEGRITY_FAIL)
+ cause = "invalid-HMAC";
+ goto out;
+ }
+ switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
+ case IMA_XATTR_DIGEST:
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ cause = "IMA signature required";
+ status = INTEGRITY_FAIL;
+ break;
+ }
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /* the xattr value may be longer than the hash: in
+ previous versions the md5 hash occupied 20 bytes
+ in the xattr, instead of 16 */
+ rc = memcmp(&xattr_value->digest[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
+ if (rc) {
+ cause = "invalid-hash";
+ status = INTEGRITY_FAIL;
+ break;
+ }
+ status = INTEGRITY_PASS;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+ iint->flags |= IMA_DIGSIG;
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value, rc,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc == -EOPNOTSUPP) {
+ status = INTEGRITY_UNKNOWN;
+ } else if (rc) {
+ cause = "invalid-signature";
+ status = INTEGRITY_FAIL;
+ } else {
+ status = INTEGRITY_PASS;
+ }
+ break;
+ default:
+ status = INTEGRITY_UNKNOWN;
+ cause = "unknown-ima-data";
+ break;
+ }
+
+out:
+ if (status != INTEGRITY_PASS) {
+ if ((ima_appraise & IMA_APPRAISE_FIX) &&
+ (!xattr_value ||
+ xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
+ if (!ima_fix_xattr(dentry, iint))
+ status = INTEGRITY_PASS;
+ }
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ op, cause, rc, 0);
+ } else {
+ ima_cache_flags(iint, func);
+ }
+ ima_set_cache_status(iint, func, status);
+ return status;
+}
+
+/*
+ * ima_update_xattr - update 'security.ima' hash value
+ */
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+{
+ struct dentry *dentry = file->f_dentry;
+ int rc = 0;
+
+ /* do not collect and update hash for digital signatures */
+ if (iint->flags & IMA_DIGSIG)
+ return;
+
+ rc = ima_collect_measurement(iint, file, NULL, NULL);
+ if (rc < 0)
+ return;
+
+ ima_fix_xattr(dentry, iint);
+}
+
+/**
+ * ima_inode_post_setattr - reflect file metadata changes
+ * @dentry: pointer to the affected dentry
+ *
+ * Changes to a dentry's metadata might result in needing to appraise.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void ima_inode_post_setattr(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct integrity_iint_cache *iint;
+ int must_appraise, rc;
+
+ if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode)
+ || !inode->i_op->removexattr)
+ return;
+
+ must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+ iint = integrity_iint_find(inode);
+ if (iint) {
+ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+ IMA_ACTION_FLAGS);
+ if (must_appraise)
+ iint->flags |= IMA_APPRAISE;
+ }
+ if (!must_appraise)
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_IMA);
+ return;
+}
+
+/*
+ * ima_protect_xattr - protect 'security.ima'
+ *
+ * Ensure that not just anyone can modify or remove 'security.ima'.
+ */
+static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return 1;
+ }
+ return 0;
+}
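
Aside: from user space, writing or removing this xattr goes through the ima_inode_setxattr()/ima_inode_removexattr() hooks below, so it fails with EPERM without CAP_SYS_ADMIN. A minimal sketch with pared-down error handling (not part of the patch):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/xattr.h>

/* Label a file with a prebuilt 'security.ima' value; requires
 * CAP_SYS_ADMIN because of ima_protect_xattr() above. */
static int write_ima_xattr(const char *path, const void *value, size_t len)
{
        if (setxattr(path, "security.ima", value, len, 0) < 0) {
                fprintf(stderr, "setxattr(%s): %s\n", path, strerror(errno));
                return -1;
        }
        return 0;
}
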
+
+static void ima_reset_appraise_flags(struct inode *inode, int digsig)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode))
+ return;
+
+ iint = integrity_iint_find(inode);
+ if (!iint)
+ return;
+
+ iint->flags &= ~IMA_DONE_MASK;
+ if (digsig)
+ iint->flags |= IMA_DIGSIG;
+ return;
+}
+
+int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ const struct evm_ima_xattr_data *xvalue = xattr_value;
+ int result;
+
+ result = ima_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ if (result == 1) {
+ ima_reset_appraise_flags(dentry->d_inode,
+ (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
+ result = 0;
+ }
+ return result;
+}
+
+int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ int result;
+
+ result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
+ if (result == 1) {
+ ima_reset_appraise_flags(dentry->d_inode, 0);
+ result = 0;
+ }
+ return result;
+}
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 46642a19bc7..ccd0ac8fa9a 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -10,59 +10,131 @@
* the Free Software Foundation, version 2 of the License.
*
* File: ima_crypto.c
- * Calculates md5/sha1 file hash, template hash, boot-aggreate hash
+ *	Calculates md5/sha1 file hash, template hash, boot-aggregate hash
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
+#include <linux/slab.h>
+#include <crypto/hash.h>
+#include <crypto/hash_info.h>
#include "ima.h"
-static int init_desc(struct hash_desc *desc)
+static struct crypto_shash *ima_shash_tfm;
+
+/**
+ * ima_kernel_read - read file content
+ *
+ * A replacement for kernel_read() used when reading file content for
+ * measurement. It skips the mandatory locking checks so that it cannot
+ * be blocked, and it skips the security checks because they are not
+ * relevant for IMA.
+ *
+ */
+static int ima_kernel_read(struct file *file, loff_t offset,
+ char *addr, unsigned long count)
{
- int rc;
+ mm_segment_t old_fs;
+ char __user *buf = addr;
+ ssize_t ret;
+
+ if (!(file->f_mode & FMODE_READ))
+ return -EBADF;
+ if (!file->f_op->read && !file->f_op->aio_read)
+ return -EINVAL;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ if (file->f_op->read)
+ ret = file->f_op->read(file, buf, count, &offset);
+ else
+ ret = do_sync_read(file, buf, count, &offset);
+ set_fs(old_fs);
+ return ret;
+}
- desc->tfm = crypto_alloc_hash(ima_hash, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(desc->tfm)) {
- pr_info("failed to load %s transform: %ld\n",
- ima_hash, PTR_ERR(desc->tfm));
- rc = PTR_ERR(desc->tfm);
+int ima_init_crypto(void)
+{
+ long rc;
+
+ ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
+ if (IS_ERR(ima_shash_tfm)) {
+ rc = PTR_ERR(ima_shash_tfm);
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ hash_algo_name[ima_hash_algo], rc);
return rc;
}
- desc->flags = 0;
- rc = crypto_hash_init(desc);
- if (rc)
- crypto_free_hash(desc->tfm);
- return rc;
+ return 0;
+}
+
+static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
+{
+ struct crypto_shash *tfm = ima_shash_tfm;
+ int rc;
+
+ if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) {
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ }
+ return tfm;
+}
+
+static void ima_free_tfm(struct crypto_shash *tfm)
+{
+ if (tfm != ima_shash_tfm)
+ crypto_free_shash(tfm);
}
/*
* Calculate the MD5/SHA1 file digest
*/
-int ima_calc_hash(struct file *file, char *digest)
+static int ima_calc_file_hash_tfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
{
- struct hash_desc desc;
- struct scatterlist sg[1];
loff_t i_size, offset = 0;
char *rbuf;
- int rc;
+ int rc, read = 0;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm)];
+ } desc;
- rc = init_desc(&desc);
+ desc.shash.tfm = tfm;
+ desc.shash.flags = 0;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(&desc.shash);
if (rc != 0)
return rc;
- rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!rbuf) {
- rc = -ENOMEM;
+ i_size = i_size_read(file_inode(file));
+
+ if (i_size == 0)
goto out;
+
+ rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!rbuf)
+ return -ENOMEM;
+
+ if (!(file->f_mode & FMODE_READ)) {
+ file->f_mode |= FMODE_READ;
+ read = 1;
}
- i_size = i_size_read(file->f_dentry->d_inode);
+
while (offset < i_size) {
int rbuf_len;
- rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
+ rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
if (rbuf_len < 0) {
rc = rbuf_len;
break;
@@ -70,38 +142,103 @@ int ima_calc_hash(struct file *file, char *digest)
if (rbuf_len == 0)
break;
offset += rbuf_len;
- sg_init_one(sg, rbuf, rbuf_len);
- rc = crypto_hash_update(&desc, sg, rbuf_len);
+ rc = crypto_shash_update(&desc.shash, rbuf, rbuf_len);
if (rc)
break;
}
+ if (read)
+ file->f_mode &= ~FMODE_READ;
kfree(rbuf);
- if (!rc)
- rc = crypto_hash_final(&desc, digest);
out:
- crypto_free_hash(desc.tfm);
+ if (!rc)
+ rc = crypto_shash_final(&desc.shash, hash->digest);
+ return rc;
+}
+
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_tfm(file, hash, tfm);
+
+ ima_free_tfm(tfm);
+
return rc;
}
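
Aside: the hashing functions in this file all use the same on-stack descriptor idiom, sized at run time with crypto_shash_descsize(). A stripped-down sketch of the pattern, assuming kernel context and using crypto_shash_digest() as the one-shot variant (illustrative, not part of the patch):

#include <crypto/hash.h>

/* One-shot digest over an in-memory buffer using the same
 * variable-length on-stack descriptor as ima_calc_file_hash_tfm(). */
static int ima_example_digest(struct crypto_shash *tfm,
                              const u8 *data, unsigned int len, u8 *out)
{
        struct {
                struct shash_desc shash;
                char ctx[crypto_shash_descsize(tfm)];
        } desc;

        desc.shash.tfm = tfm;
        desc.shash.flags = 0;
        return crypto_shash_digest(&desc.shash, data, len, out);
}
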
/*
- * Calculate the hash of a given template
+ * Calculate the hash of template data
*/
-int ima_calc_template_hash(int template_len, void *template, char *digest)
+static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
+ struct ima_template_desc *td,
+ int num_fields,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
{
- struct hash_desc desc;
- struct scatterlist sg[1];
- int rc;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm)];
+ } desc;
+ int rc, i;
+
+ desc.shash.tfm = tfm;
+ desc.shash.flags = 0;
- rc = init_desc(&desc);
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(&desc.shash);
if (rc != 0)
return rc;
- sg_init_one(sg, template, template_len);
- rc = crypto_hash_update(&desc, sg, template_len);
+ for (i = 0; i < num_fields; i++) {
+ u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
+ u8 *data_to_hash = field_data[i].data;
+ u32 datalen = field_data[i].len;
+
+ if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+ rc = crypto_shash_update(&desc.shash,
+ (const u8 *) &field_data[i].len,
+ sizeof(field_data[i].len));
+ if (rc)
+ break;
+ } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
+ memcpy(buffer, data_to_hash, datalen);
+ data_to_hash = buffer;
+ datalen = IMA_EVENT_NAME_LEN_MAX + 1;
+ }
+ rc = crypto_shash_update(&desc.shash, data_to_hash, datalen);
+ if (rc)
+ break;
+ }
+
if (!rc)
- rc = crypto_hash_final(&desc, digest);
- crypto_free_hash(desc.tfm);
+ rc = crypto_shash_final(&desc.shash, hash->digest);
+
+ return rc;
+}
+
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
+ hash, tfm);
+
+ ima_free_tfm(tfm);
+
return rc;
}
@@ -117,14 +254,20 @@ static void __init ima_pcrread(int idx, u8 *pcr)
/*
* Calculate the boot aggregate hash
*/
-int __init ima_calc_boot_aggregate(char *digest)
+static int __init ima_calc_boot_aggregate_tfm(char *digest,
+ struct crypto_shash *tfm)
{
- struct hash_desc desc;
- struct scatterlist sg;
- u8 pcr_i[IMA_DIGEST_SIZE];
+ u8 pcr_i[TPM_DIGEST_SIZE];
int rc, i;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm)];
+ } desc;
+
+ desc.shash.tfm = tfm;
+ desc.shash.flags = 0;
- rc = init_desc(&desc);
+ rc = crypto_shash_init(&desc.shash);
if (rc != 0)
return rc;
@@ -132,11 +275,26 @@ int __init ima_calc_boot_aggregate(char *digest)
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
ima_pcrread(i, pcr_i);
/* now accumulate with current aggregate */
- sg_init_one(&sg, pcr_i, IMA_DIGEST_SIZE);
- rc = crypto_hash_update(&desc, &sg, IMA_DIGEST_SIZE);
+ rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE);
}
if (!rc)
- crypto_hash_final(&desc, digest);
- crypto_free_hash(desc.tfm);
+ crypto_shash_final(&desc.shash, digest);
+ return rc;
+}
+
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ hash->length = crypto_shash_digestsize(tfm);
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+
+ ima_free_tfm(tfm);
+
return rc;
}
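
Aside: the boot aggregate computed above is simply a SHA1 over the first eight PCR values. A user-space equivalent, assuming 20-byte SHA1 PCR banks and OpenSSL's SHA1_* API (sketch only):

#include <openssl/sha.h>

#define NUM_BOOT_PCRS    8          /* TPM_PCR0 .. TPM_PCR7 */
#define PCR_DIGEST_SIZE  20         /* matches TPM_DIGEST_SIZE above */

static void boot_aggregate(const unsigned char pcr[NUM_BOOT_PCRS][PCR_DIGEST_SIZE],
                           unsigned char out[SHA_DIGEST_LENGTH])
{
        SHA_CTX ctx;
        int i;

        SHA1_Init(&ctx);
        for (i = 0; i < NUM_BOOT_PCRS; i++)
                SHA1_Update(&ctx, pcr[i], PCR_DIGEST_SIZE);
        SHA1_Final(out, &ctx);
}
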
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index 0c72c9c3895..da92fcc08d1 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -16,6 +16,7 @@
* current measurement list and IMA statistics
*/
#include <linux/fcntl.h>
+#include <linux/slab.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/rculist.h>
@@ -44,7 +45,8 @@ static ssize_t ima_show_htable_violations(struct file *filp,
}
static const struct file_operations ima_htable_violations_ops = {
- .read = ima_show_htable_violations
+ .read = ima_show_htable_violations,
+ .llseek = generic_file_llseek,
};
static ssize_t ima_show_measurements_count(struct file *filp,
@@ -56,7 +58,8 @@ static ssize_t ima_show_measurements_count(struct file *filp,
}
static const struct file_operations ima_measurements_count_ops = {
- .read = ima_show_measurements_count
+ .read = ima_show_measurements_count,
+ .llseek = generic_file_llseek,
};
/* returns pointer to hlist_node */
@@ -85,8 +88,7 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
* against concurrent list-extension
*/
rcu_read_lock();
- qe = list_entry_rcu(qe->later.next,
- struct ima_queue_entry, later);
+ qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
rcu_read_unlock();
(*pos)++;
@@ -97,7 +99,7 @@ static void ima_measurements_stop(struct seq_file *m, void *v)
{
}
-static void ima_putc(struct seq_file *m, void *data, int datalen)
+void ima_putc(struct seq_file *m, void *data, int datalen)
{
while (datalen--)
seq_putc(m, *(char *)data++);
@@ -108,6 +110,7 @@ static void ima_putc(struct seq_file *m, void *data, int datalen)
* char[20]=template digest
* 32bit-le=template name size
* char[n]=template name
+ * [eventdata length]
* eventdata[n]=template specific data
*/
static int ima_measurements_show(struct seq_file *m, void *v)
@@ -117,6 +120,8 @@ static int ima_measurements_show(struct seq_file *m, void *v)
struct ima_template_entry *e;
int namelen;
u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ bool is_ima_template = false;
+ int i;
/* get entry */
e = qe->entry;
@@ -128,21 +133,37 @@ static int ima_measurements_show(struct seq_file *m, void *v)
* PCR used is always the same (config option) in
* little-endian format
*/
- ima_putc(m, &pcr, sizeof pcr);
+ ima_putc(m, &pcr, sizeof(pcr));
/* 2nd: template digest */
- ima_putc(m, e->digest, IMA_DIGEST_SIZE);
+ ima_putc(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
- namelen = strlen(e->template_name);
- ima_putc(m, &namelen, sizeof namelen);
+ namelen = strlen(e->template_desc->name);
+ ima_putc(m, &namelen, sizeof(namelen));
/* 4th: template name */
- ima_putc(m, (void *)e->template_name, namelen);
-
- /* 5th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_BINARY);
+ ima_putc(m, e->template_desc->name, namelen);
+
+ /* 5th: template length (except for 'ima' template) */
+ if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0)
+ is_ima_template = true;
+
+ if (!is_ima_template)
+ ima_putc(m, &e->template_data_len,
+ sizeof(e->template_data_len));
+
+ /* 6th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ enum ima_show_type show = IMA_SHOW_BINARY;
+ struct ima_template_field *field = e->template_desc->fields[i];
+
+ if (is_ima_template && strcmp(field->field_id, "d") == 0)
+ show = IMA_SHOW_BINARY_NO_FIELD_LEN;
+ if (is_ima_template && strcmp(field->field_id, "n") == 0)
+ show = IMA_SHOW_BINARY_OLD_STRING_FMT;
+ field->field_show(m, show, &e->template_data[i]);
+ }
return 0;
}
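
Aside: a minimal user-space reader for one record of the binary format documented in the comment above. The layout follows that comment; the special case for the legacy "ima" template, which carries no template-data length, mirrors the is_ima_template handling. Sketch only, with pared-down error handling.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int read_one_record(FILE *f)
{
        uint32_t pcr, namelen, datalen;
        unsigned char digest[20];               /* template digest      */
        char name[64];

        if (fread(&pcr, sizeof(pcr), 1, f) != 1)
                return -1;                      /* end of list          */
        if (fread(digest, sizeof(digest), 1, f) != 1 ||
            fread(&namelen, sizeof(namelen), 1, f) != 1 ||
            namelen >= sizeof(name) ||
            fread(name, 1, namelen, f) != namelen)
                return -1;
        name[namelen] = '\0';

        if (strcmp(name, "ima") == 0) {
                /* legacy template: per the NO_FIELD_LEN / OLD_STRING_FMT
                 * handling above, a raw file digest followed by a u32
                 * pathname length and the pathname; parsing that payload
                 * is left out of this sketch */
                printf("PCR %u, legacy ima record\n", pcr);
                return 1;
        }
        if (fread(&datalen, sizeof(datalen), 1, f) != 1)
                return -1;
        if (fseek(f, datalen, SEEK_CUR) != 0)
                return -1;                      /* skip template data   */
        printf("PCR %u, template %s\n", pcr, name);
        return 0;
}
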
@@ -165,41 +186,21 @@ static const struct file_operations ima_measurements_ops = {
.release = seq_release,
};
-static void ima_print_digest(struct seq_file *m, u8 *digest)
+void ima_print_digest(struct seq_file *m, u8 *digest, int size)
{
int i;
- for (i = 0; i < IMA_DIGEST_SIZE; i++)
+ for (i = 0; i < size; i++)
seq_printf(m, "%02x", *(digest + i));
}
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show)
-{
- struct ima_template_data *entry = e;
- int namelen;
-
- switch (show) {
- case IMA_SHOW_ASCII:
- ima_print_digest(m, entry->digest);
- seq_printf(m, " %s\n", entry->file_name);
- break;
- case IMA_SHOW_BINARY:
- ima_putc(m, entry->digest, IMA_DIGEST_SIZE);
-
- namelen = strlen(entry->file_name);
- ima_putc(m, &namelen, sizeof namelen);
- ima_putc(m, entry->file_name, namelen);
- default:
- break;
- }
-}
-
/* print in ascii */
static int ima_ascii_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
+ int i;
/* get entry */
e = qe->entry;
@@ -210,14 +211,21 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
/* 2nd: SHA1 template hash */
- ima_print_digest(m, e->digest);
+ ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
	/* 3rd: template name */
- seq_printf(m, " %s ", e->template_name);
+ seq_printf(m, " %s", e->template_desc->name);
/* 4th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_ASCII);
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ seq_puts(m, " ");
+ if (e->template_data[i].len == 0)
+ continue;
+
+ e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
+ &e->template_data[i]);
+ }
+ seq_puts(m, "\n");
return 0;
}
@@ -243,32 +251,34 @@ static const struct file_operations ima_ascii_measurements_ops = {
static ssize_t ima_write_policy(struct file *file, const char __user *buf,
size_t datalen, loff_t *ppos)
{
- char *data;
- int rc;
+ char *data = NULL;
+ ssize_t result;
if (datalen >= PAGE_SIZE)
- return -ENOMEM;
- if (*ppos != 0) {
- /* No partial writes. */
- return -EINVAL;
- }
+ datalen = PAGE_SIZE - 1;
+
+ /* No partial writes. */
+ result = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ result = -ENOMEM;
data = kmalloc(datalen + 1, GFP_KERNEL);
if (!data)
- return -ENOMEM;
+ goto out;
- if (copy_from_user(data, buf, datalen)) {
- kfree(data);
- return -EFAULT;
- }
*(data + datalen) = '\0';
- rc = ima_parse_add_rule(data);
- if (rc < 0) {
- datalen = -EINVAL;
- valid_policy = 0;
- }
+ result = -EFAULT;
+ if (copy_from_user(data, buf, datalen))
+ goto out;
+
+ result = ima_parse_add_rule(data);
+out:
+ if (result < 0)
+ valid_policy = 0;
kfree(data);
- return datalen;
+ return result;
}
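
Aside: a sketch of feeding one rule to the interface above. It assumes securityfs is mounted at /sys/kernel/security; the rule syntax is the one accepted by ima_parse_rule() in ima_policy.c further down. Illustrative only, not part of the patch.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int load_ima_rule(const char *rule)
{
        int fd = open("/sys/kernel/security/ima/policy", O_WRONLY);
        ssize_t n;

        if (fd < 0) {
                perror("open ima/policy");
                return -1;
        }
        n = write(fd, rule, strlen(rule));      /* no partial writes */
        close(fd);
        return n < 0 ? -1 : 0;
}

/* e.g. load_ima_rule("appraise func=BPRM_CHECK appraise_type=imasig"); */
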
static struct dentry *ima_dir;
@@ -282,7 +292,7 @@ static atomic_t policy_opencount = ATOMIC_INIT(1);
/*
* ima_open_policy: sequentialize access to the policy file
*/
-int ima_open_policy(struct inode * inode, struct file * filp)
+static int ima_open_policy(struct inode *inode, struct file *filp)
{
/* No point in being allowed to open it if you aren't going to write */
if (!(filp->f_flags & O_WRONLY))
@@ -316,7 +326,8 @@ static int ima_release_policy(struct inode *inode, struct file *file)
static const struct file_operations ima_measure_policy_ops = {
.open = ima_open_policy,
.write = ima_write_policy,
- .release = ima_release_policy
+ .release = ima_release_policy,
+ .llseek = generic_file_llseek,
};
int __init ima_fs_init(void)
@@ -361,20 +372,11 @@ int __init ima_fs_init(void)
return 0;
out:
- securityfs_remove(runtime_measurements_count);
- securityfs_remove(ascii_runtime_measurements);
- securityfs_remove(binary_runtime_measurements);
- securityfs_remove(ima_dir);
- securityfs_remove(ima_policy);
- return -1;
-}
-
-void __exit ima_fs_cleanup(void)
-{
securityfs_remove(violations);
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
securityfs_remove(ima_dir);
securityfs_remove(ima_policy);
+ return -1;
}
diff --git a/security/integrity/ima/ima_iint.c b/security/integrity/ima/ima_iint.c
deleted file mode 100644
index fa592ff1ac1..00000000000
--- a/security/integrity/ima/ima_iint.c
+++ /dev/null
@@ -1,149 +0,0 @@
-/*
- * Copyright (C) 2008 IBM Corporation
- *
- * Authors:
- * Mimi Zohar <zohar@us.ibm.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation, version 2 of the
- * License.
- *
- * File: ima_iint.c
- * - implements the IMA hooks: ima_inode_alloc, ima_inode_free
- * - cache integrity information associated with an inode
- * using a radix tree.
- */
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/radix-tree.h>
-#include "ima.h"
-
-RADIX_TREE(ima_iint_store, GFP_ATOMIC);
-DEFINE_SPINLOCK(ima_iint_lock);
-
-static struct kmem_cache *iint_cache __read_mostly;
-
-/* ima_iint_find_get - return the iint associated with an inode
- *
- * ima_iint_find_get gets a reference to the iint. Caller must
- * remember to put the iint reference.
- */
-struct ima_iint_cache *ima_iint_find_get(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- rcu_read_lock();
- iint = radix_tree_lookup(&ima_iint_store, (unsigned long)inode);
- if (!iint)
- goto out;
- kref_get(&iint->refcount);
-out:
- rcu_read_unlock();
- return iint;
-}
-
-/**
- * ima_inode_alloc - allocate an iint associated with an inode
- * @inode: pointer to the inode
- */
-int ima_inode_alloc(struct inode *inode)
-{
- struct ima_iint_cache *iint = NULL;
- int rc = 0;
-
- if (!ima_initialized)
- return 0;
-
- iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
- if (!iint)
- return -ENOMEM;
-
- rc = radix_tree_preload(GFP_NOFS);
- if (rc < 0)
- goto out;
-
- spin_lock(&ima_iint_lock);
- rc = radix_tree_insert(&ima_iint_store, (unsigned long)inode, iint);
- spin_unlock(&ima_iint_lock);
-out:
- if (rc < 0)
- kmem_cache_free(iint_cache, iint);
-
- radix_tree_preload_end();
-
- return rc;
-}
-
-/* iint_free - called when the iint refcount goes to zero */
-void iint_free(struct kref *kref)
-{
- struct ima_iint_cache *iint = container_of(kref, struct ima_iint_cache,
- refcount);
- iint->version = 0;
- iint->flags = 0UL;
- if (iint->readcount != 0) {
- printk(KERN_INFO "%s: readcount: %ld\n", __FUNCTION__,
- iint->readcount);
- iint->readcount = 0;
- }
- if (iint->writecount != 0) {
- printk(KERN_INFO "%s: writecount: %ld\n", __FUNCTION__,
- iint->writecount);
- iint->writecount = 0;
- }
- if (iint->opencount != 0) {
- printk(KERN_INFO "%s: opencount: %ld\n", __FUNCTION__,
- iint->opencount);
- iint->opencount = 0;
- }
- kref_set(&iint->refcount, 1);
- kmem_cache_free(iint_cache, iint);
-}
-
-void iint_rcu_free(struct rcu_head *rcu_head)
-{
- struct ima_iint_cache *iint = container_of(rcu_head,
- struct ima_iint_cache, rcu);
- kref_put(&iint->refcount, iint_free);
-}
-
-/**
- * ima_inode_free - called on security_inode_free
- * @inode: pointer to the inode
- *
- * Free the integrity information(iint) associated with an inode.
- */
-void ima_inode_free(struct inode *inode)
-{
- struct ima_iint_cache *iint;
-
- if (!ima_initialized)
- return;
- spin_lock(&ima_iint_lock);
- iint = radix_tree_delete(&ima_iint_store, (unsigned long)inode);
- spin_unlock(&ima_iint_lock);
- if (iint)
- call_rcu(&iint->rcu, iint_rcu_free);
-}
-
-static void init_once(void *foo)
-{
- struct ima_iint_cache *iint = foo;
-
- memset(iint, 0, sizeof *iint);
- iint->version = 0;
- iint->flags = 0UL;
- mutex_init(&iint->mutex);
- iint->readcount = 0;
- iint->writecount = 0;
- iint->opencount = 0;
- kref_set(&iint->refcount, 1);
-}
-
-void __init ima_iintcache_init(void)
-{
- iint_cache =
- kmem_cache_create("iint_cache", sizeof(struct ima_iint_cache), 0,
- SLAB_PANIC, init_once);
-}
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index a40da7ae590..e8f9d70a465 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -14,9 +14,14 @@
* File: ima_init.c
* initialization and cleanup functions
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/scatterlist.h>
+#include <linux/slab.h>
#include <linux/err.h>
+#include <crypto/hash_info.h>
#include "ima.h"
/* name for boot aggregate entry */
@@ -40,31 +45,40 @@ int ima_used_chip;
*/
static void __init ima_add_boot_aggregate(void)
{
- struct ima_template_entry *entry;
- const char *op = "add_boot_aggregate";
+ static const char op[] = "add_boot_aggregate";
const char *audit_cause = "ENOMEM";
+ struct ima_template_entry *entry;
+ struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
int result = -ENOMEM;
- int violation = 1;
+ int violation = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto err_out;
+ memset(iint, 0, sizeof(*iint));
+ memset(&hash, 0, sizeof(hash));
+ iint->ima_hash = &hash.hdr;
+ iint->ima_hash->algo = HASH_ALGO_SHA1;
+ iint->ima_hash->length = SHA1_DIGEST_SIZE;
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, boot_aggregate_name,
- IMA_EVENT_NAME_LEN_MAX);
if (ima_used_chip) {
- violation = 0;
- result = ima_calc_boot_aggregate(entry->template.digest);
+ result = ima_calc_boot_aggregate(&hash.hdr);
if (result < 0) {
audit_cause = "hashing_error";
- kfree(entry);
goto err_out;
}
}
- result = ima_store_template(entry, violation, NULL);
+
+ result = ima_alloc_init_template(iint, NULL, boot_aggregate_name,
+ NULL, 0, &entry);
if (result < 0)
- kfree(entry);
+ return;
+
+ result = ima_store_template(entry, violation, NULL,
+ boot_aggregate_name);
+ if (result < 0)
+ ima_free_template_entry(entry);
return;
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
@@ -73,7 +87,7 @@ err_out:
int __init ima_init(void)
{
- u8 pcr_i[IMA_DIGEST_SIZE];
+ u8 pcr_i[TPM_DIGEST_SIZE];
int rc;
ima_used_chip = 0;
@@ -84,13 +98,15 @@ int __init ima_init(void)
if (!ima_used_chip)
pr_info("No TPM chip found, activating TPM-bypass!\n");
+ rc = ima_init_crypto();
+ if (rc)
+ return rc;
+ rc = ima_init_template();
+ if (rc != 0)
+ return rc;
+
ima_add_boot_aggregate(); /* boot aggregate must be first entry */
ima_init_policy();
return ima_fs_init();
}
-
-void __exit ima_cleanup(void)
-{
- ima_fs_cleanup();
-}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index a89f44d5e03..09baa335ebc 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -14,315 +14,233 @@
*
* File: ima_main.c
* implements the IMA hooks: ima_bprm_check, ima_file_mmap,
- * and ima_path_check.
+ * and ima_file_check.
*/
#include <linux/module.h>
#include <linux/file.h>
#include <linux/binfmts.h>
#include <linux/mount.h>
#include <linux/mman.h>
+#include <linux/slab.h>
+#include <linux/xattr.h>
+#include <linux/ima.h>
+#include <crypto/hash_info.h>
#include "ima.h"
int ima_initialized;
-char *ima_hash = "sha1";
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise = IMA_APPRAISE_ENFORCE;
+#else
+int ima_appraise;
+#endif
+
+int ima_hash_algo = HASH_ALGO_SHA1;
+static int hash_setup_done;
+
static int __init hash_setup(char *str)
{
- if (strncmp(str, "md5", 3) == 0)
- ima_hash = "md5";
- return 1;
-}
-__setup("ima_hash=", hash_setup);
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i;
-struct ima_imbalance {
- struct hlist_node node;
- unsigned long fsmagic;
-};
+ if (hash_setup_done)
+ return 1;
-/*
- * ima_limit_imbalance - emit one imbalance message per filesystem type
- *
- * Maintain list of filesystem types that do not measure files properly.
- * Return false if unknown, true if known.
- */
-static bool ima_limit_imbalance(struct file *file)
-{
- static DEFINE_SPINLOCK(ima_imbalance_lock);
- static HLIST_HEAD(ima_imbalance_list);
-
- struct super_block *sb = file->f_dentry->d_sb;
- struct ima_imbalance *entry;
- struct hlist_node *node;
- bool found = false;
-
- rcu_read_lock();
- hlist_for_each_entry_rcu(entry, node, &ima_imbalance_list, node) {
- if (entry->fsmagic == sb->s_magic) {
- found = true;
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (strncmp(str, "sha1", 4) == 0)
+ ima_hash_algo = HASH_ALGO_SHA1;
+ else if (strncmp(str, "md5", 3) == 0)
+ ima_hash_algo = HASH_ALGO_MD5;
+ goto out;
+ }
+
+ for (i = 0; i < HASH_ALGO__LAST; i++) {
+ if (strcmp(str, hash_algo_name[i]) == 0) {
+ ima_hash_algo = i;
break;
}
}
- rcu_read_unlock();
- if (found)
- goto out;
-
- entry = kmalloc(sizeof(*entry), GFP_NOFS);
- if (!entry)
- goto out;
- entry->fsmagic = sb->s_magic;
- spin_lock(&ima_imbalance_lock);
- /*
- * we could have raced and something else might have added this fs
- * to the list, but we don't really care
- */
- hlist_add_head_rcu(&entry->node, &ima_imbalance_list);
- spin_unlock(&ima_imbalance_lock);
- printk(KERN_INFO "IMA: unmeasured files on fsmagic: %lX\n",
- entry->fsmagic);
out:
- return found;
+ hash_setup_done = 1;
+ return 1;
}
+__setup("ima_hash=", hash_setup);
/*
- * Update the counts given an fmode_t
+ * ima_rdwr_violation_check
+ *
+ * Only invalidate the PCR for measured files:
+ * - Opening a file for write when already open for read,
+ * results in a time of measure, time of use (ToMToU) error.
+ * - Opening a file for read when already open for write,
+ * could result in a file measurement error.
+ *
*/
-static void ima_inc_counts(struct ima_iint_cache *iint, fmode_t mode)
+static void ima_rdwr_violation_check(struct file *file)
{
- BUG_ON(!mutex_is_locked(&iint->mutex));
+ struct inode *inode = file_inode(file);
+ fmode_t mode = file->f_mode;
+ bool send_tomtou = false, send_writers = false;
+ char *pathbuf = NULL;
+ const char *pathname;
- iint->opencount++;
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- iint->readcount++;
- if (mode & FMODE_WRITE)
- iint->writecount++;
-}
+ if (!S_ISREG(inode->i_mode) || !ima_initialized)
+ return;
-/*
- * Decrement ima counts
- */
-static void ima_dec_counts(struct ima_iint_cache *iint, struct inode *inode,
- struct file *file)
-{
- mode_t mode = file->f_mode;
- BUG_ON(!mutex_is_locked(&iint->mutex));
+ mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */
- iint->opencount--;
- if ((mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ)
- iint->readcount--;
if (mode & FMODE_WRITE) {
- iint->writecount--;
- if (iint->writecount == 0) {
- if (iint->version != inode->i_version)
- iint->flags &= ~IMA_MEASURED;
+ if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
+ struct integrity_iint_cache *iint;
+ iint = integrity_iint_find(inode);
+ /* IMA_MEASURE is set from reader side */
+ if (iint && (iint->flags & IMA_MEASURE))
+ send_tomtou = true;
}
+ } else {
+ if ((atomic_read(&inode->i_writecount) > 0) &&
+ ima_must_measure(inode, MAY_READ, FILE_CHECK))
+ send_writers = true;
}
- if (((iint->opencount < 0) ||
- (iint->readcount < 0) ||
- (iint->writecount < 0)) &&
- !ima_limit_imbalance(file)) {
- printk(KERN_INFO "%s: open/free imbalance (r:%ld w:%ld o:%ld)\n",
- __FUNCTION__, iint->readcount, iint->writecount,
- iint->opencount);
- dump_stack();
+ mutex_unlock(&inode->i_mutex);
+
+ if (!send_tomtou && !send_writers)
+ return;
+
+ pathname = ima_d_path(&file->f_path, &pathbuf);
+
+ if (send_tomtou)
+ ima_add_violation(file, pathname, "invalid_pcr", "ToMToU");
+ if (send_writers)
+ ima_add_violation(file, pathname,
+ "invalid_pcr", "open_writers");
+ kfree(pathbuf);
+}
+
+static void ima_check_last_writer(struct integrity_iint_cache *iint,
+ struct inode *inode, struct file *file)
+{
+ fmode_t mode = file->f_mode;
+
+ if (!(mode & FMODE_WRITE))
+ return;
+
+ mutex_lock(&inode->i_mutex);
+ if (atomic_read(&inode->i_writecount) == 1 &&
+ iint->version != inode->i_version) {
+ iint->flags &= ~IMA_DONE_MASK;
+ if (iint->flags & IMA_APPRAISE)
+ ima_update_xattr(iint, file);
}
+ mutex_unlock(&inode->i_mutex);
}
/**
* ima_file_free - called on __fput()
* @file: pointer to file structure being freed
*
- * Flag files that changed, based on i_version;
- * and decrement the iint readcount/writecount.
+ * Flag files that changed, based on i_version
*/
void ima_file_free(struct file *file)
{
- struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
+ struct inode *inode = file_inode(file);
+ struct integrity_iint_cache *iint;
- if (!ima_initialized || !S_ISREG(inode->i_mode))
+ if (!iint_initialized || !S_ISREG(inode->i_mode))
return;
- iint = ima_iint_find_get(inode);
+
+ iint = integrity_iint_find(inode);
if (!iint)
return;
- mutex_lock(&iint->mutex);
- ima_dec_counts(iint, inode, file);
- mutex_unlock(&iint->mutex);
- kref_put(&iint->refcount, iint_free);
+ ima_check_last_writer(iint, inode, file);
}
-/* ima_read_write_check - reflect possible reading/writing errors in the PCR.
- *
- * When opening a file for read, if the file is already open for write,
- * the file could change, resulting in a file measurement error.
- *
- * Opening a file for write, if the file is already open for read, results
- * in a time of measure, time of use (ToMToU) error.
- *
- * In either case invalidate the PCR.
- */
-enum iint_pcr_error { TOMTOU, OPEN_WRITERS };
-static void ima_read_write_check(enum iint_pcr_error error,
- struct ima_iint_cache *iint,
- struct inode *inode,
- const unsigned char *filename)
-{
- switch (error) {
- case TOMTOU:
- if (iint->readcount > 0)
- ima_add_violation(inode, filename, "invalid_pcr",
- "ToMToU");
- break;
- case OPEN_WRITERS:
- if (iint->writecount > 0)
- ima_add_violation(inode, filename, "invalid_pcr",
- "open_writers");
- break;
- }
-}
-
-static int get_path_measurement(struct ima_iint_cache *iint, struct file *file,
- const unsigned char *filename)
-{
- int rc = 0;
-
- ima_inc_counts(iint, file->f_mode);
-
- rc = ima_collect_measurement(iint, file);
- if (!rc)
- ima_store_measurement(iint, file, filename);
- return rc;
-}
-
-/**
- * ima_path_check - based on policy, collect/store measurement.
- * @path: contains a pointer to the path to be measured
- * @mask: contains MAY_READ, MAY_WRITE or MAY_EXECUTE
- *
- * Measure the file being open for readonly, based on the
- * ima_must_measure() policy decision.
- *
- * Keep read/write counters for all files, but only
- * invalidate the PCR for measured files:
- * - Opening a file for write when already open for read,
- * results in a time of measure, time of use (ToMToU) error.
- * - Opening a file for read when already open for write,
- * could result in a file measurement error.
- *
- * Always return 0 and audit dentry_open failures.
- * (Return code will be based upon measurement appraisal.)
- */
-int ima_path_check(struct path *path, int mask)
+static int process_measurement(struct file *file, const char *filename,
+ int mask, int function)
{
- struct inode *inode = path->dentry->d_inode;
- struct ima_iint_cache *iint;
- struct file *file = NULL;
- int rc;
+ struct inode *inode = file_inode(file);
+ struct integrity_iint_cache *iint;
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ char *pathbuf = NULL;
+ const char *pathname = NULL;
+ int rc = -ENOMEM, action, must_appraise, _func;
+ struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL;
+ int xattr_len = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
- iint = ima_iint_find_get(inode);
- if (!iint)
+
+ /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action
+ * bitmask based on the appraise/audit/measurement policy.
+ * Included is the appraise submask.
+ */
+ action = ima_get_action(inode, mask, function);
+ if (!action)
return 0;
- mutex_lock(&iint->mutex);
+ must_appraise = action & IMA_APPRAISE;
- rc = ima_must_measure(iint, inode, MAY_READ, PATH_CHECK);
- if (rc < 0)
- goto out;
+ /* Is the appraise rule hook specific? */
+ _func = (action & IMA_FILE_APPRAISE) ? FILE_CHECK : function;
- if ((mask & MAY_WRITE) || (mask == 0))
- ima_read_write_check(TOMTOU, iint, inode,
- path->dentry->d_name.name);
+ mutex_lock(&inode->i_mutex);
- if ((mask & (MAY_WRITE | MAY_READ | MAY_EXEC)) != MAY_READ)
+ iint = integrity_inode_get(inode);
+ if (!iint)
goto out;
- ima_read_write_check(OPEN_WRITERS, iint, inode,
- path->dentry->d_name.name);
- if (!(iint->flags & IMA_MEASURED)) {
- struct dentry *dentry = dget(path->dentry);
- struct vfsmount *mnt = mntget(path->mnt);
-
- file = dentry_open(dentry, mnt, O_RDONLY | O_LARGEFILE,
- current_cred());
- if (IS_ERR(file)) {
- int audit_info = 0;
-
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- dentry->d_name.name,
- "add_measurement",
- "dentry_open failed",
- 1, audit_info);
- file = NULL;
- goto out;
- }
- rc = get_path_measurement(iint, file, dentry->d_name.name);
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ * IMA_AUDIT, IMA_AUDITED)
+ */
+ iint->flags |= action;
+ action &= IMA_DO_MASK;
+ action &= ~((iint->flags & IMA_DONE_MASK) >> 1);
+
+ /* Nothing to do, just return existing appraised status */
+ if (!action) {
+ if (must_appraise)
+ rc = ima_get_cache_status(iint, _func);
+ goto out_digsig;
}
-out:
- mutex_unlock(&iint->mutex);
- if (file)
- fput(file);
- kref_put(&iint->refcount, iint_free);
- return 0;
-}
-EXPORT_SYMBOL_GPL(ima_path_check);
-static int process_measurement(struct file *file, const unsigned char *filename,
- int mask, int function)
-{
- struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
- int rc;
-
- if (!ima_initialized || !S_ISREG(inode->i_mode))
- return 0;
- iint = ima_iint_find_get(inode);
- if (!iint)
- return -ENOMEM;
-
- mutex_lock(&iint->mutex);
- rc = ima_must_measure(iint, inode, mask, function);
- if (rc != 0)
- goto out;
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (action & IMA_APPRAISE_SUBMASK)
+ xattr_ptr = &xattr_value;
+ } else
+ xattr_ptr = &xattr_value;
+
+ rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
+ if (rc != 0) {
+ if (file->f_flags & O_DIRECT)
+ rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
+ goto out_digsig;
+ }
- rc = ima_collect_measurement(iint, file);
- if (!rc)
- ima_store_measurement(iint, file, filename);
+ pathname = filename ?: ima_d_path(&file->f_path, &pathbuf);
+
+ if (action & IMA_MEASURE)
+ ima_store_measurement(iint, file, pathname,
+ xattr_value, xattr_len);
+ if (action & IMA_APPRAISE_SUBMASK)
+ rc = ima_appraise_measurement(_func, iint, file, pathname,
+ xattr_value, xattr_len);
+ if (action & IMA_AUDIT)
+ ima_audit_measurement(iint, pathname);
+ kfree(pathbuf);
+out_digsig:
+ if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG))
+ rc = -EACCES;
out:
- mutex_unlock(&iint->mutex);
- kref_put(&iint->refcount, iint_free);
- return rc;
-}
-
-/*
- * ima_counts_get - increment file counts
- *
- * - for IPC shm and shmat file.
- * - for nfsd exported files.
- *
- * Increment the counts for these files to prevent unnecessary
- * imbalance messages.
- */
-void ima_counts_get(struct file *file)
-{
- struct inode *inode = file->f_dentry->d_inode;
- struct ima_iint_cache *iint;
-
- if (!ima_initialized || !S_ISREG(inode->i_mode))
- return;
- iint = ima_iint_find_get(inode);
- if (!iint)
- return;
- mutex_lock(&iint->mutex);
- ima_inc_counts(iint, file->f_mode);
- mutex_unlock(&iint->mutex);
-
- kref_put(&iint->refcount, iint_free);
+ mutex_unlock(&inode->i_mutex);
+ kfree(xattr_value);
+ if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES;
+ return 0;
}
-EXPORT_SYMBOL_GPL(ima_counts_get);
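
Aside: the bit manipulation in process_measurement() relies on each "done" flag being defined as the corresponding "do" flag shifted left by one bit, which is why shifting the DONE mask right by one lines it up with the pending actions. A self-contained illustration with made-up values (not the kernel's actual flag definitions):

#include <assert.h>

#define DO_MEASURE   0x0001
#define MEASURED     (DO_MEASURE << 1)
#define DO_APPRAISE  0x0004
#define APPRAISED    (DO_APPRAISE << 1)
#define DONE_MASK    (MEASURED | APPRAISED)

static int still_pending(int requested, int flags)
{
        return requested & ~((flags & DONE_MASK) >> 1);
}

int main(void)
{
        /* measurement already done, appraisal still outstanding */
        int flags = DO_MEASURE | DO_APPRAISE | MEASURED;

        assert(still_pending(DO_MEASURE | DO_APPRAISE, flags) == DO_APPRAISE);
        return 0;
}
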
/**
* ima_file_mmap - based on policy, collect/store measurement.
@@ -332,18 +250,13 @@ EXPORT_SYMBOL_GPL(ima_counts_get);
* Measure files being mmapped executable based on the ima_must_measure()
* policy decision.
*
- * Return 0 on success, an error code on failure.
- * (Based on the results of appraise_measurement().)
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
int ima_file_mmap(struct file *file, unsigned long prot)
{
- int rc;
-
- if (!file)
- return 0;
- if (prot & PROT_EXEC)
- rc = process_measurement(file, file->f_dentry->d_name.name,
- MAY_EXEC, FILE_MMAP);
+ if (file && (prot & PROT_EXEC))
+ return process_measurement(file, NULL, MAY_EXEC, MMAP_CHECK);
return 0;
}
@@ -357,33 +270,69 @@ int ima_file_mmap(struct file *file, unsigned long prot)
* So we can be certain that what we verify and measure here is actually
* what is being executed.
*
- * Return 0 on success, an error code on failure.
- * (Based on the results of appraise_measurement().)
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
int ima_bprm_check(struct linux_binprm *bprm)
{
- int rc;
+ return process_measurement(bprm->file,
+ (strcmp(bprm->filename, bprm->interp) == 0) ?
+ bprm->filename : bprm->interp,
+ MAY_EXEC, BPRM_CHECK);
+}
- rc = process_measurement(bprm->file, bprm->filename,
- MAY_EXEC, BPRM_CHECK);
- return 0;
+/**
+ * ima_file_check - based on policy, collect/store measurement.
+ * @file: pointer to the file to be measured
+ * @mask: contains MAY_READ, MAY_WRITE or MAY_EXEC
+ *
+ * Measure files based on the ima_must_measure() policy decision.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_file_check(struct file *file, int mask)
+{
+ ima_rdwr_violation_check(file);
+ return process_measurement(file, NULL,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+ FILE_CHECK);
+}
+EXPORT_SYMBOL_GPL(ima_file_check);
+
+/**
+ * ima_module_check - based on policy, collect/store/appraise measurement.
+ * @file: pointer to the file to be measured/appraised
+ *
+ * Measure/appraise kernel modules based on policy.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_module_check(struct file *file)
+{
+ if (!file) {
+#ifndef CONFIG_MODULE_SIG_FORCE
+ if ((ima_appraise & IMA_APPRAISE_MODULES) &&
+ (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+#endif
+ return 0; /* We rely on module signature checking */
+ }
+ return process_measurement(file, NULL, MAY_EXEC, MODULE_CHECK);
}
static int __init init_ima(void)
{
int error;
- ima_iintcache_init();
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
- ima_initialized = 1;
+ if (!error)
+ ima_initialized = 1;
return error;
}
-static void __exit cleanup_ima(void)
-{
- ima_cleanup();
-}
-
late_initcall(init_ima); /* Start IMA after the TPM is available */
MODULE_DESCRIPTION("Integrity Measurement Architecture");
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index e1278399b34..40a7488f672 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -7,7 +7,7 @@
* the Free Software Foundation, version 2 of the License.
*
* ima_policy.c
- * - initialize default measure policy rules
+ * - initialize default measure policy rules
*
*/
#include <linux/module.h>
@@ -15,39 +15,51 @@
#include <linux/security.h>
#include <linux/magic.h>
#include <linux/parser.h>
+#include <linux/slab.h>
+#include <linux/genhd.h>
#include "ima.h"
/* flags definitions */
-#define IMA_FUNC 0x0001
-#define IMA_MASK 0x0002
+#define IMA_FUNC 0x0001
+#define IMA_MASK 0x0002
#define IMA_FSMAGIC 0x0004
#define IMA_UID 0x0008
+#define IMA_FOWNER 0x0010
+#define IMA_FSUUID 0x0020
-enum ima_action { UNKNOWN = -1, DONT_MEASURE = 0, MEASURE };
+#define UNKNOWN 0
+#define MEASURE 0x0001 /* same as IMA_MEASURE */
+#define DONT_MEASURE 0x0002
+#define APPRAISE 0x0004 /* same as IMA_APPRAISE */
+#define DONT_APPRAISE 0x0008
+#define AUDIT 0x0040
#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
};
-struct ima_measure_rule_entry {
+struct ima_rule_entry {
struct list_head list;
- enum ima_action action;
+ int action;
unsigned int flags;
enum ima_hooks func;
int mask;
unsigned long fsmagic;
- uid_t uid;
+ u8 fsuuid[16];
+ kuid_t uid;
+ kuid_t fowner;
struct {
void *rule; /* LSM file metadata specific */
+ void *args_p; /* audit value */
int type; /* audit type */
} lsm[MAX_LSM_RULES];
};
/*
* Without LSM specific knowledge, the default policy can only be
- * written in terms of .action, .func, .mask, .fsmagic, and .uid
+ * written in terms of .action, .func, .mask, .fsmagic, .uid, and .fowner
*/
/*
@@ -56,34 +68,88 @@ struct ima_measure_rule_entry {
* normal users can easily run the machine out of memory simply building
* and running executables.
*/
-static struct ima_measure_rule_entry default_rules[] = {
- {.action = DONT_MEASURE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC},
- {.action = MEASURE,.func = FILE_MMAP,.mask = MAY_EXEC,
+static struct ima_rule_entry default_rules[] = {
+ {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
- {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
+ {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
- {.action = MEASURE,.func = PATH_CHECK,.mask = MAY_READ,.uid = 0,
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, .uid = GLOBAL_ROOT_UID,
.flags = IMA_FUNC | IMA_MASK | IMA_UID},
+ {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
};
-static LIST_HEAD(measure_default_rules);
-static LIST_HEAD(measure_policy_rules);
-static struct list_head *ima_measure;
+static struct ima_rule_entry default_appraise_rules[] = {
+ {.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER},
+};
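
Aside: expressed in the policy-file syntax parsed later in this patch, the compiled-in measurement rules above would look roughly like the strings below. Shown as an illustrative C initializer; only the two spelled-out fsmagic constants are given, the rest are elided.

/* Illustrative only: roughly the "ima_tcb" measurement rules above,
 * written as they would be fed to the policy interface. */
static const char * const tcb_policy_example[] = {
        "dont_measure fsmagic=0x9fa0",          /* PROC_SUPER_MAGIC */
        "dont_measure fsmagic=0x62656572",      /* SYSFS_MAGIC      */
        /* ... remaining dont_measure fsmagic rules elided ... */
        "measure func=MMAP_CHECK mask=MAY_EXEC",
        "measure func=BPRM_CHECK mask=MAY_EXEC",
        "measure func=FILE_CHECK mask=MAY_READ uid=0",
        "measure func=MODULE_CHECK",
};
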
+
+static LIST_HEAD(ima_default_rules);
+static LIST_HEAD(ima_policy_rules);
+static struct list_head *ima_rules;
-static DEFINE_MUTEX(ima_measure_mutex);
+static DEFINE_MUTEX(ima_rules_mutex);
static bool ima_use_tcb __initdata;
-static int __init default_policy_setup(char *str)
+static int __init default_measure_policy_setup(char *str)
{
ima_use_tcb = 1;
return 1;
}
-__setup("ima_tcb", default_policy_setup);
+__setup("ima_tcb", default_measure_policy_setup);
+
+static bool ima_use_appraise_tcb __initdata;
+static int __init default_appraise_policy_setup(char *str)
+{
+ ima_use_appraise_tcb = 1;
+ return 1;
+}
+__setup("ima_appraise_tcb", default_appraise_policy_setup);
+
+/*
+ * Although the IMA policy does not change, the LSM policy can be
+ * reloaded, leaving the IMA LSM based rules referring to the old,
+ * stale LSM policy.
+ *
+ * Update the IMA LSM based rules to reflect the reloaded LSM policy.
+ * We assume the rules still exist, and BUG_ON() if they don't.
+ */
+static void ima_lsm_update_rules(void)
+{
+ struct ima_rule_entry *entry, *tmp;
+ int result;
+ int i;
+
+ mutex_lock(&ima_rules_mutex);
+ list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (!entry->lsm[i].rule)
+ continue;
+ result = security_filter_rule_init(entry->lsm[i].type,
+ Audit_equal,
+ entry->lsm[i].args_p,
+ &entry->lsm[i].rule);
+ BUG_ON(!entry->lsm[i].rule);
+ }
+ }
+ mutex_unlock(&ima_rules_mutex);
+}
/**
* ima_match_rules - determine whether an inode matches the measure rule.
@@ -94,28 +160,37 @@ __setup("ima_tcb", default_policy_setup);
*
* Returns true on rule match, false on failure.
*/
-static bool ima_match_rules(struct ima_measure_rule_entry *rule,
+static bool ima_match_rules(struct ima_rule_entry *rule,
struct inode *inode, enum ima_hooks func, int mask)
{
struct task_struct *tsk = current;
+ const struct cred *cred = current_cred();
int i;
- if ((rule->flags & IMA_FUNC) && rule->func != func)
+ if ((rule->flags & IMA_FUNC) &&
+ (rule->func != func && func != POST_SETATTR))
return false;
- if ((rule->flags & IMA_MASK) && rule->mask != mask)
+ if ((rule->flags & IMA_MASK) &&
+ (rule->mask != mask && func != POST_SETATTR))
return false;
if ((rule->flags & IMA_FSMAGIC)
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
- if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid)
+ if ((rule->flags & IMA_FSUUID) &&
+ memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
+ return false;
+ if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
+ return false;
+ if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
return false;
for (i = 0; i < MAX_LSM_RULES; i++) {
int rc = 0;
u32 osid, sid;
+ int retried = 0;
if (!rule->lsm[i].rule)
continue;
-
+retry:
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
@@ -139,12 +214,39 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
default:
break;
}
+ if ((rc < 0) && (!retried)) {
+ retried = 1;
+ ima_lsm_update_rules();
+ goto retry;
+ }
if (!rc)
return false;
}
return true;
}
+/*
+ * In addition to knowing that we need to appraise the file in general,
+ * we need to differentiate between calling hooks, for hook specific rules.
+ */
+static int get_subaction(struct ima_rule_entry *rule, int func)
+{
+ if (!(rule->flags & IMA_FUNC))
+ return IMA_FILE_APPRAISE;
+
+ switch (func) {
+ case MMAP_CHECK:
+ return IMA_MMAP_APPRAISE;
+ case BPRM_CHECK:
+ return IMA_BPRM_APPRAISE;
+ case MODULE_CHECK:
+ return IMA_MODULE_APPRAISE;
+ case FILE_CHECK:
+ default:
+ return IMA_FILE_APPRAISE;
+ }
+}
+
/**
* ima_match_policy - decision based on LSM and other conditions
* @inode: pointer to an inode for which the policy decision is being made
@@ -158,39 +260,66 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
* as elements in the list are never deleted, nor does the list
* change.)
*/
-int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask)
+int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+ int flags)
{
- struct ima_measure_rule_entry *entry;
+ struct ima_rule_entry *entry;
+ int action = 0, actmask = flags | (flags << 1);
+
+ list_for_each_entry(entry, ima_rules, list) {
- list_for_each_entry(entry, ima_measure, list) {
- bool rc;
+ if (!(entry->action & actmask))
+ continue;
- rc = ima_match_rules(entry, inode, func, mask);
- if (rc)
- return entry->action;
+ if (!ima_match_rules(entry, inode, func, mask))
+ continue;
+
+ action |= entry->flags & IMA_ACTION_FLAGS;
+
+ action |= entry->action & IMA_DO_MASK;
+ if (entry->action & IMA_APPRAISE)
+ action |= get_subaction(entry, func);
+
+ if (entry->action & IMA_DO_MASK)
+ actmask &= ~(entry->action | entry->action << 1);
+ else
+ actmask &= ~(entry->action | entry->action >> 1);
+
+ if (!actmask)
+ break;
}
- return 0;
+
+ return action;
}
/**
* ima_init_policy - initialize the default measure rules.
*
- * ima_measure points to either the measure_default_rules or the
- * the new measure_policy_rules.
+ * ima_rules points to either the ima_default_rules or
+ * the new ima_policy_rules.
*/
void __init ima_init_policy(void)
{
- int i, entries;
+ int i, measure_entries, appraise_entries;
/* if !ima_use_tcb set entries = 0 so we load NO default rules */
- if (ima_use_tcb)
- entries = ARRAY_SIZE(default_rules);
- else
- entries = 0;
-
- for (i = 0; i < entries; i++)
- list_add_tail(&default_rules[i].list, &measure_default_rules);
- ima_measure = &measure_default_rules;
+ measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0;
+ appraise_entries = ima_use_appraise_tcb ?
+ ARRAY_SIZE(default_appraise_rules) : 0;
+
+ for (i = 0; i < measure_entries + appraise_entries; i++) {
+ if (i < measure_entries)
+ list_add_tail(&default_rules[i].list,
+ &ima_default_rules);
+ else {
+ int j = i - measure_entries;
+
+ list_add_tail(&default_appraise_rules[j].list,
+ &ima_default_rules);
+ }
+ }
+
+ ima_rules = &ima_default_rules;
}
/**
@@ -202,13 +331,13 @@ void __init ima_init_policy(void)
*/
void ima_update_policy(void)
{
- const char *op = "policy_update";
+ static const char op[] = "policy_update";
const char *cause = "already exists";
int result = 1;
int audit_info = 0;
- if (ima_measure == &measure_default_rules) {
- ima_measure = &measure_policy_rules;
+ if (ima_rules == &ima_default_rules) {
+ ima_rules = &ima_policy_rules;
cause = "complete";
result = 0;
}
@@ -219,14 +348,20 @@ void ima_update_policy(void)
enum {
Opt_err = -1,
Opt_measure = 1, Opt_dont_measure,
+ Opt_appraise, Opt_dont_appraise,
+ Opt_audit,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
- Opt_func, Opt_mask, Opt_fsmagic, Opt_uid
+ Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
+ Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
};
static match_table_t policy_tokens = {
{Opt_measure, "measure"},
{Opt_dont_measure, "dont_measure"},
+ {Opt_appraise, "appraise"},
+ {Opt_dont_appraise, "dont_appraise"},
+ {Opt_audit, "audit"},
{Opt_obj_user, "obj_user=%s"},
{Opt_obj_role, "obj_role=%s"},
{Opt_obj_type, "obj_type=%s"},
@@ -236,23 +371,47 @@ static match_table_t policy_tokens = {
{Opt_func, "func=%s"},
{Opt_mask, "mask=%s"},
{Opt_fsmagic, "fsmagic=%s"},
+ {Opt_fsuuid, "fsuuid=%s"},
{Opt_uid, "uid=%s"},
+ {Opt_fowner, "fowner=%s"},
+ {Opt_appraise_type, "appraise_type=%s"},
+ {Opt_permit_directio, "permit_directio"},
{Opt_err, NULL}
};
-static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
- char *args, int lsm_rule, int audit_type)
+static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ substring_t *args, int lsm_rule, int audit_type)
{
int result;
+ if (entry->lsm[lsm_rule].rule)
+ return -EINVAL;
+
+ entry->lsm[lsm_rule].args_p = match_strdup(args);
+ if (!entry->lsm[lsm_rule].args_p)
+ return -ENOMEM;
+
entry->lsm[lsm_rule].type = audit_type;
result = security_filter_rule_init(entry->lsm[lsm_rule].type,
- Audit_equal, args,
+ Audit_equal,
+ entry->lsm[lsm_rule].args_p,
&entry->lsm[lsm_rule].rule);
+ if (!entry->lsm[lsm_rule].rule) {
+ kfree(entry->lsm[lsm_rule].args_p);
+ return -EINVAL;
+ }
+
return result;
}
-static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
+static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
+{
+ audit_log_format(ab, "%s=", key);
+ audit_log_untrustedstring(ab, value);
+ audit_log_format(ab, " ");
+}
+
+static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
{
struct audit_buffer *ab;
char *p;
@@ -260,32 +419,76 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);
- entry->action = -1;
- while ((p = strsep(&rule, " \n")) != NULL) {
+ entry->uid = INVALID_UID;
+ entry->fowner = INVALID_UID;
+ entry->action = UNKNOWN;
+ while ((p = strsep(&rule, " \t")) != NULL) {
substring_t args[MAX_OPT_ARGS];
int token;
unsigned long lnum;
if (result < 0)
break;
- if (!*p)
+ if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
continue;
token = match_token(p, policy_tokens, args);
switch (token) {
case Opt_measure:
- audit_log_format(ab, "%s ", "measure");
+ ima_log_string(ab, "action", "measure");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
entry->action = MEASURE;
break;
case Opt_dont_measure:
- audit_log_format(ab, "%s ", "dont_measure");
+ ima_log_string(ab, "action", "dont_measure");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
entry->action = DONT_MEASURE;
break;
+ case Opt_appraise:
+ ima_log_string(ab, "action", "appraise");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = APPRAISE;
+ break;
+ case Opt_dont_appraise:
+ ima_log_string(ab, "action", "dont_appraise");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_APPRAISE;
+ break;
+ case Opt_audit:
+ ima_log_string(ab, "action", "audit");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = AUDIT;
+ break;
case Opt_func:
- audit_log_format(ab, "func=%s ", args[0].from);
- if (strcmp(args[0].from, "PATH_CHECK") == 0)
- entry->func = PATH_CHECK;
- else if (strcmp(args[0].from, "FILE_MMAP") == 0)
- entry->func = FILE_MMAP;
+ ima_log_string(ab, "func", args[0].from);
+
+ if (entry->func)
+ result = -EINVAL;
+
+ if (strcmp(args[0].from, "FILE_CHECK") == 0)
+ entry->func = FILE_CHECK;
+ /* PATH_CHECK is for backwards compat */
+ else if (strcmp(args[0].from, "PATH_CHECK") == 0)
+ entry->func = FILE_CHECK;
+ else if (strcmp(args[0].from, "MODULE_CHECK") == 0)
+ entry->func = MODULE_CHECK;
+ else if ((strcmp(args[0].from, "FILE_MMAP") == 0)
+ || (strcmp(args[0].from, "MMAP_CHECK") == 0))
+ entry->func = MMAP_CHECK;
else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
entry->func = BPRM_CHECK;
else
@@ -294,7 +497,11 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
entry->flags |= IMA_FUNC;
break;
case Opt_mask:
- audit_log_format(ab, "mask=%s ", args[0].from);
+ ima_log_string(ab, "mask", args[0].from);
+
+ if (entry->mask)
+ result = -EINVAL;
+
if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
entry->mask = MAY_EXEC;
else if (strcmp(args[0].from, "MAY_WRITE") == 0)
@@ -309,88 +516,148 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
entry->flags |= IMA_MASK;
break;
case Opt_fsmagic:
- audit_log_format(ab, "fsmagic=%s ", args[0].from);
- result = strict_strtoul(args[0].from, 16,
- &entry->fsmagic);
+ ima_log_string(ab, "fsmagic", args[0].from);
+
+ if (entry->fsmagic) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 16, &entry->fsmagic);
if (!result)
entry->flags |= IMA_FSMAGIC;
break;
+ case Opt_fsuuid:
+ ima_log_string(ab, "fsuuid", args[0].from);
+
+ if (memchr_inv(entry->fsuuid, 0x00,
+ sizeof(entry->fsuuid))) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = blk_part_pack_uuid(args[0].from,
+ entry->fsuuid);
+ if (!result)
+ entry->flags |= IMA_FSUUID;
+ break;
case Opt_uid:
- audit_log_format(ab, "uid=%s ", args[0].from);
- result = strict_strtoul(args[0].from, 10, &lnum);
+ ima_log_string(ab, "uid", args[0].from);
+
+ if (uid_valid(entry->uid)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
if (!result) {
- entry->uid = (uid_t) lnum;
- if (entry->uid != lnum)
+ entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
+ if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
result = -EINVAL;
else
entry->flags |= IMA_UID;
}
break;
+ case Opt_fowner:
+ ima_log_string(ab, "fowner", args[0].from);
+
+ if (uid_valid(entry->fowner)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum);
+ if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum))
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_FOWNER;
+ }
+ break;
case Opt_obj_user:
- audit_log_format(ab, "obj_user=%s ", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ ima_log_string(ab, "obj_user", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
LSM_OBJ_USER,
AUDIT_OBJ_USER);
break;
case Opt_obj_role:
- audit_log_format(ab, "obj_role=%s ", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ ima_log_string(ab, "obj_role", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
LSM_OBJ_ROLE,
AUDIT_OBJ_ROLE);
break;
case Opt_obj_type:
- audit_log_format(ab, "obj_type=%s ", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ ima_log_string(ab, "obj_type", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
LSM_OBJ_TYPE,
AUDIT_OBJ_TYPE);
break;
case Opt_subj_user:
- audit_log_format(ab, "subj_user=%s ", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ ima_log_string(ab, "subj_user", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
LSM_SUBJ_USER,
AUDIT_SUBJ_USER);
break;
case Opt_subj_role:
- audit_log_format(ab, "subj_role=%s ", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ ima_log_string(ab, "subj_role", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
LSM_SUBJ_ROLE,
AUDIT_SUBJ_ROLE);
break;
case Opt_subj_type:
- audit_log_format(ab, "subj_type=%s ", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ ima_log_string(ab, "subj_type", args[0].from);
+ result = ima_lsm_rule_init(entry, args,
LSM_SUBJ_TYPE,
AUDIT_SUBJ_TYPE);
break;
+ case Opt_appraise_type:
+ if (entry->action != APPRAISE) {
+ result = -EINVAL;
+ break;
+ }
+
+ ima_log_string(ab, "appraise_type", args[0].from);
+ if ((strcmp(args[0].from, "imasig")) == 0)
+ entry->flags |= IMA_DIGSIG_REQUIRED;
+ else
+ result = -EINVAL;
+ break;
+ case Opt_permit_directio:
+ entry->flags |= IMA_PERMIT_DIRECTIO;
+ break;
case Opt_err:
- audit_log_format(ab, "UNKNOWN=%s ", p);
+ ima_log_string(ab, "UNKNOWN", p);
+ result = -EINVAL;
break;
}
}
- if (entry->action == UNKNOWN)
+ if (!result && (entry->action == UNKNOWN))
result = -EINVAL;
-
- audit_log_format(ab, "res=%d", !result ? 0 : 1);
+ else if (entry->func == MODULE_CHECK)
+ ima_appraise |= IMA_APPRAISE_MODULES;
+ audit_log_format(ab, "res=%d", !result);
audit_log_end(ab);
return result;
}
/**
- * ima_parse_add_rule - add a rule to measure_policy_rules
+ * ima_parse_add_rule - add a rule to ima_policy_rules
* @rule - ima measurement policy rule
*
* Uses a mutex to protect the policy list from multiple concurrent writers.
- * Returns 0 on success, an error code on failure.
+ * Returns the length of the rule parsed, or an error code on failure.
*/
-int ima_parse_add_rule(char *rule)
+ssize_t ima_parse_add_rule(char *rule)
{
- const char *op = "update_policy";
- struct ima_measure_rule_entry *entry;
- int result = 0;
+ static const char op[] = "update_policy";
+ char *p;
+ struct ima_rule_entry *entry;
+ ssize_t result, len;
int audit_info = 0;
/* Prevent installed policy from changing */
- if (ima_measure != &measure_default_rules) {
+ if (ima_rules != &ima_default_rules) {
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
NULL, op, "already exists",
-EACCES, audit_info);
@@ -406,29 +673,43 @@ int ima_parse_add_rule(char *rule)
INIT_LIST_HEAD(&entry->list);
- result = ima_parse_rule(rule, entry);
- if (!result) {
- mutex_lock(&ima_measure_mutex);
- list_add_tail(&entry->list, &measure_policy_rules);
- mutex_unlock(&ima_measure_mutex);
- } else {
+ p = strsep(&rule, "\n");
+ len = strlen(p) + 1;
+
+ if (*p == '#') {
+ kfree(entry);
+ return len;
+ }
+
+ result = ima_parse_rule(p, entry);
+ if (result) {
kfree(entry);
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
NULL, op, "invalid policy", result,
audit_info);
+ return result;
}
- return result;
+
+ mutex_lock(&ima_rules_mutex);
+ list_add_tail(&entry->list, &ima_policy_rules);
+ mutex_unlock(&ima_rules_mutex);
+
+ return len;
}
/* ima_delete_rules called to cleanup invalid policy */
void ima_delete_rules(void)
{
- struct ima_measure_rule_entry *entry, *tmp;
+ struct ima_rule_entry *entry, *tmp;
+ int i;
+
+ mutex_lock(&ima_rules_mutex);
+ list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ kfree(entry->lsm[i].args_p);
- mutex_lock(&ima_measure_mutex);
- list_for_each_entry_safe(entry, tmp, &measure_policy_rules, list) {
list_del(&entry->list);
kfree(entry);
}
- mutex_unlock(&ima_measure_mutex);
+ mutex_unlock(&ima_rules_mutex);
}
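
For illustration only (not part of the patch): based on the tokens handled by ima_parse_rule() above, each rule written to the securityfs policy file is a single line of the form '<action> [condition]...'. A hedged sample, with the authoritative grammar in Documentation/ABI/testing/ima_policy:

	# comment lines are skipped by ima_parse_add_rule()
	dont_measure fsmagic=0x9fa0
	measure func=BPRM_CHECK
	measure func=MMAP_CHECK mask=MAY_EXEC
	measure func=FILE_CHECK uid=0
	appraise fowner=0 appraise_type=imasig
	audit func=MODULE_CHECK

Note that the action token has to appear before appraise_type=, since Opt_appraise_type rejects rules whose action is not already APPRAISE.
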
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index a0880e9c8e0..552705d5a78 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -18,10 +18,16 @@
* The measurement list is append-only. No entry is
* ever removed or changed during the boot-cycle.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/rculist.h>
+#include <linux/slab.h>
#include "ima.h"
+#define AUDIT_CAUSE_LEN_MAX 32
+
LIST_HEAD(ima_measurements); /* list of all measurements */
/* key: inode (before secure-hashing a file) */
@@ -42,13 +48,12 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
{
struct ima_queue_entry *qe, *ret = NULL;
unsigned int key;
- struct hlist_node *pos;
int rc;
key = ima_hash_key(digest_value);
rcu_read_lock();
- hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) {
- rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
+ hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
+ rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
if (rc == 0) {
ret = qe;
break;
@@ -70,7 +75,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
qe = kmalloc(sizeof(*qe), GFP_KERNEL);
if (qe == NULL) {
- pr_err("OUT OF MEMORY ERROR creating queue entry.\n");
+ pr_err("OUT OF MEMORY ERROR creating queue entry\n");
return -ENOMEM;
}
qe->entry = entry;
@@ -93,7 +98,7 @@ static int ima_pcr_extend(const u8 *hash)
result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
if (result != 0)
- pr_err("Error Communicating to TPM chip\n");
+ pr_err("Error Communicating to TPM chip, result: %d\n", result);
return result;
}
@@ -101,18 +106,21 @@ static int ima_pcr_extend(const u8 *hash)
* and extend the pcr.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode)
+ const char *op, struct inode *inode,
+ const unsigned char *filename)
{
- u8 digest[IMA_DIGEST_SIZE];
+ u8 digest[TPM_DIGEST_SIZE];
const char *audit_cause = "hash_added";
+ char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
int audit_info = 1;
- int result = 0;
+ int result = 0, tpmresult = 0;
mutex_lock(&ima_extend_list_mutex);
if (!violation) {
- memcpy(digest, entry->digest, sizeof digest);
+ memcpy(digest, entry->digest, sizeof(digest));
if (ima_lookup_digest_entry(digest)) {
audit_cause = "hash_exists";
+ result = -EEXIST;
goto out;
}
}
@@ -125,17 +133,18 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
if (violation) /* invalidate pcr */
- memset(digest, 0xff, sizeof digest);
+ memset(digest, 0xff, sizeof(digest));
- result = ima_pcr_extend(digest);
- if (result != 0) {
- audit_cause = "TPM error";
+ tpmresult = ima_pcr_extend(digest);
+ if (tpmresult != 0) {
+ snprintf(tpm_audit_cause, AUDIT_CAUSE_LEN_MAX, "TPM_error(%d)",
+ tpmresult);
+ audit_cause = tpm_audit_cause;
audit_info = 0;
}
out:
mutex_unlock(&ima_extend_list_mutex);
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template.file_name,
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, audit_info);
return result;
}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 00000000000..a076a967ec4
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template.c
+ * Helpers to manage template descriptors.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+#include "ima_template_lib.h"
+
+static struct ima_template_desc defined_templates[] = {
+ {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
+ {.name = "ima-ng", .fmt = "d-ng|n-ng"},
+ {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+};
+
+static struct ima_template_field supported_fields[] = {
+ {.field_id = "d", .field_init = ima_eventdigest_init,
+ .field_show = ima_show_template_digest},
+ {.field_id = "n", .field_init = ima_eventname_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "n-ng", .field_init = ima_eventname_ng_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "sig", .field_init = ima_eventsig_init,
+ .field_show = ima_show_template_sig},
+};
+
+static struct ima_template_desc *ima_template;
+static struct ima_template_desc *lookup_template_desc(const char *name);
+
+static int __init ima_template_setup(char *str)
+{
+ struct ima_template_desc *template_desc;
+ int template_len = strlen(str);
+
+ /*
+ * Verify that a template with the supplied name exists.
+ * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
+ */
+ template_desc = lookup_template_desc(str);
+ if (!template_desc)
+ return 1;
+
+ /*
+ * Verify whether the current hash algorithm is supported
+ * by the 'ima' template.
+ */
+ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
+ ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
+ pr_err("template does not support hash alg\n");
+ return 1;
+ }
+
+ ima_template = template_desc;
+ return 1;
+}
+__setup("ima_template=", ima_template_setup);
+
+static struct ima_template_desc *lookup_template_desc(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+ if (strcmp(defined_templates[i].name, name) == 0)
+ return defined_templates + i;
+ }
+
+ return NULL;
+}
+
+static struct ima_template_field *lookup_template_field(const char *field_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
+ if (strncmp(supported_fields[i].field_id, field_id,
+ IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
+ return &supported_fields[i];
+ return NULL;
+}
+
+static int template_fmt_size(const char *template_fmt)
+{
+ char c;
+ int template_fmt_len = strlen(template_fmt);
+ int i = 0, j = 0;
+
+ while (i < template_fmt_len) {
+ c = template_fmt[i];
+ if (c == '|')
+ j++;
+ i++;
+ }
+
+ return j + 1;
+}
+
+static int template_desc_init_fields(const char *template_fmt,
+ struct ima_template_field ***fields,
+ int *num_fields)
+{
+ char *c, *template_fmt_copy, *template_fmt_ptr;
+ int template_num_fields = template_fmt_size(template_fmt);
+ int i, result = 0;
+
+ if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX)
+ return -EINVAL;
+
+ /* copying is needed as strsep() modifies the original buffer */
+ template_fmt_copy = kstrdup(template_fmt, GFP_KERNEL);
+ if (template_fmt_copy == NULL)
+ return -ENOMEM;
+
+ *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL);
+ if (*fields == NULL) {
+ result = -ENOMEM;
+ goto out;
+ }
+
+ template_fmt_ptr = template_fmt_copy;
+ for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL &&
+ i < template_num_fields; i++) {
+ struct ima_template_field *f = lookup_template_field(c);
+
+ if (!f) {
+ result = -ENOENT;
+ goto out;
+ }
+ (*fields)[i] = f;
+ }
+ *num_fields = i;
+out:
+ if (result < 0) {
+ kfree(*fields);
+ *fields = NULL;
+ }
+ kfree(template_fmt_copy);
+ return result;
+}
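
As a sketch of the splitting logic only (userspace C, not kernel code; the format string is one of those listed in defined_templates[] above):

	#define _DEFAULT_SOURCE
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	int main(void)
	{
		char *fmt = strdup("d-ng|n-ng|sig");	/* kstrdup() in the kernel */
		char *p = fmt, *field;
		int n = 0;

		/* mirrors the strsep(&template_fmt_ptr, "|") loop above */
		while ((field = strsep(&p, "|")) != NULL)
			printf("field %d: %s\n", n++, field);

		free(fmt);
		return 0;	/* prints d-ng, n-ng, sig -> num_fields == 3 */
	}
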
+
+static int init_defined_templates(void)
+{
+ int i = 0;
+ int result = 0;
+
+ /* Init defined templates. */
+ for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+ struct ima_template_desc *template = &defined_templates[i];
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0)
+ return result;
+ }
+ return result;
+}
+
+struct ima_template_desc *ima_template_desc_current(void)
+{
+ if (!ima_template)
+ ima_template =
+ lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ return ima_template;
+}
+
+int ima_init_template(void)
+{
+ int result;
+
+ result = init_defined_templates();
+ if (result < 0)
+ return result;
+
+ return 0;
+}
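
For illustration: because of the __setup() hook above, the active template descriptor can be selected on the kernel command line, e.g.

	ima_template=ima-ng

If the supplied name is unknown, ima_template is left unset and ima_template_desc_current() later falls back to CONFIG_IMA_DEFAULT_TEMPLATE.
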
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 00000000000..1506f024857
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.c
+ * Library of supported template fields.
+ */
+#include <crypto/hash_info.h>
+
+#include "ima_template_lib.h"
+
+static bool ima_template_hash_algo_allowed(u8 algo)
+{
+ if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
+ return true;
+
+ return false;
+}
+
+enum data_formats {
+ DATA_FMT_DIGEST = 0,
+ DATA_FMT_DIGEST_WITH_ALGO,
+ DATA_FMT_STRING,
+ DATA_FMT_HEX
+};
+
+static int ima_write_template_field_data(const void *data, const u32 datalen,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf, *buf_ptr;
+ u32 buflen = datalen;
+
+ if (datafmt == DATA_FMT_STRING)
+ buflen = datalen + 1;
+
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data, datalen);
+
+ /*
+ * Replace all space characters with underscores for event names and
+ * strings. This prevents filenames that contain spaces, or that end
+ * with the suffix ' (deleted)', from being split into multiple template
+ * fields when a measurement list is parsed (the space is the delimiter
+ * character for measurement lists in ASCII format).
+ */
+ if (datafmt == DATA_FMT_STRING) {
+ for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
+ if (*buf_ptr == ' ')
+ *buf_ptr = '_';
+ }
+
+ field_data->data = buf;
+ field_data->len = buflen;
+ return 0;
+}
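
Worked example (illustrative only): with the rewrite above, a file name such as '/tmp/my file (deleted)' is stored in the template field as '/tmp/my_file_(deleted)', so an ASCII measurement list line still splits into the expected number of space-separated columns.
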
+
+static void ima_show_template_data_ascii(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf_ptr = field_data->data, buflen = field_data->len;
+
+ switch (datafmt) {
+ case DATA_FMT_DIGEST_WITH_ALGO:
+ buf_ptr = strnchr(field_data->data, buflen, ':');
+ if (buf_ptr != field_data->data)
+ seq_printf(m, "%s", field_data->data);
+
+ /* skip ':' and '\0' */
+ buf_ptr += 2;
+ buflen -= buf_ptr - field_data->data;
+ case DATA_FMT_DIGEST:
+ case DATA_FMT_HEX:
+ if (!buflen)
+ break;
+ ima_print_digest(m, buf_ptr, buflen);
+ break;
+ case DATA_FMT_STRING:
+ seq_printf(m, "%s", buf_ptr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ima_show_template_data_binary(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
+ strlen(field_data->data) : field_data->len;
+
+ if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
+ ima_putc(m, &len, sizeof(len));
+
+ if (!len)
+ return;
+
+ ima_putc(m, field_data->data, len);
+}
+
+static void ima_show_template_field_data(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ switch (show) {
+ case IMA_SHOW_ASCII:
+ ima_show_template_data_ascii(m, show, datafmt, field_data);
+ break;
+ case IMA_SHOW_BINARY:
+ case IMA_SHOW_BINARY_NO_FIELD_LEN:
+ case IMA_SHOW_BINARY_OLD_STRING_FMT:
+ ima_show_template_data_binary(m, show, datafmt, field_data);
+ break;
+ default:
+ break;
+ }
+}
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
+}
+
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
+ field_data);
+}
+
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
+}
+
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
+ struct ima_field_data *field_data)
+{
+ /*
+ * digest formats:
+ * - DATA_FMT_DIGEST: digest
+ * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest,
+ * where <hash algo> is provided if the hash algorithm is not
+ * SHA1 or MD5
+ */
+ u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 };
+ enum data_formats fmt = DATA_FMT_DIGEST;
+ u32 offset = 0;
+
+ if (hash_algo < HASH_ALGO__LAST) {
+ fmt = DATA_FMT_DIGEST_WITH_ALGO;
+ offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1, "%s",
+ hash_algo_name[hash_algo]);
+ buffer[offset] = ':';
+ offset += 2;
+ }
+
+ if (digest)
+ memcpy(buffer + offset, digest, digestsize);
+ else
+ /*
+ * If digest is NULL, the event being recorded is a violation.
+ * Make room for the digest by increasing the offset by
+ * IMA_DIGEST_SIZE.
+ */
+ offset += IMA_DIGEST_SIZE;
+
+ return ima_write_template_field_data(buffer, offset + digestsize,
+ fmt, field_data);
+}
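
For illustration (all values are placeholders, not taken from the patch): with the 'ima-ng' template, the d-ng field built here is rendered in the ASCII measurement list as '<algo>:<hex digest>', so an entry looks roughly like:

	10 <sha1 template digest> ima-ng sha256:<file digest> /usr/bin/bash

With the legacy 'ima' template (SHA1/MD5 only), the algorithm prefix is omitted and the plain DATA_FMT_DIGEST form is used.
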
+
+/*
+ * This function writes the digest of an event (with size limit).
+ */
+int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+ u8 *cur_digest = NULL;
+ u32 cur_digestsize = 0;
+ struct inode *inode;
+ int result;
+
+ memset(&hash, 0, sizeof(hash));
+
+ if (!iint) /* recording a violation. */
+ goto out;
+
+ if (ima_template_hash_algo_allowed(iint->ima_hash->algo)) {
+ cur_digest = iint->ima_hash->digest;
+ cur_digestsize = iint->ima_hash->length;
+ goto out;
+ }
+
+ if (!file) /* missing info to re-calculate the digest */
+ return -EINVAL;
+
+ inode = file_inode(file);
+ hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
+ ima_hash_algo : HASH_ALGO_SHA1;
+ result = ima_calc_file_hash(file, &hash.hdr);
+ if (result) {
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data",
+ "failed", result, 0);
+ return result;
+ }
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash.hdr.length;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ HASH_ALGO__LAST, field_data);
+}
+
+/*
+ * This function writes the digest of an event (without size limit).
+ */
+int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = HASH_ALGO_SHA1;
+ u32 cur_digestsize = 0;
+
+ /* If iint is NULL, we are recording a violation. */
+ if (!iint)
+ goto out;
+
+ cur_digest = iint->ima_hash->digest;
+ cur_digestsize = iint->ima_hash->length;
+
+ hash_algo = iint->ima_hash->algo;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ hash_algo, field_data);
+}
+
+static int ima_eventname_init_common(struct integrity_iint_cache *iint,
+ struct file *file,
+ const unsigned char *filename,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ const char *cur_filename = NULL;
+ u32 cur_filename_len = 0;
+
+ BUG_ON(filename == NULL && file == NULL);
+
+ if (filename) {
+ cur_filename = filename;
+ cur_filename_len = strlen(filename);
+
+ if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
+ goto out;
+ }
+
+ if (file) {
+ cur_filename = file->f_dentry->d_name.name;
+ cur_filename_len = strlen(cur_filename);
+ } else
+ /*
+ * Truncate the filename if it is too long and no
+ * file descriptor is available.
+ */
+ cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+out:
+ return ima_write_template_field_data(cur_filename, cur_filename_len,
+ DATA_FMT_STRING, field_data);
+}
+
+/*
+ * This function writes the name of an event (with size limit).
+ */
+int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(iint, file, filename,
+ field_data, true);
+}
+
+/*
+ * This function writes the name of an event (without size limit).
+ */
+int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(iint, file, filename,
+ field_data, false);
+}
+
+/*
+ * ima_eventsig_init - include the file signature as part of the template data
+ */
+int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ enum data_formats fmt = DATA_FMT_HEX;
+ int rc = 0;
+
+ if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
+ goto out;
+
+ rc = ima_write_template_field_data(xattr_value, xattr_len, fmt,
+ field_data);
+out:
+ return rc;
+}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 00000000000..63f6b52cb1c
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.h
+ * Header for the library of supported template fields.
+ */
+#ifndef __LINUX_IMA_TEMPLATE_LIB_H
+#define __LINUX_IMA_TEMPLATE_LIB_H
+
+#include <linux/seq_file.h>
+#include "ima.h"
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data);
+int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
new file mode 100644
index 00000000000..33c0a70f6b1
--- /dev/null
+++ b/security/integrity/integrity.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (C) 2009-2010 IBM Corporation
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/integrity.h>
+#include <crypto/sha.h>
+#include <linux/key.h>
+
+/* iint action cache flags */
+#define IMA_MEASURE 0x00000001
+#define IMA_MEASURED 0x00000002
+#define IMA_APPRAISE 0x00000004
+#define IMA_APPRAISED 0x00000008
+/*#define IMA_COLLECT 0x00000010 do not use this flag */
+#define IMA_COLLECTED 0x00000020
+#define IMA_AUDIT 0x00000040
+#define IMA_AUDITED 0x00000080
+
+/* iint cache flags */
+#define IMA_ACTION_FLAGS 0xff000000
+#define IMA_DIGSIG 0x01000000
+#define IMA_DIGSIG_REQUIRED 0x02000000
+#define IMA_PERMIT_DIRECTIO 0x04000000
+
+#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ IMA_APPRAISE_SUBMASK)
+#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
+ IMA_COLLECTED | IMA_APPRAISED_SUBMASK)
+
+/* iint subaction appraise cache flags */
+#define IMA_FILE_APPRAISE 0x00000100
+#define IMA_FILE_APPRAISED 0x00000200
+#define IMA_MMAP_APPRAISE 0x00000400
+#define IMA_MMAP_APPRAISED 0x00000800
+#define IMA_BPRM_APPRAISE 0x00001000
+#define IMA_BPRM_APPRAISED 0x00002000
+#define IMA_MODULE_APPRAISE 0x00004000
+#define IMA_MODULE_APPRAISED 0x00008000
+#define IMA_APPRAISE_SUBMASK (IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \
+ IMA_BPRM_APPRAISE | IMA_MODULE_APPRAISE)
+#define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
+ IMA_BPRM_APPRAISED | IMA_MODULE_APPRAISED)
+
+enum evm_ima_xattr_type {
+ IMA_XATTR_DIGEST = 0x01,
+ EVM_XATTR_HMAC,
+ EVM_IMA_XATTR_DIGSIG,
+ IMA_XATTR_DIGEST_NG,
+};
+
+struct evm_ima_xattr_data {
+ u8 type;
+ u8 digest[SHA1_DIGEST_SIZE];
+} __packed;
+
+#define IMA_MAX_DIGEST_SIZE 64
+
+struct ima_digest_data {
+ u8 algo;
+ u8 length;
+ union {
+ struct {
+ u8 unused;
+ u8 type;
+ } sha1;
+ struct {
+ u8 type;
+ u8 algo;
+ } ng;
+ u8 data[2];
+ } xattr;
+ u8 digest[0];
+} __packed;
+
+/*
+ * signature format v2 - for using with asymmetric keys
+ */
+struct signature_v2_hdr {
+ uint8_t type; /* xattr type */
+ uint8_t version; /* signature format version */
+ uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */
+ uint32_t keyid; /* IMA key identifier - not X509/PGP specific */
+ uint16_t sig_size; /* signature size */
+ uint8_t sig[0]; /* signature payload */
+} __packed;
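
A minimal userspace sketch (assumption: a raw security.ima xattr value starts with the type byte from enum evm_ima_xattr_type above; the sample buffer is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	enum { IMA_XATTR_DIGEST = 0x01, EVM_XATTR_HMAC,
	       EVM_IMA_XATTR_DIGSIG, IMA_XATTR_DIGEST_NG };

	static const char *xattr_kind(const uint8_t *buf, size_t len)
	{
		if (len < 1)
			return "empty";
		switch (buf[0]) {
		case IMA_XATTR_DIGEST:     return "SHA1 digest (original format)";
		case IMA_XATTR_DIGEST_NG:  return "digest preceded by a hash-algo byte";
		case EVM_IMA_XATTR_DIGSIG: return "signature_v2_hdr + signature";
		case EVM_XATTR_HMAC:       return "EVM HMAC";
		default:                   return "unknown";
		}
	}

	int main(void)
	{
		uint8_t sample[] = { 0x03 };	/* hypothetical EVM_IMA_XATTR_DIGSIG value */
		printf("%s\n", xattr_kind(sample, sizeof(sample)));
		return 0;
	}
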
+
+/* integrity data associated with an inode */
+struct integrity_iint_cache {
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct inode *inode; /* back pointer to inode in question */
+ u64 version; /* track inode changes */
+ unsigned long flags;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+ enum integrity_status ima_module_status:4;
+ enum integrity_status evm_status:4;
+ struct ima_digest_data *ima_hash;
+};
+
+/* rbtree tree calls to lookup, insert, delete
+ * integrity data associated with an inode.
+ */
+struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
+struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
+
+#define INTEGRITY_KEYRING_EVM 0
+#define INTEGRITY_KEYRING_MODULE 1
+#define INTEGRITY_KEYRING_IMA 2
+#define INTEGRITY_KEYRING_MAX 3
+
+#ifdef CONFIG_INTEGRITY_SIGNATURE
+
+int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
+ const char *digest, int digestlen);
+
+#else
+
+static inline int integrity_digsig_verify(const unsigned int id,
+ const char *sig, int siglen,
+ const char *digest, int digestlen)
+{
+ return -EOPNOTSUPP;
+}
+
+#endif /* CONFIG_INTEGRITY_SIGNATURE */
+
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen);
+#else
+static inline int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_INTEGRITY_AUDIT
+/* declarations */
+void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname, const char *op,
+ const char *cause, int result, int info);
+#else
+static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname,
+ const char *op, const char *cause,
+ int result, int info)
+{
+}
+#endif
+
+/* set during initialization */
+extern int iint_initialized;
diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/integrity_audit.c
index ff513ff737f..90987d15b6f 100644
--- a/security/integrity/ima/ima_audit.c
+++ b/security/integrity/integrity_audit.c
@@ -7,42 +7,42 @@
* the Free Software Foundation, version 2 of the License.
*
* File: integrity_audit.c
- * Audit calls for the integrity subsystem
+ * Audit calls for the integrity subsystem
*/
#include <linux/fs.h>
+#include <linux/gfp.h>
#include <linux/audit.h>
-#include "ima.h"
+#include "integrity.h"
-static int ima_audit;
-
-#ifdef CONFIG_IMA_AUDIT
+static int integrity_audit_info;
/* ima_audit_setup - enable informational auditing messages */
-static int __init ima_audit_setup(char *str)
+static int __init integrity_audit_setup(char *str)
{
unsigned long audit;
- if (!strict_strtoul(str, 0, &audit))
- ima_audit = audit ? 1 : 0;
+ if (!kstrtoul(str, 0, &audit))
+ integrity_audit_info = audit ? 1 : 0;
return 1;
}
-__setup("ima_audit=", ima_audit_setup);
-#endif
+__setup("integrity_audit=", integrity_audit_setup);
void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname, const char *op,
const char *cause, int result, int audit_info)
{
struct audit_buffer *ab;
+ char name[TASK_COMM_LEN];
- if (!ima_audit && audit_info == 1) /* Skip informational messages */
+ if (!integrity_audit_info && audit_info == 1) /* Skip info messages */
return;
ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
- audit_log_format(ab, "integrity: pid=%d uid=%u auid=%u ses=%u",
- current->pid, current_cred()->uid,
- audit_get_loginuid(current),
+ audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
+ task_pid_nr(current),
+ from_kuid(&init_user_ns, current_cred()->uid),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
audit_log_task_context(ab);
audit_log_format(ab, " op=");
@@ -50,14 +50,16 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
audit_log_format(ab, " cause=");
audit_log_string(ab, cause);
audit_log_format(ab, " comm=");
- audit_log_untrustedstring(ab, current->comm);
+ audit_log_untrustedstring(ab, get_task_comm(name, current));
if (fname) {
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, fname);
}
- if (inode)
- audit_log_format(ab, " dev=%s ino=%lu",
- inode->i_sb->s_id, inode->i_ino);
- audit_log_format(ab, " res=%d", !result ? 0 : 1);
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+ audit_log_format(ab, " res=%d", !result);
audit_log_end(ab);
}
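
For illustration, the integrity_audit_msg() call in ima_parse_add_rule() above ('update_policy' / 'invalid policy') would emit an audit record roughly of the form below; every value is a placeholder, and an active LSM may add a subj= field via audit_log_task_context():

	type=INTEGRITY_STATUS msg=audit(<timestamp>:<serial>): pid=<pid> uid=0 auid=0 ses=1 op="update_policy" cause="invalid policy" comm="echo" res=0

res is 0 here because the function logs !result and result carries a negative errno.
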
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
new file mode 100644
index 00000000000..a4f3f8c48d6
--- /dev/null
+++ b/security/keys/Kconfig
@@ -0,0 +1,100 @@
+#
+# Key management configuration
+#
+
+config KEYS
+ bool "Enable access key retention support"
+ select ASSOCIATIVE_ARRAY
+ help
+ This option provides support for retaining authentication tokens and
+ access keys in the kernel.
+
+ It also includes provision of methods by which such keys might be
+ associated with a process so that network filesystems, encryption
+ support and the like can find them.
+
+ Furthermore, a special type of key is available that acts as a keyring:
+ a searchable sequence of keys. Each process is equipped with access
+ to five standard keyrings: UID-specific, GID-specific, session,
+ process and thread.
+
+ If you are unsure as to whether this is required, answer N.
+
+config PERSISTENT_KEYRINGS
+ bool "Enable register of persistent per-UID keyrings"
+ depends on KEYS
+ help
+ This option provides a register of persistent per-UID keyrings,
+ primarily aimed at Kerberos key storage. The keyrings are persistent
+ in the sense that they stay around after all processes of that UID
+ have exited, not that they survive the machine being rebooted.
+
+ A particular keyring may be accessed by either the user whose keyring
+ it is or by a process with administrative privileges. The active
+ LSMs gets to rule on which admin-level processes get to access the
+ cache.
+
+ Keyrings are created and added into the register upon demand and get
+ removed if they expire (a default timeout is set upon creation).
+
+config BIG_KEYS
+ bool "Large payload keys"
+ depends on KEYS
+ depends on TMPFS
+ help
+ This option provides support for holding large keys within the kernel
+ (for example Kerberos ticket caches). The data may be stored out to
+ swapspace by tmpfs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config TRUSTED_KEYS
+ tristate "TRUSTED KEYS"
+ depends on KEYS && TCG_TPM
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ help
+ This option provides support for creating, sealing, and unsealing
+ keys in the kernel. Trusted keys are random number symmetric keys,
+ generated and RSA-sealed by the TPM. The TPM only unseals the keys,
+ if the boot PCRs and other criteria match. Userspace will only ever
+ see encrypted blobs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config ENCRYPTED_KEYS
+ tristate "ENCRYPTED KEYS"
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_SHA256
+ select CRYPTO_RNG
+ help
+ This option provides support for creating/encrypting/decrypting keys
+ in the kernel. Encrypted keys are kernel generated random numbers,
+ which are encrypted/decrypted with a 'master' symmetric key. The
+ 'master' key can be either a trusted-key or user-key type.
+ Userspace only ever sees/stores encrypted blobs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEYS_DEBUG_PROC_KEYS
+ bool "Enable the /proc/keys file by which keys may be viewed"
+ depends on KEYS
+ help
+ This option turns on support for the /proc/keys file - through which
+ all the keys on the system that are viewable by the reading process
+ can be listed.
+
+ The only keys included in the list are those that grant View
+ permission to the reading process whether or not it possesses them.
+ Note that LSM security checks are still performed, and may further
+ filter out keys that the current process is not authorised to view.
+
+ Only key attributes are listed here; key payloads are not included in
+ the resulting table.
+
+ If you are unsure as to whether this is required, answer N.
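
A hedged example .config fragment enabling the options added above (TRUSTED_KEYS additionally requires TCG_TPM, and BIG_KEYS requires TMPFS):

	CONFIG_KEYS=y
	CONFIG_PERSISTENT_KEYRINGS=y
	CONFIG_BIG_KEYS=y
	CONFIG_TRUSTED_KEYS=m
	CONFIG_ENCRYPTED_KEYS=m
	CONFIG_KEYS_DEBUG_PROC_KEYS=y
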
diff --git a/security/keys/Makefile b/security/keys/Makefile
index 74d5447d7df..dfb3a7beded 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -2,6 +2,9 @@
# Makefile for key management
#
+#
+# Core
+#
obj-y := \
gc.o \
key.o \
@@ -12,7 +15,14 @@ obj-y := \
request_key.o \
request_key_auth.o \
user_defined.o
-
obj-$(CONFIG_KEYS_COMPAT) += compat.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
+
+#
+# Key types
+#
+obj-$(CONFIG_BIG_KEYS) += big_key.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
new file mode 100644
index 00000000000..8137b27d641
--- /dev/null
+++ b/security/keys/big_key.c
@@ -0,0 +1,207 @@
+/* Large capacity key type
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/shmem_fs.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/big_key-type.h>
+
+MODULE_LICENSE("GPL");
+
+/*
+ * If the data is under this limit, there's no point creating a shm file to
+ * hold it as the permanently resident metadata for the shmem fs will be at
+ * least as large as the data.
+ */
+#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
+
+/*
+ * big_key defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_big_key = {
+ .name = "big_key",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = big_key_instantiate,
+ .match = user_match,
+ .revoke = big_key_revoke,
+ .destroy = big_key_destroy,
+ .describe = big_key_describe,
+ .read = big_key_read,
+};
+
+/*
+ * Instantiate a big key
+ */
+int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct path *path = (struct path *)&key->payload.data2;
+ struct file *file;
+ ssize_t written;
+ size_t datalen = prep->datalen;
+ int ret;
+
+ ret = -EINVAL;
+ if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
+ goto error;
+
+ /* Set an arbitrary quota */
+ ret = key_payload_reserve(key, 16);
+ if (ret < 0)
+ goto error;
+
+ key->type_data.x[1] = datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ /* Create a shmem file to store the data in. This will permit the data
+ * to be swapped out if needed.
+ *
+ * TODO: Encrypt the stored data with a temporary key.
+ */
+ file = shmem_kernel_file_setup("", datalen, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_quota;
+ }
+
+ written = kernel_write(file, prep->data, prep->datalen, 0);
+ if (written != datalen) {
+ ret = written;
+ if (written >= 0)
+ ret = -ENOMEM;
+ goto err_fput;
+ }
+
+ /* Pin the mount and dentry to the key so that we can open it again
+ * later
+ */
+ *path = file->f_path;
+ path_get(path);
+ fput(file);
+ } else {
+ /* Just store the data in a buffer */
+ void *data = kmalloc(datalen, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err_quota;
+ }
+
+ key->payload.data = memcpy(data, prep->data, prep->datalen);
+ }
+ return 0;
+
+err_fput:
+ fput(file);
+err_quota:
+ key_payload_reserve(key, 0);
+error:
+ return ret;
+}
+
+/*
+ * dispose of the links from a revoked keyring
+ * - called with the key sem write-locked
+ */
+void big_key_revoke(struct key *key)
+{
+ struct path *path = (struct path *)&key->payload.data2;
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+ if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD)
+ vfs_truncate(path, 0);
+}
+
+/*
+ * dispose of the data dangling from the corpse of a big_key key
+ */
+void big_key_destroy(struct key *key)
+{
+ if (key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data2;
+ path_put(path);
+ path->mnt = NULL;
+ path->dentry = NULL;
+ } else {
+ kfree(key->payload.data);
+ key->payload.data = NULL;
+ }
+}
+
+/*
+ * describe the big_key key
+ */
+void big_key_describe(const struct key *key, struct seq_file *m)
+{
+ unsigned long datalen = key->type_data.x[1];
+
+ seq_puts(m, key->description);
+
+ if (key_is_instantiated(key))
+ seq_printf(m, ": %lu [%s]",
+ datalen,
+ datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+}
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+{
+ unsigned long datalen = key->type_data.x[1];
+ long ret;
+
+ if (!buffer || buflen < datalen)
+ return datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data2;
+ struct file *file;
+ loff_t pos;
+
+ file = dentry_open(path, O_RDONLY, current_cred());
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ pos = 0;
+ ret = vfs_read(file, buffer, datalen, &pos);
+ fput(file);
+ if (ret >= 0 && ret != datalen)
+ ret = -EIO;
+ } else {
+ ret = datalen;
+ if (copy_to_user(buffer, key->payload.data, datalen) != 0)
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
+
+/*
+ * Module stuff
+ */
+static int __init big_key_init(void)
+{
+ return register_key_type(&key_type_big_key);
+}
+
+static void __exit big_key_cleanup(void)
+{
+ unregister_key_type(&key_type_big_key);
+}
+
+module_init(big_key_init);
+module_exit(big_key_cleanup);
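
Hypothetical usage with the keyutils 'keyctl' tool (serials and paths are placeholders, not taken from the patch):

	$ keyctl add big_key small "just a short payload" @s
	<serial>
	$ keyctl padd big_key blob @s < /path/to/large-file
	<serial>
	$ keyctl print <serial>

Payloads above BIG_KEY_FILE_THRESHOLD are written to a shmem file and show up in /proc/keys with a '[file]' suffix from big_key_describe(); smaller payloads stay in a kmalloc'd buffer and are tagged '[buff]'.
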
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 792c0a611a6..347896548ad 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -1,4 +1,4 @@
-/* compat.c: 32-bit compatibility syscall for 64-bit systems
+/* 32-bit compatibility syscall for 64-bit systems
*
* Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,18 +12,61 @@
#include <linux/syscalls.h>
#include <linux/keyctl.h>
#include <linux/compat.h>
+#include <linux/slab.h>
#include "internal.h"
-/*****************************************************************************/
/*
- * the key control system call, 32-bit compatibility version for 64-bit archs
- * - this should only be called if the 64-bit arch uses weird pointers in
- * 32-bit mode or doesn't guarantee that the top 32-bits of the argument
- * registers on taking a 32-bit syscall are zero
- * - if you can, you should call sys_keyctl directly
+ * Instantiate a key with the specified compatibility multipart payload and
+ * link the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+static long compat_keyctl_instantiate_key_iov(
+ key_serial_t id,
+ const struct compat_iovec __user *_payload_iov,
+ unsigned ioc,
+ key_serial_t ringid)
+{
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ long ret;
+
+ if (!_payload_iov || !ioc)
+ goto no_payload;
+
+ ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
+ ARRAY_SIZE(iovstack),
+ iovstack, &iov);
+ if (ret < 0)
+ goto err;
+ if (ret == 0)
+ goto no_payload_free;
+
+ ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+err:
+ if (iov != iovstack)
+ kfree(iov);
+ return ret;
+
+no_payload_free:
+ if (iov != iovstack)
+ kfree(iov);
+no_payload:
+ return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+}
+
+/*
+ * The key control system call, 32-bit compatibility version for 64-bit archs
+ *
+ * This should only be called if the 64-bit arch uses weird pointers in 32-bit
+ * mode or doesn't guarantee that the top 32-bits of the argument registers on
+ * taking a 32-bit syscall are zero. If you can, you should call sys_keyctl()
+ * directly.
*/
-asmlinkage long compat_sys_keyctl(u32 option,
- u32 arg2, u32 arg3, u32 arg4, u32 arg5)
+COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
+ u32, arg2, u32, arg3, u32, arg4, u32, arg5)
{
switch (option) {
case KEYCTL_GET_KEYRING_ID:
@@ -85,8 +128,20 @@ asmlinkage long compat_sys_keyctl(u32 option,
case KEYCTL_SESSION_TO_PARENT:
return keyctl_session_to_parent();
+ case KEYCTL_REJECT:
+ return keyctl_reject_key(arg2, arg3, arg4, arg5);
+
+ case KEYCTL_INSTANTIATE_IOV:
+ return compat_keyctl_instantiate_key_iov(
+ arg2, compat_ptr(arg3), arg4, arg5);
+
+ case KEYCTL_INVALIDATE:
+ return keyctl_invalidate_key(arg2);
+
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent(arg2, arg3);
+
default:
return -EOPNOTSUPP;
}
-
-} /* end compat_sys_keyctl() */
+}
diff --git a/security/keys/encrypted-keys/Makefile b/security/keys/encrypted-keys/Makefile
new file mode 100644
index 00000000000..d6f8433250a
--- /dev/null
+++ b/security/keys/encrypted-keys/Makefile
@@ -0,0 +1,10 @@
+#
+# Makefile for encrypted keys
+#
+
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys.o
+
+encrypted-keys-y := encrypted.o ecryptfs_format.o
+masterkey-$(CONFIG_TRUSTED_KEYS) := masterkey_trusted.o
+masterkey-$(CONFIG_TRUSTED_KEYS)-$(CONFIG_ENCRYPTED_KEYS) := masterkey_trusted.o
+encrypted-keys-y += $(masterkey-y) $(masterkey-m-m)
diff --git a/security/keys/encrypted-keys/ecryptfs_format.c b/security/keys/encrypted-keys/ecryptfs_format.c
new file mode 100644
index 00000000000..6daa3b6ff9e
--- /dev/null
+++ b/security/keys/encrypted-keys/ecryptfs_format.c
@@ -0,0 +1,81 @@
+/*
+ * ecryptfs_format.c: helper functions for the encrypted key type
+ *
+ * Copyright (C) 2006 International Business Machines Corp.
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Michael A. Halcrow <mahalcro@us.ibm.com>
+ * Tyler Hicks <tyhicks@ou.edu>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#include <linux/module.h>
+#include "ecryptfs_format.h"
+
+u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok)
+{
+ return auth_tok->token.password.session_key_encryption_key;
+}
+EXPORT_SYMBOL(ecryptfs_get_auth_tok_key);
+
+/*
+ * ecryptfs_get_versions()
+ *
+ * Source code taken from the software 'ecryptfs-utils' version 83.
+ *
+ */
+void ecryptfs_get_versions(int *major, int *minor, int *file_version)
+{
+ *major = ECRYPTFS_VERSION_MAJOR;
+ *minor = ECRYPTFS_VERSION_MINOR;
+ if (file_version)
+ *file_version = ECRYPTFS_SUPPORTED_FILE_VERSION;
+}
+EXPORT_SYMBOL(ecryptfs_get_versions);
+
+/*
+ * ecryptfs_fill_auth_tok - fill the ecryptfs_auth_tok structure
+ *
+ * Fill the ecryptfs_auth_tok structure with required ecryptfs data.
+ * The source code is inspired by the original function generate_payload()
+ * shipped with the software 'ecryptfs-utils' version 83.
+ *
+ */
+int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
+ const char *key_desc)
+{
+ int major, minor;
+
+ ecryptfs_get_versions(&major, &minor, NULL);
+ auth_tok->version = (((uint16_t)(major << 8) & 0xFF00)
+ | ((uint16_t)minor & 0x00FF));
+ auth_tok->token_type = ECRYPTFS_PASSWORD;
+ strncpy((char *)auth_tok->token.password.signature, key_desc,
+ ECRYPTFS_PASSWORD_SIG_SIZE);
+ auth_tok->token.password.session_key_encryption_key_bytes =
+ ECRYPTFS_MAX_KEY_BYTES;
+ /*
+ * Removed auth_tok->token.password.salt and
+ * auth_tok->token.password.session_key_encryption_key
+ * initialization from the original code
+ */
+ /* TODO: Make the hash parameterizable via policy */
+ auth_tok->token.password.flags |=
+ ECRYPTFS_SESSION_KEY_ENCRYPTION_KEY_SET;
+ /* The kernel code will encrypt the session key. */
+ auth_tok->session_key.encrypted_key[0] = 0;
+ auth_tok->session_key.encrypted_key_size = 0;
+ /* Default; subject to change by kernel eCryptfs */
+ auth_tok->token.password.hash_algo = PGP_DIGEST_ALGO_SHA512;
+ auth_tok->token.password.flags &= ~(ECRYPTFS_PERSISTENT_PASSWORD);
+ return 0;
+}
+EXPORT_SYMBOL(ecryptfs_fill_auth_tok);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/encrypted-keys/ecryptfs_format.h b/security/keys/encrypted-keys/ecryptfs_format.h
new file mode 100644
index 00000000000..40294de238b
--- /dev/null
+++ b/security/keys/encrypted-keys/ecryptfs_format.h
@@ -0,0 +1,30 @@
+/*
+ * ecryptfs_format.h: helper functions for the encrypted key type
+ *
+ * Copyright (C) 2006 International Business Machines Corp.
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Michael A. Halcrow <mahalcro@us.ibm.com>
+ * Tyler Hicks <tyhicks@ou.edu>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+
+#ifndef __KEYS_ECRYPTFS_H
+#define __KEYS_ECRYPTFS_H
+
+#include <linux/ecryptfs.h>
+
+#define PGP_DIGEST_ALGO_SHA512 10
+
+u8 *ecryptfs_get_auth_tok_key(struct ecryptfs_auth_tok *auth_tok);
+void ecryptfs_get_versions(int *major, int *minor, int *file_version);
+int ecryptfs_fill_auth_tok(struct ecryptfs_auth_tok *auth_tok,
+ const char *key_desc);
+
+#endif /* __KEYS_ECRYPTFS_H */
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
new file mode 100644
index 00000000000..5fe443d120a
--- /dev/null
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -0,0 +1,1040 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include <linux/key-type.h>
+#include <linux/random.h>
+#include <linux/rcupdate.h>
+#include <linux/scatterlist.h>
+#include <linux/crypto.h>
+#include <linux/ctype.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <crypto/aes.h>
+
+#include "encrypted.h"
+#include "ecryptfs_format.h"
+
+static const char KEY_TRUSTED_PREFIX[] = "trusted:";
+static const char KEY_USER_PREFIX[] = "user:";
+static const char hash_alg[] = "sha256";
+static const char hmac_alg[] = "hmac(sha256)";
+static const char blkcipher_alg[] = "cbc(aes)";
+static const char key_format_default[] = "default";
+static const char key_format_ecryptfs[] = "ecryptfs";
+static unsigned int ivsize;
+static int blksize;
+
+#define KEY_TRUSTED_PREFIX_LEN (sizeof (KEY_TRUSTED_PREFIX) - 1)
+#define KEY_USER_PREFIX_LEN (sizeof (KEY_USER_PREFIX) - 1)
+#define KEY_ECRYPTFS_DESC_LEN 16
+#define HASH_SIZE SHA256_DIGEST_SIZE
+#define MAX_DATA_SIZE 4096
+#define MIN_DATA_SIZE 20
+
+struct sdesc {
+ struct shash_desc shash;
+ char ctx[];
+};
+
+static struct crypto_shash *hashalg;
+static struct crypto_shash *hmacalg;
+
+enum {
+ Opt_err = -1, Opt_new, Opt_load, Opt_update
+};
+
+enum {
+ Opt_error = -1, Opt_default, Opt_ecryptfs
+};
+
+static const match_table_t key_format_tokens = {
+ {Opt_default, "default"},
+ {Opt_ecryptfs, "ecryptfs"},
+ {Opt_error, NULL}
+};
+
+static const match_table_t key_tokens = {
+ {Opt_new, "new"},
+ {Opt_load, "load"},
+ {Opt_update, "update"},
+ {Opt_err, NULL}
+};
+
+static int aes_get_sizes(void)
+{
+ struct crypto_blkcipher *tfm;
+
+ tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
+ PTR_ERR(tfm));
+ return PTR_ERR(tfm);
+ }
+ ivsize = crypto_blkcipher_ivsize(tfm);
+ blksize = crypto_blkcipher_blocksize(tfm);
+ crypto_free_blkcipher(tfm);
+ return 0;
+}
+
+/*
+ * valid_ecryptfs_desc - verify the description of a new/loaded encrypted key
+ *
+ * The description of an encrypted key with format 'ecryptfs' must contain
+ * exactly 16 hexadecimal characters.
+ *
+ */
+static int valid_ecryptfs_desc(const char *ecryptfs_desc)
+{
+ int i;
+
+ if (strlen(ecryptfs_desc) != KEY_ECRYPTFS_DESC_LEN) {
+ pr_err("encrypted_key: key description must be %d hexadecimal "
+ "characters long\n", KEY_ECRYPTFS_DESC_LEN);
+ return -EINVAL;
+ }
+
+ for (i = 0; i < KEY_ECRYPTFS_DESC_LEN; i++) {
+ if (!isxdigit(ecryptfs_desc[i])) {
+ pr_err("encrypted_key: key description must contain "
+ "only hexadecimal characters\n");
+ return -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * valid_master_desc - verify the 'key-type:desc' of a new/updated master-key
+ *
+ * key-type:= "trusted:" | "user:"
+ * desc:= master-key description
+ *
+ * Verify that 'key-type' is valid and that 'desc' exists. On key update,
+ * only the master key description is permitted to change; the key-type
+ * must remain the same.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int valid_master_desc(const char *new_desc, const char *orig_desc)
+{
+ if (!memcmp(new_desc, KEY_TRUSTED_PREFIX, KEY_TRUSTED_PREFIX_LEN)) {
+ if (strlen(new_desc) == KEY_TRUSTED_PREFIX_LEN)
+ goto out;
+ if (orig_desc)
+ if (memcmp(new_desc, orig_desc, KEY_TRUSTED_PREFIX_LEN))
+ goto out;
+ } else if (!memcmp(new_desc, KEY_USER_PREFIX, KEY_USER_PREFIX_LEN)) {
+ if (strlen(new_desc) == KEY_USER_PREFIX_LEN)
+ goto out;
+ if (orig_desc)
+ if (memcmp(new_desc, orig_desc, KEY_USER_PREFIX_LEN))
+ goto out;
+ } else
+ goto out;
+ return 0;
+out:
+ return -EINVAL;
+}
+
+/*
+ * datablob_parse - parse the keyctl data
+ *
+ * datablob format:
+ * new [<format>] <master-key name> <decrypted data length>
+ * load [<format>] <master-key name> <decrypted data length>
+ * <encrypted iv + data>
+ * update <new-master-key name>
+ *
+ * Tokenizes a copy of the keyctl data, returning a pointer to each token,
+ * which is null terminated.
+ *
+ * On success returns 0, otherwise -EINVAL.
+ */
+static int datablob_parse(char *datablob, const char **format,
+ char **master_desc, char **decrypted_datalen,
+ char **hex_encoded_iv)
+{
+ substring_t args[MAX_OPT_ARGS];
+ int ret = -EINVAL;
+ int key_cmd;
+ int key_format;
+ char *p, *keyword;
+
+ keyword = strsep(&datablob, " \t");
+ if (!keyword) {
+ pr_info("encrypted_key: insufficient parameters specified\n");
+ return ret;
+ }
+ key_cmd = match_token(keyword, key_tokens, args);
+
+ /* Get optional format: default | ecryptfs */
+ p = strsep(&datablob, " \t");
+ if (!p) {
+ pr_err("encrypted_key: insufficient parameters specified\n");
+ return ret;
+ }
+
+ key_format = match_token(p, key_format_tokens, args);
+ switch (key_format) {
+ case Opt_ecryptfs:
+ case Opt_default:
+ *format = p;
+ *master_desc = strsep(&datablob, " \t");
+ break;
+ case Opt_error:
+ *master_desc = p;
+ break;
+ }
+
+ if (!*master_desc) {
+ pr_info("encrypted_key: master key parameter is missing\n");
+ goto out;
+ }
+
+ if (valid_master_desc(*master_desc, NULL) < 0) {
+ pr_info("encrypted_key: master key parameter \'%s\' "
+ "is invalid\n", *master_desc);
+ goto out;
+ }
+
+ if (decrypted_datalen) {
+ *decrypted_datalen = strsep(&datablob, " \t");
+ if (!*decrypted_datalen) {
+ pr_info("encrypted_key: keylen parameter is missing\n");
+ goto out;
+ }
+ }
+
+ switch (key_cmd) {
+ case Opt_new:
+ if (!decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .update method\n", keyword);
+ break;
+ }
+ ret = 0;
+ break;
+ case Opt_load:
+ if (!decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .update method\n", keyword);
+ break;
+ }
+ *hex_encoded_iv = strsep(&datablob, " \t");
+ if (!*hex_encoded_iv) {
+ pr_info("encrypted_key: hex blob is missing\n");
+ break;
+ }
+ ret = 0;
+ break;
+ case Opt_update:
+ if (decrypted_datalen) {
+ pr_info("encrypted_key: keyword \'%s\' not allowed "
+ "when called from .instantiate method\n",
+ keyword);
+ break;
+ }
+ ret = 0;
+ break;
+ case Opt_err:
+ pr_info("encrypted_key: keyword \'%s\' not recognized\n",
+ keyword);
+ break;
+ }
+out:
+ return ret;
+}
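
To illustrate the grammar parsed above, a minimal userspace sketch of the "new" form, assuming libkeyutils and an existing "user"-type master key named "kmk" (the "load" and "update" forms differ only in the payload string; see the later sketches):

    #include <keyutils.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        /* "new [<format>] <master-key name> <decrypted data length>" */
        const char *payload = "new user:kmk 32";
        key_serial_t key;

        key = add_key("encrypted", "evm-key", payload, strlen(payload),
                      KEY_SPEC_USER_KEYRING);
        if (key < 0) {
            perror("add_key");
            return 1;
        }
        printf("encrypted key id: %d\n", key);
        return 0;
    }

Link with -lkeyutils.
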
+
+/*
+ * datablob_format - format as an ascii string, before copying to userspace
+ */
+static char *datablob_format(struct encrypted_key_payload *epayload,
+ size_t asciiblob_len)
+{
+ char *ascii_buf, *bufp;
+ u8 *iv = epayload->iv;
+ int len;
+ int i;
+
+ ascii_buf = kmalloc(asciiblob_len + 1, GFP_KERNEL);
+ if (!ascii_buf)
+ goto out;
+
+ ascii_buf[asciiblob_len] = '\0';
+
+ /* copy datablob master_desc and datalen strings */
+ len = sprintf(ascii_buf, "%s %s %s ", epayload->format,
+ epayload->master_desc, epayload->datalen);
+
+ /* convert the hex encoded iv, encrypted-data and HMAC to ascii */
+ bufp = &ascii_buf[len];
+ for (i = 0; i < (asciiblob_len - len) / 2; i++)
+ bufp = hex_byte_pack(bufp, iv[i]);
+out:
+ return ascii_buf;
+}
+
+/*
+ * request_user_key - request the user key
+ *
+ * Use a user provided key to encrypt/decrypt an encrypted-key.
+ */
+static struct key *request_user_key(const char *master_desc, u8 **master_key,
+ size_t *master_keylen)
+{
+ struct user_key_payload *upayload;
+ struct key *ukey;
+
+ ukey = request_key(&key_type_user, master_desc, NULL);
+ if (IS_ERR(ukey))
+ goto error;
+
+ down_read(&ukey->sem);
+ upayload = ukey->payload.data;
+ *master_key = upayload->data;
+ *master_keylen = upayload->datalen;
+error:
+ return ukey;
+}
+
+static struct sdesc *alloc_sdesc(struct crypto_shash *alg)
+{
+ struct sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return ERR_PTR(-ENOMEM);
+ sdesc->shash.tfm = alg;
+ sdesc->shash.flags = 0x0;
+ return sdesc;
+}
+
+static int calc_hmac(u8 *digest, const u8 *key, unsigned int keylen,
+ const u8 *buf, unsigned int buflen)
+{
+ struct sdesc *sdesc;
+ int ret;
+
+ sdesc = alloc_sdesc(hmacalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("encrypted_key: can't alloc %s\n", hmac_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_setkey(hmacalg, key, keylen);
+ if (!ret)
+ ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
+ kfree(sdesc);
+ return ret;
+}
+
+static int calc_hash(u8 *digest, const u8 *buf, unsigned int buflen)
+{
+ struct sdesc *sdesc;
+ int ret;
+
+ sdesc = alloc_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("encrypted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_digest(&sdesc->shash, buf, buflen, digest);
+ kfree(sdesc);
+ return ret;
+}
+
+enum derived_key_type { ENC_KEY, AUTH_KEY };
+
+/* Derive the authentication/encryption key from the master (trusted or user) key */
+static int get_derived_key(u8 *derived_key, enum derived_key_type key_type,
+ const u8 *master_key, size_t master_keylen)
+{
+ u8 *derived_buf;
+ unsigned int derived_buf_len;
+ int ret;
+
+ derived_buf_len = strlen("AUTH_KEY") + 1 + master_keylen;
+ if (derived_buf_len < HASH_SIZE)
+ derived_buf_len = HASH_SIZE;
+
+ derived_buf = kzalloc(derived_buf_len, GFP_KERNEL);
+ if (!derived_buf) {
+ pr_err("encrypted_key: out of memory\n");
+ return -ENOMEM;
+ }
+ if (key_type)
+ strcpy(derived_buf, "AUTH_KEY");
+ else
+ strcpy(derived_buf, "ENC_KEY");
+
+ memcpy(derived_buf + strlen(derived_buf) + 1, master_key,
+ master_keylen);
+ ret = calc_hash(derived_key, derived_buf, derived_buf_len);
+ kfree(derived_buf);
+ return ret;
+}
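
The derivation can be reproduced in userspace for debugging. A hedged sketch, assuming hash_alg is "sha256" and HASH_SIZE is 32 as set up earlier in this file; OpenSSL's SHA256() is used purely for illustration:

    #include <openssl/sha.h>
    #include <stdlib.h>
    #include <string.h>

    /* derived_key = sha256("ENC_KEY\0" + master) or sha256("AUTH_KEY\0" + master),
     * with the input buffer sized exactly as in get_derived_key() and zero padded. */
    static int derive(unsigned char out[SHA256_DIGEST_LENGTH], int auth,
                      const unsigned char *master, size_t mlen)
    {
        const char *label = auth ? "AUTH_KEY" : "ENC_KEY";
        size_t len = strlen("AUTH_KEY") + 1 + mlen;   /* same sizing for both labels */
        unsigned char *buf;

        if (len < SHA256_DIGEST_LENGTH)
            len = SHA256_DIGEST_LENGTH;
        buf = calloc(1, len);
        if (!buf)
            return -1;
        strcpy((char *)buf, label);
        memcpy(buf + strlen(label) + 1, master, mlen);
        SHA256(buf, len, out);
        free(buf);
        return 0;
    }
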
+
+static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
+ unsigned int key_len, const u8 *iv,
+ unsigned int ivsize)
+{
+ int ret;
+
+ desc->tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(desc->tfm)) {
+ pr_err("encrypted_key: failed to load %s transform (%ld)\n",
+ blkcipher_alg, PTR_ERR(desc->tfm));
+ return PTR_ERR(desc->tfm);
+ }
+ desc->flags = 0;
+
+ ret = crypto_blkcipher_setkey(desc->tfm, key, key_len);
+ if (ret < 0) {
+ pr_err("encrypted_key: failed to setkey (%d)\n", ret);
+ crypto_free_blkcipher(desc->tfm);
+ return ret;
+ }
+ crypto_blkcipher_set_iv(desc->tfm, iv, ivsize);
+ return 0;
+}
+
+static struct key *request_master_key(struct encrypted_key_payload *epayload,
+ u8 **master_key, size_t *master_keylen)
+{
+ struct key *mkey = NULL;
+
+ if (!strncmp(epayload->master_desc, KEY_TRUSTED_PREFIX,
+ KEY_TRUSTED_PREFIX_LEN)) {
+ mkey = request_trusted_key(epayload->master_desc +
+ KEY_TRUSTED_PREFIX_LEN,
+ master_key, master_keylen);
+ } else if (!strncmp(epayload->master_desc, KEY_USER_PREFIX,
+ KEY_USER_PREFIX_LEN)) {
+ mkey = request_user_key(epayload->master_desc +
+ KEY_USER_PREFIX_LEN,
+ master_key, master_keylen);
+ } else
+ goto out;
+
+ if (IS_ERR(mkey)) {
+ int ret = PTR_ERR(mkey);
+
+ if (ret == -ENOTSUPP)
+ pr_info("encrypted_key: key %s not supported",
+ epayload->master_desc);
+ else
+ pr_info("encrypted_key: key %s not found",
+ epayload->master_desc);
+ goto out;
+ }
+
+ dump_master_key(*master_key, *master_keylen);
+out:
+ return mkey;
+}
+
+/* Before returning data to userspace, encrypt decrypted data. */
+static int derived_key_encrypt(struct encrypted_key_payload *epayload,
+ const u8 *derived_key,
+ unsigned int derived_keylen)
+{
+ struct scatterlist sg_in[2];
+ struct scatterlist sg_out[1];
+ struct blkcipher_desc desc;
+ unsigned int encrypted_datalen;
+ unsigned int padlen;
+ char pad[16];
+ int ret;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+ padlen = encrypted_datalen - epayload->decrypted_datalen;
+
+ ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
+ epayload->iv, ivsize);
+ if (ret < 0)
+ goto out;
+ dump_decrypted_data(epayload);
+
+ memset(pad, 0, sizeof pad);
+ sg_init_table(sg_in, 2);
+ sg_set_buf(&sg_in[0], epayload->decrypted_data,
+ epayload->decrypted_datalen);
+ sg_set_buf(&sg_in[1], pad, padlen);
+
+ sg_init_table(sg_out, 1);
+ sg_set_buf(sg_out, epayload->encrypted_data, encrypted_datalen);
+
+ ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen);
+ crypto_free_blkcipher(desc.tfm);
+ if (ret < 0)
+ pr_err("encrypted_key: failed to encrypt (%d)\n", ret);
+ else
+ dump_encrypted_data(epayload, encrypted_datalen);
+out:
+ return ret;
+}
+
+static int datablob_hmac_append(struct encrypted_key_payload *epayload,
+ const u8 *master_key, size_t master_keylen)
+{
+ u8 derived_key[HASH_SIZE];
+ u8 *digest;
+ int ret;
+
+ ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ digest = epayload->format + epayload->datablob_len;
+ ret = calc_hmac(digest, derived_key, sizeof derived_key,
+ epayload->format, epayload->datablob_len);
+ if (!ret)
+ dump_hmac(NULL, digest, HASH_SIZE);
+out:
+ return ret;
+}
+
+/* verify HMAC before decrypting encrypted key */
+static int datablob_hmac_verify(struct encrypted_key_payload *epayload,
+ const u8 *format, const u8 *master_key,
+ size_t master_keylen)
+{
+ u8 derived_key[HASH_SIZE];
+ u8 digest[HASH_SIZE];
+ int ret;
+ char *p;
+ unsigned short len;
+
+ ret = get_derived_key(derived_key, AUTH_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ len = epayload->datablob_len;
+ if (!format) {
+ p = epayload->master_desc;
+ len -= strlen(epayload->format) + 1;
+ } else
+ p = epayload->format;
+
+ ret = calc_hmac(digest, derived_key, sizeof derived_key, p, len);
+ if (ret < 0)
+ goto out;
+ ret = memcmp(digest, epayload->format + epayload->datablob_len,
+ sizeof digest);
+ if (ret) {
+ ret = -EINVAL;
+ dump_hmac("datablob",
+ epayload->format + epayload->datablob_len,
+ HASH_SIZE);
+ dump_hmac("calc", digest, HASH_SIZE);
+ }
+out:
+ return ret;
+}
+
+static int derived_key_decrypt(struct encrypted_key_payload *epayload,
+ const u8 *derived_key,
+ unsigned int derived_keylen)
+{
+ struct scatterlist sg_in[1];
+ struct scatterlist sg_out[2];
+ struct blkcipher_desc desc;
+ unsigned int encrypted_datalen;
+ char pad[16];
+ int ret;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+ ret = init_blkcipher_desc(&desc, derived_key, derived_keylen,
+ epayload->iv, ivsize);
+ if (ret < 0)
+ goto out;
+ dump_encrypted_data(epayload, encrypted_datalen);
+
+ memset(pad, 0, sizeof pad);
+ sg_init_table(sg_in, 1);
+ sg_init_table(sg_out, 2);
+ sg_set_buf(sg_in, epayload->encrypted_data, encrypted_datalen);
+ sg_set_buf(&sg_out[0], epayload->decrypted_data,
+ epayload->decrypted_datalen);
+ sg_set_buf(&sg_out[1], pad, sizeof pad);
+
+ ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, encrypted_datalen);
+ crypto_free_blkcipher(desc.tfm);
+ if (ret < 0)
+ goto out;
+ dump_decrypted_data(epayload);
+out:
+ return ret;
+}
+
+/* Allocate memory for decrypted key and datablob. */
+static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
+ const char *format,
+ const char *master_desc,
+ const char *datalen)
+{
+ struct encrypted_key_payload *epayload = NULL;
+ unsigned short datablob_len;
+ unsigned short decrypted_datalen;
+ unsigned short payload_datalen;
+ unsigned int encrypted_datalen;
+ unsigned int format_len;
+ long dlen;
+ int ret;
+
+ ret = kstrtol(datalen, 10, &dlen);
+ if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE)
+ return ERR_PTR(-EINVAL);
+
+ format_len = (!format) ? strlen(key_format_default) : strlen(format);
+ decrypted_datalen = dlen;
+ payload_datalen = decrypted_datalen;
+ if (format && !strcmp(format, key_format_ecryptfs)) {
+ if (dlen != ECRYPTFS_MAX_KEY_BYTES) {
+ pr_err("encrypted_key: keylen for the ecryptfs format "
+ "must be equal to %d bytes\n",
+ ECRYPTFS_MAX_KEY_BYTES);
+ return ERR_PTR(-EINVAL);
+ }
+ decrypted_datalen = ECRYPTFS_MAX_KEY_BYTES;
+ payload_datalen = sizeof(struct ecryptfs_auth_tok);
+ }
+
+ encrypted_datalen = roundup(decrypted_datalen, blksize);
+
+ datablob_len = format_len + 1 + strlen(master_desc) + 1
+ + strlen(datalen) + 1 + ivsize + 1 + encrypted_datalen;
+
+ ret = key_payload_reserve(key, payload_datalen + datablob_len
+ + HASH_SIZE + 1);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ epayload = kzalloc(sizeof(*epayload) + payload_datalen +
+ datablob_len + HASH_SIZE + 1, GFP_KERNEL);
+ if (!epayload)
+ return ERR_PTR(-ENOMEM);
+
+ epayload->payload_datalen = payload_datalen;
+ epayload->decrypted_datalen = decrypted_datalen;
+ epayload->datablob_len = datablob_len;
+ return epayload;
+}
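
As a worked example of the sizing above, assuming cbc(aes) with a 16-byte block size and IV and sha256 with HASH_SIZE of 32 (as configured earlier in this file), a key created with "new user:kmk 32" in the implicit "default" format gives:

    encrypted_datalen = roundup(32, 16)                          = 32
    datablob_len      = 7+1 + 8+1 + 2+1 + 16+1 + 32              = 69
                        (format, master_desc, datalen, iv, data)
    reserved quota    = payload_datalen(32) + 69 + HASH_SIZE + 1 = 134 bytes
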
+
+static int encrypted_key_decrypt(struct encrypted_key_payload *epayload,
+ const char *format, const char *hex_encoded_iv)
+{
+ struct key *mkey;
+ u8 derived_key[HASH_SIZE];
+ u8 *master_key;
+ u8 *hmac;
+ const char *hex_encoded_data;
+ unsigned int encrypted_datalen;
+ size_t master_keylen;
+ size_t asciilen;
+ int ret;
+
+ encrypted_datalen = roundup(epayload->decrypted_datalen, blksize);
+ asciilen = (ivsize + 1 + encrypted_datalen + HASH_SIZE) * 2;
+ if (strlen(hex_encoded_iv) != asciilen)
+ return -EINVAL;
+
+ hex_encoded_data = hex_encoded_iv + (2 * ivsize) + 2;
+ ret = hex2bin(epayload->iv, hex_encoded_iv, ivsize);
+ if (ret < 0)
+ return -EINVAL;
+ ret = hex2bin(epayload->encrypted_data, hex_encoded_data,
+ encrypted_datalen);
+ if (ret < 0)
+ return -EINVAL;
+
+ hmac = epayload->format + epayload->datablob_len;
+ ret = hex2bin(hmac, hex_encoded_data + (encrypted_datalen * 2),
+ HASH_SIZE);
+ if (ret < 0)
+ return -EINVAL;
+
+ mkey = request_master_key(epayload, &master_key, &master_keylen);
+ if (IS_ERR(mkey))
+ return PTR_ERR(mkey);
+
+ ret = datablob_hmac_verify(epayload, format, master_key, master_keylen);
+ if (ret < 0) {
+ pr_err("encrypted_key: bad hmac (%d)\n", ret);
+ goto out;
+ }
+
+ ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ ret = derived_key_decrypt(epayload, derived_key, sizeof derived_key);
+ if (ret < 0)
+ pr_err("encrypted_key: failed to decrypt key (%d)\n", ret);
+out:
+ up_read(&mkey->sem);
+ key_put(mkey);
+ return ret;
+}
+
+static void __ekey_init(struct encrypted_key_payload *epayload,
+ const char *format, const char *master_desc,
+ const char *datalen)
+{
+ unsigned int format_len;
+
+ format_len = (!format) ? strlen(key_format_default) : strlen(format);
+ epayload->format = epayload->payload_data + epayload->payload_datalen;
+ epayload->master_desc = epayload->format + format_len + 1;
+ epayload->datalen = epayload->master_desc + strlen(master_desc) + 1;
+ epayload->iv = epayload->datalen + strlen(datalen) + 1;
+ epayload->encrypted_data = epayload->iv + ivsize + 1;
+ epayload->decrypted_data = epayload->payload_data;
+
+ if (!format)
+ memcpy(epayload->format, key_format_default, format_len);
+ else {
+ if (!strcmp(format, key_format_ecryptfs))
+ epayload->decrypted_data =
+ ecryptfs_get_auth_tok_key((struct ecryptfs_auth_tok *)epayload->payload_data);
+
+ memcpy(epayload->format, format, format_len);
+ }
+
+ memcpy(epayload->master_desc, master_desc, strlen(master_desc));
+ memcpy(epayload->datalen, datalen, strlen(datalen));
+}
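
The pointer arithmetic above lays every field out in one contiguous allocation; a sketch of the resulting layout (field widths as reserved by encrypted_key_alloc(), with the HMAC appended later by datablob_hmac_append()):

    payload_data[payload_datalen]
    format[format_len + 1]          e.g. "default\0" or "ecryptfs\0"
    master_desc[strlen + 1]         e.g. "user:kmk\0"
    datalen[strlen + 1]             e.g. "32\0"
    iv[ivsize + 1]                  one pad byte before the ciphertext
    encrypted_data[encrypted_datalen]
    HMAC[HASH_SIZE]

For the default format, decrypted_data aliases payload_data; for the ecryptfs format it points at the key area inside the struct ecryptfs_auth_tok stored there.
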
+
+/*
+ * encrypted_init - initialize an encrypted key
+ *
+ * For a new key, use a random number for both the iv and data
+ * itself. For an old key, decrypt the hex encoded data.
+ */
+static int encrypted_init(struct encrypted_key_payload *epayload,
+ const char *key_desc, const char *format,
+ const char *master_desc, const char *datalen,
+ const char *hex_encoded_iv)
+{
+ int ret = 0;
+
+ if (format && !strcmp(format, key_format_ecryptfs)) {
+ ret = valid_ecryptfs_desc(key_desc);
+ if (ret < 0)
+ return ret;
+
+ ecryptfs_fill_auth_tok((struct ecryptfs_auth_tok *)epayload->payload_data,
+ key_desc);
+ }
+
+ __ekey_init(epayload, format, master_desc, datalen);
+ if (!hex_encoded_iv) {
+ get_random_bytes(epayload->iv, ivsize);
+
+ get_random_bytes(epayload->decrypted_data,
+ epayload->decrypted_datalen);
+ } else
+ ret = encrypted_key_decrypt(epayload, format, hex_encoded_iv);
+ return ret;
+}
+
+/*
+ * encrypted_instantiate - instantiate an encrypted key
+ *
+ * Decrypt an existing encrypted datablob or create a new encrypted key
+ * based on a kernel random number.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ struct encrypted_key_payload *epayload = NULL;
+ char *datablob = NULL;
+ const char *format = NULL;
+ char *master_desc = NULL;
+ char *decrypted_datalen = NULL;
+ char *hex_encoded_iv = NULL;
+ size_t datalen = prep->datalen;
+ int ret;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ datablob[datalen] = 0;
+ memcpy(datablob, prep->data, datalen);
+ ret = datablob_parse(datablob, &format, &master_desc,
+ &decrypted_datalen, &hex_encoded_iv);
+ if (ret < 0)
+ goto out;
+
+ epayload = encrypted_key_alloc(key, format, master_desc,
+ decrypted_datalen);
+ if (IS_ERR(epayload)) {
+ ret = PTR_ERR(epayload);
+ goto out;
+ }
+ ret = encrypted_init(epayload, key->description, format, master_desc,
+ decrypted_datalen, hex_encoded_iv);
+ if (ret < 0) {
+ kfree(epayload);
+ goto out;
+ }
+
+ rcu_assign_keypointer(key, epayload);
+out:
+ kfree(datablob);
+ return ret;
+}
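
For completeness, a sketch of the "load" path, which re-instantiates a key from a previously saved blob (assuming libkeyutils; 'blob' is the NUL-terminated ascii datablob read back from an earlier key, as in the read sketch further below):

    #include <keyutils.h>
    #include <stdio.h>

    static key_serial_t reload_encrypted_key(const char *desc, const char *blob)
    {
        char payload[8192];
        int n;

        n = snprintf(payload, sizeof(payload), "load %s", blob);
        if (n < 0 || (size_t)n >= sizeof(payload))
            return -1;
        return add_key("encrypted", desc, payload, n, KEY_SPEC_USER_KEYRING);
    }
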
+
+static void encrypted_rcu_free(struct rcu_head *rcu)
+{
+ struct encrypted_key_payload *epayload;
+
+ epayload = container_of(rcu, struct encrypted_key_payload, rcu);
+ memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
+ kfree(epayload);
+}
+
+/*
+ * encrypted_update - update the master key description
+ *
+ * Change the master key description for an existing encrypted key.
+ * The next read will return an encrypted datablob using the new
+ * master key description.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct encrypted_key_payload *epayload = key->payload.data;
+ struct encrypted_key_payload *new_epayload;
+ char *buf;
+ char *new_master_desc = NULL;
+ const char *format = NULL;
+ size_t datalen = prep->datalen;
+ int ret = 0;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ buf = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ buf[datalen] = 0;
+ memcpy(buf, prep->data, datalen);
+ ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL);
+ if (ret < 0)
+ goto out;
+
+ ret = valid_master_desc(new_master_desc, epayload->master_desc);
+ if (ret < 0)
+ goto out;
+
+ new_epayload = encrypted_key_alloc(key, epayload->format,
+ new_master_desc, epayload->datalen);
+ if (IS_ERR(new_epayload)) {
+ ret = PTR_ERR(new_epayload);
+ goto out;
+ }
+
+ __ekey_init(new_epayload, epayload->format, new_master_desc,
+ epayload->datalen);
+
+ memcpy(new_epayload->iv, epayload->iv, ivsize);
+ memcpy(new_epayload->payload_data, epayload->payload_data,
+ epayload->payload_datalen);
+
+ rcu_assign_keypointer(key, new_epayload);
+ call_rcu(&epayload->rcu, encrypted_rcu_free);
+out:
+ kfree(buf);
+ return ret;
+}
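
A sketch of driving this from userspace, assuming libkeyutils and that a replacement "user"-type master key named "kmk2" already exists:

    #include <keyutils.h>
    #include <string.h>

    static long rewrap_to_new_master(key_serial_t key)
    {
        /* "update <new-master-key name>" */
        const char *payload = "update user:kmk2";

        return keyctl_update(key, payload, strlen(payload));
    }
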
+
+/*
+ * encrypted_read - format and copy the encrypted data to userspace
+ *
+ * The resulting datablob format is:
+ * <format> <master-key name> <decrypted data length>
+ * <hex encoded iv + encrypted data + HMAC>
+ *
+ * On success, return to userspace the encrypted key datablob size.
+ */
+static long encrypted_read(const struct key *key, char __user *buffer,
+ size_t buflen)
+{
+ struct encrypted_key_payload *epayload;
+ struct key *mkey;
+ u8 *master_key;
+ size_t master_keylen;
+ char derived_key[HASH_SIZE];
+ char *ascii_buf;
+ size_t asciiblob_len;
+ int ret;
+
+ epayload = rcu_dereference_key(key);
+
+ /* returns the hex encoded iv, encrypted-data, and hmac as ascii */
+ asciiblob_len = epayload->datablob_len + ivsize + 1
+ + roundup(epayload->decrypted_datalen, blksize)
+ + (HASH_SIZE * 2);
+
+ if (!buffer || buflen < asciiblob_len)
+ return asciiblob_len;
+
+ mkey = request_master_key(epayload, &master_key, &master_keylen);
+ if (IS_ERR(mkey))
+ return PTR_ERR(mkey);
+
+ ret = get_derived_key(derived_key, ENC_KEY, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ ret = derived_key_encrypt(epayload, derived_key, sizeof derived_key);
+ if (ret < 0)
+ goto out;
+
+ ret = datablob_hmac_append(epayload, master_key, master_keylen);
+ if (ret < 0)
+ goto out;
+
+ ascii_buf = datablob_format(epayload, asciiblob_len);
+ if (!ascii_buf) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ up_read(&mkey->sem);
+ key_put(mkey);
+
+ if (copy_to_user(buffer, ascii_buf, asciiblob_len) != 0)
+ ret = -EFAULT;
+ kfree(ascii_buf);
+
+ return asciiblob_len;
+out:
+ up_read(&mkey->sem);
+ key_put(mkey);
+ return ret;
+}
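
A userspace sketch of capturing that blob so it can later be fed to the "load" form (assuming libkeyutils; keyctl_read_alloc() returns a NUL-terminated copy of the payload in a buffer the caller must free):

    #include <keyutils.h>
    #include <stdio.h>

    static char *save_encrypted_blob(key_serial_t key)
    {
        void *blob = NULL;

        if (keyctl_read_alloc(key, &blob) < 0) {
            perror("keyctl_read_alloc");
            return NULL;
        }
        return blob;    /* ascii datablob; caller frees */
    }
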
+
+/*
+ * encrypted_destroy - before freeing the key, clear the decrypted data
+ *
+ * Before freeing the key, clear the memory containing the decrypted
+ * key data.
+ */
+static void encrypted_destroy(struct key *key)
+{
+ struct encrypted_key_payload *epayload = key->payload.data;
+
+ if (!epayload)
+ return;
+
+ memset(epayload->decrypted_data, 0, epayload->decrypted_datalen);
+ kfree(key->payload.data);
+}
+
+struct key_type key_type_encrypted = {
+ .name = "encrypted",
+ .instantiate = encrypted_instantiate,
+ .update = encrypted_update,
+ .match = user_match,
+ .destroy = encrypted_destroy,
+ .describe = user_describe,
+ .read = encrypted_read,
+};
+EXPORT_SYMBOL_GPL(key_type_encrypted);
+
+static void encrypted_shash_release(void)
+{
+ if (hashalg)
+ crypto_free_shash(hashalg);
+ if (hmacalg)
+ crypto_free_shash(hmacalg);
+}
+
+static int __init encrypted_shash_alloc(void)
+{
+ int ret;
+
+ hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmacalg)) {
+ pr_info("encrypted_key: could not allocate crypto %s\n",
+ hmac_alg);
+ return PTR_ERR(hmacalg);
+ }
+
+ hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hashalg)) {
+ pr_info("encrypted_key: could not allocate crypto %s\n",
+ hash_alg);
+ ret = PTR_ERR(hashalg);
+ goto hashalg_fail;
+ }
+
+ return 0;
+
+hashalg_fail:
+ crypto_free_shash(hmacalg);
+ return ret;
+}
+
+static int __init init_encrypted(void)
+{
+ int ret;
+
+ ret = encrypted_shash_alloc();
+ if (ret < 0)
+ return ret;
+ ret = register_key_type(&key_type_encrypted);
+ if (ret < 0)
+ goto out;
+ return aes_get_sizes();
+out:
+ encrypted_shash_release();
+ return ret;
+
+}
+
+static void __exit cleanup_encrypted(void)
+{
+ encrypted_shash_release();
+ unregister_key_type(&key_type_encrypted);
+}
+
+late_initcall(init_encrypted);
+module_exit(cleanup_encrypted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/encrypted-keys/encrypted.h b/security/keys/encrypted-keys/encrypted.h
new file mode 100644
index 00000000000..8136a2d44c6
--- /dev/null
+++ b/security/keys/encrypted-keys/encrypted.h
@@ -0,0 +1,66 @@
+#ifndef __ENCRYPTED_KEY_H
+#define __ENCRYPTED_KEY_H
+
+#define ENCRYPTED_DEBUG 0
+#if defined(CONFIG_TRUSTED_KEYS) || \
+ (defined(CONFIG_TRUSTED_KEYS_MODULE) && defined(CONFIG_ENCRYPTED_KEYS_MODULE))
+extern struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key, size_t *master_keylen);
+#else
+static inline struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key,
+ size_t *master_keylen)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+#endif
+
+#if ENCRYPTED_DEBUG
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+ print_hex_dump(KERN_ERR, "master key: ", DUMP_PREFIX_NONE, 32, 1,
+ master_key, master_keylen, 0);
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+ print_hex_dump(KERN_ERR, "decrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+ epayload->decrypted_data,
+ epayload->decrypted_datalen, 0);
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+ unsigned int encrypted_datalen)
+{
+ print_hex_dump(KERN_ERR, "encrypted data: ", DUMP_PREFIX_NONE, 32, 1,
+ epayload->encrypted_data, encrypted_datalen, 0);
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+ unsigned int hmac_size)
+{
+ if (str)
+ pr_info("encrypted_key: %s", str);
+ print_hex_dump(KERN_ERR, "hmac: ", DUMP_PREFIX_NONE, 32, 1, digest,
+ hmac_size, 0);
+}
+#else
+static inline void dump_master_key(const u8 *master_key, size_t master_keylen)
+{
+}
+
+static inline void dump_decrypted_data(struct encrypted_key_payload *epayload)
+{
+}
+
+static inline void dump_encrypted_data(struct encrypted_key_payload *epayload,
+ unsigned int encrypted_datalen)
+{
+}
+
+static inline void dump_hmac(const char *str, const u8 *digest,
+ unsigned int hmac_size)
+{
+}
+#endif
+#endif
diff --git a/security/keys/encrypted-keys/masterkey_trusted.c b/security/keys/encrypted-keys/masterkey_trusted.c
new file mode 100644
index 00000000000..013f7e5d3a2
--- /dev/null
+++ b/security/keys/encrypted-keys/masterkey_trusted.c
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ * Copyright (C) 2010 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Authors:
+ * Mimi Zohar <zohar@us.ibm.com>
+ * Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/err.h>
+#include <keys/trusted-type.h>
+#include <keys/encrypted-type.h>
+#include "encrypted.h"
+
+/*
+ * request_trusted_key - request the trusted key
+ *
+ * Trusted keys are sealed to PCRs and other metadata. Although userspace
+ * manages both the trusted and encrypted key-types, trusted key data, like
+ * encrypted key data, is never visible to userspace in decrypted form.
+ */
+struct key *request_trusted_key(const char *trusted_desc,
+ u8 **master_key, size_t *master_keylen)
+{
+ struct trusted_key_payload *tpayload;
+ struct key *tkey;
+
+ tkey = request_key(&key_type_trusted, trusted_desc, NULL);
+ if (IS_ERR(tkey))
+ goto error;
+
+ down_read(&tkey->sem);
+ tpayload = tkey->payload.data;
+ *master_key = tpayload->key;
+ *master_keylen = tpayload->key_len;
+error:
+ return tkey;
+}
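
A sketch of creating such a TPM-sealed master key from userspace, which encrypted keys can then name as "trusted:kmk" (assuming libkeyutils and a kernel with the trusted key type and a TPM available):

    #include <keyutils.h>
    #include <string.h>

    static key_serial_t create_trusted_master(void)
    {
        const char *payload = "new 32";   /* 32 random bytes, sealed by the TPM */

        return add_key("trusted", "kmk", payload, strlen(payload),
                       KEY_SPEC_USER_KEYRING);
    }
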
diff --git a/security/keys/gc.c b/security/keys/gc.c
index 4770be375ff..d3222b6d7d5 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -1,6 +1,6 @@
/* Key garbage collector
*
- * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2009-2011 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -10,6 +10,8 @@
*/
#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/security.h>
#include <keys/keyring-type.h>
#include "internal.h"
@@ -19,21 +21,37 @@
unsigned key_gc_delay = 5 * 60;
/*
- * Reaper
+ * Reaper for unused keys.
+ */
+static void key_garbage_collector(struct work_struct *work);
+DECLARE_WORK(key_gc_work, key_garbage_collector);
+
+/*
+ * Reaper for links from keyrings to dead keys.
*/
static void key_gc_timer_func(unsigned long);
-static void key_garbage_collector(struct work_struct *);
static DEFINE_TIMER(key_gc_timer, key_gc_timer_func, 0, 0);
-static DECLARE_WORK(key_gc_work, key_garbage_collector);
-static key_serial_t key_gc_cursor; /* the last key the gc considered */
-static bool key_gc_again;
-static unsigned long key_gc_executing;
+
static time_t key_gc_next_run = LONG_MAX;
-static time_t key_gc_new_timer;
+static struct key_type *key_gc_dead_keytype;
+
+static unsigned long key_gc_flags;
+#define KEY_GC_KEY_EXPIRED 0 /* A key expired and needs unlinking */
+#define KEY_GC_REAP_KEYTYPE 1 /* A keytype is being unregistered */
+#define KEY_GC_REAPING_KEYTYPE 2 /* Cleared when keytype reaped */
+
/*
- * Schedule a garbage collection run
- * - precision isn't particularly important
+ * Any key whose type gets unregistered will be re-typed to this if it can't be
+ * immediately unlinked.
+ */
+struct key_type key_type_dead = {
+ .name = "dead",
+};
+
+/*
+ * Schedule a garbage collection run.
+ * - time precision isn't particularly important
*/
void key_schedule_gc(time_t gc_at)
{
@@ -42,177 +60,308 @@ void key_schedule_gc(time_t gc_at)
kenter("%ld", gc_at - now);
- if (gc_at <= now) {
+ if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
+ kdebug("IMMEDIATE");
schedule_work(&key_gc_work);
} else if (gc_at < key_gc_next_run) {
+ kdebug("DEFERRED");
+ key_gc_next_run = gc_at;
expires = jiffies + (gc_at - now) * HZ;
mod_timer(&key_gc_timer, expires);
}
}
/*
- * The garbage collector timer kicked off
+ * Schedule a dead links collection run.
+ */
+void key_schedule_gc_links(void)
+{
+ set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
+ schedule_work(&key_gc_work);
+}
+
+/*
+ * Some key's cleanup time has been reached after it expired, so we need to
+ * get the reaper to go through a cycle looking for expired keys.
*/
static void key_gc_timer_func(unsigned long data)
{
kenter("");
key_gc_next_run = LONG_MAX;
+ key_schedule_gc_links();
+}
+
+/*
+ * wait_on_bit() sleep function for uninterruptible waiting
+ */
+static int key_gc_wait_bit(void *flags)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * Reap keys of dead type.
+ *
+ * We use three flags to make sure we see three complete cycles of the garbage
+ * collector: the first to mark keys of that type as being dead, the second to
+ * collect dead links and the third to clean up the dead keys. We have to be
+ * careful as there may already be a cycle in progress.
+ *
+ * The caller must be holding key_types_sem.
+ */
+void key_gc_keytype(struct key_type *ktype)
+{
+ kenter("%s", ktype->name);
+
+ key_gc_dead_keytype = ktype;
+ set_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ smp_mb();
+ set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
+
+ kdebug("schedule");
schedule_work(&key_gc_work);
+
+ kdebug("sleep");
+ wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
+ TASK_UNINTERRUPTIBLE);
+
+ key_gc_dead_keytype = NULL;
+ kleave("");
}
/*
- * Garbage collect pointers from a keyring
- * - return true if we altered the keyring
+ * Garbage collect a list of unreferenced, detached keys
*/
-static bool key_gc_keyring(struct key *keyring, time_t limit)
- __releases(key_serial_lock)
+static noinline void key_gc_unused_keys(struct list_head *keys)
{
- struct keyring_list *klist;
- struct key *key;
- int loop;
+ while (!list_empty(keys)) {
+ struct key *key =
+ list_entry(keys->next, struct key, graveyard_link);
+ list_del(&key->graveyard_link);
- kenter("%x", key_serial(keyring));
+ kdebug("- %u", key->serial);
+ key_check(key);
- if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- goto dont_gc;
+ security_key_free(key);
- /* scan the keyring looking for dead keys */
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (!klist)
- goto dont_gc;
+ /* deal with the user's key tracking and quota */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
+ }
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- key = klist->keys[loop];
- if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
- (key->expiry > 0 && key->expiry <= limit))
- goto do_gc;
- }
+ atomic_dec(&key->user->nkeys);
+ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ atomic_dec(&key->user->nikeys);
-dont_gc:
- kleave(" = false");
- return false;
+ key_user_put(key->user);
-do_gc:
- key_gc_cursor = keyring->serial;
- key_get(keyring);
- spin_unlock(&key_serial_lock);
- keyring_gc(keyring, limit);
- key_put(keyring);
- kleave(" = true");
- return true;
+ /* now throw away the key memory */
+ if (key->type->destroy)
+ key->type->destroy(key);
+
+ kfree(key->description);
+
+#ifdef KEY_DEBUGGING
+ key->magic = KEY_DEBUG_MAGIC_X;
+#endif
+ kmem_cache_free(key_jar, key);
+ }
}
/*
- * Garbage collector for keys
- * - this involves scanning the keyrings for dead, expired and revoked keys
- * that have overstayed their welcome
+ * Garbage collector for unused keys.
+ *
+ * This is done in process context so that we don't have to disable interrupts
+ * all over the place. key_put() schedules this rather than trying to do the
+ * cleanup itself, which means key_put() doesn't have to sleep.
*/
static void key_garbage_collector(struct work_struct *work)
{
- struct rb_node *rb;
- key_serial_t cursor;
- struct key *key, *xkey;
- time_t new_timer = LONG_MAX, limit, now;
-
- now = current_kernel_time().tv_sec;
- kenter("[%x,%ld]", key_gc_cursor, key_gc_new_timer - now);
-
- if (test_and_set_bit(0, &key_gc_executing)) {
- key_schedule_gc(current_kernel_time().tv_sec + 1);
- kleave(" [busy; deferring]");
- return;
- }
+ static LIST_HEAD(graveyard);
+ static u8 gc_state; /* Internal persistent state */
+#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
+#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
+#define KEY_GC_SET_TIMER 0x04 /* - We need to restart the timer */
+#define KEY_GC_REAPING_DEAD_1 0x10 /* - We need to mark dead keys */
+#define KEY_GC_REAPING_DEAD_2 0x20 /* - We need to reap dead key links */
+#define KEY_GC_REAPING_DEAD_3 0x40 /* - We need to reap dead keys */
+#define KEY_GC_FOUND_DEAD_KEY 0x80 /* - We found at least one dead key */
- limit = now;
+ struct rb_node *cursor;
+ struct key *key;
+ time_t new_timer, limit;
+
+ kenter("[%lx,%x]", key_gc_flags, gc_state);
+
+ limit = current_kernel_time().tv_sec;
if (limit > key_gc_delay)
limit -= key_gc_delay;
else
limit = key_gc_delay;
+ /* Work out what we're going to be doing in this pass */
+ gc_state &= KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2;
+ gc_state <<= 1;
+ if (test_and_clear_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_LINKS | KEY_GC_SET_TIMER;
+
+ if (test_and_clear_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags))
+ gc_state |= KEY_GC_REAPING_DEAD_1;
+ kdebug("new pass %x", gc_state);
+
+ new_timer = LONG_MAX;
+
+ /* As only this function is permitted to remove things from the key
+ * serial tree, if cursor is non-NULL then it will always point to a
+ * valid node in the tree - even if lock got dropped.
+ */
spin_lock(&key_serial_lock);
+ cursor = rb_first(&key_serial_tree);
- if (unlikely(RB_EMPTY_ROOT(&key_serial_tree))) {
- spin_unlock(&key_serial_lock);
- clear_bit(0, &key_gc_executing);
- return;
- }
+continue_scanning:
+ while (cursor) {
+ key = rb_entry(cursor, struct key, serial_node);
+ cursor = rb_next(cursor);
- cursor = key_gc_cursor;
- if (cursor < 0)
- cursor = 0;
- if (cursor > 0)
- new_timer = key_gc_new_timer;
- else
- key_gc_again = false;
-
- /* find the first key above the cursor */
- key = NULL;
- rb = key_serial_tree.rb_node;
- while (rb) {
- xkey = rb_entry(rb, struct key, serial_node);
- if (cursor < xkey->serial) {
- key = xkey;
- rb = rb->rb_left;
- } else if (cursor > xkey->serial) {
- rb = rb->rb_right;
- } else {
- rb = rb_next(rb);
- if (!rb)
- goto reached_the_end;
- key = rb_entry(rb, struct key, serial_node);
- break;
+ if (atomic_read(&key->usage) == 0)
+ goto found_unreferenced_key;
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_1)) {
+ if (key->type == key_gc_dead_keytype) {
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
+ set_bit(KEY_FLAG_DEAD, &key->flags);
+ key->perm = 0;
+ goto skip_dead_key;
+ }
}
- }
- if (!key)
- goto reached_the_end;
+ if (gc_state & KEY_GC_SET_TIMER) {
+ if (key->expiry > limit && key->expiry < new_timer) {
+ kdebug("will expire %x in %ld",
+ key_serial(key), key->expiry - limit);
+ new_timer = key->expiry;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2))
+ if (key->type == key_gc_dead_keytype)
+ gc_state |= KEY_GC_FOUND_DEAD_KEY;
- /* trawl through the keys looking for keyrings */
- for (;;) {
- if (key->expiry > limit && key->expiry < new_timer) {
- kdebug("will expire %x in %ld",
- key_serial(key), key->expiry - limit);
- new_timer = key->expiry;
+ if ((gc_state & KEY_GC_REAPING_LINKS) ||
+ unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
+ if (key->type == &key_type_keyring)
+ goto found_keyring;
}
- if (key->type == &key_type_keyring &&
- key_gc_keyring(key, limit))
- /* the gc had to release our lock so that the keyring
- * could be modified, so we have to get it again */
- goto gc_released_our_lock;
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3))
+ if (key->type == key_gc_dead_keytype)
+ goto destroy_dead_key;
- rb = rb_next(&key->serial_node);
- if (!rb)
- goto reached_the_end;
- key = rb_entry(rb, struct key, serial_node);
+ skip_dead_key:
+ if (spin_is_contended(&key_serial_lock) || need_resched())
+ goto contended;
}
-gc_released_our_lock:
- kdebug("gc_released_our_lock");
- key_gc_new_timer = new_timer;
- key_gc_again = true;
- clear_bit(0, &key_gc_executing);
- schedule_work(&key_gc_work);
- kleave(" [continue]");
- return;
-
- /* when we reach the end of the run, we set the timer for the next one */
-reached_the_end:
- kdebug("reached_the_end");
+contended:
spin_unlock(&key_serial_lock);
- key_gc_new_timer = new_timer;
- key_gc_cursor = 0;
- clear_bit(0, &key_gc_executing);
-
- if (key_gc_again) {
- /* there may have been a key that expired whilst we were
- * scanning, so if we discarded any links we should do another
- * scan */
- new_timer = now + 1;
- key_schedule_gc(new_timer);
- } else if (new_timer < LONG_MAX) {
+
+maybe_resched:
+ if (cursor) {
+ cond_resched();
+ spin_lock(&key_serial_lock);
+ goto continue_scanning;
+ }
+
+ /* We've completed the pass. Set the timer if we need to and queue a
+ * new cycle if necessary. We keep executing cycles until we find one
+ * where we didn't reap any keys.
+ */
+ kdebug("pass complete");
+
+ if (gc_state & KEY_GC_SET_TIMER && new_timer != (time_t)LONG_MAX) {
new_timer += key_gc_delay;
key_schedule_gc(new_timer);
}
- kleave(" [end]");
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
+ !list_empty(&graveyard)) {
+ /* Make sure that all pending keyring payload destructions are
+ * fulfilled and that people aren't now looking at dead or
+ * dying keys that they don't have a reference upon or a link
+ * to.
+ */
+ kdebug("gc sync");
+ synchronize_rcu();
+ }
+
+ if (!list_empty(&graveyard)) {
+ kdebug("gc keys");
+ key_gc_unused_keys(&graveyard);
+ }
+
+ if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
+ KEY_GC_REAPING_DEAD_2))) {
+ if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
+ /* No remaining dead keys: short circuit the remaining
+ * keytype reap cycles.
+ */
+ kdebug("dead short");
+ gc_state &= ~(KEY_GC_REAPING_DEAD_1 | KEY_GC_REAPING_DEAD_2);
+ gc_state |= KEY_GC_REAPING_DEAD_3;
+ } else {
+ gc_state |= KEY_GC_REAP_AGAIN;
+ }
+ }
+
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_3)) {
+ kdebug("dead wake");
+ smp_mb();
+ clear_bit(KEY_GC_REAPING_KEYTYPE, &key_gc_flags);
+ wake_up_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE);
+ }
+
+ if (gc_state & KEY_GC_REAP_AGAIN)
+ schedule_work(&key_gc_work);
+ kleave(" [end %x]", gc_state);
+ return;
+
+ /* We found an unreferenced key - once we've removed it from the tree,
+ * we can safely drop the lock.
+ */
+found_unreferenced_key:
+ kdebug("unrefd key %d", key->serial);
+ rb_erase(&key->serial_node, &key_serial_tree);
+ spin_unlock(&key_serial_lock);
+
+ list_add_tail(&key->graveyard_link, &graveyard);
+ gc_state |= KEY_GC_REAP_AGAIN;
+ goto maybe_resched;
+
+ /* We found a keyring and we need to check the payload for links to
+ * dead or expired keys. We don't flag another reap immediately as we
+ * have to wait for the old payload to be destroyed by RCU before we
+ * can reap the keys to which it refers.
+ */
+found_keyring:
+ spin_unlock(&key_serial_lock);
+ keyring_gc(key, limit);
+ goto maybe_resched;
+
+ /* We found a dead key that is still referenced. Reset its type and
+ * destroy its payload with its semaphore held.
+ */
+destroy_dead_key:
+ spin_unlock(&key_serial_lock);
+ kdebug("destroy key %d", key->serial);
+ down_write(&key->sem);
+ key->type = &key_type_dead;
+ if (key_gc_dead_keytype->destroy)
+ key_gc_dead_keytype->destroy(key);
+ memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
+ up_write(&key->sem);
+ goto maybe_resched;
}
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 24ba0307b7a..5f20da01fd8 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -1,4 +1,4 @@
-/* internal.h: authentication token and access key management internal defs
+/* Authentication token and access key management internal defs
*
* Copyright (C) 2003-5, 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -14,11 +14,9 @@
#include <linux/sched.h>
#include <linux/key-type.h>
+#include <linux/task_work.h>
-static inline __attribute__((format(printf, 1, 2)))
-void no_printk(const char *fmt, ...)
-{
-}
+struct iovec;
#ifdef __KDEBUG
#define kenter(FMT, ...) \
@@ -36,14 +34,18 @@ void no_printk(const char *fmt, ...)
no_printk(KERN_DEBUG FMT"\n", ##__VA_ARGS__)
#endif
+extern struct key_type key_type_dead;
extern struct key_type key_type_user;
+extern struct key_type key_type_logon;
/*****************************************************************************/
/*
- * keep track of keys for a user
- * - this needs to be separate to user_struct to avoid a refcount-loop
- * (user_struct pins some keyrings which pin this struct)
- * - this also keeps track of keys under request from userspace for this UID
+ * Keep track of keys for a user.
+ *
+ * This needs to be separate to user_struct to avoid a refcount-loop
+ * (user_struct pins some keyrings which pin this struct).
+ *
+ * We also keep track of keys under request from userspace for this UID here.
*/
struct key_user {
struct rb_node node;
@@ -52,8 +54,7 @@ struct key_user {
atomic_t usage; /* for accessing qnkeys & qnbytes */
atomic_t nkeys; /* number of keys */
atomic_t nikeys; /* number of instantiated keys */
- uid_t uid;
- struct user_namespace *user_ns;
+ kuid_t uid;
int qnkeys; /* number of keys allocated to this user */
int qnbytes; /* number of bytes allocated to this user */
};
@@ -62,12 +63,11 @@ extern struct rb_root key_user_tree;
extern spinlock_t key_user_lock;
extern struct key_user root_key_user;
-extern struct key_user *key_user_lookup(uid_t uid,
- struct user_namespace *user_ns);
+extern struct key_user *key_user_lookup(kuid_t uid);
extern void key_user_put(struct key_user *user);
/*
- * key quota limits
+ * Key quota limits.
* - root has its own separate limits to everyone else
*/
extern unsigned key_quota_root_maxkeys;
@@ -78,6 +78,7 @@ extern unsigned key_quota_maxbytes;
#define KEYQUOTA_LINK_BYTES 4 /* a link in a keyring is worth 4 bytes */
+extern struct kmem_cache *key_jar;
extern struct rb_root key_serial_tree;
extern spinlock_t key_serial_lock;
extern struct mutex key_construction_mutex;
@@ -87,34 +88,61 @@ extern wait_queue_head_t request_key_conswq;
extern struct key_type *key_type_lookup(const char *type);
extern void key_type_put(struct key_type *ktype);
-extern int __key_link(struct key *keyring, struct key *key);
+extern int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit);
+extern int __key_link_check_live_key(struct key *keyring, struct key *key);
+extern void __key_link(struct key *key, struct assoc_array_edit **_edit);
+extern void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit);
-extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
- const struct key_type *type,
- const char *description,
- key_perm_t perm);
+extern key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key);
extern struct key *keyring_search_instkey(struct key *keyring,
key_serial_t target_id);
+extern int iterate_over_keyring(const struct key *keyring,
+ int (*func)(const struct key *key, void *data),
+ void *data);
+
typedef int (*key_match_func_t)(const struct key *, const void *);
+struct keyring_search_context {
+ struct keyring_index_key index_key;
+ const struct cred *cred;
+ key_match_func_t match;
+ const void *match_data;
+ unsigned flags;
+#define KEYRING_SEARCH_LOOKUP_TYPE 0x0001 /* [as type->def_lookup_type] */
+#define KEYRING_SEARCH_NO_STATE_CHECK 0x0002 /* Skip state checks */
+#define KEYRING_SEARCH_DO_STATE_CHECK 0x0004 /* Override NO_STATE_CHECK */
+#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */
+#define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */
+#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */
+
+ int (*iterator)(const void *object, void *iterator_data);
+
+ /* Internal stuff */
+ int skipped_ret;
+ bool possessed;
+ key_ref_t result;
+ struct timespec now;
+};
+
extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
- const struct cred *cred,
- struct key_type *type,
- const void *description,
- key_match_func_t match);
+ struct keyring_search_context *ctx);
-extern key_ref_t search_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- const struct cred *cred);
+extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
extern int install_user_keyrings(void);
extern int install_thread_keyring_to_cred(struct cred *);
extern int install_process_keyring_to_cred(struct cred *);
+extern int install_session_keyring_to_cred(struct cred *, struct key *);
extern struct key *request_key_and_link(struct key_type *type,
const char *description,
@@ -124,6 +152,7 @@ extern struct key *request_key_and_link(struct key_type *type,
struct key *dest_keyring,
unsigned long flags);
+extern int lookup_user_key_possessed(const struct key *key, const void *target);
extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
key_perm_t perm);
#define KEY_LOOKUP_CREATE 0x01
@@ -131,34 +160,29 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
#define KEY_LOOKUP_FOR_UNLINK 0x04
extern long join_session_keyring(const char *name);
+extern void key_change_session_keyring(struct callback_head *twork);
+extern struct work_struct key_gc_work;
extern unsigned key_gc_delay;
extern void keyring_gc(struct key *keyring, time_t limit);
-extern void key_schedule_gc(time_t expiry_at);
+extern void key_schedule_gc(time_t gc_at);
+extern void key_schedule_gc_links(void);
+extern void key_gc_keytype(struct key_type *ktype);
-/*
- * check to see whether permission is granted to use a key in the desired way
- */
extern int key_task_permission(const key_ref_t key_ref,
const struct cred *cred,
key_perm_t perm);
-static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
+/*
+ * Check to see whether permission is granted to use a key in the desired way.
+ */
+static inline int key_permission(const key_ref_t key_ref, unsigned perm)
{
return key_task_permission(key_ref, current_cred(), perm);
}
-/* required permissions */
-#define KEY_VIEW 0x01 /* require permission to view attributes */
-#define KEY_READ 0x02 /* require permission to read content */
-#define KEY_WRITE 0x04 /* require permission to update / modify */
-#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */
-#define KEY_LINK 0x10 /* require permission to link */
-#define KEY_SETATTR 0x20 /* require permission to change attributes */
-#define KEY_ALL 0x3f /* all the above permissions */
-
/*
- * request_key authorisation
+ * Authorisation record for request_key().
*/
struct request_key_auth {
struct key *target_key;
@@ -178,7 +202,18 @@ extern struct key *request_key_auth_new(struct key *target,
extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
/*
- * keyctl functions
+ * Determine whether a key is dead.
+ */
+static inline bool key_is_dead(const struct key *key, time_t limit)
+{
+ return
+ key->flags & ((1 << KEY_FLAG_DEAD) |
+ (1 << KEY_FLAG_INVALIDATED)) ||
+ (key->expiry > 0 && key->expiry <= limit);
+}
+
+/*
+ * keyctl() functions
*/
extern long keyctl_get_keyring_ID(key_serial_t, int);
extern long keyctl_join_session_keyring(const char __user *);
@@ -202,9 +237,27 @@ extern long keyctl_assume_authority(key_serial_t);
extern long keyctl_get_security(key_serial_t keyid, char __user *buffer,
size_t buflen);
extern long keyctl_session_to_parent(void);
+extern long keyctl_reject_key(key_serial_t, unsigned, unsigned, key_serial_t);
+extern long keyctl_instantiate_key_iov(key_serial_t,
+ const struct iovec __user *,
+ unsigned, key_serial_t);
+extern long keyctl_invalidate_key(key_serial_t);
+
+extern long keyctl_instantiate_key_common(key_serial_t,
+ const struct iovec *,
+ unsigned, size_t, key_serial_t);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+extern long keyctl_get_persistent(uid_t, key_serial_t);
+extern unsigned persistent_keyring_expiry;
+#else
+static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/*
- * debugging key validation
+ * Debugging key validation
*/
#ifdef KEY_DEBUGGING
extern void __key_check(const struct key *);
diff --git a/security/keys/key.c b/security/keys/key.c
index e50d264c9ad..2048a110e7f 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -18,10 +18,9 @@
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
-#include <linux/user_namespace.h>
#include "internal.h"
-static struct kmem_cache *key_jar;
+struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);
@@ -36,17 +35,9 @@ unsigned int key_quota_maxbytes = 20000; /* general key space quota */
static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);
-static void key_cleanup(struct work_struct *work);
-static DECLARE_WORK(key_cleanup_task, key_cleanup);
-
-/* we serialise key instantiation and link */
+/* We serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);
-/* any key who's type gets unegistered will be re-typed to this */
-static struct key_type key_type_dead = {
- .name = "dead",
-};
-
#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
@@ -56,18 +47,17 @@ void __key_check(const struct key *key)
}
#endif
-/*****************************************************************************/
/*
- * get the key quota record for a user, allocating a new record if one doesn't
- * already exist
+ * Get the key quota record for a user, allocating a new record if one doesn't
+ * already exist.
*/
-struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
+struct key_user *key_user_lookup(kuid_t uid)
{
struct key_user *candidate = NULL, *user;
struct rb_node *parent = NULL;
struct rb_node **p;
- try_again:
+try_again:
p = &key_user_tree.rb_node;
spin_lock(&key_user_lock);
@@ -76,13 +66,9 @@ struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
parent = *p;
user = rb_entry(parent, struct key_user, node);
- if (uid < user->uid)
- p = &(*p)->rb_left;
- else if (uid > user->uid)
- p = &(*p)->rb_right;
- else if (user_ns < user->user_ns)
+ if (uid_lt(uid, user->uid))
p = &(*p)->rb_left;
- else if (user_ns > user->user_ns)
+ else if (uid_gt(uid, user->uid))
p = &(*p)->rb_right;
else
goto found;
@@ -111,7 +97,6 @@ struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
atomic_set(&candidate->nkeys, 0);
atomic_set(&candidate->nikeys, 0);
candidate->uid = uid;
- candidate->user_ns = get_user_ns(user_ns);
candidate->qnkeys = 0;
candidate->qnbytes = 0;
spin_lock_init(&candidate->lock);
@@ -124,36 +109,30 @@ struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
goto out;
/* okay - we found a user record for this UID */
- found:
+found:
atomic_inc(&user->usage);
spin_unlock(&key_user_lock);
kfree(candidate);
- out:
+out:
return user;
+}
-} /* end key_user_lookup() */
-
-/*****************************************************************************/
/*
- * dispose of a user structure
+ * Dispose of a user structure
*/
void key_user_put(struct key_user *user)
{
if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
rb_erase(&user->node, &key_user_tree);
spin_unlock(&key_user_lock);
- put_user_ns(user->user_ns);
kfree(user);
}
+}
-} /* end key_user_put() */
-
-/*****************************************************************************/
/*
- * assign a key the next unique serial number
- * - these are assigned randomly to avoid security issues through covert
- * channel problems
+ * Allocate a serial number for a key. These are assigned randomly to avoid
+ * security issues through covert channel problems.
*/
static inline void key_alloc_serial(struct key *key)
{
@@ -211,21 +190,39 @@ serial_exists:
if (key->serial < xkey->serial)
goto attempt_insertion;
}
+}
-} /* end key_alloc_serial() */
-
-/*****************************************************************************/
-/*
- * allocate a key of the specified type
- * - update the user's quota to reflect the existence of the key
- * - called from a key-type operation with key_types_sem read-locked by
- * key_create_or_update()
- * - this prevents unregistration of the key type
- * - upon return the key is as yet uninstantiated; the caller needs to either
- * instantiate the key or discard it before returning
+/**
+ * key_alloc - Allocate a key of the specified type.
+ * @type: The type of key to allocate.
+ * @desc: The key description to allow the key to be searched out.
+ * @uid: The owner of the new key.
+ * @gid: The group ID for the new key's group permissions.
+ * @cred: The credentials specifying UID namespace.
+ * @perm: The permissions mask of the new key.
+ * @flags: Flags specifying quota properties.
+ *
+ * Allocate a key of the specified type with the attributes given. The key is
+ * returned in an uninstantiated state and the caller needs to instantiate the
+ * key before returning.
+ *
+ * The user's key count quota is updated to reflect the creation of the key and
+ * the user's key data quota has the default for the key type reserved. The
+ * instantiation function should amend this as necessary. If insufficient
+ * quota is available, -EDQUOT will be returned.
+ *
+ * The LSM security modules can prevent a key being created, in which case
+ * -EACCES will be returned.
+ *
+ * Returns a pointer to the new key if successful and an error code otherwise.
+ *
+ * Note that the caller needs to ensure the key type isn't unregistered.
+ * Internally this can be done by locking key_types_sem. Externally, this can
+ * be done by either never unregistering the key type, or making sure
+ * key_alloc() calls don't race with module unloading.
*/
struct key *key_alloc(struct key_type *type, const char *desc,
- uid_t uid, gid_t gid, const struct cred *cred,
+ kuid_t uid, kgid_t gid, const struct cred *cred,
key_perm_t perm, unsigned long flags)
{
struct key_user *user = NULL;
@@ -237,20 +234,28 @@ struct key *key_alloc(struct key_type *type, const char *desc,
if (!desc || !*desc)
goto error;
- desclen = strlen(desc) + 1;
- quotalen = desclen + type->def_datalen;
+ if (type->vet_description) {
+ ret = type->vet_description(desc);
+ if (ret < 0) {
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
+
+ desclen = strlen(desc);
+ quotalen = desclen + 1 + type->def_datalen;
/* get hold of the key tracking for this user */
- user = key_user_lookup(uid, cred->user->user_ns);
+ user = key_user_lookup(uid);
if (!user)
goto no_memory_1;
/* check that the user's quota permits allocation of another key and
* its description */
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
- unsigned maxkeys = (uid == 0) ?
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (uid == 0) ?
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&user->lock);
@@ -267,34 +272,32 @@ struct key *key_alloc(struct key_type *type, const char *desc,
}
/* allocate and initialise the key and its description */
- key = kmem_cache_alloc(key_jar, GFP_KERNEL);
+ key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
if (!key)
goto no_memory_2;
if (desc) {
- key->description = kmemdup(desc, desclen, GFP_KERNEL);
+ key->index_key.desc_len = desclen;
+ key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
if (!key->description)
goto no_memory_3;
}
atomic_set(&key->usage, 1);
init_rwsem(&key->sem);
- key->type = type;
+ lockdep_set_class(&key->sem, &type->lock_class);
+ key->index_key.type = type;
key->user = user;
key->quotalen = quotalen;
key->datalen = type->def_datalen;
key->uid = uid;
key->gid = gid;
key->perm = perm;
- key->flags = 0;
- key->expiry = 0;
- key->payload.data = NULL;
- key->security = NULL;
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
-
- memset(&key->type_data, 0, sizeof(key->type_data));
+ if (flags & KEY_ALLOC_TRUSTED)
+ key->flags |= 1 << KEY_FLAG_TRUSTED;
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
@@ -344,25 +347,30 @@ no_quota:
key_user_put(user);
key = ERR_PTR(-EDQUOT);
goto error;
-
-} /* end key_alloc() */
-
+}
EXPORT_SYMBOL(key_alloc);
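
To illustrate the contract described in the kerneldoc above, a hedged in-kernel sketch of allocating and instantiating a "user" key (illustrative only; the permission and flag choices are assumptions, and with no keyring link the key is simply reaped once the local reference is dropped):

    #include <linux/key.h>
    #include <linux/cred.h>
    #include <linux/err.h>
    #include <keys/user-type.h>

    static int example_make_user_key(void)
    {
        struct key *k;
        int ret;

        k = key_alloc(&key_type_user, "example:desc",
                      GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, current_cred(),
                      KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
                      KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(k))
            return PTR_ERR(k);

        /* key_alloc() returns an uninstantiated key */
        ret = key_instantiate_and_link(k, "secret", 6, NULL, NULL);
        key_put(k);     /* drop our reference */
        return ret;
    }
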
-/*****************************************************************************/
-/*
- * reserve an amount of quota for the key's payload
+/**
+ * key_payload_reserve - Adjust data quota reservation for the key's payload
+ * @key: The key to make the reservation for.
+ * @datalen: The amount of data payload the caller now wants.
+ *
+ * Adjust the amount of the owning user's key data quota that a key reserves.
+ * If the amount is increased, then -EDQUOT may be returned if there isn't
+ * enough free quota available.
+ *
+ * If successful, 0 is returned.
*/
int key_payload_reserve(struct key *key, size_t datalen)
{
- int delta = (int) datalen - key->datalen;
+ int delta = (int)datalen - key->datalen;
int ret = 0;
key_check(key);
/* contemplate the quota adjustment */
if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- unsigned maxbytes = (key->user->uid == 0) ?
+ unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&key->user->lock);
@@ -384,21 +392,20 @@ int key_payload_reserve(struct key *key, size_t datalen)
key->datalen = datalen;
return ret;
-
-} /* end key_payload_reserve() */
-
+}
EXPORT_SYMBOL(key_payload_reserve);
-/*****************************************************************************/
/*
- * instantiate a key and link it into the target keyring atomically
- * - called with the target keyring's semaphore writelocked
+ * Instantiate a key and link it into the target keyring atomically. Must be
+ * called with the target keyring's semaphore writelocked. The target key's
+ * semaphore need not be locked as instantiation is serialised by
+ * key_construction_mutex.
*/
static int __key_instantiate_and_link(struct key *key,
- const void *data,
- size_t datalen,
+ struct key_preparsed_payload *prep,
struct key *keyring,
- struct key *authkey)
+ struct key *authkey,
+ struct assoc_array_edit **_edit)
{
int ret, awaken;
@@ -413,7 +420,7 @@ static int __key_instantiate_and_link(struct key *key,
/* can't instantiate twice */
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* instantiate the key */
- ret = key->type->instantiate(key, data, datalen);
+ ret = key->type->instantiate(key, prep);
if (ret == 0) {
/* mark the key as being instantiated */
@@ -425,7 +432,7 @@ static int __key_instantiate_and_link(struct key *key,
/* and link it into the destination keyring */
if (keyring)
- ret = __key_link(keyring, key);
+ __key_link(key, _edit);
/* disable the authorisation key */
if (authkey)
@@ -440,12 +447,23 @@ static int __key_instantiate_and_link(struct key *key,
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
return ret;
+}
-} /* end __key_instantiate_and_link() */
-
-/*****************************************************************************/
-/*
- * instantiate a key and link it into the target keyring atomically
+/**
+ * key_instantiate_and_link - Instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @data: The data to use to instantiate the key.
+ * @datalen: The length of @data.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Instantiate a key that's in the uninstantiated state using the provided data
+ * and, if successful, link it in to the destination keyring if one is
+ * supplied.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up. If the key was already instantiated,
+ * -EBUSY will be returned.
*/
int key_instantiate_and_link(struct key *key,
const void *data,
@@ -453,33 +471,70 @@ int key_instantiate_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
+ struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit;
int ret;
- if (keyring)
- down_write(&keyring->sem);
+ memset(&prep, 0, sizeof(prep));
+ prep.data = data;
+ prep.datalen = datalen;
+ prep.quotalen = key->type->def_datalen;
+ if (key->type->preparse) {
+ ret = key->type->preparse(&prep);
+ if (ret < 0)
+ goto error;
+ }
- ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey);
+ if (keyring) {
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (ret < 0)
+ goto error_free_preparse;
+ }
+
+ ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
if (keyring)
- up_write(&keyring->sem);
+ __key_link_end(keyring, &key->index_key, edit);
+error_free_preparse:
+ if (key->type->preparse)
+ key->type->free_preparse(&prep);
+error:
return ret;
-
-} /* end key_instantiate_and_link() */
+}
EXPORT_SYMBOL(key_instantiate_and_link);
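For orientation, a minimal sketch (not part of this patch) of the in-kernel allocate, instantiate and link sequence that this interface serves. The "example:blob" description, the permission mask and the use of the stock "user" key type are illustrative assumptions only:

#include <keys/user-type.h>
#include <linux/cred.h>
#include <linux/err.h>
#include <linux/key.h>

static int example_add_blob(struct key *keyring, const void *blob, size_t len)
{
        const struct cred *cred = current_cred();
        struct key *key;
        int ret;

        /* allocate an uninstantiated key owned by the current fsuid/fsgid */
        key = key_alloc(&key_type_user, "example:blob",
                        cred->fsuid, cred->fsgid, cred,
                        KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
                        KEY_ALLOC_NOT_IN_QUOTA);
        if (IS_ERR(key))
                return PTR_ERR(key);

        /* hand the payload to the type's instantiate op and link the key */
        ret = key_instantiate_and_link(key, blob, len, keyring, NULL);
        key_put(key);
        return ret;
}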
-/*****************************************************************************/
-/*
- * negatively instantiate a key and link it into the target keyring atomically
+/**
+ * key_reject_and_link - Negatively instantiate a key and link it into the keyring.
+ * @key: The key to instantiate.
+ * @timeout: The timeout on the negative key.
+ * @error: The error to return when the key is hit.
+ * @keyring: Keyring to create a link in on success (or NULL).
+ * @authkey: The authorisation token permitting instantiation.
+ *
+ * Negatively instantiate a key that's in the uninstantiated state and, if
+ * successful, set its timeout and stored error and link it in to the
+ * destination keyring if one is supplied. The key and any links to the key
+ * will be automatically garbage collected after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return the stored error code (typically ENOKEY) until the negative
+ * key expires.
+ *
+ * If successful, 0 is returned, the authorisation token is revoked and anyone
+ * waiting for the key is woken up. If the key was already instantiated,
+ * -EBUSY will be returned.
*/
-int key_negate_and_link(struct key *key,
+int key_reject_and_link(struct key *key,
unsigned timeout,
+ unsigned error,
struct key *keyring,
struct key *authkey)
{
+ struct assoc_array_edit *edit;
struct timespec now;
- int ret, awaken;
+ int ret, awaken, link_ret = 0;
key_check(key);
key_check(keyring);
@@ -488,7 +543,7 @@ int key_negate_and_link(struct key *key,
ret = -EBUSY;
if (keyring)
- down_write(&keyring->sem);
+ link_ret = __key_link_begin(keyring, &key->index_key, &edit);
mutex_lock(&key_construction_mutex);
@@ -496,6 +551,8 @@ int key_negate_and_link(struct key *key,
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
+ key->type_data.reject_error = -error;
+ smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
now = current_kernel_time();
@@ -508,8 +565,8 @@ int key_negate_and_link(struct key *key,
ret = 0;
/* and link it into the destination keyring */
- if (keyring)
- ret = __key_link(keyring, key);
+ if (keyring && link_ret == 0)
+ __key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
@@ -519,87 +576,23 @@ int key_negate_and_link(struct key *key,
mutex_unlock(&key_construction_mutex);
if (keyring)
- up_write(&keyring->sem);
+ __key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);
- return ret;
-
-} /* end key_negate_and_link() */
-
-EXPORT_SYMBOL(key_negate_and_link);
-
-/*****************************************************************************/
-/*
- * do cleaning up in process context so that we don't have to disable
- * interrupts all over the place
- */
-static void key_cleanup(struct work_struct *work)
-{
- struct rb_node *_n;
- struct key *key;
-
- go_again:
- /* look for a dead key in the tree */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (atomic_read(&key->usage) == 0)
- goto found_dead_key;
- }
-
- spin_unlock(&key_serial_lock);
- return;
-
- found_dead_key:
- /* we found a dead key - once we've removed it from the tree, we can
- * drop the lock */
- rb_erase(&key->serial_node, &key_serial_tree);
- spin_unlock(&key_serial_lock);
-
- key_check(key);
-
- security_key_free(key);
-
- /* deal with the user's key tracking and quota */
- if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- spin_lock(&key->user->lock);
- key->user->qnkeys--;
- key->user->qnbytes -= key->quotalen;
- spin_unlock(&key->user->lock);
- }
-
- atomic_dec(&key->user->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
- atomic_dec(&key->user->nikeys);
-
- key_user_put(key->user);
-
- /* now throw away the key memory */
- if (key->type->destroy)
- key->type->destroy(key);
-
- kfree(key->description);
-
-#ifdef KEY_DEBUGGING
- key->magic = KEY_DEBUG_MAGIC_X;
-#endif
- kmem_cache_free(key_jar, key);
-
- /* there may, of course, be more than one key to destroy */
- goto go_again;
-
-} /* end key_cleanup() */
+ return ret == 0 ? link_ret : ret;
+}
+EXPORT_SYMBOL(key_reject_and_link);
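As a hedged illustration of the negative path (again not part of the patch), an in-kernel constructor that has failed to build a key might park a negative result like this; the 60-second window is an arbitrary choice:

#include <linux/errno.h>
#include <linux/key.h>

/* Leave a negative key behind so that repeated request_key() calls return
 * -ENOKEY for the next minute instead of re-running the upcall.
 */
static int example_fail_construction(struct key *key, struct key *keyring,
                                     struct key *authkey)
{
        return key_reject_and_link(key, 60, ENOKEY, keyring, authkey);
}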
-/*****************************************************************************/
-/*
- * dispose of a reference to a key
- * - when all the references are gone, we schedule the cleanup task to come and
- * pull it out of the tree in definite process context
+/**
+ * key_put - Discard a reference to a key.
+ * @key: The key to discard a reference from.
+ *
+ * Discard a reference to a key, and when all the references are gone, we
+ * schedule the cleanup task to come and pull it out of the tree in process
+ * context at some later time.
*/
void key_put(struct key *key)
{
@@ -607,16 +600,13 @@ void key_put(struct key *key)
key_check(key);
if (atomic_dec_and_test(&key->usage))
- schedule_work(&key_cleanup_task);
+ schedule_work(&key_gc_work);
}
-
-} /* end key_put() */
-
+}
EXPORT_SYMBOL(key_put);
-/*****************************************************************************/
/*
- * find a key by its serial number
+ * Find a key by its serial number.
*/
struct key *key_lookup(key_serial_t id)
{
@@ -638,11 +628,11 @@ struct key *key_lookup(key_serial_t id)
goto found;
}
- not_found:
+not_found:
key = ERR_PTR(-ENOKEY);
goto error;
- found:
+found:
/* pretend it doesn't exist if it is awaiting deletion */
if (atomic_read(&key->usage) == 0)
goto not_found;
@@ -650,18 +640,18 @@ struct key *key_lookup(key_serial_t id)
/* this races with key_put(), but that doesn't matter since key_put()
* doesn't actually change the key
*/
- atomic_inc(&key->usage);
+ __key_get(key);
- error:
+error:
spin_unlock(&key_serial_lock);
return key;
+}
-} /* end key_lookup() */
-
-/*****************************************************************************/
/*
- * find and lock the specified key type against removal
- * - we return with the sem readlocked
+ * Find and lock the specified key type against removal.
+ *
+ * We return with the sem read-locked if successful. If the type wasn't
+ * available -ENOKEY is returned instead.
*/
struct key_type *key_type_lookup(const char *type)
{
@@ -679,35 +669,52 @@ struct key_type *key_type_lookup(const char *type)
up_read(&key_types_sem);
ktype = ERR_PTR(-ENOKEY);
- found_kernel_type:
+found_kernel_type:
return ktype;
+}
+
+void key_set_timeout(struct key *key, unsigned timeout)
+{
+ struct timespec now;
+ time_t expiry = 0;
+
+ /* make the changes with the locks held to prevent races */
+ down_write(&key->sem);
+
+ if (timeout > 0) {
+ now = current_kernel_time();
+ expiry = now.tv_sec + timeout;
+ }
-} /* end key_type_lookup() */
+ key->expiry = expiry;
+ key_schedule_gc(key->expiry + key_gc_delay);
+
+ up_write(&key->sem);
+}
+EXPORT_SYMBOL_GPL(key_set_timeout);
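key_set_timeout() becomes an exported helper here; a tiny, hedged usage sketch follows (the 300-second lifetime is only an example value):

#include <linux/key.h>

static void example_set_lifetime(struct key *key)
{
        /* expire the key, and schedule its garbage collection, 300 seconds
         * from now; passing 0 would clear any existing expiry instead */
        key_set_timeout(key, 300);
}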
-/*****************************************************************************/
/*
- * unlock a key type
+ * Unlock a key type locked by key_type_lookup().
*/
void key_type_put(struct key_type *ktype)
{
up_read(&key_types_sem);
+}
-} /* end key_type_put() */
-
-/*****************************************************************************/
/*
- * attempt to update an existing key
- * - the key has an incremented refcount
- * - we need to put the key if we get an error
+ * Attempt to update an existing key.
+ *
+ * The key is given to us with an incremented refcount that we need to discard
+ * if we get an error.
*/
static inline key_ref_t __key_update(key_ref_t key_ref,
- const void *payload, size_t plen)
+ struct key_preparsed_payload *prep)
{
struct key *key = key_ref_to_ptr(key_ref);
int ret;
/* need write permission on the key to update it */
- ret = key_permission(key_ref, KEY_WRITE);
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
goto error;
@@ -717,7 +724,7 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
down_write(&key->sem);
- ret = key->type->update(key, payload, plen);
+ ret = key->type->update(key, prep);
if (ret == 0)
/* updating a negative key instantiates it */
clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
@@ -733,13 +740,32 @@ error:
key_put(key);
key_ref = ERR_PTR(ret);
goto out;
+}
-} /* end __key_update() */
-
-/*****************************************************************************/
-/*
- * search the specified keyring for a key of the same description; if one is
- * found, update it, otherwise add a new one
+/**
+ * key_create_or_update - Update or create and instantiate a key.
+ * @keyring_ref: A pointer to the destination keyring with possession flag.
+ * @type: The type of key.
+ * @description: The searchable description for the key.
+ * @payload: The data to use to instantiate or update the key.
+ * @plen: The length of @payload.
+ * @perm: The permissions mask for a new key.
+ * @flags: The quota flags for a new key.
+ *
+ * Search the destination keyring for a key of the same description and if one
+ * is found, update it, otherwise create and instantiate a new one and create a
+ * link to it from that keyring.
+ *
+ * If perm is KEY_PERM_UNDEF then an appropriate key permissions mask will be
+ * concocted.
+ *
+ * Returns a pointer to the new key if successful, -ENODEV if the key type
+ * wasn't available, -ENOTDIR if the keyring wasn't a keyring, -EACCES if the
+ * caller isn't permitted to modify the keyring or the LSM did not permit
+ * creation of the key.
+ *
+ * On success, the possession flag from the keyring ref will be tacked on to
+ * the key ref before it is returned.
*/
key_ref_t key_create_or_update(key_ref_t keyring_ref,
const char *type,
@@ -749,23 +775,28 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_perm_t perm,
unsigned long flags)
{
+ struct keyring_index_key index_key = {
+ .description = description,
+ };
+ struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit;
const struct cred *cred = current_cred();
- struct key_type *ktype;
struct key *keyring, *key = NULL;
key_ref_t key_ref;
int ret;
/* look up the key type to see if it's one of the registered kernel
* types */
- ktype = key_type_lookup(type);
- if (IS_ERR(ktype)) {
+ index_key.type = key_type_lookup(type);
+ if (IS_ERR(index_key.type)) {
key_ref = ERR_PTR(-ENODEV);
goto error;
}
key_ref = ERR_PTR(-EINVAL);
- if (!ktype->match || !ktype->instantiate)
- goto error_2;
+ if (!index_key.type->match || !index_key.type->instantiate ||
+ (!index_key.description && !index_key.type->preparse))
+ goto error_put_type;
keyring = key_ref_to_ptr(keyring_ref);
@@ -773,119 +804,173 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_ref = ERR_PTR(-ENOTDIR);
if (keyring->type != &key_type_keyring)
- goto error_2;
+ goto error_put_type;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.data = payload;
+ prep.datalen = plen;
+ prep.quotalen = index_key.type->def_datalen;
+ prep.trusted = flags & KEY_ALLOC_TRUSTED;
+ if (index_key.type->preparse) {
+ ret = index_key.type->preparse(&prep);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_put_type;
+ }
+ if (!index_key.description)
+ index_key.description = prep.description;
+ key_ref = ERR_PTR(-EINVAL);
+ if (!index_key.description)
+ goto error_free_prep;
+ }
+ index_key.desc_len = strlen(index_key.description);
+
+ key_ref = ERR_PTR(-EPERM);
+ if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
+ goto error_free_prep;
+ flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
- down_write(&keyring->sem);
+ ret = __key_link_begin(keyring, &index_key, &edit);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
/* if we're going to allocate a new key, we're going to have
* to modify the keyring */
- ret = key_permission(keyring_ref, KEY_WRITE);
+ ret = key_permission(keyring_ref, KEY_NEED_WRITE);
if (ret < 0) {
key_ref = ERR_PTR(ret);
- goto error_3;
+ goto error_link_end;
}
/* if it's possible to update this type of key, search for an existing
* key of the same type and description in the destination keyring and
* update that instead if possible
*/
- if (ktype->update) {
- key_ref = __keyring_search_one(keyring_ref, ktype, description,
- 0);
- if (!IS_ERR(key_ref))
+ if (index_key.type->update) {
+ key_ref = find_key_to_update(keyring_ref, &index_key);
+ if (key_ref)
goto found_matching_key;
}
/* if the client doesn't provide, decide on the permissions we want */
if (perm == KEY_PERM_UNDEF) {
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
- perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
+ perm |= KEY_USR_VIEW;
- if (ktype->read)
- perm |= KEY_POS_READ | KEY_USR_READ;
+ if (index_key.type->read)
+ perm |= KEY_POS_READ;
- if (ktype == &key_type_keyring || ktype->update)
- perm |= KEY_USR_WRITE;
+ if (index_key.type == &key_type_keyring ||
+ index_key.type->update)
+ perm |= KEY_POS_WRITE;
}
/* allocate a new key */
- key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
- perm, flags);
+ key = key_alloc(index_key.type, index_key.description,
+ cred->fsuid, cred->fsgid, cred, perm, flags);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
- goto error_3;
+ goto error_link_end;
}
/* instantiate it and link it into the target keyring */
- ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
+ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
- goto error_3;
+ goto error_link_end;
}
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
- error_3:
- up_write(&keyring->sem);
- error_2:
- key_type_put(ktype);
- error:
+error_link_end:
+ __key_link_end(keyring, &index_key, edit);
+error_free_prep:
+ if (index_key.type->preparse)
+ index_key.type->free_preparse(&prep);
+error_put_type:
+ key_type_put(index_key.type);
+error:
return key_ref;
found_matching_key:
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
- up_write(&keyring->sem);
- key_type_put(ktype);
-
- key_ref = __key_update(key_ref, payload, plen);
- goto error;
-
-} /* end key_create_or_update() */
+ __key_link_end(keyring, &index_key, edit);
+ key_ref = __key_update(key_ref, &prep);
+ goto error_free_prep;
+}
EXPORT_SYMBOL(key_create_or_update);
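A hedged in-kernel sketch of the add-or-update pattern implemented above, assuming the caller already holds a reference to a writable keyring; the "user" type, the "example:cache" description and the possession flag are illustrative assumptions:

#include <linux/err.h>
#include <linux/key.h>

static int example_cache_blob(struct key *keyring, const void *blob, size_t len)
{
        key_ref_t kref;

        /* update an existing "example:cache" key or create and link a new one */
        kref = key_create_or_update(make_key_ref(keyring, true),
                                    "user", "example:cache", blob, len,
                                    KEY_PERM_UNDEF, KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(kref))
                return PTR_ERR(kref);

        key_ref_put(kref);
        return 0;
}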
-/*****************************************************************************/
-/*
- * update a key
+/**
+ * key_update - Update a key's contents.
+ * @key_ref: The pointer (plus possession flag) to the key.
+ * @payload: The data to be used to update the key.
+ * @plen: The length of @payload.
+ *
+ * Attempt to update the contents of a key with the given payload data. The
+ * caller must be granted Write permission on the key. Negative keys can be
+ * instantiated by this method.
+ *
+ * Returns 0 on success, -EACCES if not permitted and -EOPNOTSUPP if the key
+ * type does not support updating. The key type may return other errors.
*/
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
+ struct key_preparsed_payload prep;
struct key *key = key_ref_to_ptr(key_ref);
int ret;
key_check(key);
/* the key must be writable */
- ret = key_permission(key_ref, KEY_WRITE);
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
goto error;
/* attempt to update it if supported */
ret = -EOPNOTSUPP;
- if (key->type->update) {
- down_write(&key->sem);
-
- ret = key->type->update(key, payload, plen);
- if (ret == 0)
- /* updating a negative key instantiates it */
- clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ if (!key->type->update)
+ goto error;
- up_write(&key->sem);
+ memset(&prep, 0, sizeof(prep));
+ prep.data = payload;
+ prep.datalen = plen;
+ prep.quotalen = key->type->def_datalen;
+ if (key->type->preparse) {
+ ret = key->type->preparse(&prep);
+ if (ret < 0)
+ goto error;
}
- error:
- return ret;
+ down_write(&key->sem);
+
+ ret = key->type->update(key, &prep);
+ if (ret == 0)
+ /* updating a negative key instantiates it */
+ clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
-} /* end key_update() */
+ up_write(&key->sem);
+ if (key->type->preparse)
+ key->type->free_preparse(&prep);
+error:
+ return ret;
+}
EXPORT_SYMBOL(key_update);
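And a correspondingly small sketch of refreshing a key's payload through key_update(); the possession flag handed to make_key_ref() is an assumption, since a real caller would reuse whatever key_ref it already holds:

#include <linux/key.h>

static int example_refresh_payload(struct key *key, const void *data, size_t len)
{
        /* needs Write permission on the key for the current credentials */
        return key_update(make_key_ref(key, true), data, len);
}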
-/*****************************************************************************/
-/*
- * revoke a key
+/**
+ * key_revoke - Revoke a key.
+ * @key: The key to be revoked.
+ *
+ * Mark a key as being revoked and ask the type to free up its resources. The
+ * revocation timeout is set and the key and all its links will be
+ * automatically garbage collected after key_gc_delay amount of time if they
+ * are not manually dealt with first.
*/
void key_revoke(struct key *key)
{
@@ -913,20 +998,46 @@ void key_revoke(struct key *key)
}
up_write(&key->sem);
+}
+EXPORT_SYMBOL(key_revoke);
-} /* end key_revoke() */
+/**
+ * key_invalidate - Invalidate a key.
+ * @key: The key to be invalidated.
+ *
+ * Mark a key as being invalidated and have it cleaned up immediately. The key
+ * is ignored by all searches and other operations from this point.
+ */
+void key_invalidate(struct key *key)
+{
+ kenter("%d", key_serial(key));
-EXPORT_SYMBOL(key_revoke);
+ key_check(key);
-/*****************************************************************************/
-/*
- * register a type of key
+ if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+ down_write_nested(&key->sem, 1);
+ if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
+ key_schedule_gc_links();
+ up_write(&key->sem);
+ }
+}
+EXPORT_SYMBOL(key_invalidate);
+
+/**
+ * register_key_type - Register a type of key.
+ * @ktype: The new key type.
+ *
+ * Register a new key type.
+ *
+ * Returns 0 on success or -EEXIST if a type of this name already exists.
*/
int register_key_type(struct key_type *ktype)
{
struct key_type *p;
int ret;
+ memset(&ktype->lock_class, 0, sizeof(ktype->lock_class));
+
ret = -EEXIST;
down_write(&key_types_sem);
@@ -938,73 +1049,37 @@ int register_key_type(struct key_type *ktype)
/* store the type */
list_add(&ktype->link, &key_types_list);
+
+ pr_notice("Key type %s registered\n", ktype->name);
ret = 0;
- out:
+out:
up_write(&key_types_sem);
return ret;
-
-} /* end register_key_type() */
-
+}
EXPORT_SYMBOL(register_key_type);
-/*****************************************************************************/
-/*
- * unregister a type of key
+/**
+ * unregister_key_type - Unregister a type of key.
+ * @ktype: The key type.
+ *
+ * Unregister a key type and mark all the extant keys of this type as dead.
+ * Those keys of this type are then destroyed to get rid of their payloads and
+ * they and their links will be garbage collected as soon as possible.
*/
void unregister_key_type(struct key_type *ktype)
{
- struct rb_node *_n;
- struct key *key;
-
down_write(&key_types_sem);
-
- /* withdraw the key type */
list_del_init(&ktype->link);
-
- /* mark all the keys of this type dead */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (key->type == ktype) {
- key->type = &key_type_dead;
- set_bit(KEY_FLAG_DEAD, &key->flags);
- }
- }
-
- spin_unlock(&key_serial_lock);
-
- /* make sure everyone revalidates their keys */
- synchronize_rcu();
-
- /* we should now be able to destroy the payloads of all the keys of
- * this type with impunity */
- spin_lock(&key_serial_lock);
-
- for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
- key = rb_entry(_n, struct key, serial_node);
-
- if (key->type == ktype) {
- if (ktype->destroy)
- ktype->destroy(key);
- memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
- }
- }
-
- spin_unlock(&key_serial_lock);
- up_write(&key_types_sem);
-
- key_schedule_gc(0);
-
-} /* end unregister_key_type() */
-
+ downgrade_write(&key_types_sem);
+ key_gc_keytype(ktype);
+ pr_notice("Key type %s unregistered\n", ktype->name);
+ up_read(&key_types_sem);
+}
EXPORT_SYMBOL(unregister_key_type);
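Because this series reshapes the type callbacks around struct key_preparsed_payload, a hedged sketch of a toy key type that registers and unregisters itself may be useful; the "example" name and its trivial payload handling are invented for illustration and do not correspond to anything in the patch:

#include <linux/errno.h>
#include <linux/key-type.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_preparse(struct key_preparsed_payload *prep)
{
        /* vet the payload before any keyring locks are taken */
        if (!prep->data || prep->datalen == 0)
                return -EINVAL;
        prep->quotalen = prep->datalen;
        return 0;
}

static void example_free_preparse(struct key_preparsed_payload *prep)
{
        /* nothing was allocated in example_preparse() */
}

static int example_instantiate(struct key *key,
                               struct key_preparsed_payload *prep)
{
        void *copy;
        int ret;

        /* claim quota for the payload, then copy it into the key */
        ret = key_payload_reserve(key, prep->datalen);
        if (ret < 0)
                return ret;

        copy = kmemdup(prep->data, prep->datalen, GFP_KERNEL);
        if (!copy)
                return -ENOMEM;
        key->payload.data = copy;
        return 0;
}

static int example_match(const struct key *key, const void *description)
{
        return strcmp(key->description, description) == 0;
}

static void example_destroy(struct key *key)
{
        kfree(key->payload.data);
}

static struct key_type key_type_example = {
        .name           = "example",
        .preparse       = example_preparse,
        .free_preparse  = example_free_preparse,
        .instantiate    = example_instantiate,
        .match          = example_match,
        .destroy        = example_destroy,
};

static int __init example_keytype_init(void)
{
        return register_key_type(&key_type_example);
}

static void __exit example_keytype_exit(void)
{
        unregister_key_type(&key_type_example);
}

module_init(example_keytype_init);
module_exit(example_keytype_exit);
MODULE_LICENSE("GPL");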
-/*****************************************************************************/
/*
- * initialise the key management stuff
+ * Initialise the key management state.
*/
void __init key_init(void)
{
@@ -1016,6 +1091,7 @@ void __init key_init(void)
list_add_tail(&key_type_keyring.link, &key_types_list);
list_add_tail(&key_type_dead.link, &key_types_list);
list_add_tail(&key_type_user.link, &key_types_list);
+ list_add_tail(&key_type_logon.link, &key_types_list);
/* record the root user tracking */
rb_link_node(&root_key_user.node,
@@ -1024,5 +1100,4 @@ void __init key_init(void)
rb_insert_color(&root_key_user.node,
&key_user_tree);
-
-} /* end key_init() */
+}
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index e9c2e7c584d..cd5bd0cef25 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -1,4 +1,4 @@
-/* keyctl.c: userspace keyctl operations
+/* Userspace key control operations
*
* Copyright (C) 2004-5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
+#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/capability.h>
@@ -21,6 +22,7 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -31,28 +33,27 @@ static int key_get_type_from_user(char *type,
int ret;
ret = strncpy_from_user(type, _type, len);
-
if (ret < 0)
- return -EFAULT;
-
+ return ret;
if (ret == 0 || ret >= len)
return -EINVAL;
-
if (type[0] == '.')
return -EPERM;
-
type[len - 1] = '\0';
-
return 0;
}
-/*****************************************************************************/
/*
- * extract the description of a new key from userspace and either add it as a
- * new key to the specified keyring or update a matching key in that keyring
- * - the keyring must be writable
- * - returns the new key's serial number
- * - implements add_key()
+ * Extract the description of a new key from userspace and either add it as a
+ * new key to the specified keyring or update a matching key in that keyring.
+ *
+ * If the description is NULL or an empty string, the key type is asked to
+ * generate one from the payload.
+ *
+ * The keyring must be writable so that we can attach the key to it.
+ *
+ * If successful, the new key's serial number is returned, otherwise an error
+ * code is returned.
*/
SYSCALL_DEFINE5(add_key, const char __user *, _type,
const char __user *, _description,
@@ -75,10 +76,17 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
if (ret < 0)
goto error;
- description = strndup_user(_description, PAGE_SIZE);
- if (IS_ERR(description)) {
- ret = PTR_ERR(description);
- goto error;
+ description = NULL;
+ if (_description) {
+ description = strndup_user(_description, PAGE_SIZE);
+ if (IS_ERR(description)) {
+ ret = PTR_ERR(description);
+ goto error;
+ }
+ if (!*description) {
+ kfree(description);
+ description = NULL;
+ }
}
/* pull the payload in if one was supplied */
@@ -87,7 +95,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
vm = false;
if (_payload) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL);
+ payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
if (!payload) {
if (plen <= PAGE_SIZE)
goto error2;
@@ -103,7 +111,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
}
/* find the target keyring (which must be writable) */
- keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error3;
@@ -132,19 +140,20 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
kfree(description);
error:
return ret;
+}
-} /* end sys_add_key() */
-
-/*****************************************************************************/
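From userspace this path is reached through add_key(2); a small, hedged example using the libkeyutils wrapper (link with -lkeyutils), where the type, description and payload are placeholders:

#include <keyutils.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char payload[] = "opaque secret";
        key_serial_t id;

        /* add to, or update in, the calling process's session keyring */
        id = add_key("user", "example:token", payload, strlen(payload),
                     KEY_SPEC_SESSION_KEYRING);
        if (id == -1) {
                perror("add_key");
                return 1;
        }

        printf("key %d added\n", id);
        return 0;
}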
/*
- * search the process keyrings for a matching key
- * - nested keyrings may also be searched if they have Search permission
- * - if a key is found, it will be attached to the destination keyring if
- * there's one specified
- * - /sbin/request-key will be invoked if _callout_info is non-NULL
- * - the _callout_info string will be passed to /sbin/request-key
- * - if the _callout_info string is empty, it will be rendered as "-"
- * - implements request_key()
+ * Search the process keyrings and keyring trees linked from those for a
+ * matching key. Keyrings must have appropriate Search permission to be
+ * searched.
+ *
+ * If a key is found, it will be attached to the destination keyring if there's
+ * one specified and the serial number of the key will be returned.
+ *
+ * If no key is found, /sbin/request-key will be invoked if _callout_info is
+ * non-NULL in an attempt to create a key. The _callout_info string will be
+ * passed to /sbin/request-key to aid with completing the request. If the
+ * _callout_info string is "" then it will be changed to "-".
*/
SYSCALL_DEFINE4(request_key, const char __user *, _type,
const char __user *, _description,
@@ -186,7 +195,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
- KEY_WRITE);
+ KEY_NEED_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
@@ -209,27 +218,33 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
goto error5;
}
+ /* wait for the key to finish being constructed */
+ ret = wait_for_key_construction(key, 1);
+ if (ret < 0)
+ goto error6;
+
ret = key->serial;
+error6:
key_put(key);
- error5:
+error5:
key_type_put(ktype);
- error4:
+error4:
key_ref_put(dest_ref);
- error3:
+error3:
kfree(callout_info);
- error2:
+error2:
kfree(description);
- error:
+error:
return ret;
+}
-} /* end sys_request_key() */
-
-/*****************************************************************************/
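Its counterpart request_key(2) can be exercised the same way; a hedged sketch in which the description and callout string are placeholders and /etc/request-key.conf is assumed to know how to construct "example:token":

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
        key_serial_t id;

        /* search the process keyrings; on a miss, /sbin/request-key is
         * upcalled with "callout-data" to construct the key, and the result
         * is cached in the session keyring */
        id = request_key("user", "example:token", "callout-data",
                         KEY_SPEC_SESSION_KEYRING);
        if (id == -1) {
                perror("request_key");
                return 1;
        }

        printf("found key %d\n", id);
        return 0;
}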
/*
- * get the ID of the specified process keyring
- * - the keyring must have search permission to be found
- * - implements keyctl(KEYCTL_GET_KEYRING_ID)
+ * Get the ID of the specified process keyring.
+ *
+ * The requested keyring must have search permission to be found.
+ *
+ * If successful, the ID of the requested keyring will be returned.
*/
long keyctl_get_keyring_ID(key_serial_t id, int create)
{
@@ -238,7 +253,7 @@ long keyctl_get_keyring_ID(key_serial_t id, int create)
long ret;
lflags = create ? KEY_LOOKUP_CREATE : 0;
- key_ref = lookup_user_key(id, lflags, KEY_SEARCH);
+ key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -246,15 +261,19 @@ long keyctl_get_keyring_ID(key_serial_t id, int create)
ret = key_ref_to_ptr(key_ref)->serial;
key_ref_put(key_ref);
- error:
+error:
return ret;
+}
-} /* end keyctl_get_keyring_ID() */
-
-/*****************************************************************************/
/*
- * join the session keyring
- * - implements keyctl(KEYCTL_JOIN_SESSION_KEYRING)
+ * Join a (named) session keyring.
+ *
+ * Create and join an anonymous session keyring or join a named session
+ * keyring, creating it if necessary. A named session keyring must have Search
+ * permission for it to be joined. Session keyrings without this permit will
+ * be skipped over.
+ *
+ * If successful, the ID of the joined session keyring will be returned.
*/
long keyctl_join_session_keyring(const char __user *_name)
{
@@ -275,16 +294,19 @@ long keyctl_join_session_keyring(const char __user *_name)
ret = join_session_keyring(name);
kfree(name);
- error:
+error:
return ret;
+}
-} /* end keyctl_join_session_keyring() */
-
-/*****************************************************************************/
/*
- * update a key's data payload
- * - the key must be writable
- * - implements keyctl(KEYCTL_UPDATE)
+ * Update a key's data payload from the given data.
+ *
+ * The key must grant the caller Write permission and the key type must support
+ * updating for this to work. A negative key can be positively instantiated
+ * with this call.
+ *
+ * If successful, 0 will be returned. If the key type does not support
+ * updating, then -EOPNOTSUPP will be returned.
*/
long keyctl_update_key(key_serial_t id,
const void __user *_payload,
@@ -312,7 +334,7 @@ long keyctl_update_key(key_serial_t id,
}
/* find the target key (which must be writable) */
- key_ref = lookup_user_key(id, 0, KEY_WRITE);
+ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
@@ -322,30 +344,33 @@ long keyctl_update_key(key_serial_t id,
ret = key_update(key_ref, payload, plen);
key_ref_put(key_ref);
- error2:
+error2:
kfree(payload);
- error:
+error:
return ret;
+}
-} /* end keyctl_update_key() */
-
-/*****************************************************************************/
/*
- * revoke a key
- * - the key must be writable
- * - implements keyctl(KEYCTL_REVOKE)
+ * Revoke a key.
+ *
+ * The key must grant the caller Write or Setattr permission for this to
+ * work. The key type should give up its quota claim when revoked. The key
+ * and any links to the key will be automatically garbage collected after a
+ * certain amount of time (/proc/sys/kernel/keys/gc_delay).
+ *
+ * If successful, 0 is returned.
*/
long keyctl_revoke_key(key_serial_t id)
{
key_ref_t key_ref;
long ret;
- key_ref = lookup_user_key(id, 0, KEY_WRITE);
+ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
if (ret != -EACCES)
goto error;
- key_ref = lookup_user_key(id, 0, KEY_SETATTR);
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -356,55 +381,102 @@ long keyctl_revoke_key(key_serial_t id)
ret = 0;
key_ref_put(key_ref);
- error:
+error:
return ret;
+}
+
+/*
+ * Invalidate a key.
+ *
+ * The key must grant the caller Invalidate permission for this to work.
+ * The key and any links to the key will be automatically garbage collected
+ * immediately.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_invalidate_key(key_serial_t id)
+{
+ key_ref_t key_ref;
+ long ret;
+
+ kenter("%d", id);
-} /* end keyctl_revoke_key() */
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ key_invalidate(key_ref_to_ptr(key_ref));
+ ret = 0;
+
+ key_ref_put(key_ref);
+error:
+ kleave(" = %ld", ret);
+ return ret;
+}
-/*****************************************************************************/
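The new KEYCTL_INVALIDATE operation is reachable through the keyctl_invalidate() wrapper in recent libkeyutils (an assumption; older libraries would need the raw keyctl() call instead); a short sketch that takes the key ID on the command line:

#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
        key_serial_t id;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <key-id>\n", argv[0]);
                return 2;
        }
        id = (key_serial_t)atoi(argv[1]);

        /* needs Search permission; the key and its links are garbage
         * collected immediately rather than after a timeout */
        if (keyctl_invalidate(id) == -1) {
                perror("keyctl_invalidate");
                return 1;
        }
        return 0;
}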
/*
- * clear the specified process keyring
- * - the keyring must be writable
- * - implements keyctl(KEYCTL_CLEAR)
+ * Clear the specified keyring, creating an empty process keyring if one of the
+ * special keyring IDs is used.
+ *
+ * The keyring must grant the caller Write permission for this to work. If
+ * successful, 0 will be returned.
*/
long keyctl_keyring_clear(key_serial_t ringid)
{
key_ref_t keyring_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
+
+ /* Root is permitted to clear certain special keyrings */
+ if (capable(CAP_SYS_ADMIN)) {
+ keyring_ref = lookup_user_key(ringid, 0, 0);
+ if (IS_ERR(keyring_ref))
+ goto error;
+ if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
+ &key_ref_to_ptr(keyring_ref)->flags))
+ goto clear;
+ goto error_put;
+ }
+
goto error;
}
+clear:
ret = keyring_clear(key_ref_to_ptr(keyring_ref));
-
+error_put:
key_ref_put(keyring_ref);
- error:
+error:
return ret;
+}
-} /* end keyctl_keyring_clear() */
-
-/*****************************************************************************/
/*
- * link a key into a keyring
- * - the keyring must be writable
- * - the key must be linkable
- * - implements keyctl(KEYCTL_LINK)
+ * Create a link from a keyring to a key if there's no matching key in the
+ * keyring, otherwise replace the link to the matching key with a link to the
+ * new key.
+ *
+ * The key must grant the caller Link permission and the keyring must grant
+ * the caller Write permission. Furthermore, if an additional link is created,
+ * the keyring's quota will be extended.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
{
key_ref_t keyring_ref, key_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
- key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_LINK);
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
@@ -413,26 +485,27 @@ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
ret = key_link(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
key_ref_put(key_ref);
- error2:
+error2:
key_ref_put(keyring_ref);
- error:
+error:
return ret;
+}
-} /* end keyctl_keyring_link() */
-
-/*****************************************************************************/
/*
- * unlink the first attachment of a key from a keyring
- * - the keyring must be writable
- * - we don't need any permissions on the key
- * - implements keyctl(KEYCTL_UNLINK)
+ * Unlink a key from a keyring.
+ *
+ * The keyring must grant the caller Write permission for this to work; the key
+ * itself need not grant the caller anything. If the last link to a key is
+ * removed then that key will be scheduled for destruction.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
{
key_ref_t keyring_ref, key_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, 0, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
@@ -447,23 +520,24 @@ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
ret = key_unlink(key_ref_to_ptr(keyring_ref), key_ref_to_ptr(key_ref));
key_ref_put(key_ref);
- error2:
+error2:
key_ref_put(keyring_ref);
- error:
+error:
return ret;
+}
-} /* end keyctl_keyring_unlink() */
-
-/*****************************************************************************/
/*
- * describe a user key
- * - the key must have view permission
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of description available,
- * irrespective of how much we may have copied
- * - the description is formatted thus:
+ * Return a description of a key to userspace.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, we place up to buflen bytes of data into it formatted
+ * in the following way:
+ *
* type;uid;gid;perm;description<NUL>
- * - implements keyctl(KEYCTL_DESCRIBE)
+ *
+ * If successful, we return the amount of description available, irrespective
+ * of how much we may have copied into the buffer.
*/
long keyctl_describe_key(key_serial_t keyid,
char __user *buffer,
@@ -474,7 +548,7 @@ long keyctl_describe_key(key_serial_t keyid,
char *tmpbuf;
long ret;
- key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW);
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
/* viewing a key under construction is permitted if we have the
* authorisation token handy */
@@ -505,13 +579,11 @@ okay:
ret = snprintf(tmpbuf, PAGE_SIZE - 1,
"%s;%d;%d;%08x;%s",
- key_ref_to_ptr(key_ref)->type->name,
- key_ref_to_ptr(key_ref)->uid,
- key_ref_to_ptr(key_ref)->gid,
- key_ref_to_ptr(key_ref)->perm,
- key_ref_to_ptr(key_ref)->description ?
- key_ref_to_ptr(key_ref)->description : ""
- );
+ key->type->name,
+ from_kuid_munged(current_user_ns(), key->uid),
+ from_kgid_munged(current_user_ns(), key->gid),
+ key->perm,
+ key->description ?: "");
/* include a NUL char at the end of the data */
if (ret > PAGE_SIZE - 1)
@@ -529,22 +601,21 @@ okay:
}
kfree(tmpbuf);
- error2:
+error2:
key_ref_put(key_ref);
- error:
+error:
return ret;
+}
-} /* end keyctl_describe_key() */
-
-/*****************************************************************************/
/*
- * search the specified keyring for a matching key
- * - the start keyring must be searchable
- * - nested keyrings may also be searched if they are searchable
- * - only keys with search permission may be found
- * - if a key is found, it will be attached to the destination keyring if
- * there's one specified
- * - implements keyctl(KEYCTL_SEARCH)
+ * Search the specified keyring and any keyrings it links to for a matching
+ * key. Only keyrings that grant the caller Search permission will be searched
+ * (this includes the starting keyring). Only keys with Search permission can
+ * be found.
+ *
+ * If successful, the found key will be linked to the destination keyring if
+ * supplied and the key has Link permission, and the found key ID will be
+ * returned.
*/
long keyctl_keyring_search(key_serial_t ringid,
const char __user *_type,
@@ -568,7 +639,7 @@ long keyctl_keyring_search(key_serial_t ringid,
}
/* get the keyring at which to begin the search */
- keyring_ref = lookup_user_key(ringid, 0, KEY_SEARCH);
+ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error2;
@@ -578,7 +649,7 @@ long keyctl_keyring_search(key_serial_t ringid,
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
- KEY_WRITE);
+ KEY_NEED_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
@@ -605,7 +676,7 @@ long keyctl_keyring_search(key_serial_t ringid,
/* link the resulting key to the destination keyring if we can */
if (dest_ref) {
- ret = key_permission(key_ref, KEY_LINK);
+ ret = key_permission(key_ref, KEY_NEED_LINK);
if (ret < 0)
goto error6;
@@ -616,30 +687,29 @@ long keyctl_keyring_search(key_serial_t ringid,
ret = key_ref_to_ptr(key_ref)->serial;
- error6:
+error6:
key_ref_put(key_ref);
- error5:
+error5:
key_type_put(ktype);
- error4:
+error4:
key_ref_put(dest_ref);
- error3:
+error3:
key_ref_put(keyring_ref);
- error2:
+error2:
kfree(description);
- error:
+error:
return ret;
+}
-} /* end keyctl_keyring_search() */
-
-/*****************************************************************************/
/*
- * read a user key's payload
- * - the keyring must be readable or the key must be searchable from the
- * process's keyrings
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of data in the key,
- * irrespective of how much we may have copied
- * - implements keyctl(KEYCTL_READ)
+ * Read a key's payload.
+ *
+ * The key must either grant the caller Read permission, or it must grant the
+ * caller Search permission when searched for from the process keyrings.
+ *
+ * If successful, we place up to buflen bytes of data into the buffer, if one
+ * is provided, and return the amount of data that is available in the key,
+ * irrespective of how much we copied into the buffer.
*/
long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
{
@@ -657,7 +727,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
key = key_ref_to_ptr(key_ref);
/* see if we can read it directly */
- ret = key_permission(key_ref, KEY_READ);
+ ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
goto can_read_key;
if (ret != -EACCES)
@@ -673,7 +743,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
}
/* the key is probably readable - now try to read it */
- can_read_key:
+can_read_key:
ret = key_validate(key);
if (ret == 0) {
ret = -EOPNOTSUPP;
@@ -686,33 +756,50 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
}
}
- error2:
+error2:
key_put(key);
- error:
+error:
return ret;
+}
-} /* end keyctl_read_key() */
-
-/*****************************************************************************/
/*
- * change the ownership of a key
- * - the keyring owned by the changer
- * - if the uid or gid is -1, then that parameter is not changed
- * - implements keyctl(KEYCTL_CHOWN)
+ * Change the ownership of a key
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet. For the UID to be changed, or
+ * for the GID to be changed to a group the caller is not a member of, the
+ * caller must have sysadmin capability. If either uid or gid is -1 then that
+ * attribute is not changed.
+ *
+ * If the UID is to be changed, the new user must have sufficient quota to
+ * accept the key. The quota deduction will be moved from the old user to the
+ * new user should the attribute be changed.
+ *
+ * If successful, 0 will be returned.
*/
-long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
+long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
struct key_user *newowner, *zapowner = NULL;
struct key *key;
key_ref_t key_ref;
long ret;
+ kuid_t uid;
+ kgid_t gid;
+
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+ ret = -EINVAL;
+ if ((user != (uid_t) -1) && !uid_valid(uid))
+ goto error;
+ if ((group != (gid_t) -1) && !gid_valid(gid))
+ goto error;
ret = 0;
- if (uid == (uid_t) -1 && gid == (gid_t) -1)
+ if (user == (uid_t) -1 && group == (gid_t) -1)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
- KEY_SETATTR);
+ KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -726,27 +813,27 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
if (!capable(CAP_SYS_ADMIN)) {
/* only the sysadmin can chown a key to some other UID */
- if (uid != (uid_t) -1 && key->uid != uid)
+ if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
goto error_put;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
- if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
+ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
goto error_put;
}
/* change the UID */
- if (uid != (uid_t) -1 && uid != key->uid) {
+ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
ret = -ENOMEM;
- newowner = key_user_lookup(uid, current_user_ns());
+ newowner = key_user_lookup(uid);
if (!newowner)
goto error_put;
/* transfer the quota burden to the new user */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- unsigned maxkeys = (uid == 0) ?
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (uid == 0) ?
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
@@ -780,7 +867,7 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
}
/* change the GID */
- if (gid != (gid_t) -1)
+ if (group != (gid_t) -1)
key->gid = gid;
ret = 0;
@@ -798,14 +885,14 @@ quota_overrun:
zapowner = newowner;
ret = -EDQUOT;
goto error_put;
+}
-} /* end keyctl_chown_key() */
-
-/*****************************************************************************/
/*
- * change the permission mask on a key
- * - the keyring owned by the changer
- * - implements keyctl(KEYCTL_SETPERM)
+ * Change the permission mask on a key.
+ *
+ * The key must grant the caller Setattr permission for this to work, though
+ * the key need not be fully instantiated yet. If the caller does not have
+ * sysadmin capability, it may only change the permission on keys that it owns.
*/
long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
{
@@ -818,7 +905,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
- KEY_SETATTR);
+ KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -831,7 +918,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
down_write(&key->sem);
/* if we're not the sysadmin, we can only change a key that we own */
- if (capable(CAP_SYS_ADMIN) || key->uid == current_fsuid()) {
+ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
key->perm = perm;
ret = 0;
}
@@ -840,11 +927,11 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
key_put(key);
error:
return ret;
-
-} /* end keyctl_setperm_key() */
+}
/*
- * get the destination keyring for instantiation
+ * Get the destination keyring for instantiation and check that the caller has
+ * Write permission on it.
*/
static long get_instantiation_keyring(key_serial_t ringid,
struct request_key_auth *rka,
@@ -860,7 +947,7 @@ static long get_instantiation_keyring(key_serial_t ringid,
/* if a specific keyring is nominated by ID, then use that */
if (ringid > 0) {
- dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(dkref))
return PTR_ERR(dkref);
*_dest_keyring = key_ref_to_ptr(dkref);
@@ -881,7 +968,7 @@ static long get_instantiation_keyring(key_serial_t ringid,
}
/*
- * change the request_key authorisation key on the current process
+ * Change the request_key authorisation key on the current process.
*/
static int keyctl_change_reqkey_auth(struct key *key)
{
@@ -897,15 +984,35 @@ static int keyctl_change_reqkey_auth(struct key *key)
return commit_creds(new);
}
-/*****************************************************************************/
/*
- * instantiate the key with the specified payload, and, if one is given, link
- * the key into the keyring
+ * Copy the iovec data from userspace
*/
-long keyctl_instantiate_key(key_serial_t id,
- const void __user *_payload,
- size_t plen,
- key_serial_t ringid)
+static long copy_from_user_iovec(void *buffer, const struct iovec *iov,
+ unsigned ioc)
+{
+ for (; ioc > 0; ioc--) {
+ if (copy_from_user(buffer, iov->iov_base, iov->iov_len) != 0)
+ return -EFAULT;
+ buffer += iov->iov_len;
+ iov++;
+ }
+ return 0;
+}
+
+/*
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key_common(key_serial_t id,
+ const struct iovec *payload_iov,
+ unsigned ioc,
+ size_t plen,
+ key_serial_t ringid)
{
const struct cred *cred = current_cred();
struct request_key_auth *rka;
@@ -934,7 +1041,7 @@ long keyctl_instantiate_key(key_serial_t id,
/* pull the payload in if one was supplied */
payload = NULL;
- if (_payload) {
+ if (payload_iov) {
ret = -ENOMEM;
payload = kmalloc(plen, GFP_KERNEL);
if (!payload) {
@@ -946,8 +1053,8 @@ long keyctl_instantiate_key(key_serial_t id,
goto error;
}
- ret = -EFAULT;
- if (copy_from_user(payload, _payload, plen) != 0)
+ ret = copy_from_user_iovec(payload, payload_iov, ioc);
+ if (ret < 0)
goto error2;
}
@@ -975,22 +1082,127 @@ error2:
vfree(payload);
error:
return ret;
+}
-} /* end keyctl_instantiate_key() */
+/*
+ * Instantiate a key with the specified payload and link the key into the
+ * destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key(key_serial_t id,
+ const void __user *_payload,
+ size_t plen,
+ key_serial_t ringid)
+{
+ if (_payload && plen) {
+ struct iovec iov[1] = {
+ [0].iov_base = (void __user *)_payload,
+ [0].iov_len = plen
+ };
+
+ return keyctl_instantiate_key_common(id, iov, 1, plen, ringid);
+ }
+
+ return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+}
-/*****************************************************************************/
/*
- * negatively instantiate the key with the given timeout (in seconds), and, if
- * one is given, link the key into the keyring
+ * Instantiate a key with the specified multipart payload and link the key into
+ * the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_instantiate_key_iov(key_serial_t id,
+ const struct iovec __user *_payload_iov,
+ unsigned ioc,
+ key_serial_t ringid)
+{
+ struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
+ long ret;
+
+ if (!_payload_iov || !ioc)
+ goto no_payload;
+
+ ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
+ ARRAY_SIZE(iovstack), iovstack, &iov);
+ if (ret < 0)
+ goto err;
+ if (ret == 0)
+ goto no_payload_free;
+
+ ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
+err:
+ if (iov != iovstack)
+ kfree(iov);
+ return ret;
+
+no_payload_free:
+ if (iov != iovstack)
+ kfree(iov);
+no_payload:
+ return keyctl_instantiate_key_common(id, NULL, 0, 0, ringid);
+}
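The iov variant maps onto keyctl_instantiate_iov() in libkeyutils; a hedged sketch of a request-key helper assembling its payload from two fragments. The assumption that the target key ID arrives as argv[1], and the fragment contents themselves, are illustrative only:

#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>

int main(int argc, char *argv[])
{
        char header[] = "hdr:";
        char body[] = "payload-body";
        struct iovec iov[2] = {
                { .iov_base = header, .iov_len = strlen(header) },
                { .iov_base = body,   .iov_len = strlen(body)   },
        };
        key_serial_t id;

        if (argc < 2)
                return 2;
        id = (key_serial_t)atoi(argv[1]);

        /* requires the instantiation authority that request-key helpers
         * inherit; the two fragments are concatenated by the kernel */
        if (keyctl_instantiate_iov(id, iov, 2, KEY_SPEC_REQUESTOR_KEYRING) == -1) {
                perror("keyctl_instantiate_iov");
                return 1;
        }
        return 0;
}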
+
+/*
+ * Negatively instantiate the key with the given timeout (in seconds) and link
+ * the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return -ENOKEY until the negative key expires.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
{
+ return keyctl_reject_key(id, timeout, ENOKEY, ringid);
+}
+
+/*
+ * Negatively instantiate the key with the given timeout (in seconds) and error
+ * code and link the key into the destination keyring if one is given.
+ *
+ * The caller must have the appropriate instantiation permit set for this to
+ * work (see keyctl_assume_authority). No other permissions are required.
+ *
+ * The key and any links to the key will be automatically garbage collected
+ * after the timeout expires.
+ *
+ * Negative keys are used to rate limit repeated request_key() calls by causing
+ * them to return the specified error code until the negative key expires.
+ *
+ * If successful, 0 will be returned.
+ */
+long keyctl_reject_key(key_serial_t id, unsigned timeout, unsigned error,
+ key_serial_t ringid)
+{
const struct cred *cred = current_cred();
struct request_key_auth *rka;
struct key *instkey, *dest_keyring;
long ret;
- kenter("%d,%u,%d", id, timeout, ringid);
+ kenter("%d,%u,%u,%d", id, timeout, error, ringid);
+
+ /* must be a valid error code and mustn't be a kernel special */
+ if (error <= 0 ||
+ error >= MAX_ERRNO ||
+ error == ERESTARTSYS ||
+ error == ERESTARTNOINTR ||
+ error == ERESTARTNOHAND ||
+ error == ERESTART_RESTARTBLOCK)
+ return -EINVAL;
/* the appropriate instantiation authorisation key must have been
* assumed before calling this */
@@ -1010,7 +1222,7 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
goto error;
/* instantiate the key and link it into a keyring */
- ret = key_negate_and_link(rka->target_key, timeout,
+ ret = key_reject_and_link(rka->target_key, timeout, error,
dest_keyring, instkey);
key_put(dest_keyring);
@@ -1022,13 +1234,14 @@ long keyctl_negate_key(key_serial_t id, unsigned timeout, key_serial_t ringid)
error:
return ret;
+}
-} /* end keyctl_negate_key() */
-
-/*****************************************************************************/
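keyctl_reject() is the matching libkeyutils wrapper for the new KEYCTL_REJECT operation (assumed to be available in the library); a hedged helper-side sketch with an arbitrary 30-second window:

#include <errno.h>
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
        key_serial_t id;

        if (argc < 2)
                return 2;
        id = (key_serial_t)atoi(argv[1]);

        /* park a negative result: request_key() callers get EKEYREJECTED
         * for the next 30 seconds instead of triggering another upcall */
        if (keyctl_reject(id, 30, EKEYREJECTED, KEY_SPEC_REQUESTOR_KEYRING) == -1) {
                perror("keyctl_reject");
                return 1;
        }
        return 0;
}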
/*
- * set the default keyring in which request_key() will cache keys
- * - return the old setting
+ * Read or set the default keyring in which request_key() will cache keys and
+ * return the old setting.
+ *
+ * If a process keyring is specified then this will be created if it doesn't
+ * yet exist. The old setting will be returned if successful.
*/
long keyctl_set_reqkey_keyring(int reqkey_defl)
{
@@ -1080,55 +1293,74 @@ set:
return old_setting;
error:
abort_creds(new);
- return -EINVAL;
-
-} /* end keyctl_set_reqkey_keyring() */
+ return ret;
+}
-/*****************************************************************************/
/*
- * set or clear the timeout for a key
+ * Set or clear the timeout on a key.
+ *
+ * Either the key must grant the caller Setattr permission or else the caller
+ * must hold an instantiation authorisation token for the key.
+ *
+ * The timeout is either 0 to clear the timeout, or a number of seconds from
+ * the current time. The key and any links to the key will be automatically
+ * garbage collected after the timeout expires.
+ *
+ * If successful, 0 is returned.
*/
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
{
- struct timespec now;
- struct key *key;
+ struct key *key, *instkey;
key_ref_t key_ref;
- time_t expiry;
long ret;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
- KEY_SETATTR);
+ KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
+ /* setting the timeout on a key under construction is permitted
+ * if we have the authorisation token handy */
+ if (PTR_ERR(key_ref) == -EACCES) {
+ instkey = key_get_instantiation_authkey(id);
+ if (!IS_ERR(instkey)) {
+ key_put(instkey);
+ key_ref = lookup_user_key(id,
+ KEY_LOOKUP_PARTIAL,
+ 0);
+ if (!IS_ERR(key_ref))
+ goto okay;
+ }
+ }
+
ret = PTR_ERR(key_ref);
goto error;
}
+okay:
key = key_ref_to_ptr(key_ref);
-
- /* make the changes with the locks held to prevent races */
- down_write(&key->sem);
-
- expiry = 0;
- if (timeout > 0) {
- now = current_kernel_time();
- expiry = now.tv_sec + timeout;
- }
-
- key->expiry = expiry;
- key_schedule_gc(key->expiry + key_gc_delay);
-
- up_write(&key->sem);
+ key_set_timeout(key, timeout);
key_put(key);
ret = 0;
error:
return ret;
+}
-} /* end keyctl_set_timeout() */
-
-/*****************************************************************************/
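
A minimal userspace sketch of the semantics described below (assuming libkeyutils; the key name "example:token" is purely illustrative, and the key plus any links to it are garbage collected once the timeout passes):

	#include <keyutils.h>

	int main(void)
	{
		key_serial_t key = add_key("user", "example:token", "secret", 6,
					   KEY_SPEC_SESSION_KEYRING);
		if (key < 0)
			return 1;

		/* expire the key five minutes from now; 0 would clear the timeout */
		return keyctl_set_timeout(key, 300) < 0 ? 1 : 0;
	}
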
/*
- * assume the authority to instantiate the specified key
+ * Assume (or clear) the authority to instantiate the specified key.
+ *
+ * This sets the authoritative token currently in force for key instantiation.
+ * This must be done for a key to be instantiated. It makes the keys of the
+ * process that called request_key() to create the key available to
+ * request_key() calls made by the caller of this function.
+ *
+ * The caller must have the instantiation authorisation key in their process
+ * keyrings, and it must grant the caller Search permission.
+ *
+ * If the ID given is 0, then the setting will be cleared and 0 returned.
+ *
+ * If the ID given matches an authorisation key, then that key will be
+ * set and its ID will be returned. The authorisation key can be read to get
+ * the callout information passed to request_key().
*/
long keyctl_assume_authority(key_serial_t id)
{
@@ -1165,16 +1397,17 @@ long keyctl_assume_authority(key_serial_t id)
ret = authkey->serial;
error:
return ret;
-
-} /* end keyctl_assume_authority() */
+}
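
For illustration, a sketch of the read-back path mentioned above, assuming libkeyutils and that the caller holds the authorisation key in its process keyrings:

	#include <stdio.h>
	#include <stdlib.h>
	#include <keyutils.h>

	int main(int argc, char *argv[])
	{
		key_serial_t key = atoi(argv[1]);	/* key under construction */
		void *callout = NULL;
		long n;

		if (keyctl_assume_authority(key) < 0)
			return 1;

		/* the authorisation key now in force carries the callout info */
		n = keyctl_read_alloc(KEY_SPEC_REQKEY_AUTH_KEY, &callout);
		if (n >= 0)
			printf("callout: %.*s\n", (int)n, (char *)callout);
		free(callout);
		return n < 0;
	}
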
/*
- * get the security label of a key
- * - the key must grant us view permission
- * - if there's a buffer, we place up to buflen bytes of data into it
- * - unless there's an error, we return the amount of information available,
- * irrespective of how much we may have copied (including the terminal NUL)
- * - implements keyctl(KEYCTL_GET_SECURITY)
+ * Get a key's LSM security label.
+ *
+ * The key must grant the caller View permission for this to work.
+ *
+ * If there's a buffer, then up to buflen bytes of data will be placed into it.
+ *
+ * If successful, the amount of information available will be returned,
+ * irrespective of how much was copied (including the terminal NUL).
*/
long keyctl_get_security(key_serial_t keyid,
char __user *buffer,
@@ -1185,7 +1418,7 @@ long keyctl_get_security(key_serial_t keyid,
char *context;
long ret;
- key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW);
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
if (PTR_ERR(key_ref) != -EACCES)
return PTR_ERR(key_ref);
@@ -1229,118 +1462,110 @@ long keyctl_get_security(key_serial_t keyid,
}
/*
- * attempt to install the calling process's session keyring on the process's
- * parent process
- * - the keyring must exist and must grant us LINK permission
- * - implements keyctl(KEYCTL_SESSION_TO_PARENT)
+ * Attempt to install the calling process's session keyring on the process's
+ * parent process.
+ *
+ * The keyring must exist and must grant the caller LINK permission, and the
+ * parent process must be single-threaded and must have the same effective
+ * ownership as this process and mustn't be SUID/SGID.
+ *
+ * The keyring will be emplaced on the parent when it next resumes userspace.
+ *
+ * If successful, 0 will be returned.
*/
long keyctl_session_to_parent(void)
{
-#ifdef TIF_NOTIFY_RESUME
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
- struct cred *cred, *oldcred;
+ struct callback_head *newwork, *oldwork;
key_ref_t keyring_r;
+ struct cred *cred;
int ret;
- keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
+ keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
if (IS_ERR(keyring_r))
return PTR_ERR(keyring_r);
+ ret = -ENOMEM;
+
/* our parent is going to need a new cred struct, a new tgcred struct
* and new security data, so we allocate them here to prevent ENOMEM in
* our parent */
- ret = -ENOMEM;
cred = cred_alloc_blank();
if (!cred)
goto error_keyring;
+ newwork = &cred->rcu;
- cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
+ cred->session_keyring = key_ref_to_ptr(keyring_r);
keyring_r = NULL;
+ init_task_work(newwork, key_change_session_keyring);
me = current;
+ rcu_read_lock();
write_lock_irq(&tasklist_lock);
- parent = me->real_parent;
ret = -EPERM;
+ oldwork = NULL;
+ parent = me->real_parent;
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
- goto not_permitted;
+ goto unlock;
/* the parent must be single threaded */
- if (atomic_read(&parent->signal->count) != 1)
- goto not_permitted;
+ if (!thread_group_empty(parent))
+ goto unlock;
/* the parent and the child must have different session keyrings or
* there's no point */
mycred = current_cred();
pcred = __task_cred(parent);
if (mycred == pcred ||
- mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
- goto already_same;
+ mycred->session_keyring == pcred->session_keyring) {
+ ret = 0;
+ goto unlock;
+ }
/* the parent must have the same effective ownership and mustn't be
* SUID/SGID */
- if (pcred-> uid != mycred->euid ||
- pcred->euid != mycred->euid ||
- pcred->suid != mycred->euid ||
- pcred-> gid != mycred->egid ||
- pcred->egid != mycred->egid ||
- pcred->sgid != mycred->egid)
- goto not_permitted;
+ if (!uid_eq(pcred->uid, mycred->euid) ||
+ !uid_eq(pcred->euid, mycred->euid) ||
+ !uid_eq(pcred->suid, mycred->euid) ||
+ !gid_eq(pcred->gid, mycred->egid) ||
+ !gid_eq(pcred->egid, mycred->egid) ||
+ !gid_eq(pcred->sgid, mycred->egid))
+ goto unlock;
/* the keyrings must have the same UID */
- if (pcred ->tgcred->session_keyring->uid != mycred->euid ||
- mycred->tgcred->session_keyring->uid != mycred->euid)
- goto not_permitted;
-
- /* the LSM must permit the replacement of the parent's keyring with the
- * keyring from this process */
- ret = security_key_session_to_parent(mycred, pcred,
- key_ref_to_ptr(keyring_r));
- if (ret < 0)
- goto not_permitted;
+ if ((pcred->session_keyring &&
+ !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
+ !uid_eq(mycred->session_keyring->uid, mycred->euid))
+ goto unlock;
- /* if there's an already pending keyring replacement, then we replace
- * that */
- oldcred = parent->replacement_session_keyring;
+ /* cancel an already pending keyring replacement */
+ oldwork = task_work_cancel(parent, key_change_session_keyring);
/* the replacement session keyring is applied just prior to userspace
* restarting */
- parent->replacement_session_keyring = cred;
- cred = NULL;
- set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
-
- write_unlock_irq(&tasklist_lock);
- if (oldcred)
- put_cred(oldcred);
- return 0;
-
-already_same:
- ret = 0;
-not_permitted:
+ ret = task_work_add(parent, newwork, true);
+ if (!ret)
+ newwork = NULL;
+unlock:
write_unlock_irq(&tasklist_lock);
- put_cred(cred);
+ rcu_read_unlock();
+ if (oldwork)
+ put_cred(container_of(oldwork, struct cred, rcu));
+ if (newwork)
+ put_cred(cred);
return ret;
error_keyring:
key_ref_put(keyring_r);
return ret;
-
-#else /* !TIF_NOTIFY_RESUME */
- /*
- * To be removed when TIF_NOTIFY_RESUME has been implemented on
- * m68k/xtensa
- */
-#warning TIF_NOTIFY_RESUME not implemented
- return -EOPNOTSUPP;
-#endif /* !TIF_NOTIFY_RESUME */
}
-/*****************************************************************************/
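
The classic consumer of this is the "keyctl new_session" pattern; a hedged sketch assuming libkeyutils:

	#include <stdio.h>
	#include <keyutils.h>

	int main(void)
	{
		/* join a fresh anonymous session keyring ... */
		key_serial_t session = keyctl_join_session_keyring(NULL);

		/* ... and ask the kernel to install it on the parent shell,
		 * which happens when the parent next returns to userspace */
		if (session < 0 || keyctl_session_to_parent() < 0)
			return 1;
		printf("%d\n", session);
		return 0;
	}
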
/*
- * the key control system call
+ * The key control system call
*/
SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
unsigned long, arg4, unsigned long, arg5)
@@ -1426,8 +1651,26 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
case KEYCTL_SESSION_TO_PARENT:
return keyctl_session_to_parent();
+ case KEYCTL_REJECT:
+ return keyctl_reject_key((key_serial_t) arg2,
+ (unsigned) arg3,
+ (unsigned) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_INSTANTIATE_IOV:
+ return keyctl_instantiate_key_iov(
+ (key_serial_t) arg2,
+ (const struct iovec __user *) arg3,
+ (unsigned) arg4,
+ (key_serial_t) arg5);
+
+ case KEYCTL_INVALIDATE:
+ return keyctl_invalidate_key((key_serial_t) arg2);
+
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
+
default:
return -EOPNOTSUPP;
}
-
-} /* end sys_keyctl() */
+}
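
Of the newly wired-up operations, KEYCTL_INVALIDATE is perhaps the simplest to exercise from userspace; a sketch assuming libkeyutils and an illustrative key description:

	#include <keyutils.h>

	int main(void)
	{
		key_serial_t key = request_key("user", "example:token", NULL,
					       KEY_SPEC_SESSION_KEYRING);
		if (key < 0)
			return 1;

		/* mark the key dead immediately; the garbage collector reaps it */
		return keyctl_invalidate(key) < 0 ? 1 : 0;
	}
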
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 8ec02746ca9..9cf2575f0d9 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1,6 +1,6 @@
/* Keyring handling
*
- * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,20 +17,44 @@
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/keyring-type.h>
-#include <asm/uaccess.h>
+#include <keys/user-type.h>
+#include <linux/assoc_array_priv.h>
+#include <linux/uaccess.h>
#include "internal.h"
/*
- * when plumbing the depths of the key tree, this sets a hard limit set on how
- * deep we're willing to go
+ * When plumbing the depths of the key tree, this sets a hard limit on how
+ * deep we're willing to go.
*/
#define KEYRING_SEARCH_MAX_DEPTH 6
/*
- * we keep all named keyrings in a hash to speed looking them up
+ * We keep all named keyrings in a hash to speed looking them up.
*/
#define KEYRING_NAME_HASH_SIZE (1 << 5)
+/*
+ * We mark pointers we pass to the associative array with bit 1 set if
+ * they're keyrings and clear otherwise.
+ */
+#define KEYRING_PTR_SUBTYPE 0x2UL
+
+static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & KEYRING_PTR_SUBTYPE;
+}
+static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
+{
+ void *object = assoc_array_ptr_to_leaf(x);
+ return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
+}
+static inline void *keyring_key_to_ptr(struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
+ return key;
+}
+
static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
static DEFINE_RWLOCK(keyring_name_lock);
@@ -39,17 +63,18 @@ static inline unsigned keyring_hash(const char *desc)
unsigned bucket = 0;
for (; *desc; desc++)
- bucket += (unsigned char) *desc;
+ bucket += (unsigned char)*desc;
return bucket & (KEYRING_NAME_HASH_SIZE - 1);
}
/*
- * the keyring type definition
+ * The keyring key type definition. Keyrings are simply keys of this type and
+ * can be treated as ordinary keys in addition to having their own special
+ * operations.
*/
static int keyring_instantiate(struct key *keyring,
- const void *data, size_t datalen);
-static int keyring_match(const struct key *keyring, const void *criterion);
+ struct key_preparsed_payload *prep);
static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
@@ -58,27 +83,25 @@ static long keyring_read(const struct key *keyring,
struct key_type key_type_keyring = {
.name = "keyring",
- .def_datalen = sizeof(struct keyring_list),
+ .def_datalen = 0,
.instantiate = keyring_instantiate,
- .match = keyring_match,
+ .match = user_match,
.revoke = keyring_revoke,
.destroy = keyring_destroy,
.describe = keyring_describe,
.read = keyring_read,
};
-
EXPORT_SYMBOL(key_type_keyring);
/*
- * semaphore to serialise link/link calls to prevent two link calls in parallel
- * introducing a cycle
+ * Semaphore to serialise link/link calls to prevent two link calls in parallel
+ * introducing a cycle.
*/
static DECLARE_RWSEM(keyring_serialise_link_sem);
-/*****************************************************************************/
/*
- * publish the name of a keyring so that it can be found by name (if it has
- * one)
+ * Publish the name of a keyring so that it can be found by name (if it has
+ * one).
*/
static void keyring_publish_name(struct key *keyring)
{
@@ -97,50 +120,259 @@ static void keyring_publish_name(struct key *keyring)
write_unlock(&keyring_name_lock);
}
+}
-} /* end keyring_publish_name() */
-
-/*****************************************************************************/
/*
- * initialise a keyring
- * - we object if we were given any data
+ * Initialise a keyring.
+ *
+ * Returns 0 on success, -EINVAL if given any data.
*/
static int keyring_instantiate(struct key *keyring,
- const void *data, size_t datalen)
+ struct key_preparsed_payload *prep)
{
int ret;
ret = -EINVAL;
- if (datalen == 0) {
+ if (prep->datalen == 0) {
+ assoc_array_init(&keyring->keys);
/* make the keyring available by name if it has one */
keyring_publish_name(keyring);
ret = 0;
}
return ret;
+}
+
+/*
+ * Multiply 64-bits by 32-bits to 96-bits and fold back to 64-bit. Ideally we'd
+ * fold the carry back too, but that requires inline asm.
+ */
+static u64 mult_64x32_and_fold(u64 x, u32 y)
+{
+ u64 hi = (u64)(u32)(x >> 32) * y;
+ u64 lo = (u64)(u32)(x) * y;
+ return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
+}
+
+/*
+ * Hash a key type and description.
+ */
+static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
+{
+ const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
+ const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
+ const char *description = index_key->description;
+ unsigned long hash, type;
+ u32 piece;
+ u64 acc;
+ int n, desc_len = index_key->desc_len;
+
+ type = (unsigned long)index_key->type;
+
+ acc = mult_64x32_and_fold(type, desc_len + 13);
+ acc = mult_64x32_and_fold(acc, 9207);
+ for (;;) {
+ n = desc_len;
+ if (n <= 0)
+ break;
+ if (n > 4)
+ n = 4;
+ piece = 0;
+ memcpy(&piece, description, n);
+ description += n;
+ desc_len -= n;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+ }
+
+ /* Fold the hash down to 32 bits if need be. */
+ hash = acc;
+ if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
+ hash ^= acc >> 32;
-} /* end keyring_instantiate() */
+ /* Squidge all the keyrings into a part of the tree separate from
+ * ordinary keys by making sure the lowest level segment in the hash is
+ * zero for keyrings and non-zero otherwise.
+ */
+ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
+ return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+ if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+ return (hash + (hash << level_shift)) & ~fan_mask;
+ return hash;
+}
-/*****************************************************************************/
/*
- * match keyrings on their name
+ * Build the next index key chunk.
+ *
+ * On 32-bit systems the index key is laid out as:
+ *
+ * 0 4 5 9...
+ * hash desclen typeptr desc[]
+ *
+ * On 64-bit systems:
+ *
+ * 0 8 9 17...
+ * hash desclen typeptr desc[]
+ *
+ * We return it one word-sized chunk at a time.
*/
-static int keyring_match(const struct key *keyring, const void *description)
+static unsigned long keyring_get_key_chunk(const void *data, int level)
+{
+ const struct keyring_index_key *index_key = data;
+ unsigned long chunk = 0;
+ long offset = 0;
+ int desc_len = index_key->desc_len, n = sizeof(chunk);
+
+ level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ switch (level) {
+ case 0:
+ return hash_key_type_and_desc(index_key);
+ case 1:
+ return ((unsigned long)index_key->type << 8) | desc_len;
+ case 2:
+ if (desc_len == 0)
+ return (u8)((unsigned long)index_key->type >>
+ (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+ n--;
+ offset = 1;
+ default:
+ offset += sizeof(chunk) - 1;
+ offset += (level - 3) * sizeof(chunk);
+ if (offset >= desc_len)
+ return 0;
+ desc_len -= offset;
+ if (desc_len > n)
+ desc_len = n;
+ offset += desc_len;
+ do {
+ chunk <<= 8;
+ chunk |= ((u8*)index_key->description)[--offset];
+ } while (--desc_len > 0);
+
+ if (level == 2) {
+ chunk <<= 8;
+ chunk |= (u8)((unsigned long)index_key->type >>
+ (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+ }
+ return chunk;
+ }
+}
+
+static unsigned long keyring_get_object_key_chunk(const void *object, int level)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ return keyring_get_key_chunk(&key->index_key, level);
+}
+
+static bool keyring_compare_object(const void *object, const void *data)
{
- return keyring->description &&
- strcmp(keyring->description, description) == 0;
+ const struct keyring_index_key *index_key = data;
+ const struct key *key = keyring_ptr_to_key(object);
-} /* end keyring_match() */
+ return key->index_key.type == index_key->type &&
+ key->index_key.desc_len == index_key->desc_len &&
+ memcmp(key->index_key.description, index_key->description,
+ index_key->desc_len) == 0;
+}
-/*****************************************************************************/
/*
- * dispose of the data dangling from the corpse of a keyring
+ * Compare the index keys of a pair of objects and determine the bit position
+ * at which they differ - if they differ.
*/
-static void keyring_destroy(struct key *keyring)
+static int keyring_diff_objects(const void *object, const void *data)
+{
+ const struct key *key_a = keyring_ptr_to_key(object);
+ const struct keyring_index_key *a = &key_a->index_key;
+ const struct keyring_index_key *b = data;
+ unsigned long seg_a, seg_b;
+ int level, i;
+
+ level = 0;
+ seg_a = hash_key_type_and_desc(a);
+ seg_b = hash_key_type_and_desc(b);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ /* The number of bits contributed by the hash is controlled by a
+ * constant in the assoc_array headers. Everything else thereafter we
+ * can deal with as being machine word-size dependent.
+ */
+ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
+ seg_a = a->desc_len;
+ seg_b = b->desc_len;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ /* The next bit may not work on big endian */
+ level++;
+ seg_a = (unsigned long)a->type;
+ seg_b = (unsigned long)b->type;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ level += sizeof(unsigned long);
+ if (a->desc_len == 0)
+ goto same;
+
+ i = 0;
+ if (((unsigned long)a->description | (unsigned long)b->description) &
+ (sizeof(unsigned long) - 1)) {
+ do {
+ seg_a = *(unsigned long *)(a->description + i);
+ seg_b = *(unsigned long *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ i += sizeof(unsigned long);
+ } while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
+ }
+
+ for (; i < a->desc_len; i++) {
+ seg_a = *(unsigned char *)(a->description + i);
+ seg_b = *(unsigned char *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ }
+
+same:
+ return -1;
+
+differ_plus_i:
+ level += i;
+differ:
+ i = level * 8 + __ffs(seg_a ^ seg_b);
+ return i;
+}
+
+/*
+ * Free an object after stripping the keyring flag off of the pointer.
+ */
+static void keyring_free_object(void *object)
{
- struct keyring_list *klist;
- int loop;
+ key_put(keyring_ptr_to_key(object));
+}
+
+/*
+ * Operations for keyring management by the index-tree routines.
+ */
+static const struct assoc_array_ops keyring_assoc_array_ops = {
+ .get_key_chunk = keyring_get_key_chunk,
+ .get_object_key_chunk = keyring_get_object_key_chunk,
+ .compare_object = keyring_compare_object,
+ .diff_objects = keyring_diff_objects,
+ .free_object = keyring_free_object,
+};
+/*
+ * Clean up a keyring when it is destroyed. Unpublish its name if it had one
+ * and dispose of its data.
+ *
+ * The garbage collector detects the final key_put(), removes the keyring from
+ * the serial number tree and then does RCU synchronisation before coming here,
+ * so we shouldn't need to worry about code poking around here with the RCU
+ * readlock held by this time.
+ */
+static void keyring_destroy(struct key *keyring)
+{
if (keyring->description) {
write_lock(&keyring_name_lock);
@@ -151,111 +383,110 @@ static void keyring_destroy(struct key *keyring)
write_unlock(&keyring_name_lock);
}
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist) {
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- key_put(klist->keys[loop]);
- kfree(klist);
- }
-
-} /* end keyring_destroy() */
+ assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
+}
-/*****************************************************************************/
/*
- * describe the keyring
+ * Describe a keyring for /proc.
*/
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
- struct keyring_list *klist;
-
- if (keyring->description) {
+ if (keyring->description)
seq_puts(m, keyring->description);
- }
- else {
+ else
seq_puts(m, "[anon]");
+
+ if (key_is_instantiated(keyring)) {
+ if (keyring->keys.nr_leaves_on_tree != 0)
+ seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
+ else
+ seq_puts(m, ": empty");
}
+}
- rcu_read_lock();
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist)
- seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
- else
- seq_puts(m, ": empty");
- rcu_read_unlock();
+struct keyring_read_iterator_context {
+ size_t qty;
+ size_t count;
+ key_serial_t __user *buffer;
+};
+
+static int keyring_read_iterator(const void *object, void *data)
+{
+ struct keyring_read_iterator_context *ctx = data;
+ const struct key *key = keyring_ptr_to_key(object);
+ int ret;
+
+ kenter("{%s,%d},,{%zu/%zu}",
+ key->type->name, key->serial, ctx->count, ctx->qty);
-} /* end keyring_describe() */
+ if (ctx->count >= ctx->qty)
+ return 1;
+
+ ret = put_user(key->serial, ctx->buffer);
+ if (ret < 0)
+ return ret;
+ ctx->buffer++;
+ ctx->count += sizeof(key->serial);
+ return 0;
+}
-/*****************************************************************************/
/*
- * read a list of key IDs from the keyring's contents
- * - the keyring's semaphore is read-locked
+ * Read a list of key IDs from the keyring's contents in binary form
+ *
+ * The keyring's semaphore is read-locked by the caller. This prevents someone
+ * from modifying it under us - which could cause us to read key IDs multiple
+ * times.
*/
static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
{
- struct keyring_list *klist;
- struct key *key;
- size_t qty, tmp;
- int loop, ret;
-
- ret = 0;
- klist = rcu_dereference(keyring->payload.subscriptions);
-
- if (klist) {
- /* calculate how much data we could return */
- qty = klist->nkeys * sizeof(key_serial_t);
+ struct keyring_read_iterator_context ctx;
+ unsigned long nr_keys;
+ int ret;
- if (buffer && buflen > 0) {
- if (buflen > qty)
- buflen = qty;
+ kenter("{%d},,%zu", key_serial(keyring), buflen);
- /* copy the IDs of the subscribed keys into the
- * buffer */
- ret = -EFAULT;
+ if (buflen & (sizeof(key_serial_t) - 1))
+ return -EINVAL;
- for (loop = 0; loop < klist->nkeys; loop++) {
- key = klist->keys[loop];
+ nr_keys = keyring->keys.nr_leaves_on_tree;
+ if (nr_keys == 0)
+ return 0;
- tmp = sizeof(key_serial_t);
- if (tmp > buflen)
- tmp = buflen;
+ /* Calculate how much data we could return */
+ ctx.qty = nr_keys * sizeof(key_serial_t);
- if (copy_to_user(buffer,
- &key->serial,
- tmp) != 0)
- goto error;
+ if (!buffer || !buflen)
+ return ctx.qty;
- buflen -= tmp;
- if (buflen == 0)
- break;
- buffer += tmp;
- }
- }
+ if (buflen > ctx.qty)
+ ctx.qty = buflen;
- ret = qty;
+ /* Copy the IDs of the subscribed keys into the buffer */
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %d [iterate]", ret);
+ return ret;
}
- error:
- return ret;
-
-} /* end keyring_read() */
+ kleave(" = %zu [ok]", ctx.count);
+ return ctx.count;
+}
-/*****************************************************************************/
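
From userspace the same data is visible through KEYCTL_READ as a packed array of key_serial_t; a sketch assuming libkeyutils:

	#include <stdio.h>
	#include <stdlib.h>
	#include <keyutils.h>

	int main(void)
	{
		void *buf = NULL;
		long i, n = keyctl_read_alloc(KEY_SPEC_SESSION_KEYRING, &buf);
		key_serial_t *ids = buf;

		if (n < 0)
			return 1;
		for (i = 0; i < n / (long)sizeof(key_serial_t); i++)
			printf("key %d\n", ids[i]);
		free(buf);
		return 0;
	}
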
/*
- * allocate a keyring and link into the destination keyring
+ * Allocate a keyring and link into the destination keyring.
*/
-struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
- const struct cred *cred, unsigned long flags,
- struct key *dest)
+struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
+ const struct cred *cred, key_perm_t perm,
+ unsigned long flags, struct key *dest)
{
struct key *keyring;
int ret;
keyring = key_alloc(&key_type_keyring, description,
- uid, gid, cred,
- (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
- flags);
-
+ uid, gid, cred, perm, flags);
if (!IS_ERR(keyring)) {
ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
if (ret < 0) {
@@ -265,268 +496,460 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
}
return keyring;
+}
+EXPORT_SYMBOL(keyring_alloc);
-} /* end keyring_alloc() */
-
-/*****************************************************************************/
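
As a hedged in-kernel sketch of the widened keyring_alloc() signature (the keyring name, permission mask and flags below are purely illustrative):

	#include <linux/cred.h>
	#include <linux/err.h>
	#include <linux/key.h>
	#include <linux/uidgid.h>

	static struct key *example_make_keyring(void)
	{
		/* callers now choose the permission mask rather than getting
		 * the old hard-coded (KEY_POS_ALL & ~KEY_POS_SETATTR) |
		 * KEY_USR_ALL */
		return keyring_alloc(".example", GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				     current_cred(),
				     (KEY_POS_ALL & ~KEY_POS_SETATTR) |
				     KEY_USR_VIEW | KEY_USR_READ | KEY_USR_SEARCH,
				     KEY_ALLOC_NOT_IN_QUOTA, NULL);
	}
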
/*
- * search the supplied keyring tree for a key that matches the criterion
- * - perform a breadth-then-depth search up to the prescribed limit
- * - we only find keys on which we have search permission
- * - we use the supplied match function to see if the description (or other
- * feature of interest) matches
- * - we rely on RCU to prevent the keyring lists from disappearing on us
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we only found negative matching keys
- * - we propagate the possession attribute from the keyring ref to the key ref
+ * Iteration function to consider each key found.
*/
-key_ref_t keyring_search_aux(key_ref_t keyring_ref,
- const struct cred *cred,
- struct key_type *type,
- const void *description,
- key_match_func_t match)
+static int keyring_search_iterator(const void *object, void *iterator_data)
{
- struct {
- struct keyring_list *keylist;
- int kix;
- } stack[KEYRING_SEARCH_MAX_DEPTH];
-
- struct keyring_list *keylist;
- struct timespec now;
- unsigned long possessed, kflags;
- struct key *keyring, *key;
- key_ref_t key_ref;
- long err;
- int sp, kix;
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+ unsigned long kflags = key->flags;
- keyring = key_ref_to_ptr(keyring_ref);
- possessed = is_key_possessed(keyring_ref);
- key_check(keyring);
+ kenter("{%d}", key->serial);
- /* top keyring must have search permission to begin the search */
- err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
- if (err < 0) {
- key_ref = ERR_PTR(err);
- goto error;
+ /* ignore keys not of this type */
+ if (key->type != ctx->index_key.type) {
+ kleave(" = 0 [!type]");
+ return 0;
}
- key_ref = ERR_PTR(-ENOTDIR);
- if (keyring->type != &key_type_keyring)
- goto error;
+ /* skip invalidated, revoked and expired keys */
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ ctx->result = ERR_PTR(-EKEYREVOKED);
+ kleave(" = %d [invrev]", ctx->skipped_ret);
+ goto skipped;
+ }
- rcu_read_lock();
+ if (key->expiry && ctx->now.tv_sec >= key->expiry) {
+ ctx->result = ERR_PTR(-EKEYEXPIRED);
+ kleave(" = %d [expire]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
- now = current_kernel_time();
- err = -EAGAIN;
- sp = 0;
-
- /* firstly we should check to see if this top-level keyring is what we
- * are looking for */
- key_ref = ERR_PTR(-EAGAIN);
- kflags = keyring->flags;
- if (keyring->type == type && match(keyring, description)) {
- key = keyring;
-
- /* check it isn't negative and hasn't expired or been
- * revoked */
- if (kflags & (1 << KEY_FLAG_REVOKED))
- goto error_2;
- if (key->expiry && now.tv_sec >= key->expiry)
- goto error_2;
- key_ref = ERR_PTR(-ENOKEY);
- if (kflags & (1 << KEY_FLAG_NEGATIVE))
- goto error_2;
- goto found;
+ /* keys that don't match */
+ if (!ctx->match(key, ctx->match_data)) {
+ kleave(" = 0 [!match]");
+ return 0;
}
- /* otherwise, the top keyring must not be revoked, expired, or
- * negatively instantiated if we are to search it */
- key_ref = ERR_PTR(-EAGAIN);
- if (kflags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_NEGATIVE)) ||
- (keyring->expiry && now.tv_sec >= keyring->expiry))
- goto error_2;
+ /* key must have search permissions */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_NEED_SEARCH) < 0) {
+ ctx->result = ERR_PTR(-EACCES);
+ kleave(" = %d [!perm]", ctx->skipped_ret);
+ goto skipped;
+ }
- /* start processing a new keyring */
-descend:
- if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- goto not_this_keyring;
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ /* we set a different error code if we pass a negative key */
+ if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
+ smp_rmb();
+ ctx->result = ERR_PTR(key->type_data.reject_error);
+ kleave(" = %d [neg]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
- keylist = rcu_dereference(keyring->payload.subscriptions);
- if (!keylist)
- goto not_this_keyring;
+ /* Found */
+ ctx->result = make_key_ref(key, ctx->possessed);
+ kleave(" = 1 [found]");
+ return 1;
- /* iterate through the keys in this keyring first */
- for (kix = 0; kix < keylist->nkeys; kix++) {
- key = keylist->keys[kix];
- kflags = key->flags;
+skipped:
+ return ctx->skipped_ret;
+}
- /* ignore keys not of this type */
- if (key->type != type)
- continue;
+/*
+ * Search inside a keyring for a key. We can search by walking to it
+ * directly based on its index-key or we can iterate over the entire
+ * tree looking for it, based on the match function.
+ */
+static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
+{
+ if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) ==
+ KEYRING_SEARCH_LOOKUP_DIRECT) {
+ const void *object;
+
+ object = assoc_array_find(&keyring->keys,
+ &keyring_assoc_array_ops,
+ &ctx->index_key);
+ return object ? ctx->iterator(object, ctx) : 0;
+ }
+ return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
+}
- /* skip revoked keys and expired keys */
- if (kflags & (1 << KEY_FLAG_REVOKED))
- continue;
+/*
+ * Search a tree of keyrings that point to other keyrings up to the maximum
+ * depth.
+ */
+static bool search_nested_keyrings(struct key *keyring,
+ struct keyring_search_context *ctx)
+{
+ struct {
+ struct key *keyring;
+ struct assoc_array_node *node;
+ int slot;
+ } stack[KEYRING_SEARCH_MAX_DEPTH];
- if (key->expiry && now.tv_sec >= key->expiry)
- continue;
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ struct key *key;
+ int sp = 0, slot;
- /* keys that don't match */
- if (!match(key, description))
- continue;
+ kenter("{%d},{%s,%s}",
+ keyring->serial,
+ ctx->index_key.type->name,
+ ctx->index_key.description);
- /* key must have search permissions */
- if (key_task_permission(make_key_ref(key, possessed),
- cred, KEY_SEARCH) < 0)
- continue;
+ if (ctx->index_key.description)
+ ctx->index_key.desc_len = strlen(ctx->index_key.description);
- /* we set a different error code if we pass a negative key */
- if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
- err = -ENOKEY;
- continue;
+ /* Check to see if this top-level keyring is what we are looking for
+ * and whether it is valid or not.
+ */
+ if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE ||
+ keyring_compare_object(keyring, &ctx->index_key)) {
+ ctx->skipped_ret = 2;
+ ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
+ switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
+ case 1:
+ goto found;
+ case 2:
+ return false;
+ default:
+ break;
}
+ }
+ ctx->skipped_ret = 0;
+ if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
+ ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
+
+ /* Start processing a new keyring */
+descend_to_keyring:
+ kdebug("descend to %d", keyring->serial);
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto not_this_keyring;
+
+ /* Search through the keys in this keyring before searching its
+ * subtrees.
+ */
+ if (search_keyring(keyring, ctx))
goto found;
- }
- /* search through the keyrings nested in this one */
- kix = 0;
-ascend:
- for (; kix < keylist->nkeys; kix++) {
- key = keylist->keys[kix];
- if (key->type != &key_type_keyring)
- continue;
+ /* Then manually iterate through the keyrings nested in this one.
+ *
+ * Start from the root node of the index tree. Because of the way the
+ * hash function has been set up, keyrings cluster on the leftmost
+ * branch of the root node (root slot 0) or in the root node itself.
+ * Non-keyrings avoid the leftmost branch of the root entirely (root
+ * slots 1-15).
+ */
+ ptr = ACCESS_ONCE(keyring->keys.root);
+ if (!ptr)
+ goto not_this_keyring;
- /* recursively search nested keyrings
- * - only search keyrings for which we have search permission
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ /* If the root is a shortcut, either the keyring only contains
+ * keyring pointers (everything clusters behind root slot 0) or
+ * doesn't contain any keyring pointers.
*/
- if (sp >= KEYRING_SEARCH_MAX_DEPTH)
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
+ goto not_this_keyring;
+
+ ptr = ACCESS_ONCE(shortcut->next_node);
+ node = assoc_array_ptr_to_node(ptr);
+ goto begin_node;
+ }
+
+ node = assoc_array_ptr_to_node(ptr);
+ smp_read_barrier_depends();
+
+ ptr = node->slots[0];
+ if (!assoc_array_ptr_is_meta(ptr))
+ goto begin_node;
+
+descend_to_node:
+ /* Descend to a more distal node in this keyring's content tree and go
+ * through that.
+ */
+ kdebug("descend");
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ ptr = ACCESS_ONCE(shortcut->next_node);
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ }
+ node = assoc_array_ptr_to_node(ptr);
+
+begin_node:
+ kdebug("begin_node");
+ smp_read_barrier_depends();
+ slot = 0;
+ascend_to_node:
+ /* Go through the slots in a node */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+
+ if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
+ goto descend_to_node;
+
+ if (!keyring_ptr_is_keyring(ptr))
continue;
- if (key_task_permission(make_key_ref(key, possessed),
- cred, KEY_SEARCH) < 0)
+ key = keyring_ptr_to_key(ptr);
+
+ if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
+ if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
+ ctx->result = ERR_PTR(-ELOOP);
+ return false;
+ }
+ goto not_this_keyring;
+ }
+
+ /* Search a nested keyring */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_NEED_SEARCH) < 0)
continue;
/* stack the current position */
- stack[sp].keylist = keylist;
- stack[sp].kix = kix;
+ stack[sp].keyring = keyring;
+ stack[sp].node = node;
+ stack[sp].slot = slot;
sp++;
/* begin again with the new keyring */
keyring = key;
- goto descend;
+ goto descend_to_keyring;
+ }
+
+ /* We've dealt with all the slots in the current node, so now we need
+ * to ascend to the parent and continue processing there.
+ */
+ ptr = ACCESS_ONCE(node->back_pointer);
+ slot = node->parent_slot;
+
+ if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ ptr = ACCESS_ONCE(shortcut->back_pointer);
+ slot = shortcut->parent_slot;
+ }
+ if (!ptr)
+ goto not_this_keyring;
+ node = assoc_array_ptr_to_node(ptr);
+ smp_read_barrier_depends();
+ slot++;
+
+ /* If we've ascended to the root (zero backpointer), we must have just
+ * finished processing the leftmost branch rather than the root slots -
+ * so there can't be any more keyrings for us to find.
+ */
+ if (node->back_pointer) {
+ kdebug("ascend %d", slot);
+ goto ascend_to_node;
}
- /* the keyring we're looking at was disqualified or didn't contain a
- * matching key */
+ /* The keyring we're looking at was disqualified or didn't contain a
+ * matching key.
+ */
not_this_keyring:
- if (sp > 0) {
- /* resume the processing of a keyring higher up in the tree */
- sp--;
- keylist = stack[sp].keylist;
- kix = stack[sp].kix + 1;
- goto ascend;
+ kdebug("not_this_keyring %d", sp);
+ if (sp <= 0) {
+ kleave(" = false");
+ return false;
}
- key_ref = ERR_PTR(err);
- goto error_2;
+ /* Resume the processing of a keyring higher up in the tree */
+ sp--;
+ keyring = stack[sp].keyring;
+ node = stack[sp].node;
+ slot = stack[sp].slot + 1;
+ kdebug("ascend to %d [%d]", keyring->serial, slot);
+ goto ascend_to_node;
- /* we found a viable match */
+ /* We found a viable match */
found:
- atomic_inc(&key->usage);
+ key = key_ref_to_ptr(ctx->result);
key_check(key);
- key_ref = make_key_ref(key, possessed);
-error_2:
- rcu_read_unlock();
-error:
- return key_ref;
+ if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
+ key->last_used_at = ctx->now.tv_sec;
+ keyring->last_used_at = ctx->now.tv_sec;
+ while (sp > 0)
+ stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
+ }
+ kleave(" = true");
+ return true;
+}
+
+/**
+ * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @ctx: The keyring search context.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree. In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH).
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit. The
+ * match function may use any attributes of a key that it wishes to
+ * determine the match. Normally the match function from the key type would be
+ * used.
+ *
+ * RCU can be used to prevent the keyring key lists from disappearing without
+ * the need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
+ */
+key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+ struct keyring_search_context *ctx)
+{
+ struct key *keyring;
+ long err;
-} /* end keyring_search_aux() */
+ ctx->iterator = keyring_search_iterator;
+ ctx->possessed = is_key_possessed(keyring_ref);
+ ctx->result = ERR_PTR(-EAGAIN);
-/*****************************************************************************/
-/*
- * search the supplied keyring tree for a key that matches the criterion
- * - perform a breadth-then-depth search up to the prescribed limit
- * - we only find keys on which we have search permission
- * - we readlock the keyrings as we search down the tree
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we only found negative matching keys
+ keyring = key_ref_to_ptr(keyring_ref);
+ key_check(keyring);
+
+ if (keyring->type != &key_type_keyring)
+ return ERR_PTR(-ENOTDIR);
+
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
+ err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ rcu_read_lock();
+ ctx->now = current_kernel_time();
+ if (search_nested_keyrings(keyring, ctx))
+ __key_get(key_ref_to_ptr(ctx->result));
+ rcu_read_unlock();
+ return ctx->result;
+}
+
+/**
+ * keyring_search - Search the supplied keyring tree for a matching key
+ * @keyring: The root of the keyring tree to be searched.
+ * @type: The type of keyring we want to find.
+ * @description: The name of the keyring we want to find.
+ *
+ * As keyring_search_aux() above, but using the current task's credentials and
+ * type's default matching function and preferred search method.
*/
key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description)
{
- if (!type->match)
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = type->match,
+ .match_data = description,
+ .flags = (type->def_lookup_type |
+ KEYRING_SEARCH_DO_STATE_CHECK),
+ };
+
+ if (!ctx.match)
return ERR_PTR(-ENOKEY);
- return keyring_search_aux(keyring, current->cred,
- type, description, type->match);
-
-} /* end keyring_search() */
-
+ return keyring_search_aux(keyring, &ctx);
+}
EXPORT_SYMBOL(keyring_search);
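
A hedged in-kernel sketch of this convenience wrapper, assuming a "user"-type key with the illustrative description "example:token" has been linked somewhere beneath the keyring:

	#include <linux/err.h>
	#include <linux/key.h>
	#include <linux/printk.h>
	#include <keys/user-type.h>

	static void example_lookup(struct key *keyring)
	{
		key_ref_t ref;

		/* treat the keyring as possessed so possessor perms apply */
		ref = keyring_search(make_key_ref(keyring, true),
				     &key_type_user, "example:token");
		if (!IS_ERR(ref)) {
			pr_info("found key %d\n", key_ref_to_ptr(ref)->serial);
			key_ref_put(ref);
		}
	}
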
-/*****************************************************************************/
/*
- * search the given keyring only (no recursion)
- * - keyring must be locked by caller
- * - caller must guarantee that the keyring is a keyring
+ * Search the given keyring for a key that might be updated.
+ *
+ * The caller must guarantee that the keyring is a keyring and that the
+ * permission is granted to modify the keyring as no check is made here. The
+ * caller must also hold a lock on the keyring semaphore.
+ *
+ * Returns a pointer to the found key with usage count incremented if
+ * successful and returns NULL if not found. Revoked and invalidated keys are
+ * skipped over.
+ *
+ * If successful, the possession indicator is propagated from the keyring ref
+ * to the returned key reference.
*/
-key_ref_t __keyring_search_one(key_ref_t keyring_ref,
- const struct key_type *ktype,
- const char *description,
- key_perm_t perm)
+key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key)
{
- struct keyring_list *klist;
- unsigned long possessed;
struct key *keyring, *key;
- int loop;
+ const void *object;
keyring = key_ref_to_ptr(keyring_ref);
- possessed = is_key_possessed(keyring_ref);
- rcu_read_lock();
+ kenter("{%d},{%s,%s}",
+ keyring->serial, index_key->type->name, index_key->description);
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist) {
- for (loop = 0; loop < klist->nkeys; loop++) {
- key = klist->keys[loop];
-
- if (key->type == ktype &&
- (!key->type->match ||
- key->type->match(key, description)) &&
- key_permission(make_key_ref(key, possessed),
- perm) == 0 &&
- !test_bit(KEY_FLAG_REVOKED, &key->flags)
- )
- goto found;
- }
- }
+ object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
+ index_key);
- rcu_read_unlock();
- return ERR_PTR(-ENOKEY);
+ if (object)
+ goto found;
- found:
- atomic_inc(&key->usage);
- rcu_read_unlock();
- return make_key_ref(key, possessed);
+ kleave(" = NULL");
+ return NULL;
-} /* end __keyring_search_one() */
+found:
+ key = keyring_ptr_to_key(object);
+ if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ kleave(" = NULL [x]");
+ return NULL;
+ }
+ __key_get(key);
+ kleave(" = {%d}", key->serial);
+ return make_key_ref(key, is_key_possessed(keyring_ref));
+}
-/*****************************************************************************/
/*
- * find a keyring with the specified name
- * - all named keyrings are searched
- * - normally only finds keyrings with search permission for the current process
+ * Find a keyring with the specified name.
+ *
+ * All named keyrings in the current user namespace are searched, provided they
+ * grant Search permission directly to the caller (unless this check is
+ * skipped). Keyrings whose usage counts have reached zero or which have been
+ * revoked are skipped.
+ *
+ * Returns a pointer to the keyring with the keyring's refcount having been
+ * incremented on success. -ENOKEY is returned if a key could not be found.
*/
struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
{
struct key *keyring;
int bucket;
- keyring = ERR_PTR(-EINVAL);
if (!name)
- goto error;
+ return ERR_PTR(-EINVAL);
bucket = keyring_hash(name);
@@ -539,7 +962,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
&keyring_name_hash[bucket],
type_data.link
) {
- if (keyring->user->user_ns != current_user_ns())
+ if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
continue;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
@@ -550,552 +973,392 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
if (!skip_perm_check &&
key_permission(make_key_ref(keyring, 0),
- KEY_SEARCH) < 0)
+ KEY_NEED_SEARCH) < 0)
continue;
- /* we've got a match */
- atomic_inc(&keyring->usage);
- read_unlock(&keyring_name_lock);
- goto error;
+ /* we've got a match but we might end up racing with
+ * key_cleanup() if the keyring is currently 'dead'
+ * (ie. it has a zero usage count) */
+ if (!atomic_inc_not_zero(&keyring->usage))
+ continue;
+ keyring->last_used_at = current_kernel_time().tv_sec;
+ goto out;
}
}
- read_unlock(&keyring_name_lock);
keyring = ERR_PTR(-ENOKEY);
-
- error:
+out:
+ read_unlock(&keyring_name_lock);
return keyring;
+}
+
+static int keyring_detect_cycle_iterator(const void *object,
+ void *iterator_data)
+{
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ kenter("{%d}", key->serial);
-} /* end find_keyring_by_name() */
+ /* We might get a keyring with a matching index-key that is nonetheless a
+ * different keyring. */
+ if (key != ctx->match_data)
+ return 0;
+
+ ctx->result = ERR_PTR(-EDEADLK);
+ return 1;
+}
-/*****************************************************************************/
/*
- * see if a cycle will will be created by inserting acyclic tree B in acyclic
- * tree A at the topmost level (ie: as a direct child of A)
- * - since we are adding B to A at the top level, checking for cycles should
- * just be a matter of seeing if node A is somewhere in tree B
+ * See if a cycle will be created by inserting acyclic tree B in acyclic
+ * tree A at the topmost level (ie: as a direct child of A).
+ *
+ * Since we are adding B to A at the top level, checking for cycles should just
+ * be a matter of seeing if node A is somewhere in tree B.
*/
static int keyring_detect_cycle(struct key *A, struct key *B)
{
- struct {
- struct keyring_list *keylist;
- int kix;
- } stack[KEYRING_SEARCH_MAX_DEPTH];
-
- struct keyring_list *keylist;
- struct key *subtree, *key;
- int sp, kix, ret;
+ struct keyring_search_context ctx = {
+ .index_key = A->index_key,
+ .match_data = A,
+ .iterator = keyring_detect_cycle_iterator,
+ .flags = (KEYRING_SEARCH_LOOKUP_DIRECT |
+ KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_NO_UPDATE_TIME |
+ KEYRING_SEARCH_NO_CHECK_PERM |
+ KEYRING_SEARCH_DETECT_TOO_DEEP),
+ };
rcu_read_lock();
+ search_nested_keyrings(B, &ctx);
+ rcu_read_unlock();
+ return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
+}
- ret = -EDEADLK;
- if (A == B)
- goto cycle_detected;
-
- subtree = B;
- sp = 0;
+/*
+ * Preallocate memory so that a key can be linked into a keyring.
+ */
+int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit)
+ __acquires(&keyring->sem)
+ __acquires(&keyring_serialise_link_sem)
+{
+ struct assoc_array_edit *edit;
+ int ret;
- /* start processing a new keyring */
- descend:
- if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
- goto not_this_keyring;
+ kenter("%d,%s,%s,",
+ keyring->serial, index_key->type->name, index_key->description);
- keylist = rcu_dereference(subtree->payload.subscriptions);
- if (!keylist)
- goto not_this_keyring;
- kix = 0;
+ BUG_ON(index_key->desc_len == 0);
- ascend:
- /* iterate through the remaining keys in this keyring */
- for (; kix < keylist->nkeys; kix++) {
- key = keylist->keys[kix];
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
- if (key == A)
- goto cycle_detected;
+ down_write(&keyring->sem);
- /* recursively check nested keyrings */
- if (key->type == &key_type_keyring) {
- if (sp >= KEYRING_SEARCH_MAX_DEPTH)
- goto too_deep;
+ ret = -EKEYREVOKED;
+ if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
+ goto error_krsem;
- /* stack the current position */
- stack[sp].keylist = keylist;
- stack[sp].kix = kix;
- sp++;
+ /* serialise link/link calls to prevent parallel calls causing a cycle
+ * when linking two keyrings in opposite orders */
+ if (index_key->type == &key_type_keyring)
+ down_write(&keyring_serialise_link_sem);
- /* begin again with the new keyring */
- subtree = key;
- goto descend;
- }
+ /* Create an edit script that will insert/replace the key in the
+ * keyring tree.
+ */
+ edit = assoc_array_insert(&keyring->keys,
+ &keyring_assoc_array_ops,
+ index_key,
+ NULL);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ goto error_sem;
}
- /* the keyring we're looking at was disqualified or didn't contain a
- * matching key */
- not_this_keyring:
- if (sp > 0) {
- /* resume the checking of a keyring higher up in the tree */
- sp--;
- keylist = stack[sp].keylist;
- kix = stack[sp].kix + 1;
- goto ascend;
+ /* If we're not replacing a link in-place then we're going to need some
+ * extra quota.
+ */
+ if (!edit->dead_leaf) {
+ ret = key_payload_reserve(keyring,
+ keyring->datalen + KEYQUOTA_LINK_BYTES);
+ if (ret < 0)
+ goto error_cancel;
}
- ret = 0; /* no cycles detected */
+ *_edit = edit;
+ kleave(" = 0");
+ return 0;
- error:
- rcu_read_unlock();
+error_cancel:
+ assoc_array_cancel_edit(edit);
+error_sem:
+ if (index_key->type == &key_type_keyring)
+ up_write(&keyring_serialise_link_sem);
+error_krsem:
+ up_write(&keyring->sem);
+ kleave(" = %d", ret);
return ret;
+}
- too_deep:
- ret = -ELOOP;
- goto error;
-
- cycle_detected:
- ret = -EDEADLK;
- goto error;
-
-} /* end keyring_detect_cycle() */
-
-/*****************************************************************************/
/*
- * dispose of a keyring list after the RCU grace period
+ * Check already instantiated keys aren't going to be a problem.
+ *
+ * The caller must have called __key_link_begin(). There is no need to call
+ * this for keys that were created since __key_link_begin() was called.
*/
-static void keyring_link_rcu_disposal(struct rcu_head *rcu)
+int __key_link_check_live_key(struct key *keyring, struct key *key)
{
- struct keyring_list *klist =
- container_of(rcu, struct keyring_list, rcu);
-
- kfree(klist);
-
-} /* end keyring_link_rcu_disposal() */
+ if (key->type == &key_type_keyring)
+ /* check that we aren't going to create a cycle by linking one
+ * keyring to another */
+ return keyring_detect_cycle(keyring, key);
+ return 0;
+}
-/*****************************************************************************/
/*
- * dispose of a keyring list after the RCU grace period, freeing the unlinked
- * key
+ * Link a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called. Discards any
+ * already extant link to a matching key if there is one, so that each keyring
+ * holds at most one link to any given key of a particular type+description
+ * combination.
*/
-static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
+void __key_link(struct key *key, struct assoc_array_edit **_edit)
{
- struct keyring_list *klist =
- container_of(rcu, struct keyring_list, rcu);
-
- key_put(klist->keys[klist->delkey]);
- kfree(klist);
-
-} /* end keyring_unlink_rcu_disposal() */
+ __key_get(key);
+ assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
+ assoc_array_apply_edit(*_edit);
+ *_edit = NULL;
+}
-/*****************************************************************************/
/*
- * link a key into to a keyring
- * - must be called with the keyring's semaphore write-locked
- * - discard already extant link to matching key if there is one
+ * Finish linking a key into a keyring.
+ *
+ * Must be called with __key_link_begin() having been called.
*/
-int __key_link(struct key *keyring, struct key *key)
+void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit)
+ __releases(&keyring->sem)
+ __releases(&keyring_serialise_link_sem)
{
- struct keyring_list *klist, *nklist;
- unsigned max;
- size_t size;
- int loop, ret;
-
- ret = -EKEYREVOKED;
- if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- goto error;
-
- ret = -ENOTDIR;
- if (keyring->type != &key_type_keyring)
- goto error;
-
- /* serialise link/link calls to prevent parallel calls causing a
- * cycle when applied to two keyring in opposite orders */
- down_write(&keyring_serialise_link_sem);
-
- /* check that we aren't going to create a cycle adding one keyring to
- * another */
- if (key->type == &key_type_keyring) {
- ret = keyring_detect_cycle(keyring, key);
- if (ret < 0)
- goto error2;
- }
-
- /* see if there's a matching key we can displace */
- klist = keyring->payload.subscriptions;
-
- if (klist && klist->nkeys > 0) {
- struct key_type *type = key->type;
-
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- if (klist->keys[loop]->type == type &&
- strcmp(klist->keys[loop]->description,
- key->description) == 0
- ) {
- /* found a match - replace with new key */
- size = sizeof(struct key *) * klist->maxkeys;
- size += sizeof(*klist);
- BUG_ON(size > PAGE_SIZE);
-
- ret = -ENOMEM;
- nklist = kmemdup(klist, size, GFP_KERNEL);
- if (!nklist)
- goto error2;
-
- /* replace matched key */
- atomic_inc(&key->usage);
- nklist->keys[loop] = key;
-
- rcu_assign_pointer(
- keyring->payload.subscriptions,
- nklist);
-
- /* dispose of the old keyring list and the
- * displaced key */
- klist->delkey = loop;
- call_rcu(&klist->rcu,
- keyring_unlink_rcu_disposal);
-
- goto done;
- }
- }
- }
-
- /* check that we aren't going to overrun the user's quota */
- ret = key_payload_reserve(keyring,
- keyring->datalen + KEYQUOTA_LINK_BYTES);
- if (ret < 0)
- goto error2;
-
- klist = keyring->payload.subscriptions;
-
- if (klist && klist->nkeys < klist->maxkeys) {
- /* there's sufficient slack space to add directly */
- atomic_inc(&key->usage);
-
- klist->keys[klist->nkeys] = key;
- smp_wmb();
- klist->nkeys++;
- smp_wmb();
- }
- else {
- /* grow the key list */
- max = 4;
- if (klist)
- max += klist->maxkeys;
-
- ret = -ENFILE;
- if (max > 65535)
- goto error3;
- size = sizeof(*klist) + sizeof(struct key *) * max;
- if (size > PAGE_SIZE)
- goto error3;
-
- ret = -ENOMEM;
- nklist = kmalloc(size, GFP_KERNEL);
- if (!nklist)
- goto error3;
- nklist->maxkeys = max;
- nklist->nkeys = 0;
-
- if (klist) {
- nklist->nkeys = klist->nkeys;
- memcpy(nklist->keys,
- klist->keys,
- sizeof(struct key *) * klist->nkeys);
- }
+ BUG_ON(index_key->type == NULL);
+ kenter("%d,%s,", keyring->serial, index_key->type->name);
- /* add the key into the new space */
- atomic_inc(&key->usage);
- nklist->keys[nklist->nkeys++] = key;
+ if (index_key->type == &key_type_keyring)
+ up_write(&keyring_serialise_link_sem);
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- /* dispose of the old keyring list */
- if (klist)
- call_rcu(&klist->rcu, keyring_link_rcu_disposal);
+ if (edit && !edit->dead_leaf) {
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ assoc_array_cancel_edit(edit);
}
+ up_write(&keyring->sem);
+}
-done:
- ret = 0;
-error2:
- up_write(&keyring_serialise_link_sem);
-error:
- return ret;
-
-error3:
- /* undo the quota changes */
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
- goto error2;
-
-} /* end __key_link() */
-
-/*****************************************************************************/
-/*
- * link a key to a keyring
+/**
+ * key_link - Link a key to a keyring
+ * @keyring: The keyring to make the link in.
+ * @key: The key to link to.
+ *
+ * Make a link in a keyring to a key, such that the keyring holds a reference
+ * on that key and the key can potentially be found by searching that keyring.
+ *
+ * This function will write-lock the keyring's semaphore and will consume some
+ * of the user's key data quota to hold the link.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring,
+ * -EKEYREVOKED if the keyring has been revoked, -ENFILE if the keyring is
+ * full, -EDQUOT if there is insufficient key data quota remaining to add
+ * another link or -ENOMEM if there's insufficient memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be made (the keyring should have Write permission and the key Link
+ * permission).
*/
int key_link(struct key *keyring, struct key *key)
{
+ struct assoc_array_edit *edit;
int ret;
+ kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage));
+
key_check(keyring);
key_check(key);
- down_write(&keyring->sem);
- ret = __key_link(keyring, key);
- up_write(&keyring->sem);
+ if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) &&
+ !test_bit(KEY_FLAG_TRUSTED, &key->flags))
+ return -EPERM;
+
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
+ if (ret == 0) {
+ kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage));
+ ret = __key_link_check_live_key(keyring, key);
+ if (ret == 0)
+ __key_link(key, &edit);
+ __key_link_end(keyring, &key->index_key, edit);
+ }
+ kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage));
return ret;
-
-} /* end key_link() */
-
+}
EXPORT_SYMBOL(key_link);
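As a usage illustration of the interface documented above — a minimal sketch rather than code taken from this patch — a kernel-side caller that already holds references on both objects might link a key into a keyring as follows. The helper name is invented and error handling is abbreviated:

	/* Sketch: link @key into @keyring.  Assumes the caller holds
	 * references on both and has already checked Write permission on
	 * the keyring and Link permission on the key, as required above. */
	static int example_link(struct key *keyring, struct key *key)
	{
		int ret = key_link(keyring, key);	/* consumes link quota */

		if (ret == -EDQUOT)
			pr_warn("key quota exhausted, link not made\n");
		return ret;
	}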
-/*****************************************************************************/
-/*
- * unlink the first link to a key from a keyring
+/**
+ * key_unlink - Unlink the first link to a key from a keyring.
+ * @keyring: The keyring to remove the link from.
+ * @key: The key the link is to.
+ *
+ * Remove a link from a keyring to a key.
+ *
+ * This function will write-lock the keyring's semaphore.
+ *
+ * Returns 0 if successful, -ENOTDIR if the keyring isn't a keyring, -ENOENT if
+ * the key isn't linked to by the keyring or -ENOMEM if there's insufficient
+ * memory.
+ *
+ * It is assumed that the caller has checked that it is permitted for a link to
+ * be removed (the keyring should have Write permission; no permissions are
+ * required on the key).
*/
int key_unlink(struct key *keyring, struct key *key)
{
- struct keyring_list *klist, *nklist;
- int loop, ret;
+ struct assoc_array_edit *edit;
+ int ret;
key_check(keyring);
key_check(key);
- ret = -ENOTDIR;
if (keyring->type != &key_type_keyring)
- goto error;
+ return -ENOTDIR;
down_write(&keyring->sem);
- klist = keyring->payload.subscriptions;
- if (klist) {
- /* search the keyring for the key */
- for (loop = 0; loop < klist->nkeys; loop++)
- if (klist->keys[loop] == key)
- goto key_is_present;
+ edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+ &key->index_key);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ goto error;
}
-
- up_write(&keyring->sem);
ret = -ENOENT;
- goto error;
-
-key_is_present:
- /* we need to copy the key list for RCU purposes */
- nklist = kmalloc(sizeof(*klist) +
- sizeof(struct key *) * klist->maxkeys,
- GFP_KERNEL);
- if (!nklist)
- goto nomem;
- nklist->maxkeys = klist->maxkeys;
- nklist->nkeys = klist->nkeys - 1;
-
- if (loop > 0)
- memcpy(&nklist->keys[0],
- &klist->keys[0],
- loop * sizeof(struct key *));
-
- if (loop < nklist->nkeys)
- memcpy(&nklist->keys[loop],
- &klist->keys[loop + 1],
- (nklist->nkeys - loop) * sizeof(struct key *));
-
- /* adjust the user's quota */
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
-
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- up_write(&keyring->sem);
-
- /* schedule for later cleanup */
- klist->delkey = loop;
- call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
+ if (edit == NULL)
+ goto error;
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
ret = 0;
error:
- return ret;
-nomem:
- ret = -ENOMEM;
up_write(&keyring->sem);
- goto error;
-
-} /* end key_unlink() */
-
+ return ret;
+}
EXPORT_SYMBOL(key_unlink);
-/*****************************************************************************/
-/*
- * dispose of a keyring list after the RCU grace period, releasing the keys it
- * links to
- */
-static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist;
- int loop;
-
- klist = container_of(rcu, struct keyring_list, rcu);
-
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- key_put(klist->keys[loop]);
-
- kfree(klist);
-
-} /* end keyring_clear_rcu_disposal() */
-
-/*****************************************************************************/
-/*
- * clear the specified process keyring
- * - implements keyctl(KEYCTL_CLEAR)
+/**
+ * keyring_clear - Clear a keyring
+ * @keyring: The keyring to clear.
+ *
+ * Clear the contents of the specified keyring.
+ *
+ * Returns 0 if successful or -ENOTDIR if the keyring isn't a keyring.
*/
int keyring_clear(struct key *keyring)
{
- struct keyring_list *klist;
+ struct assoc_array_edit *edit;
int ret;
- ret = -ENOTDIR;
- if (keyring->type == &key_type_keyring) {
- /* detach the pointer block with the locks held */
- down_write(&keyring->sem);
-
- klist = keyring->payload.subscriptions;
- if (klist) {
- /* adjust the quota */
- key_payload_reserve(keyring,
- sizeof(struct keyring_list));
-
- rcu_assign_pointer(keyring->payload.subscriptions,
- NULL);
- }
-
- up_write(&keyring->sem);
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
- /* free the keys after the locks have been dropped */
- if (klist)
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
+ down_write(&keyring->sem);
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ } else {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
ret = 0;
}
+ up_write(&keyring->sem);
return ret;
-
-} /* end keyring_clear() */
-
+}
EXPORT_SYMBOL(keyring_clear);
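On the removal side, a similarly hedged sketch (the helper name is invented): key_unlink() drops one link and keyring_clear() empties the keyring, and both take the keyring semaphore internally, so the caller only needs its own references:

	static void example_teardown(struct key *keyring, struct key *key)
	{
		if (key_unlink(keyring, key) == -ENOENT)
			pr_debug("key was not linked in this keyring\n");

		if (keyring_clear(keyring) == -ENOTDIR)
			pr_debug("object is not a keyring\n");
	}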
-/*****************************************************************************/
/*
- * dispose of the links from a revoked keyring
- * - called with the key sem write-locked
+ * Dispose of the links from a revoked keyring.
+ *
+ * This is called with the key sem write-locked.
*/
static void keyring_revoke(struct key *keyring)
{
- struct keyring_list *klist = keyring->payload.subscriptions;
+ struct assoc_array_edit *edit;
- /* adjust the quota */
- key_payload_reserve(keyring, 0);
-
- if (klist) {
- rcu_assign_pointer(keyring->payload.subscriptions, NULL);
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (!IS_ERR(edit)) {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
}
+}
+
+static bool keyring_gc_select_iterator(void *object, void *iterator_data)
+{
+ struct key *key = keyring_ptr_to_key(object);
+ time_t *limit = iterator_data;
-} /* end keyring_revoke() */
+ if (key_is_dead(key, *limit))
+ return false;
+ key_get(key);
+ return true;
+}
-/*
- * Determine whether a key is dead
- */
-static bool key_is_dead(struct key *key, time_t limit)
+static int keyring_gc_check_iterator(const void *object, void *iterator_data)
{
- return test_bit(KEY_FLAG_DEAD, &key->flags) ||
- (key->expiry > 0 && key->expiry <= limit);
+ const struct key *key = keyring_ptr_to_key(object);
+ time_t *limit = iterator_data;
+
+ key_check(key);
+ return key_is_dead(key, *limit);
}
/*
- * Collect garbage from the contents of a keyring
+ * Garbage collect pointers from a keyring.
+ *
+ * Not called with any locks held. The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
*/
void keyring_gc(struct key *keyring, time_t limit)
{
- struct keyring_list *klist, *new;
- struct key *key;
- int loop, keep, max;
-
- kenter("{%x,%s}", key_serial(keyring), keyring->description);
-
- down_write(&keyring->sem);
-
- klist = keyring->payload.subscriptions;
- if (!klist)
- goto no_klist;
-
- /* work out how many subscriptions we're keeping */
- keep = 0;
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- if (!key_is_dead(klist->keys[loop], limit))
- keep++;
-
- if (keep == klist->nkeys)
- goto just_return;
-
- /* allocate a new keyring payload */
- max = roundup(keep, 4);
- new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
- GFP_KERNEL);
- if (!new)
- goto nomem;
- new->maxkeys = max;
- new->nkeys = 0;
- new->delkey = 0;
-
- /* install the live keys
- * - must take care as expired keys may be updated back to life
- */
- keep = 0;
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- key = klist->keys[loop];
- if (!key_is_dead(key, limit)) {
- if (keep >= max)
- goto discard_new;
- new->keys[keep++] = key_get(key);
- }
- }
- new->nkeys = keep;
-
- /* adjust the quota */
- key_payload_reserve(keyring,
- sizeof(struct keyring_list) +
- KEYQUOTA_LINK_BYTES * keep);
-
- if (keep == 0) {
- rcu_assign_pointer(keyring->payload.subscriptions, NULL);
- kfree(new);
- } else {
- rcu_assign_pointer(keyring->payload.subscriptions, new);
- }
-
- up_write(&keyring->sem);
+ int result;
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
- kleave(" [yes]");
- return;
+ kenter("%x{%s}", keyring->serial, keyring->description ?: "");
-discard_new:
- new->nkeys = keep;
- keyring_clear_rcu_disposal(&new->rcu);
- up_write(&keyring->sem);
- kleave(" [discard]");
- return;
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto dont_gc;
-just_return:
- up_write(&keyring->sem);
- kleave(" [no dead]");
- return;
+ /* scan the keyring looking for dead keys */
+ rcu_read_lock();
+ result = assoc_array_iterate(&keyring->keys,
+ keyring_gc_check_iterator, &limit);
+ rcu_read_unlock();
+ if (result == true)
+ goto do_gc;
-no_klist:
- up_write(&keyring->sem);
- kleave(" [no_klist]");
+dont_gc:
+ kleave(" [no gc]");
return;
-nomem:
+do_gc:
+ down_write(&keyring->sem);
+ assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
+ keyring_gc_select_iterator, &limit);
up_write(&keyring->sem);
- kleave(" [oom]");
+ kleave(" [gc]");
}
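The garbage collector above is built on the generic assoc_array iterator: a check pass under the RCU read lock, then assoc_array_gc() with a select callback under the keyring semaphore. The following is a hedged sketch of the same iterator pattern, here merely counting links in a keyring; it assumes the post-patch struct key in which the links live in keyring->keys, and it reuses keyring_ptr_to_key() as the iterators above do:

	/* Iterator callback: returning 0 keeps the walk going. */
	static int example_count_iterator(const void *object, void *iterator_data)
	{
		unsigned int *count = iterator_data;

		key_check(keyring_ptr_to_key(object));
		(*count)++;
		return 0;
	}

	static unsigned int example_count_links(struct key *keyring)
	{
		unsigned int count = 0;

		rcu_read_lock();
		assoc_array_iterate(&keyring->keys, example_count_iterator, &count);
		rcu_read_unlock();
		return count;
	}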
diff --git a/security/keys/permission.c b/security/keys/permission.c
index 0ed802c9e69..732cc0beffd 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -1,4 +1,4 @@
-/* permission.c: key permission determination
+/* Key permission checking
*
* Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -13,21 +13,22 @@
#include <linux/security.h>
#include "internal.h"
-/*****************************************************************************/
/**
* key_task_permission - Check a key can be used
- * @key_ref: The key to check
- * @cred: The credentials to use
- * @perm: The permissions to check for
+ * @key_ref: The key to check.
+ * @cred: The credentials to use.
+ * @perm: The permissions to check for.
*
* Check to see whether permission is granted to use a key in the desired way,
* but permit the security modules to override.
*
- * The caller must hold either a ref on cred or must hold the RCU readlock or a
- * spinlock.
+ * The caller must hold either a ref on cred or must hold the RCU readlock.
+ *
+ * Returns 0 if successful, -EACCES if access is denied based on the
+ * permissions bits or the LSM check.
*/
int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
- key_perm_t perm)
+ unsigned perm)
{
struct key *key;
key_perm_t kperm;
@@ -35,19 +36,16 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
key = key_ref_to_ptr(key_ref);
- if (key->user->user_ns != cred->user->user_ns)
- goto use_other_perms;
-
/* use the second 8-bits of permissions for keys the caller owns */
- if (key->uid == cred->fsuid) {
+ if (uid_eq(key->uid, cred->fsuid)) {
kperm = key->perm >> 16;
goto use_these_perms;
}
/* use the third 8-bits of permissions for keys the caller has a group
* membership in common with */
- if (key->gid != -1 && key->perm & KEY_GRP_ALL) {
- if (key->gid == cred->fsgid) {
+ if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) {
+ if (gid_eq(key->gid, cred->fsgid)) {
kperm = key->perm >> 8;
goto use_these_perms;
}
@@ -59,8 +57,6 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
}
}
-use_other_perms:
-
/* otherwise use the least-significant 8-bits */
kperm = key->perm;
@@ -72,46 +68,43 @@ use_these_perms:
if (is_key_possessed(key_ref))
kperm |= key->perm >> 24;
- kperm = kperm & perm & KEY_ALL;
+ kperm = kperm & perm & KEY_NEED_ALL;
if (kperm != perm)
return -EACCES;
/* let LSM be the final arbiter */
return security_key_permission(key_ref, cred, perm);
-
-} /* end key_task_permission() */
-
+}
EXPORT_SYMBOL(key_task_permission);
-/*****************************************************************************/
-/*
- * validate a key
+/**
+ * key_validate - Validate a key.
+ * @key: The key to be validated.
+ *
+ * Check that a key is valid, returning 0 if the key is okay, -ENOKEY if the
+ * key is invalidated, -EKEYREVOKED if the key's type has been removed or if
+ * the key has been revoked or -EKEYEXPIRED if the key has expired.
*/
-int key_validate(struct key *key)
+int key_validate(const struct key *key)
{
- struct timespec now;
- int ret = 0;
-
- if (key) {
- /* check it's still accessible */
- ret = -EKEYREVOKED;
- if (test_bit(KEY_FLAG_REVOKED, &key->flags) ||
- test_bit(KEY_FLAG_DEAD, &key->flags))
- goto error;
-
- /* check it hasn't expired */
- ret = 0;
- if (key->expiry) {
- now = current_kernel_time();
- if (now.tv_sec >= key->expiry)
- ret = -EKEYEXPIRED;
- }
- }
+ unsigned long flags = key->flags;
- error:
- return ret;
+ if (flags & (1 << KEY_FLAG_INVALIDATED))
+ return -ENOKEY;
-} /* end key_validate() */
+ /* check it's still accessible */
+ if (flags & ((1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD)))
+ return -EKEYREVOKED;
+
+ /* check it hasn't expired */
+ if (key->expiry) {
+ struct timespec now = current_kernel_time();
+ if (now.tv_sec >= key->expiry)
+ return -EKEYEXPIRED;
+ }
+ return 0;
+}
EXPORT_SYMBOL(key_validate);
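Taken together, the two exported helpers above form the usual gate before a caller touches a key's payload. A hedged sketch of combining them (the wrapper name is invented; KEY_NEED_READ follows the KEY_NEED_* naming used in this hunk):

	static int example_can_read(key_ref_t key_ref)
	{
		int ret = key_validate(key_ref_to_ptr(key_ref));

		if (ret < 0)
			return ret;	/* invalidated, revoked, dead or expired */

		return key_task_permission(key_ref, current_cred(), KEY_NEED_READ);
	}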
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
new file mode 100644
index 00000000000..c9fae5ea89f
--- /dev/null
+++ b/security/keys/persistent.c
@@ -0,0 +1,167 @@
+/* General persistent per-UID keyrings register
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/user_namespace.h>
+#include "internal.h"
+
+unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */
+
+/*
+ * Create the persistent keyring register for the current user namespace.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static int key_create_persistent_register(struct user_namespace *ns)
+{
+ struct key *reg = keyring_alloc(".persistent_register",
+ KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ns->persistent_keyring_register = reg;
+ return 0;
+}
+
+/*
+ * Create the persistent keyring for the specified user.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid,
+ struct keyring_index_key *index_key)
+{
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+
+ if (!ns->persistent_keyring_register) {
+ long err = key_create_persistent_register(ns);
+ if (err < 0)
+ return ERR_PTR(err);
+ } else {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ persistent_ref = find_key_to_update(reg_ref, index_key);
+ if (persistent_ref)
+ return persistent_ref;
+ }
+
+ persistent = keyring_alloc(index_key->description,
+ uid, INVALID_GID, current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA,
+ ns->persistent_keyring_register);
+ if (IS_ERR(persistent))
+ return ERR_CAST(persistent);
+
+ return make_key_ref(persistent, true);
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
+ key_ref_t dest_ref)
+{
+ struct keyring_index_key index_key;
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+ char buf[32];
+ long ret;
+
+ /* Look in the register if it exists */
+ index_key.type = &key_type_keyring;
+ index_key.description = buf;
+ index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+
+ if (ns->persistent_keyring_register) {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ down_read(&ns->persistent_keyring_register_sem);
+ persistent_ref = find_key_to_update(reg_ref, &index_key);
+ up_read(&ns->persistent_keyring_register_sem);
+
+ if (persistent_ref)
+ goto found;
+ }
+
+ /* It wasn't in the register, so we'll need to create it. We might
+ * also need to create the register.
+ */
+ down_write(&ns->persistent_keyring_register_sem);
+ persistent_ref = key_create_persistent(ns, uid, &index_key);
+ up_write(&ns->persistent_keyring_register_sem);
+ if (!IS_ERR(persistent_ref))
+ goto found;
+
+ return PTR_ERR(persistent_ref);
+
+found:
+ ret = key_task_permission(persistent_ref, current_cred(), KEY_NEED_LINK);
+ if (ret == 0) {
+ persistent = key_ref_to_ptr(persistent_ref);
+ ret = key_link(key_ref_to_ptr(dest_ref), persistent);
+ if (ret == 0) {
+ key_set_timeout(persistent, persistent_keyring_expiry);
+ ret = persistent->serial;
+ }
+ }
+
+ key_ref_put(persistent_ref);
+ return ret;
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
+{
+ struct user_namespace *ns = current_user_ns();
+ key_ref_t dest_ref;
+ kuid_t uid;
+ long ret;
+
+ /* -1 indicates the current user */
+ if (_uid == (uid_t)-1) {
+ uid = current_uid();
+ } else {
+ uid = make_kuid(ns, _uid);
+ if (!uid_valid(uid))
+ return -EINVAL;
+
+ /* You can only see your own persistent cache if you're not
+ * sufficiently privileged.
+ */
+ if (!uid_eq(uid, current_uid()) &&
+ !uid_eq(uid, current_euid()) &&
+ !ns_capable(ns, CAP_SETUID))
+ return -EPERM;
+ }
+
+ /* There must be a destination keyring */
+ dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(dest_ref))
+ return PTR_ERR(dest_ref);
+ if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
+ ret = -ENOTDIR;
+ goto out_put_dest;
+ }
+
+ ret = key_get_persistent(ns, uid, dest_ref);
+
+out_put_dest:
+ key_ref_put(dest_ref);
+ return ret;
+}
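Seen from userspace, the service implemented in this new file is reached through the keyctl syscall. The sketch below is an assumption based on the interface above rather than part of this patch: the KEYCTL_GET_PERSISTENT operation, the libkeyutils keyctl() wrapper and the choice of the session keyring as the destination all come from outside this hunk.

	#include <keyutils.h>
	#include <stdio.h>

	int main(void)
	{
		/* uid -1 = the calling user; link that user's persistent
		 * keyring into the session keyring and report its serial. */
		long serial = keyctl(KEYCTL_GET_PERSISTENT, -1,
				     KEY_SPEC_SESSION_KEYRING);

		if (serial < 0) {
			perror("keyctl(KEYCTL_GET_PERSISTENT)");
			return 1;
		}
		printf("persistent keyring serial: %ld\n", serial);
		return 0;
	}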
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 9d01021ca0c..d3f6f2fd21d 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -1,4 +1,4 @@
-/* proc.c: proc files for key database enumeration
+/* procfs files for key database enumeration
*
* Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -61,9 +60,8 @@ static const struct file_operations proc_key_users_fops = {
.release = seq_release,
};
-/*****************************************************************************/
/*
- * declare the /proc files
+ * Declare the /proc files.
*/
static int __init key_proc_init(void)
{
@@ -80,25 +78,24 @@ static int __init key_proc_init(void)
panic("Cannot create /proc/key-users\n");
return 0;
-
-} /* end key_proc_init() */
+}
__initcall(key_proc_init);
-/*****************************************************************************/
/*
- * implement "/proc/keys" to provides a list of the keys on the system
+ * Implement "/proc/keys" to provide a list of the keys on the system that
+ * grant View permission to the caller.
*/
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
-static struct rb_node *key_serial_next(struct rb_node *n)
+static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = seq_user_ns(p);
n = rb_next(n);
while (n) {
struct key *key = rb_entry(n, struct key, serial_node);
- if (key->user->user_ns == user_ns)
+ if (kuid_has_mapping(user_ns, key->user->uid))
break;
n = rb_next(n);
}
@@ -110,9 +107,9 @@ static int proc_keys_open(struct inode *inode, struct file *file)
return seq_open(file, &proc_keys_ops);
}
-static struct key *find_ge_key(key_serial_t id)
+static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = seq_user_ns(p);
struct rb_node *n = key_serial_tree.rb_node;
struct key *minkey = NULL;
@@ -135,7 +132,7 @@ static struct key *find_ge_key(key_serial_t id)
return NULL;
for (;;) {
- if (minkey->user->user_ns == user_ns)
+ if (kuid_has_mapping(user_ns, minkey->user->uid))
return minkey;
n = rb_next(&minkey->serial_node);
if (!n)
@@ -154,7 +151,7 @@ static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
if (*_pos > INT_MAX)
return NULL;
- key = find_ge_key(pos);
+ key = find_ge_key(p, pos);
if (!key)
return NULL;
*_pos = key->serial;
@@ -171,7 +168,7 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
{
struct rb_node *n;
- n = key_serial_next(v);
+ n = key_serial_next(p, v);
if (n)
*_pos = key_node_serial(n);
return n;
@@ -189,16 +186,39 @@ static int proc_keys_show(struct seq_file *m, void *v)
struct key *key = rb_entry(_p, struct key, serial_node);
struct timespec now;
unsigned long timo;
+ key_ref_t key_ref, skey_ref;
char xbuf[12];
int rc;
+ struct keyring_search_context ctx = {
+ .index_key.type = key->type,
+ .index_key.description = key->description,
+ .cred = current_cred(),
+ .match = lookup_user_key_possessed,
+ .match_data = key,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_LOOKUP_DIRECT),
+ };
+
+ key_ref = make_key_ref(key, 0);
+
+ /* determine if the key is possessed by this process (a test we can
+ * skip if the key does not indicate the possessor can view it)
+ */
+ if (key->perm & KEY_POS_VIEW) {
+ skey_ref = search_my_process_keyrings(&ctx);
+ if (!IS_ERR(skey_ref)) {
+ key_ref_put(skey_ref);
+ key_ref = make_key_ref(key, 1);
+ }
+ }
+
/* check whether the current task is allowed to view the key (assuming
* non-possession)
* - the caller holds a spinlock, and thus the RCU read lock, making our
* access to __current_cred() safe
*/
- rc = key_task_permission(make_key_ref(key, 0), current_cred(),
- KEY_VIEW);
+ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
if (rc < 0)
return 0;
@@ -229,7 +249,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
#define showflag(KEY, LETTER, FLAG) \
(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
- seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
+ seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
key->serial,
showflag(key, 'I', KEY_FLAG_INSTANTIATED),
showflag(key, 'R', KEY_FLAG_REVOKED),
@@ -237,11 +257,12 @@ static int proc_keys_show(struct seq_file *m, void *v)
showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
showflag(key, 'N', KEY_FLAG_NEGATIVE),
+ showflag(key, 'i', KEY_FLAG_INVALIDATED),
atomic_read(&key->usage),
xbuf,
key->perm,
- key->uid,
- key->gid,
+ from_kuid_munged(seq_user_ns(m), key->uid),
+ from_kgid_munged(seq_user_ns(m), key->gid),
key->type->name);
#undef showflag
@@ -256,31 +277,31 @@ static int proc_keys_show(struct seq_file *m, void *v)
#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
-static struct rb_node *__key_user_next(struct rb_node *n)
+static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
while (n) {
struct key_user *user = rb_entry(n, struct key_user, node);
- if (user->user_ns == current_user_ns())
+ if (kuid_has_mapping(user_ns, user->uid))
break;
n = rb_next(n);
}
return n;
}
-static struct rb_node *key_user_next(struct rb_node *n)
+static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
- return __key_user_next(rb_next(n));
+ return __key_user_next(user_ns, rb_next(n));
}
-static struct rb_node *key_user_first(struct rb_root *r)
+static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
{
struct rb_node *n = rb_first(r);
- return __key_user_next(n);
+ return __key_user_next(user_ns, n);
}
-/*****************************************************************************/
/*
- * implement "/proc/key-users" to provides a list of the key users
+ * Implement "/proc/key-users" to provides a list of the key users and their
+ * quotas.
*/
static int proc_key_users_open(struct inode *inode, struct file *file)
{
@@ -295,10 +316,10 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
spin_lock(&key_user_lock);
- _p = key_user_first(&key_user_tree);
+ _p = key_user_first(seq_user_ns(p), &key_user_tree);
while (pos > 0 && _p) {
pos--;
- _p = key_user_next(_p);
+ _p = key_user_next(seq_user_ns(p), _p);
}
return _p;
@@ -307,7 +328,7 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
(*_pos)++;
- return key_user_next((struct rb_node *) v);
+ return key_user_next(seq_user_ns(p), (struct rb_node *)v);
}
static void proc_key_users_stop(struct seq_file *p, void *v)
@@ -320,13 +341,13 @@ static int proc_key_users_show(struct seq_file *m, void *v)
{
struct rb_node *_p = v;
struct key_user *user = rb_entry(_p, struct key_user, node);
- unsigned maxkeys = (user->uid == 0) ?
+ unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (user->uid == 0) ?
+ unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
- user->uid,
+ from_kuid_munged(seq_user_ns(m), user->uid),
atomic_read(&user->usage),
atomic_read(&user->nkeys),
atomic_read(&user->nikeys),
@@ -336,5 +357,4 @@ static int proc_key_users_show(struct seq_file *m, void *v)
maxbytes);
return 0;
-
}
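A hedged userspace sketch of consuming the /proc/key-users table formatted by the hunk above: each line carries the uid, a usage count and three x/y pairs in the order of the seq_printf() arguments. Only part of that argument list is visible here, so the field meanings in the comment are assumptions:

	#include <stdio.h>

	int main(void)
	{
		/* Assumed layout per line:
		 * <uid>: <usage> <nkeys>/<nikeys> <qnkeys>/<maxkeys> <qnbytes>/<maxbytes> */
		unsigned int uid, usage, nkeys, nikeys, qnkeys, maxkeys, qnbytes, maxbytes;
		FILE *f = fopen("/proc/key-users", "r");

		if (!f)
			return 1;
		while (fscanf(f, "%u: %u %u/%u %u/%u %u/%u",
			      &uid, &usage, &nkeys, &nikeys,
			      &qnkeys, &maxkeys, &qnbytes, &maxbytes) == 8)
			printf("uid %u: %u/%u keys in quota, %u/%u bytes\n",
			       uid, qnkeys, maxkeys, qnbytes, maxbytes);
		fclose(f);
		return 0;
	}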
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 5c23afb31ec..0cf8a130a26 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -1,4 +1,4 @@
-/* Management of a process's keyrings
+/* Manage a process's keyrings
*
* Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -12,7 +12,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/sched.h>
-#include <linux/slab.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/err.h>
@@ -22,41 +21,43 @@
#include <asm/uaccess.h>
#include "internal.h"
-/* session keyring create vs join semaphore */
+/* Session keyring create vs join semaphore */
static DEFINE_MUTEX(key_session_mutex);
-/* user keyring creation semaphore */
+/* User keyring creation semaphore */
static DEFINE_MUTEX(key_user_keyring_mutex);
-/* the root user's tracking struct */
+/* The root user's tracking struct */
struct key_user root_key_user = {
.usage = ATOMIC_INIT(3),
.cons_lock = __MUTEX_INITIALIZER(root_key_user.cons_lock),
.lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
.nkeys = ATOMIC_INIT(2),
.nikeys = ATOMIC_INIT(2),
- .uid = 0,
- .user_ns = &init_user_ns,
+ .uid = GLOBAL_ROOT_UID,
};
-/*****************************************************************************/
/*
- * install user and user session keyrings for a particular UID
+ * Install the user and user session keyrings for the current process's UID.
*/
int install_user_keyrings(void)
{
struct user_struct *user;
const struct cred *cred;
struct key *uid_keyring, *session_keyring;
+ key_perm_t user_keyring_perm;
char buf[20];
int ret;
+ uid_t uid;
+ user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
cred = current_cred();
user = cred->user;
+ uid = from_kuid(cred->user_ns, user->uid);
- kenter("%p{%u}", user, user->uid);
+ kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
}
@@ -69,13 +70,13 @@ int install_user_keyrings(void)
* - there may be one in existence already as it may have been
* pinned by a session, but the user_struct pointing to it
* may have been destroyed by setuid */
- sprintf(buf, "_uid.%u", user->uid);
+ sprintf(buf, "_uid.%u", uid);
uid_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(uid_keyring)) {
- uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1,
- cred, KEY_ALLOC_IN_QUOTA,
- NULL);
+ uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
goto error;
@@ -84,13 +85,14 @@ int install_user_keyrings(void)
/* get a default session keyring (which might also exist
* already) */
- sprintf(buf, "_uid_ses.%u", user->uid);
+ sprintf(buf, "_uid_ses.%u", uid);
session_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(session_keyring)) {
session_keyring =
- keyring_alloc(buf, user->uid, (gid_t) -1,
- cred, KEY_ALLOC_IN_QUOTA, NULL);
+ keyring_alloc(buf, user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_release;
@@ -123,13 +125,15 @@ error:
}
/*
- * install a fresh thread keyring directly to new credentials
+ * Install a fresh thread keyring directly to new credentials. This keyring is
+ * allowed to overrun the quota.
*/
int install_thread_keyring_to_cred(struct cred *new)
{
struct key *keyring;
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
@@ -139,7 +143,7 @@ int install_thread_keyring_to_cred(struct cred *new)
}
/*
- * install a fresh thread keyring, discarding the old one
+ * Install a fresh thread keyring, discarding the old one.
*/
static int install_thread_keyring(void)
{
@@ -162,39 +166,34 @@ static int install_thread_keyring(void)
}
/*
- * install a process keyring directly to a credentials struct
- * - returns -EEXIST if there was already a process keyring, 0 if one installed,
- * and other -ve on any other error
+ * Install a process keyring directly to a credentials struct.
+ *
+ * Returns -EEXIST if there was already a process keyring, 0 if one installed,
+ * and another negative error code on any other failure.
*/
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
- int ret;
- if (new->tgcred->process_keyring)
+ if (new->process_keyring)
return -EEXIST;
- keyring = keyring_alloc("_pid", new->uid, new->gid,
- new, KEY_ALLOC_QUOTA_OVERRUN, NULL);
+ keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
- spin_lock_irq(&new->tgcred->lock);
- if (!new->tgcred->process_keyring) {
- new->tgcred->process_keyring = keyring;
- keyring = NULL;
- ret = 0;
- } else {
- ret = -EEXIST;
- }
- spin_unlock_irq(&new->tgcred->lock);
- key_put(keyring);
- return ret;
+ new->process_keyring = keyring;
+ return 0;
}
/*
- * make sure a process keyring is installed
- * - we
+ * Make sure a process keyring is installed for the current process. The
+ * existing process keyring is not replaced.
+ *
+ * Returns 0 if there is a process keyring by the end of this function, some
+ * error otherwise.
*/
static int install_process_keyring(void)
{
@@ -208,17 +207,16 @@ static int install_process_keyring(void)
ret = install_process_keyring_to_cred(new);
if (ret < 0) {
abort_creds(new);
- return ret != -EEXIST ?: 0;
+ return ret != -EEXIST ? ret : 0;
}
return commit_creds(new);
}
/*
- * install a session keyring directly to a credentials struct
+ * Install a session keyring directly to a credentials struct.
*/
-static int install_session_keyring_to_cred(struct cred *cred,
- struct key *keyring)
+int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
{
unsigned long flags;
struct key *old;
@@ -228,36 +226,31 @@ static int install_session_keyring_to_cred(struct cred *cred,
/* create an empty session keyring */
if (!keyring) {
flags = KEY_ALLOC_QUOTA_OVERRUN;
- if (cred->tgcred->session_keyring)
+ if (cred->session_keyring)
flags = KEY_ALLOC_IN_QUOTA;
- keyring = keyring_alloc("_ses", cred->uid, cred->gid,
- cred, flags, NULL);
+ keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+ flags, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
- atomic_inc(&keyring->usage);
+ __key_get(keyring);
}
/* install the keyring */
- spin_lock_irq(&cred->tgcred->lock);
- old = cred->tgcred->session_keyring;
- rcu_assign_pointer(cred->tgcred->session_keyring, keyring);
- spin_unlock_irq(&cred->tgcred->lock);
-
- /* we're using RCU on the pointer, but there's no point synchronising
- * on it if it didn't previously point to anything */
- if (old) {
- synchronize_rcu();
+ old = cred->session_keyring;
+ rcu_assign_pointer(cred->session_keyring, keyring);
+
+ if (old)
key_put(old);
- }
return 0;
}
/*
- * install a session keyring, discarding the old one
- * - if a keyring is not supplied, an empty one is invented
+ * Install a session keyring, discarding the old one. If a keyring is not
+ * supplied, an empty one is invented.
*/
static int install_session_keyring(struct key *keyring)
{
@@ -268,7 +261,7 @@ static int install_session_keyring(struct key *keyring)
if (!new)
return -ENOMEM;
- ret = install_session_keyring_to_cred(new, NULL);
+ ret = install_session_keyring_to_cred(new, keyring);
if (ret < 0) {
abort_creds(new);
return ret;
@@ -277,9 +270,8 @@ static int install_session_keyring(struct key *keyring)
return commit_creds(new);
}
-/*****************************************************************************/
/*
- * the filesystem user ID changed
+ * Handle the fsuid changing.
*/
void key_fsuid_changed(struct task_struct *tsk)
{
@@ -290,12 +282,10 @@ void key_fsuid_changed(struct task_struct *tsk)
tsk->cred->thread_keyring->uid = tsk->cred->fsuid;
up_write(&tsk->cred->thread_keyring->sem);
}
+}
-} /* end key_fsuid_changed() */
-
-/*****************************************************************************/
/*
- * the filesystem group ID changed
+ * Handle the fsgid changing.
*/
void key_fsgid_changed(struct task_struct *tsk)
{
@@ -306,27 +296,33 @@ void key_fsgid_changed(struct task_struct *tsk)
tsk->cred->thread_keyring->gid = tsk->cred->fsgid;
up_write(&tsk->cred->thread_keyring->sem);
}
+}
-} /* end key_fsgid_changed() */
-
-/*****************************************************************************/
/*
- * search the process keyrings for the first matching key
- * - we use the supplied match function to see if the description (or other
- * feature of interest) matches
- * - we return -EAGAIN if we didn't find any matching key
- * - we return -ENOKEY if we found only negative matching keys
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key.
+ *
+ * The search criteria are the type and the match function. The description is
+ * given to the match function as a parameter, but doesn't otherwise influence
+ * the search. Typically the match function will compare the description
+ * parameter to the key's description.
+ *
+ * This can only search keyrings that grant Search permission to the supplied
+ * credentials. Keyrings linked to searched keyrings will also be searched if
+ * they grant Search permission too. Keys can only be found if they grant
+ * Search permission to the credentials.
+ *
+ * Returns a pointer to the key with the key usage count incremented if
+ * successful, -EAGAIN if we didn't find any matching key or -ENOKEY if we only
+ * matched negative keys.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
*/
-key_ref_t search_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- const struct cred *cred)
+key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
{
- struct request_key_auth *rka;
key_ref_t key_ref, ret, err;
- might_sleep();
-
/* we want to return -EAGAIN or -ENOKEY if any of the keyrings were
* searchable, but we failed to find a key or we found a negative key;
* otherwise we want to return a sample error (probably -EACCES) if
@@ -339,17 +335,14 @@ key_ref_t search_process_keyrings(struct key_type *type,
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
- if (cred->thread_keyring) {
+ if (ctx->cred->thread_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->thread_keyring, 1),
- cred, type, description, match);
+ make_key_ref(ctx->cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
- if (ret)
- break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
@@ -360,10 +353,9 @@ key_ref_t search_process_keyrings(struct key_type *type,
}
/* search the process keyring second */
- if (cred->tgcred->process_keyring) {
+ if (ctx->cred->process_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->tgcred->process_keyring, 1),
- cred, type, description, match);
+ make_key_ref(ctx->cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -381,13 +373,11 @@ key_ref_t search_process_keyrings(struct key_type *type,
}
/* search the session keyring */
- if (cred->tgcred->session_keyring) {
+ if (ctx->cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
- make_key_ref(rcu_dereference(
- cred->tgcred->session_keyring),
- 1),
- cred, type, description, match);
+ make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
+ ctx);
rcu_read_unlock();
if (!IS_ERR(key_ref))
@@ -406,10 +396,10 @@ key_ref_t search_process_keyrings(struct key_type *type,
}
}
/* or search the user-session keyring */
- else if (cred->user->session_keyring) {
+ else if (ctx->cred->user->session_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->user->session_keyring, 1),
- cred, type, description, match);
+ make_key_ref(ctx->cred->user->session_keyring, 1),
+ ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -426,192 +416,241 @@ key_ref_t search_process_keyrings(struct key_type *type,
}
}
+ /* no key - decide on the error we're going to go for */
+ key_ref = ret ? ret : err;
+
+found:
+ return key_ref;
+}
+
+/*
+ * Search the process keyrings attached to the supplied cred for the first
+ * matching key in the manner of search_my_process_keyrings(), but also search
+ * the keys attached to the assumed authorisation key using its credentials if
+ * one is available.
+ *
+ * Return same as search_my_process_keyrings().
+ */
+key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
+{
+ struct request_key_auth *rka;
+ key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
+
+ might_sleep();
+
+ key_ref = search_my_process_keyrings(ctx);
+ if (!IS_ERR(key_ref))
+ goto found;
+ err = key_ref;
+
/* if this process has an instantiation authorisation key, then we also
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
- if (cred->request_key_auth &&
- cred == current_cred() &&
- type != &key_type_request_key_auth
+ if (ctx->cred->request_key_auth &&
+ ctx->cred == current_cred() &&
+ ctx->index_key.type != &key_type_request_key_auth
) {
+ const struct cred *cred = ctx->cred;
+
/* defend against the auth key being revoked */
down_read(&cred->request_key_auth->sem);
- if (key_validate(cred->request_key_auth) == 0) {
- rka = cred->request_key_auth->payload.data;
+ if (key_validate(ctx->cred->request_key_auth) == 0) {
+ rka = ctx->cred->request_key_auth->payload.data;
- key_ref = search_process_keyrings(type, description,
- match, rka->cred);
+ ctx->cred = rka->cred;
+ key_ref = search_process_keyrings(ctx);
+ ctx->cred = cred;
up_read(&cred->request_key_auth->sem);
if (!IS_ERR(key_ref))
goto found;
- switch (PTR_ERR(key_ref)) {
- case -EAGAIN: /* no key */
- if (ret)
- break;
- case -ENOKEY: /* negative key */
- ret = key_ref;
- break;
- default:
- err = key_ref;
- break;
- }
+ ret = key_ref;
} else {
up_read(&cred->request_key_auth->sem);
}
}
/* no key - decide on the error we're going to go for */
- key_ref = ret ? ret : err;
+ if (err == ERR_PTR(-ENOKEY) || ret == ERR_PTR(-ENOKEY))
+ key_ref = ERR_PTR(-ENOKEY);
+ else if (err == ERR_PTR(-EACCES))
+ key_ref = ret;
+ else
+ key_ref = err;
found:
return key_ref;
+}
-} /* end search_process_keyrings() */
-
-/*****************************************************************************/
/*
- * see if the key we're looking at is the target key
+ * See if the key we're looking at is the target key.
*/
-static int lookup_user_key_possessed(const struct key *key, const void *target)
+int lookup_user_key_possessed(const struct key *key, const void *target)
{
return key == target;
+}
-} /* end lookup_user_key_possessed() */
-
-/*****************************************************************************/
/*
- * lookup a key given a key ID from userspace with a given permissions mask
- * - don't create special keyrings unless so requested
- * - partially constructed keys aren't found unless requested
+ * Look up a key ID given us by userspace with a given permissions mask to get
+ * the key it refers to.
+ *
+ * Flags can be passed to request that special keyrings be created if referred
+ * to directly, to permit partially constructed keys to be found and to skip
+ * validity and permission checks on the found key.
+ *
+ * Returns a pointer to the key with an incremented usage count if successful;
+ * -EINVAL if the key ID is invalid; -ENOKEY if the key ID does not correspond
+ * to a key or the best found key was a negative key; -EKEYREVOKED or
+ * -EKEYEXPIRED if the best found key was revoked or expired; -EACCES if the
+ * found key doesn't grant the requested permission or the LSM denied access to it;
+ * or -ENOMEM if a special keyring couldn't be created.
+ *
+ * In the case of a successful return, the possession attribute is set on the
+ * returned key reference.
*/
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
+ struct keyring_search_context ctx = {
+ .match = lookup_user_key_possessed,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_LOOKUP_DIRECT),
+ };
struct request_key_auth *rka;
- const struct cred *cred;
struct key *key;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
- cred = get_current_cred();
+ ctx.cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
- if (!cred->thread_keyring) {
+ if (!ctx.cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_thread_keyring();
if (ret < 0) {
- key = ERR_PTR(ret);
+ key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
- key = cred->thread_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->thread_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
- if (!cred->tgcred->process_keyring) {
+ if (!ctx.cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
ret = install_process_keyring();
if (ret < 0) {
- key = ERR_PTR(ret);
+ key_ref = ERR_PTR(ret);
goto error;
}
goto reget_creds;
}
- key = cred->tgcred->process_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->process_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
- if (!cred->tgcred->session_keyring) {
+ if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
if (ret < 0)
goto error;
- ret = install_session_keyring(
- cred->user->session_keyring);
+ if (lflags & KEY_LOOKUP_CREATE)
+ ret = join_session_keyring(NULL);
+ else
+ ret = install_session_keyring(
+ ctx.cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
+ } else if (ctx.cred->session_keyring ==
+ ctx.cred->user->session_keyring &&
+ lflags & KEY_LOOKUP_CREATE) {
+ ret = join_session_keyring(NULL);
+ if (ret < 0)
+ goto error;
+ goto reget_creds;
}
rcu_read_lock();
- key = rcu_dereference(cred->tgcred->session_keyring);
- atomic_inc(&key->usage);
+ key = rcu_dereference(ctx.cred->session_keyring);
+ __key_get(key);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
- if (!cred->user->uid_keyring) {
+ if (!ctx.cred->user->uid_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
- key = cred->user->uid_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->user->uid_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
- if (!cred->user->session_keyring) {
+ if (!ctx.cred->user->session_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
- key = cred->user->session_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->user->session_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_GROUP_KEYRING:
/* group keyrings are not yet supported */
- key = ERR_PTR(-EINVAL);
+ key_ref = ERR_PTR(-EINVAL);
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
- key = cred->request_key_auth;
+ key = ctx.cred->request_key_auth;
if (!key)
goto error;
- atomic_inc(&key->usage);
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
- if (!cred->request_key_auth)
+ if (!ctx.cred->request_key_auth)
goto error;
- down_read(&cred->request_key_auth->sem);
- if (cred->request_key_auth->flags & KEY_FLAG_REVOKED) {
+ down_read(&ctx.cred->request_key_auth->sem);
+ if (test_bit(KEY_FLAG_REVOKED,
+ &ctx.cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
- rka = cred->request_key_auth->payload.data;
+ rka = ctx.cred->request_key_auth->payload.data;
key = rka->dest_keyring;
- atomic_inc(&key->usage);
+ __key_get(key);
}
- up_read(&cred->request_key_auth->sem);
+ up_read(&ctx.cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
@@ -631,9 +670,13 @@ try_again:
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
- skey_ref = search_process_keyrings(key->type, key,
- lookup_user_key_possessed,
- cred);
+ ctx.index_key.type = key->type;
+ ctx.index_key.description = key->description;
+ ctx.index_key.desc_len = strlen(key->description);
+ ctx.match_data = key;
+ kdebug("check possessed");
+ skey_ref = search_process_keyrings(&ctx);
+ kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
key_put(key);
@@ -673,12 +716,14 @@ try_again:
goto invalid_key;
/* check the permissions */
- ret = key_task_permission(key_ref, cred, perm);
+ ret = key_task_permission(key_ref, ctx.cred, perm);
if (ret < 0)
goto invalid_key;
+ key->last_used_at = current_kernel_time().tv_sec;
+
error:
- put_cred(cred);
+ put_cred(ctx.cred);
return key_ref;
invalid_key:
@@ -689,17 +734,20 @@ invalid_key:
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
- put_cred(cred);
+ put_cred(ctx.cred);
goto try_again;
+}
-} /* end lookup_user_key() */
-
-/*****************************************************************************/
/*
- * join the named keyring as the session keyring if possible, or attempt to
- * create a new one of that name if not
- * - if the name is NULL, an empty anonymous keyring is installed instead
- * - named session keyring joining is done with a semaphore held
+ * Join the named keyring as the session keyring if possible, else attempt to
+ * create a new one of that name and join that.
+ *
+ * If the name is NULL, an empty anonymous keyring will be installed as the
+ * session keyring.
+ *
+ * Named session keyrings are joined with a semaphore held to prevent the
+ * keyrings from going away whilst the attempt is made to join them and also
+ * to prevent a race in creating compatible session keyrings.
*/
long join_session_keyring(const char *name)
{
@@ -708,12 +756,6 @@ long join_session_keyring(const char *name)
struct key *keyring;
long ret, serial;
- /* only permit this if there's a single thread in the thread group -
- * this avoids us having to adjust the creds on all threads and risking
- * ENOMEM */
- if (!current_is_single_threaded())
- return -EMLINK;
-
new = prepare_creds();
if (!new)
return -ENOMEM;
@@ -725,7 +767,7 @@ long join_session_keyring(const char *name)
if (ret < 0)
goto error;
- serial = new->tgcred->session_keyring->serial;
+ serial = new->session_keyring->serial;
ret = commit_creds(new);
if (ret == 0)
ret = serial;
@@ -739,8 +781,10 @@ long join_session_keyring(const char *name)
keyring = find_keyring_by_name(name, false);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
- keyring = keyring_alloc(name, old->uid, old->gid, old,
- KEY_ALLOC_IN_QUOTA, NULL);
+ keyring = keyring_alloc(
+ name, old->uid, old->gid, old,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
@@ -748,6 +792,9 @@ long join_session_keyring(const char *name)
} else if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
+ } else if (keyring == new->session_keyring) {
+ ret = 0;
+ goto error2;
}
/* we've got a keyring - now to install it */
@@ -771,26 +818,19 @@ error:
}
/*
- * Replace a process's session keyring when that process resumes userspace on
- * behalf of one of its children
+ * Replace a process's session keyring on behalf of one of its children when
+ * the target process is about to resume userspace execution.
*/
-void key_replace_session_keyring(void)
+void key_change_session_keyring(struct callback_head *twork)
{
- const struct cred *old;
- struct cred *new;
-
- if (!current->replacement_session_keyring)
- return;
+ const struct cred *old = current_cred();
+ struct cred *new = container_of(twork, struct cred, rcu);
- write_lock_irq(&tasklist_lock);
- new = current->replacement_session_keyring;
- current->replacement_session_keyring = NULL;
- write_unlock_irq(&tasklist_lock);
-
- if (!new)
+ if (unlikely(current->flags & PF_EXITING)) {
+ put_cred(new);
return;
+ }
- old = current_cred();
new-> uid = old-> uid;
new-> euid = old-> euid;
new-> suid = old-> suid;
@@ -800,6 +840,7 @@ void key_replace_session_keyring(void)
new-> sgid = old-> sgid;
new->fsgid = old->fsgid;
new->user = get_uid(old->user);
+ new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
new->securebits = old->securebits;
@@ -810,10 +851,19 @@ void key_replace_session_keyring(void)
new->jit_keyring = old->jit_keyring;
new->thread_keyring = key_get(old->thread_keyring);
- new->tgcred->tgid = old->tgcred->tgid;
- new->tgcred->process_keyring = key_get(old->tgcred->process_keyring);
+ new->process_keyring = key_get(old->process_keyring);
security_transfer_creds(new, old);
commit_creds(new);
}
+
+/*
+ * Make sure that root's user and user-session keyrings exist.
+ */
+static int __init init_root_keyring(void)
+{
+ return install_user_keyrings();
+}
+
+late_initcall(init_root_keyring);
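The keyring_search_context used throughout this file replaces the old (type, description, match, cred) argument lists. Below is a hedged sketch of how an in-kernel caller inside this file could fill one in to test whether the given credentials possess a key, mirroring the two initialisers visible in this patch; every field shown is taken from those call sites, only the wrapper itself is invented:

	static bool example_possesses(const struct cred *cred, struct key *key)
	{
		struct keyring_search_context ctx = {
			.index_key.type		= key->type,
			.index_key.description	= key->description,
			.index_key.desc_len	= strlen(key->description),
			.cred			= cred,
			.match			= lookup_user_key_possessed,
			.match_data		= key,
			.flags			= (KEYRING_SEARCH_NO_STATE_CHECK |
						   KEYRING_SEARCH_LOOKUP_DIRECT),
		};
		key_ref_t ref = search_process_keyrings(&ctx);

		if (IS_ERR(ref))
			return false;
		key_ref_put(ref);
		return true;
	}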
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 03fe63ed55b..381411941cc 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * See Documentation/keys-request-key.txt
+ * See Documentation/security/keys-request-key.txt
*/
#include <linux/module.h>
@@ -39,8 +39,14 @@ static int key_wait_bit_intr(void *flags)
return signal_pending(current) ? -ERESTARTSYS : 0;
}
-/*
- * call to complete the construction of a key
+/**
+ * complete_request_key - Complete the construction of a key.
+ * @cons: The key construction record.
+ * @error: The success or failure of the construction.
+ *
+ * Complete the attempt to construct a key. The key will be negated
+ * if an error is indicated. The authorisation key will be revoked
+ * unconditionally.
*/
void complete_request_key(struct key_construction *cons, int error)
{
@@ -59,7 +65,48 @@ void complete_request_key(struct key_construction *cons, int error)
EXPORT_SYMBOL(complete_request_key);
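The kernel-doc added above implies the standard call pattern for a key type's request_key actor: instantiate (or fail) the key under construction, then hand the result to complete_request_key(). A hedged sketch with an invented payload follows; key_instantiate_and_link() is assumed to be usable here, and cons->key / cons->authkey are the objects the documentation refers to:

	static int example_request_key(struct key_construction *cons,
				       const char *op, void *aux)
	{
		static const char payload[] = "example-payload";
		int ret;

		/* Instantiate the key being constructed, charging the work
		 * against the authorisation key. */
		ret = key_instantiate_and_link(cons->key, payload,
					       sizeof(payload), NULL,
					       cons->authkey);
		complete_request_key(cons, ret);	/* negates on error, revokes authkey */
		return ret;
	}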
/*
- * request userspace finish the construction of a key
+ * Initialise a usermode helper that is going to have a specific session
+ * keyring.
+ *
+ * This is called in context of freshly forked kthread before kernel_execve(),
+ * so we can simply install the desired session_keyring at this point.
+ */
+static int umh_keys_init(struct subprocess_info *info, struct cred *cred)
+{
+ struct key *keyring = info->data;
+
+ return install_session_keyring_to_cred(cred, keyring);
+}
+
+/*
+ * Clean up a usermode helper with session keyring.
+ */
+static void umh_keys_cleanup(struct subprocess_info *info)
+{
+ struct key *keyring = info->data;
+ key_put(keyring);
+}
+
+/*
+ * Call a usermode helper with a specific session keyring.
+ */
+static int call_usermodehelper_keys(char *path, char **argv, char **envp,
+ struct key *session_keyring, int wait)
+{
+ struct subprocess_info *info;
+
+ info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
+ umh_keys_init, umh_keys_cleanup,
+ session_keyring);
+ if (!info)
+ return -ENOMEM;
+
+ key_get(session_keyring);
+ return call_usermodehelper_exec(info, wait);
+}
+
+/*
+ * Request userspace finish the construction of a key
* - execute "/sbin/request-key <op> <key> <uid> <gid> <keyring> <keyring> <keyring>"
*/
static int call_sbin_request_key(struct key_construction *cons,
@@ -68,7 +115,8 @@ static int call_sbin_request_key(struct key_construction *cons,
{
const struct cred *cred = current_cred();
key_serial_t prkey, sskey;
- struct key *key = cons->key, *authkey = cons->authkey, *keyring;
+ struct key *key = cons->key, *authkey = cons->authkey, *keyring,
+ *session;
char *argv[9], *envp[3], uid_str[12], gid_str[12];
char key_str[12], keyring_str[3][12];
char desc[20];
@@ -85,6 +133,7 @@ static int call_sbin_request_key(struct key_construction *cons,
cred = get_current_cred();
keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
put_cred(cred);
if (IS_ERR(keyring)) {
@@ -93,13 +142,13 @@ static int call_sbin_request_key(struct key_construction *cons,
}
/* attach the auth key to the session keyring */
- ret = __key_link(keyring, authkey);
+ ret = key_link(keyring, authkey);
if (ret < 0)
goto error_link;
/* record the UID and GID */
- sprintf(uid_str, "%d", cred->fsuid);
- sprintf(gid_str, "%d", cred->fsgid);
+ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
+ sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));
/* we say which key is under construction */
sprintf(key_str, "%d", key->serial);
@@ -109,13 +158,16 @@ static int call_sbin_request_key(struct key_construction *cons,
cred->thread_keyring ? cred->thread_keyring->serial : 0);
prkey = 0;
- if (cred->tgcred->process_keyring)
- prkey = cred->tgcred->process_keyring->serial;
+ if (cred->process_keyring)
+ prkey = cred->process_keyring->serial;
+ sprintf(keyring_str[1], "%d", prkey);
- if (cred->tgcred->session_keyring)
- sskey = rcu_dereference(cred->tgcred->session_keyring)->serial;
- else
- sskey = cred->user->session_keyring->serial;
+ rcu_read_lock();
+ session = rcu_dereference(cred->session_keyring);
+ if (!session)
+ session = cred->user->session_keyring;
+ sskey = session->serial;
+ rcu_read_unlock();
sprintf(keyring_str[2], "%d", sskey);
@@ -162,8 +214,9 @@ error_alloc:
}
/*
- * call out to userspace for key construction
- * - we ignore program failure and go on key status instead
+ * Call out to userspace for key construction.
+ *
+ * Program failure is ignored in favour of key status.
*/
static int construct_key(struct key *key, const void *callout_info,
size_t callout_len, void *aux,
@@ -210,9 +263,10 @@ static int construct_key(struct key *key, const void *callout_info,
}
/*
- * get the appropriate destination keyring for the request
- * - we return whatever keyring we select with an extra reference upon it which
- * the caller must release
+ * Get the appropriate destination keyring for the request.
+ *
+ * The keyring selected is returned with an extra reference upon it which the
+ * caller must release.
*/
static void construct_get_dest_keyring(struct key **_dest_keyring)
{
@@ -251,14 +305,14 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
break;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
- dest_keyring = key_get(cred->tgcred->process_keyring);
+ dest_keyring = key_get(cred->process_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_SESSION_KEYRING:
rcu_read_lock();
dest_keyring = key_get(
- rcu_dereference(cred->tgcred->session_keyring));
+ rcu_dereference(cred->session_keyring));
rcu_read_unlock();
if (dest_keyring)
@@ -285,77 +339,112 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
}
/*
- * allocate a new key in under-construction state and attempt to link it in to
- * the requested place
- * - may return a key that's already under construction instead
+ * Allocate a new key in under-construction state and attempt to link it in to
+ * the requested keyring.
+ *
+ * May return a key that's already under construction instead if there was a
+ * race between two threads calling request_key().
*/
-static int construct_alloc_key(struct key_type *type,
- const char *description,
+static int construct_alloc_key(struct keyring_search_context *ctx,
struct key *dest_keyring,
unsigned long flags,
struct key_user *user,
struct key **_key)
{
- const struct cred *cred = current_cred();
+ struct assoc_array_edit *edit;
struct key *key;
+ key_perm_t perm;
key_ref_t key_ref;
+ int ret;
- kenter("%s,%s,,,", type->name, description);
+ kenter("%s,%s,,,",
+ ctx->index_key.type->name, ctx->index_key.description);
+ *_key = NULL;
mutex_lock(&user->cons_lock);
- key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred,
- KEY_POS_ALL, flags);
+ perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+ perm |= KEY_USR_VIEW;
+ if (ctx->index_key.type->read)
+ perm |= KEY_POS_READ;
+ if (ctx->index_key.type == &key_type_keyring ||
+ ctx->index_key.type->update)
+ perm |= KEY_POS_WRITE;
+
+ key = key_alloc(ctx->index_key.type, ctx->index_key.description,
+ ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
+ perm, flags);
if (IS_ERR(key))
goto alloc_failed;
set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
- if (dest_keyring)
- down_write(&dest_keyring->sem);
+ if (dest_keyring) {
+ ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
+ if (ret < 0)
+ goto link_prealloc_failed;
+ }
/* attach the key to the destination keyring under lock, but we do need
* to do another check just in case someone beat us to it whilst we
* waited for locks */
mutex_lock(&key_construction_mutex);
- key_ref = search_process_keyrings(type, description, type->match, cred);
+ key_ref = search_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto key_already_present;
if (dest_keyring)
- __key_link(dest_keyring, key);
+ __key_link(key, &edit);
mutex_unlock(&key_construction_mutex);
if (dest_keyring)
- up_write(&dest_keyring->sem);
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
mutex_unlock(&user->cons_lock);
*_key = key;
kleave(" = 0 [%d]", key_serial(key));
return 0;
+ /* the key is now present - we tell the caller that we found it by
+ * returning -EINPROGRESS */
key_already_present:
+ key_put(key);
mutex_unlock(&key_construction_mutex);
- if (dest_keyring)
- up_write(&dest_keyring->sem);
+ key = key_ref_to_ptr(key_ref);
+ if (dest_keyring) {
+ ret = __key_link_check_live_key(dest_keyring, key);
+ if (ret == 0)
+ __key_link(key, &edit);
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
+ if (ret < 0)
+ goto link_check_failed;
+ }
mutex_unlock(&user->cons_lock);
- key_put(key);
- *_key = key = key_ref_to_ptr(key_ref);
+ *_key = key;
kleave(" = -EINPROGRESS [%d]", key_serial(key));
return -EINPROGRESS;
+link_check_failed:
+ mutex_unlock(&user->cons_lock);
+ key_put(key);
+ kleave(" = %d [linkcheck]", ret);
+ return ret;
+
+link_prealloc_failed:
+ mutex_unlock(&user->cons_lock);
+ kleave(" = %d [prelink]", ret);
+ return ret;
+
alloc_failed:
mutex_unlock(&user->cons_lock);
- *_key = NULL;
kleave(" = %ld", PTR_ERR(key));
return PTR_ERR(key);
}
/*
- * commence key construction
+ * Commence key construction.
*/
-static struct key *construct_key_and_link(struct key_type *type,
- const char *description,
+static struct key *construct_key_and_link(struct keyring_search_context *ctx,
const char *callout_info,
size_t callout_len,
void *aux,
@@ -368,14 +457,13 @@ static struct key *construct_key_and_link(struct key_type *type,
kenter("");
- user = key_user_lookup(current_fsuid(), current_user_ns());
+ user = key_user_lookup(current_fsuid());
if (!user)
return ERR_PTR(-ENOMEM);
construct_get_dest_keyring(&dest_keyring);
- ret = construct_alloc_key(type, description, dest_keyring, flags, user,
- &key);
+ ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
key_user_put(user);
if (ret == 0) {
@@ -385,6 +473,10 @@ static struct key *construct_key_and_link(struct key_type *type,
kdebug("cons failed");
goto construction_failed;
}
+ } else if (ret == -EINPROGRESS) {
+ ret = 0;
+ } else {
+ goto couldnt_alloc_key;
}
key_put(dest_keyring);
@@ -394,17 +486,38 @@ static struct key *construct_key_and_link(struct key_type *type,
construction_failed:
key_negate_and_link(key, key_negative_timeout, NULL, NULL);
key_put(key);
+couldnt_alloc_key:
key_put(dest_keyring);
kleave(" = %d", ret);
return ERR_PTR(ret);
}
-/*
- * request a key
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - cache the key in an appropriate keyring
+/**
+ * request_key_and_link - Request a key and cache it in a keyring.
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ * @dest_keyring: Where to cache the key.
+ * @flags: Flags to key_alloc().
+ *
+ * A key matching the specified criteria is searched for in the process's
+ * keyrings and returned with its usage count incremented if found. Otherwise,
+ * if callout_info is not NULL, a key will be allocated and some service
+ * (probably in userspace) will be asked to instantiate it.
+ *
+ * If successfully found or created, the key will be linked to the destination
+ * keyring if one is provided.
+ *
+ * Returns a pointer to the key if successful; -EACCES, -ENOKEY, -EKEYREVOKED
+ * or -EKEYEXPIRED if an inaccessible, negative, revoked or expired key was
+ * found; -ENOKEY if no key was found and no @callout_info was given; -EDQUOT
+ * if insufficient key quota was available to create a new key; or -ENOMEM if
+ * insufficient memory was available.
+ *
+ * If the returned key was created, then it may still be under construction,
+ * and wait_for_key_construction() should be used to wait for that to complete.
*/
struct key *request_key_and_link(struct key_type *type,
const char *description,
@@ -414,20 +527,37 @@ struct key *request_key_and_link(struct key_type *type,
struct key *dest_keyring,
unsigned long flags)
{
- const struct cred *cred = current_cred();
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = type->match,
+ .match_data = description,
+ .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ };
struct key *key;
key_ref_t key_ref;
+ int ret;
kenter("%s,%s,%p,%zu,%p,%p,%lx",
- type->name, description, callout_info, callout_len, aux,
- dest_keyring, flags);
+ ctx.index_key.type->name, ctx.index_key.description,
+ callout_info, callout_len, aux, dest_keyring, flags);
/* search all the process keyrings for a key */
- key_ref = search_process_keyrings(type, description, type->match,
- cred);
+ key_ref = search_process_keyrings(&ctx);
if (!IS_ERR(key_ref)) {
key = key_ref_to_ptr(key_ref);
+ if (dest_keyring) {
+ construct_get_dest_keyring(&dest_keyring);
+ ret = key_link(dest_keyring, key);
+ key_put(dest_keyring);
+ if (ret < 0) {
+ key_put(key);
+ key = ERR_PTR(ret);
+ goto error;
+ }
+ }
} else if (PTR_ERR(key_ref) != -EAGAIN) {
key = ERR_CAST(key_ref);
} else {
@@ -437,9 +567,8 @@ struct key *request_key_and_link(struct key_type *type,
if (!callout_info)
goto error;
- key = construct_key_and_link(type, description, callout_info,
- callout_len, aux, dest_keyring,
- flags);
+ key = construct_key_and_link(&ctx, callout_info, callout_len,
+ aux, dest_keyring, flags);
}
error:
@@ -447,8 +576,16 @@ error:
return key;
}
-/*
- * wait for construction of a key to complete
+/**
+ * wait_for_key_construction - Wait for construction of a key to complete
+ * @key: The key being waited for.
+ * @intr: Whether to wait interruptibly.
+ *
+ * Wait for a key to finish being constructed.
+ *
+ * Returns 0 if successful; -ERESTARTSYS if the wait was interrupted; -ENOKEY
+ * if the key was negated; or -EKEYREVOKED or -EKEYEXPIRED if the key was
+ * revoked or expired.
*/
int wait_for_key_construction(struct key *key, bool intr)
{
@@ -459,16 +596,27 @@ int wait_for_key_construction(struct key *key, bool intr)
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret < 0)
return ret;
+ if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+ smp_rmb();
+ return key->type_data.reject_error;
+ }
return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
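To illustrate how the call above combines with the request_key_async() variant further down, a caller that can tolerate an asynchronous upcall might look roughly like the sketch below. This is illustrative only and not part of the patch: the "user" key type, the callout string handling and the error paths are assumptions.

/* Minimal sketch, assuming <linux/key.h> and <keys/user-type.h>. */
static struct key *example_get_key_async(const char *desc,
					 const char *callout)
{
	struct key *key;
	int ret;

	key = request_key_async(&key_type_user, desc,
				callout, strlen(callout));
	if (IS_ERR(key))
		return key;

	/* ... other work can overlap with the upcall here ... */

	ret = wait_for_key_construction(key, true);
	if (ret < 0) {
		key_put(key);
		return ERR_PTR(ret);
	}
	return key;
}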
-/*
- * request a key
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - waits uninterruptible for creation to complete
+/**
+ * request_key - Request a key and wait for construction
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota,
+ * the callout_info must be a NUL-terminated string and no auxiliary data can
+ * be passed.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
*/
struct key *request_key(struct key_type *type,
const char *description,
@@ -493,12 +641,19 @@ struct key *request_key(struct key_type *type,
}
EXPORT_SYMBOL(request_key);
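By way of illustration only (the key type, description and callout data below are made up and not part of the patch), the common synchronous pattern reduces to a single call, since request_key() already waits uninterruptibly for construction:

/* Minimal sketch: ask for a "user" key, upcalling if it doesn't exist. */
static struct key *example_get_key(void)
{
	struct key *key;

	key = request_key(&key_type_user, "example:widget",
			  "callout data for /sbin/request-key");
	if (IS_ERR(key))
		pr_debug("request_key failed: %ld\n", PTR_ERR(key));

	/* the caller must key_put() a successfully returned key */
	return key;
}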
-/*
- * request a key with auxiliary data for the upcaller
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
- * - waits uninterruptible for creation to complete
+/**
+ * request_key_with_auxdata - Request a key with auxiliary data for the upcaller
+ * @type: The type of key we want.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * Furthermore, it then works as wait_for_key_construction() to wait for the
+ * completion of keys undergoing construction with a non-interruptible wait.
*/
struct key *request_key_with_auxdata(struct key_type *type,
const char *description,
@@ -523,10 +678,18 @@ struct key *request_key_with_auxdata(struct key_type *type,
EXPORT_SYMBOL(request_key_with_auxdata);
/*
- * request a key (allow async construction)
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
+ * request_key_async - Request a key (allow async construction)
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found, new keys are always allocated in the user's quota and
+ * no auxiliary data can be passed.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
*/
struct key *request_key_async(struct key_type *type,
const char *description,
@@ -541,9 +704,17 @@ EXPORT_SYMBOL(request_key_async);
/*
* request a key with auxiliary data for the upcaller (allow async construction)
- * - search the process's keyrings
- * - check the list of keys being created or updated
- * - call out to userspace for a key if supplementary info was provided
+ * @type: Type of key.
+ * @description: The searchable description of the key.
+ * @callout_info: The data to pass to the instantiation upcall (or NULL).
+ * @callout_len: The length of callout_info.
+ * @aux: Auxiliary data for the upcall.
+ *
+ * As for request_key_and_link() except that it does not add the returned key
+ * to a keyring if found and new keys are always allocated in the user's quota.
+ *
+ * The caller should call wait_for_key_construction() to wait for the
+ * completion of the returned key if it is still undergoing construction.
*/
struct key *request_key_async_with_auxdata(struct key_type *type,
const char *description,
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 86747151ee5..7495a93b4b9 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -1,4 +1,4 @@
-/* request_key_auth.c: request key authorisation controlling key def
+/* Request key authorisation token key definition.
*
* Copyright (C) 2005 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
@@ -8,7 +8,7 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
- * See Documentation/keys-request-key.txt
+ * See Documentation/security/keys-request-key.txt
*/
#include <linux/module.h>
@@ -18,15 +18,17 @@
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "internal.h"
+#include <keys/user-type.h>
-static int request_key_auth_instantiate(struct key *, const void *, size_t);
+static int request_key_auth_instantiate(struct key *,
+ struct key_preparsed_payload *);
static void request_key_auth_describe(const struct key *, struct seq_file *);
static void request_key_auth_revoke(struct key *);
static void request_key_auth_destroy(struct key *);
static long request_key_auth_read(const struct key *, char __user *, size_t);
/*
- * the request-key authorisation key type definition
+ * The request-key authorisation key type definition.
*/
struct key_type key_type_request_key_auth = {
.name = ".request_key_auth",
@@ -38,22 +40,18 @@ struct key_type key_type_request_key_auth = {
.read = request_key_auth_read,
};
-/*****************************************************************************/
/*
- * instantiate a request-key authorisation key
+ * Instantiate a request-key authorisation key.
*/
static int request_key_auth_instantiate(struct key *key,
- const void *data,
- size_t datalen)
+ struct key_preparsed_payload *prep)
{
- key->payload.data = (struct request_key_auth *) data;
+ key->payload.data = (struct request_key_auth *)prep->data;
return 0;
+}
-} /* end request_key_auth_instantiate() */
-
-/*****************************************************************************/
/*
- * reading a request-key authorisation key retrieves the callout information
+ * Describe an authorisation token.
*/
static void request_key_auth_describe(const struct key *key,
struct seq_file *m)
@@ -62,13 +60,12 @@ static void request_key_auth_describe(const struct key *key,
seq_puts(m, "key:");
seq_puts(m, key->description);
- seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
-
-} /* end request_key_auth_describe() */
+ if (key_is_instantiated(key))
+ seq_printf(m, " pid:%d ci:%zu", rka->pid, rka->callout_len);
+}
-/*****************************************************************************/
/*
- * read the callout_info data
+ * Read the callout_info data (retrieves the callout information).
* - the key's semaphore is read-locked
*/
static long request_key_auth_read(const struct key *key,
@@ -91,13 +88,12 @@ static long request_key_auth_read(const struct key *key,
}
return ret;
+}
-} /* end request_key_auth_read() */
-
-/*****************************************************************************/
/*
- * handle revocation of an authorisation token key
- * - called with the key sem write-locked
+ * Handle revocation of an authorisation token key.
+ *
+ * Called with the key sem write-locked.
*/
static void request_key_auth_revoke(struct key *key)
{
@@ -109,12 +105,10 @@ static void request_key_auth_revoke(struct key *key)
put_cred(rka->cred);
rka->cred = NULL;
}
+}
-} /* end request_key_auth_revoke() */
-
-/*****************************************************************************/
/*
- * destroy an instantiation authorisation token key
+ * Destroy an instantiation authorisation token key.
*/
static void request_key_auth_destroy(struct key *key)
{
@@ -131,13 +125,11 @@ static void request_key_auth_destroy(struct key *key)
key_put(rka->dest_keyring);
kfree(rka->callout_info);
kfree(rka);
+}
-} /* end request_key_auth_destroy() */
-
-/*****************************************************************************/
/*
- * create an authorisation token for /sbin/request-key or whoever to gain
- * access to the caller's security data
+ * Create an authorisation token for /sbin/request-key or whoever to gain
+ * access to the caller's security data.
*/
struct key *request_key_auth_new(struct key *target, const void *callout_info,
size_t callout_len, struct key *dest_keyring)
@@ -228,45 +220,34 @@ error_alloc:
kfree(rka);
kleave("= %d", ret);
return ERR_PTR(ret);
+}
-} /* end request_key_auth_new() */
-
-/*****************************************************************************/
-/*
- * see if an authorisation key is associated with a particular key
- */
-static int key_get_instantiation_authkey_match(const struct key *key,
- const void *_id)
-{
- struct request_key_auth *rka = key->payload.data;
- key_serial_t id = (key_serial_t)(unsigned long) _id;
-
- return rka->target_key->serial == id;
-
-} /* end key_get_instantiation_authkey_match() */
-
-/*****************************************************************************/
/*
- * get the authorisation key for instantiation of a specific key if attached to
- * the current process's keyrings
- * - this key is inserted into a keyring and that is set as /sbin/request-key's
- * session keyring
- * - a target_id of zero specifies any valid token
+ * Search the current process's keyrings for the authorisation key for
+ * instantiation of a key.
*/
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
- const struct cred *cred = current_cred();
+ char description[16];
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_request_key_auth,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = user_match,
+ .match_data = description,
+ .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ };
struct key *authkey;
key_ref_t authkey_ref;
- authkey_ref = search_process_keyrings(
- &key_type_request_key_auth,
- (void *) (unsigned long) target_id,
- key_get_instantiation_authkey_match,
- cred);
+ sprintf(description, "%x", target_id);
+
+ authkey_ref = search_process_keyrings(&ctx);
if (IS_ERR(authkey_ref)) {
authkey = ERR_CAST(authkey_ref);
+ if (authkey == ERR_PTR(-EAGAIN))
+ authkey = ERR_PTR(-ENOKEY);
goto error;
}
@@ -278,5 +259,4 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
error:
return authkey;
-
-} /* end key_get_instantiation_authkey() */
+}
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index ee32d181764..b68faa1a5cf 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -15,7 +15,7 @@
static const int zero, one = 1, max = INT_MAX;
-ctl_table key_sysctls[] = {
+struct ctl_table key_sysctls[] = {
{
.procname = "maxkeys",
.data = &key_quota_maxkeys,
@@ -61,5 +61,16 @@ ctl_table key_sysctls[] = {
.extra1 = (void *) &zero,
.extra2 = (void *) &max,
},
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ {
+ .procname = "persistent_keyring_expiry",
+ .data = &persistent_keyring_expiry,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) &zero,
+ .extra2 = (void *) &max,
+ },
+#endif
{ }
};
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
new file mode 100644
index 00000000000..6b804aa4529
--- /dev/null
+++ b/security/keys/trusted.c
@@ -0,0 +1,1163 @@
+/*
+ * Copyright (C) 2010 IBM Corporation
+ *
+ * Author:
+ * David Safford <safford@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ * See Documentation/security/keys-trusted-encrypted.txt
+ */
+
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/parser.h>
+#include <linux/string.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/trusted-type.h>
+#include <linux/key-type.h>
+#include <linux/rcupdate.h>
+#include <linux/crypto.h>
+#include <crypto/hash.h>
+#include <crypto/sha.h>
+#include <linux/capability.h>
+#include <linux/tpm.h>
+#include <linux/tpm_command.h>
+
+#include "trusted.h"
+
+static const char hmac_alg[] = "hmac(sha1)";
+static const char hash_alg[] = "sha1";
+
+struct sdesc {
+ struct shash_desc shash;
+ char ctx[];
+};
+
+static struct crypto_shash *hashalg;
+static struct crypto_shash *hmacalg;
+
+static struct sdesc *init_sdesc(struct crypto_shash *alg)
+{
+ struct sdesc *sdesc;
+ int size;
+
+ size = sizeof(struct shash_desc) + crypto_shash_descsize(alg);
+ sdesc = kmalloc(size, GFP_KERNEL);
+ if (!sdesc)
+ return ERR_PTR(-ENOMEM);
+ sdesc->shash.tfm = alg;
+ sdesc->shash.flags = 0x0;
+ return sdesc;
+}
+
+static int TSS_sha1(const unsigned char *data, unsigned int datalen,
+ unsigned char *digest)
+{
+ struct sdesc *sdesc;
+ int ret;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_digest(&sdesc->shash, data, datalen, digest);
+ kfree(sdesc);
+ return ret;
+}
+
+static int TSS_rawhmac(unsigned char *digest, const unsigned char *key,
+ unsigned int keylen, ...)
+{
+ struct sdesc *sdesc;
+ va_list argp;
+ unsigned int dlen;
+ unsigned char *data;
+ int ret;
+
+ sdesc = init_sdesc(hmacalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hmac_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ ret = crypto_shash_setkey(hmacalg, key, keylen);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+
+ va_start(argp, keylen);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ data = va_arg(argp, unsigned char *);
+ if (data == NULL) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = crypto_shash_update(&sdesc->shash, data, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, digest);
+out:
+ kfree(sdesc);
+ return ret;
+}
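The variadic convention used here (and by TSS_authhmac() below) is easy to miss: the key is followed by (length, data) pairs and the argument list ends at the first zero length. The hypothetical wrapper below exists only to spell that convention out; it is not part of the patch.

/* Illustrative sketch only: HMAC two nonces with an OSAP session secret. */
static int example_session_hmac(unsigned char *digest, struct osapsess *sess,
				unsigned char *enonce, unsigned char *ononce)
{
	return TSS_rawhmac(digest, sess->secret, SHA1_DIGEST_SIZE,
			   TPM_NONCE_SIZE, enonce,
			   TPM_NONCE_SIZE, ononce,
			   0, 0);
}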
+
+/*
+ * calculate authorization info fields to send to TPM
+ */
+static int TSS_authhmac(unsigned char *digest, const unsigned char *key,
+ unsigned int keylen, unsigned char *h1,
+ unsigned char *h2, unsigned char h3, ...)
+{
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned char *data;
+ unsigned char c;
+ int ret;
+ va_list argp;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+
+ c = h3;
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ va_start(argp, h3);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ data = va_arg(argp, unsigned char *);
+ if (!data) {
+ ret = -EINVAL;
+ break;
+ }
+ ret = crypto_shash_update(&sdesc->shash, data, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (!ret)
+ ret = TSS_rawhmac(digest, key, keylen, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, h1,
+ TPM_NONCE_SIZE, h2, 1, &c, 0, 0);
+out:
+ kfree(sdesc);
+ return ret;
+}
+
+/*
+ * verify the AUTH1_COMMAND (Seal) result from TPM
+ */
+static int TSS_checkhmac1(unsigned char *buffer,
+ const uint32_t command,
+ const unsigned char *ononce,
+ const unsigned char *key,
+ unsigned int keylen, ...)
+{
+ uint32_t bufsize;
+ uint16_t tag;
+ uint32_t ordinal;
+ uint32_t result;
+ unsigned char *enonce;
+ unsigned char *continueflag;
+ unsigned char *authdata;
+ unsigned char testhmac[SHA1_DIGEST_SIZE];
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned int dpos;
+ va_list argp;
+ int ret;
+
+ bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+ tag = LOAD16(buffer, 0);
+ ordinal = command;
+ result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+ if (tag == TPM_TAG_RSP_COMMAND)
+ return 0;
+ if (tag != TPM_TAG_RSP_AUTH1_COMMAND)
+ return -EINVAL;
+ authdata = buffer + bufsize - SHA1_DIGEST_SIZE;
+ continueflag = authdata - 1;
+ enonce = continueflag - TPM_NONCE_SIZE;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+ sizeof result);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+ sizeof ordinal);
+ if (ret < 0)
+ goto out;
+ va_start(argp, keylen);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ dpos = va_arg(argp, unsigned int);
+ ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (ret < 0)
+ goto out;
+
+ ret = TSS_rawhmac(testhmac, key, keylen, SHA1_DIGEST_SIZE, paramdigest,
+ TPM_NONCE_SIZE, enonce, TPM_NONCE_SIZE, ononce,
+ 1, continueflag, 0, 0);
+ if (ret < 0)
+ goto out;
+
+ if (memcmp(testhmac, authdata, SHA1_DIGEST_SIZE))
+ ret = -EINVAL;
+out:
+ kfree(sdesc);
+ return ret;
+}
+
+/*
+ * verify the AUTH2_COMMAND (unseal) result from TPM
+ */
+static int TSS_checkhmac2(unsigned char *buffer,
+ const uint32_t command,
+ const unsigned char *ononce,
+ const unsigned char *key1,
+ unsigned int keylen1,
+ const unsigned char *key2,
+ unsigned int keylen2, ...)
+{
+ uint32_t bufsize;
+ uint16_t tag;
+ uint32_t ordinal;
+ uint32_t result;
+ unsigned char *enonce1;
+ unsigned char *continueflag1;
+ unsigned char *authdata1;
+ unsigned char *enonce2;
+ unsigned char *continueflag2;
+ unsigned char *authdata2;
+ unsigned char testhmac1[SHA1_DIGEST_SIZE];
+ unsigned char testhmac2[SHA1_DIGEST_SIZE];
+ unsigned char paramdigest[SHA1_DIGEST_SIZE];
+ struct sdesc *sdesc;
+ unsigned int dlen;
+ unsigned int dpos;
+ va_list argp;
+ int ret;
+
+ bufsize = LOAD32(buffer, TPM_SIZE_OFFSET);
+ tag = LOAD16(buffer, 0);
+ ordinal = command;
+ result = LOAD32N(buffer, TPM_RETURN_OFFSET);
+
+ if (tag == TPM_TAG_RSP_COMMAND)
+ return 0;
+ if (tag != TPM_TAG_RSP_AUTH2_COMMAND)
+ return -EINVAL;
+ authdata1 = buffer + bufsize - (SHA1_DIGEST_SIZE + 1
+ + SHA1_DIGEST_SIZE + SHA1_DIGEST_SIZE);
+ authdata2 = buffer + bufsize - (SHA1_DIGEST_SIZE);
+ continueflag1 = authdata1 - 1;
+ continueflag2 = authdata2 - 1;
+ enonce1 = continueflag1 - TPM_NONCE_SIZE;
+ enonce2 = continueflag2 - TPM_NONCE_SIZE;
+
+ sdesc = init_sdesc(hashalg);
+ if (IS_ERR(sdesc)) {
+ pr_info("trusted_key: can't alloc %s\n", hash_alg);
+ return PTR_ERR(sdesc);
+ }
+ ret = crypto_shash_init(&sdesc->shash);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&result,
+ sizeof result);
+ if (ret < 0)
+ goto out;
+ ret = crypto_shash_update(&sdesc->shash, (const u8 *)&ordinal,
+ sizeof ordinal);
+ if (ret < 0)
+ goto out;
+
+ va_start(argp, keylen2);
+ for (;;) {
+ dlen = va_arg(argp, unsigned int);
+ if (dlen == 0)
+ break;
+ dpos = va_arg(argp, unsigned int);
+ ret = crypto_shash_update(&sdesc->shash, buffer + dpos, dlen);
+ if (ret < 0)
+ break;
+ }
+ va_end(argp);
+ if (!ret)
+ ret = crypto_shash_final(&sdesc->shash, paramdigest);
+ if (ret < 0)
+ goto out;
+
+ ret = TSS_rawhmac(testhmac1, key1, keylen1, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, enonce1,
+ TPM_NONCE_SIZE, ononce, 1, continueflag1, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (memcmp(testhmac1, authdata1, SHA1_DIGEST_SIZE)) {
+ ret = -EINVAL;
+ goto out;
+ }
+ ret = TSS_rawhmac(testhmac2, key2, keylen2, SHA1_DIGEST_SIZE,
+ paramdigest, TPM_NONCE_SIZE, enonce2,
+ TPM_NONCE_SIZE, ononce, 1, continueflag2, 0, 0);
+ if (ret < 0)
+ goto out;
+ if (memcmp(testhmac2, authdata2, SHA1_DIGEST_SIZE))
+ ret = -EINVAL;
+out:
+ kfree(sdesc);
+ return ret;
+}
+
+/*
+ * For key-specific TPM requests, we will generate and send our
+ * own TPM command packets using the driver's send function.
+ */
+static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd,
+ size_t buflen)
+{
+ int rc;
+
+ dump_tpm_buf(cmd);
+ rc = tpm_send(chip_num, cmd, buflen);
+ dump_tpm_buf(cmd);
+ if (rc > 0)
+ /* Can't return positive return code values to keyctl */
+ rc = -EPERM;
+ return rc;
+}
+
+/*
+ * Lock a trusted key, by extending a selected PCR.
+ *
+ * Prevents a trusted key that is sealed to PCRs from being accessed.
+ * This uses the tpm driver's extend function.
+ */
+static int pcrlock(const int pcrnum)
+{
+ unsigned char hash[SHA1_DIGEST_SIZE];
+ int ret;
+
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE);
+ if (ret != SHA1_DIGEST_SIZE)
+ return ret;
+ return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? -EINVAL : 0;
+}
+
+/*
+ * Create an object-specific authorisation protocol (OSAP) session
+ */
+static int osap(struct tpm_buf *tb, struct osapsess *s,
+ const unsigned char *key, uint16_t type, uint32_t handle)
+{
+ unsigned char enonce[TPM_NONCE_SIZE];
+ unsigned char ononce[TPM_NONCE_SIZE];
+ int ret;
+
+ ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE);
+ if (ret != TPM_NONCE_SIZE)
+ return ret;
+
+ INIT_BUF(tb);
+ store16(tb, TPM_TAG_RQU_COMMAND);
+ store32(tb, TPM_OSAP_SIZE);
+ store32(tb, TPM_ORD_OSAP);
+ store16(tb, type);
+ store32(tb, handle);
+ storebytes(tb, ononce, TPM_NONCE_SIZE);
+
+ ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ return ret;
+
+ s->handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+ memcpy(s->enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)]),
+ TPM_NONCE_SIZE);
+ memcpy(enonce, &(tb->data[TPM_DATA_OFFSET + sizeof(uint32_t) +
+ TPM_NONCE_SIZE]), TPM_NONCE_SIZE);
+ return TSS_rawhmac(s->secret, key, SHA1_DIGEST_SIZE, TPM_NONCE_SIZE,
+ enonce, TPM_NONCE_SIZE, ononce, 0, 0);
+}
+
+/*
+ * Create an object-independent authorisation protocol (OIAP) session
+ */
+static int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce)
+{
+ int ret;
+
+ INIT_BUF(tb);
+ store16(tb, TPM_TAG_RQU_COMMAND);
+ store32(tb, TPM_OIAP_SIZE);
+ store32(tb, TPM_ORD_OIAP);
+ ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ return ret;
+
+ *handle = LOAD32(tb->data, TPM_DATA_OFFSET);
+ memcpy(nonce, &tb->data[TPM_DATA_OFFSET + sizeof(uint32_t)],
+ TPM_NONCE_SIZE);
+ return 0;
+}
+
+struct tpm_digests {
+ unsigned char encauth[SHA1_DIGEST_SIZE];
+ unsigned char pubauth[SHA1_DIGEST_SIZE];
+ unsigned char xorwork[SHA1_DIGEST_SIZE * 2];
+ unsigned char xorhash[SHA1_DIGEST_SIZE];
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+};
+
+/*
+ * Have the TPM seal (encrypt) the trusted key, possibly based on
+ * Platform Configuration Registers (PCRs). AUTH1 for sealing key.
+ */
+static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
+ uint32_t keyhandle, const unsigned char *keyauth,
+ const unsigned char *data, uint32_t datalen,
+ unsigned char *blob, uint32_t *bloblen,
+ const unsigned char *blobauth,
+ const unsigned char *pcrinfo, uint32_t pcrinfosize)
+{
+ struct osapsess sess;
+ struct tpm_digests *td;
+ unsigned char cont;
+ uint32_t ordinal;
+ uint32_t pcrsize;
+ uint32_t datsize;
+ int sealinfosize;
+ int encdatasize;
+ int storedsize;
+ int ret;
+ int i;
+
+ /* alloc some work space for all the hashes */
+ td = kmalloc(sizeof *td, GFP_KERNEL);
+ if (!td)
+ return -ENOMEM;
+
+ /* get session for sealing key */
+ ret = osap(tb, &sess, keyauth, keytype, keyhandle);
+ if (ret < 0)
+ goto out;
+ dump_sess(&sess);
+
+ /* calculate encrypted authorization value */
+ memcpy(td->xorwork, sess.secret, SHA1_DIGEST_SIZE);
+ memcpy(td->xorwork + SHA1_DIGEST_SIZE, sess.enonce, SHA1_DIGEST_SIZE);
+ ret = TSS_sha1(td->xorwork, SHA1_DIGEST_SIZE * 2, td->xorhash);
+ if (ret < 0)
+ goto out;
+
+ ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE);
+ if (ret != TPM_NONCE_SIZE)
+ goto out;
+ ordinal = htonl(TPM_ORD_SEAL);
+ datsize = htonl(datalen);
+ pcrsize = htonl(pcrinfosize);
+ cont = 0;
+
+ /* encrypt data authorization key */
+ for (i = 0; i < SHA1_DIGEST_SIZE; ++i)
+ td->encauth[i] = td->xorhash[i] ^ blobauth[i];
+
+ /* calculate authorization HMAC value */
+ if (pcrinfosize == 0) {
+ /* no pcr info specified */
+ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+ sess.enonce, td->nonceodd, cont,
+ sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+ td->encauth, sizeof(uint32_t), &pcrsize,
+ sizeof(uint32_t), &datsize, datalen, data, 0,
+ 0);
+ } else {
+ /* pcr info specified */
+ ret = TSS_authhmac(td->pubauth, sess.secret, SHA1_DIGEST_SIZE,
+ sess.enonce, td->nonceodd, cont,
+ sizeof(uint32_t), &ordinal, SHA1_DIGEST_SIZE,
+ td->encauth, sizeof(uint32_t), &pcrsize,
+ pcrinfosize, pcrinfo, sizeof(uint32_t),
+ &datsize, datalen, data, 0, 0);
+ }
+ if (ret < 0)
+ goto out;
+
+ /* build and send the TPM request packet */
+ INIT_BUF(tb);
+ store16(tb, TPM_TAG_RQU_AUTH1_COMMAND);
+ store32(tb, TPM_SEAL_SIZE + pcrinfosize + datalen);
+ store32(tb, TPM_ORD_SEAL);
+ store32(tb, keyhandle);
+ storebytes(tb, td->encauth, SHA1_DIGEST_SIZE);
+ store32(tb, pcrinfosize);
+ storebytes(tb, pcrinfo, pcrinfosize);
+ store32(tb, datalen);
+ storebytes(tb, data, datalen);
+ store32(tb, sess.handle);
+ storebytes(tb, td->nonceodd, TPM_NONCE_SIZE);
+ store8(tb, cont);
+ storebytes(tb, td->pubauth, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ if (ret < 0)
+ goto out;
+
+ /* calculate the size of the returned Blob */
+ sealinfosize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t));
+ encdatasize = LOAD32(tb->data, TPM_DATA_OFFSET + sizeof(uint32_t) +
+ sizeof(uint32_t) + sealinfosize);
+ storedsize = sizeof(uint32_t) + sizeof(uint32_t) + sealinfosize +
+ sizeof(uint32_t) + encdatasize;
+
+ /* check the HMAC in the response */
+ ret = TSS_checkhmac1(tb->data, ordinal, td->nonceodd, sess.secret,
+ SHA1_DIGEST_SIZE, storedsize, TPM_DATA_OFFSET, 0,
+ 0);
+
+ /* copy the returned blob to caller */
+ if (!ret) {
+ memcpy(blob, tb->data + TPM_DATA_OFFSET, storedsize);
+ *bloblen = storedsize;
+ }
+out:
+ kfree(td);
+ return ret;
+}
+
+/*
+ * use the AUTH2_COMMAND form of unseal to authorize both key and blob
+ */
+static int tpm_unseal(struct tpm_buf *tb,
+ uint32_t keyhandle, const unsigned char *keyauth,
+ const unsigned char *blob, int bloblen,
+ const unsigned char *blobauth,
+ unsigned char *data, unsigned int *datalen)
+{
+ unsigned char nonceodd[TPM_NONCE_SIZE];
+ unsigned char enonce1[TPM_NONCE_SIZE];
+ unsigned char enonce2[TPM_NONCE_SIZE];
+ unsigned char authdata1[SHA1_DIGEST_SIZE];
+ unsigned char authdata2[SHA1_DIGEST_SIZE];
+ uint32_t authhandle1 = 0;
+ uint32_t authhandle2 = 0;
+ unsigned char cont = 0;
+ uint32_t ordinal;
+ uint32_t keyhndl;
+ int ret;
+
+ /* sessions for unsealing key and data */
+ ret = oiap(tb, &authhandle1, enonce1);
+ if (ret < 0) {
+ pr_info("trusted_key: oiap failed (%d)\n", ret);
+ return ret;
+ }
+ ret = oiap(tb, &authhandle2, enonce2);
+ if (ret < 0) {
+ pr_info("trusted_key: oiap failed (%d)\n", ret);
+ return ret;
+ }
+
+ ordinal = htonl(TPM_ORD_UNSEAL);
+ keyhndl = htonl(SRKHANDLE);
+ ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE);
+ if (ret != TPM_NONCE_SIZE) {
+ pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
+ return ret;
+ }
+ ret = TSS_authhmac(authdata1, keyauth, TPM_NONCE_SIZE,
+ enonce1, nonceodd, cont, sizeof(uint32_t),
+ &ordinal, bloblen, blob, 0, 0);
+ if (ret < 0)
+ return ret;
+ ret = TSS_authhmac(authdata2, blobauth, TPM_NONCE_SIZE,
+ enonce2, nonceodd, cont, sizeof(uint32_t),
+ &ordinal, bloblen, blob, 0, 0);
+ if (ret < 0)
+ return ret;
+
+ /* build and send TPM request packet */
+ INIT_BUF(tb);
+ store16(tb, TPM_TAG_RQU_AUTH2_COMMAND);
+ store32(tb, TPM_UNSEAL_SIZE + bloblen);
+ store32(tb, TPM_ORD_UNSEAL);
+ store32(tb, keyhandle);
+ storebytes(tb, blob, bloblen);
+ store32(tb, authhandle1);
+ storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+ store8(tb, cont);
+ storebytes(tb, authdata1, SHA1_DIGEST_SIZE);
+ store32(tb, authhandle2);
+ storebytes(tb, nonceodd, TPM_NONCE_SIZE);
+ store8(tb, cont);
+ storebytes(tb, authdata2, SHA1_DIGEST_SIZE);
+
+ ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, MAX_BUF_SIZE);
+ if (ret < 0) {
+ pr_info("trusted_key: authhmac failed (%d)\n", ret);
+ return ret;
+ }
+
+ *datalen = LOAD32(tb->data, TPM_DATA_OFFSET);
+ ret = TSS_checkhmac2(tb->data, ordinal, nonceodd,
+ keyauth, SHA1_DIGEST_SIZE,
+ blobauth, SHA1_DIGEST_SIZE,
+ sizeof(uint32_t), TPM_DATA_OFFSET,
+ *datalen, TPM_DATA_OFFSET + sizeof(uint32_t), 0,
+ 0);
+ if (ret < 0) {
+ pr_info("trusted_key: TSS_checkhmac2 failed (%d)\n", ret);
+ return ret;
+ }
+ memcpy(data, tb->data + TPM_DATA_OFFSET + sizeof(uint32_t), *datalen);
+ return 0;
+}
+
+/*
+ * Have the TPM seal (encrypt) the symmetric key
+ */
+static int key_seal(struct trusted_key_payload *p,
+ struct trusted_key_options *o)
+{
+ struct tpm_buf *tb;
+ int ret;
+
+ tb = kzalloc(sizeof *tb, GFP_KERNEL);
+ if (!tb)
+ return -ENOMEM;
+
+ /* include migratable flag at end of sealed key */
+ p->key[p->key_len] = p->migratable;
+
+ ret = tpm_seal(tb, o->keytype, o->keyhandle, o->keyauth,
+ p->key, p->key_len + 1, p->blob, &p->blob_len,
+ o->blobauth, o->pcrinfo, o->pcrinfo_len);
+ if (ret < 0)
+ pr_info("trusted_key: srkseal failed (%d)\n", ret);
+
+ kfree(tb);
+ return ret;
+}
+
+/*
+ * Have the TPM unseal (decrypt) the symmetric key
+ */
+static int key_unseal(struct trusted_key_payload *p,
+ struct trusted_key_options *o)
+{
+ struct tpm_buf *tb;
+ int ret;
+
+ tb = kzalloc(sizeof *tb, GFP_KERNEL);
+ if (!tb)
+ return -ENOMEM;
+
+ ret = tpm_unseal(tb, o->keyhandle, o->keyauth, p->blob, p->blob_len,
+ o->blobauth, p->key, &p->key_len);
+ if (ret < 0)
+ pr_info("trusted_key: srkunseal failed (%d)\n", ret);
+ else
+ /* pull migratable flag out of sealed key */
+ p->migratable = p->key[--p->key_len];
+
+ kfree(tb);
+ return ret;
+}
+
+enum {
+ Opt_err = -1,
+ Opt_new, Opt_load, Opt_update,
+ Opt_keyhandle, Opt_keyauth, Opt_blobauth,
+ Opt_pcrinfo, Opt_pcrlock, Opt_migratable
+};
+
+static const match_table_t key_tokens = {
+ {Opt_new, "new"},
+ {Opt_load, "load"},
+ {Opt_update, "update"},
+ {Opt_keyhandle, "keyhandle=%s"},
+ {Opt_keyauth, "keyauth=%s"},
+ {Opt_blobauth, "blobauth=%s"},
+ {Opt_pcrinfo, "pcrinfo=%s"},
+ {Opt_pcrlock, "pcrlock=%s"},
+ {Opt_migratable, "migratable=%s"},
+ {Opt_err, NULL}
+};
+
+/* can have zero or more token= options */
+static int getoptions(char *c, struct trusted_key_payload *pay,
+ struct trusted_key_options *opt)
+{
+ substring_t args[MAX_OPT_ARGS];
+ char *p = c;
+ int token;
+ int res;
+ unsigned long handle;
+ unsigned long lock;
+
+ while ((p = strsep(&c, " \t"))) {
+ if (*p == '\0' || *p == ' ' || *p == '\t')
+ continue;
+ token = match_token(p, key_tokens, args);
+
+ switch (token) {
+ case Opt_pcrinfo:
+ opt->pcrinfo_len = strlen(args[0].from) / 2;
+ if (opt->pcrinfo_len > MAX_PCRINFO_SIZE)
+ return -EINVAL;
+ res = hex2bin(opt->pcrinfo, args[0].from,
+ opt->pcrinfo_len);
+ if (res < 0)
+ return -EINVAL;
+ break;
+ case Opt_keyhandle:
+ res = kstrtoul(args[0].from, 16, &handle);
+ if (res < 0)
+ return -EINVAL;
+ opt->keytype = SEAL_keytype;
+ opt->keyhandle = handle;
+ break;
+ case Opt_keyauth:
+ if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+ return -EINVAL;
+ res = hex2bin(opt->keyauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
+ break;
+ case Opt_blobauth:
+ if (strlen(args[0].from) != 2 * SHA1_DIGEST_SIZE)
+ return -EINVAL;
+ res = hex2bin(opt->blobauth, args[0].from,
+ SHA1_DIGEST_SIZE);
+ if (res < 0)
+ return -EINVAL;
+ break;
+ case Opt_migratable:
+ if (*args[0].from == '0')
+ pay->migratable = 0;
+ else
+ return -EINVAL;
+ break;
+ case Opt_pcrlock:
+ res = kstrtoul(args[0].from, 10, &lock);
+ if (res < 0)
+ return -EINVAL;
+ opt->pcrlock = lock;
+ break;
+ default:
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
+/*
+ * datablob_parse - parse the keyctl data and fill in the
+ * payload and options structures
+ *
+ * On success returns the matched command token (Opt_new, Opt_load or
+ * Opt_update), otherwise -EINVAL.
+ */
+static int datablob_parse(char *datablob, struct trusted_key_payload *p,
+ struct trusted_key_options *o)
+{
+ substring_t args[MAX_OPT_ARGS];
+ long keylen;
+ int ret = -EINVAL;
+ int key_cmd;
+ char *c;
+
+ /* main command */
+ c = strsep(&datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ key_cmd = match_token(c, key_tokens, args);
+ switch (key_cmd) {
+ case Opt_new:
+ /* first argument is key size */
+ c = strsep(&datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ ret = kstrtol(c, 10, &keylen);
+ if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
+ return -EINVAL;
+ p->key_len = keylen;
+ ret = getoptions(datablob, p, o);
+ if (ret < 0)
+ return ret;
+ ret = Opt_new;
+ break;
+ case Opt_load:
+ /* first argument is sealed blob */
+ c = strsep(&datablob, " \t");
+ if (!c)
+ return -EINVAL;
+ p->blob_len = strlen(c) / 2;
+ if (p->blob_len > MAX_BLOB_SIZE)
+ return -EINVAL;
+ ret = hex2bin(p->blob, c, p->blob_len);
+ if (ret < 0)
+ return -EINVAL;
+ ret = getoptions(datablob, p, o);
+ if (ret < 0)
+ return ret;
+ ret = Opt_load;
+ break;
+ case Opt_update:
+ /* all arguments are options */
+ ret = getoptions(datablob, p, o);
+ if (ret < 0)
+ return ret;
+ ret = Opt_update;
+ break;
+ case Opt_err:
+ return -EINVAL;
+ break;
+ }
+ return ret;
+}
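To make the grammar accepted above concrete, these are the kinds of payload strings a caller would hand to the "trusted" key type (for instance via add_key(2) or keyctl). They are illustrative only; <hex> stands in for real hexadecimal data and is not literal syntax.

/*
 * Illustrative payloads only:
 *
 *   "new 32"                           - generate and seal a 32-byte
 *                                        random key under the SRK
 *   "new 32 pcrinfo=<hex> pcrlock=10"  - seal to the given PCR state,
 *                                        then extend PCR 10
 *   "load <hex blob>"                  - unseal a previously saved blob
 *   "update pcrinfo=<hex>"             - reseal an existing key to new
 *                                        PCR values
 */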
+
+static struct trusted_key_options *trusted_options_alloc(void)
+{
+ struct trusted_key_options *options;
+
+ options = kzalloc(sizeof *options, GFP_KERNEL);
+ if (options) {
+ /* set any non-zero defaults */
+ options->keytype = SRK_keytype;
+ options->keyhandle = SRKHANDLE;
+ }
+ return options;
+}
+
+static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
+{
+ struct trusted_key_payload *p = NULL;
+ int ret;
+
+ ret = key_payload_reserve(key, sizeof *p);
+ if (ret < 0)
+ return p;
+ p = kzalloc(sizeof *p, GFP_KERNEL);
+ if (p)
+ p->migratable = 1; /* migratable by default */
+ return p;
+}
+
+/*
+ * trusted_instantiate - create a new trusted key
+ *
+ * Unseal an existing trusted blob or, for a new key, get a
+ * random key, then seal and create a trusted key-type key,
+ * adding it to the specified keyring.
+ *
+ * On success, return 0. Otherwise return errno.
+ */
+static int trusted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
+{
+ struct trusted_key_payload *payload = NULL;
+ struct trusted_key_options *options = NULL;
+ size_t datalen = prep->datalen;
+ char *datablob;
+ int ret = 0;
+ int key_cmd;
+ size_t key_len;
+
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ memcpy(datablob, prep->data, datalen);
+ datablob[datalen] = '\0';
+
+ options = trusted_options_alloc();
+ if (!options) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ payload = trusted_payload_alloc(key);
+ if (!payload) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ key_cmd = datablob_parse(datablob, payload, options);
+ if (key_cmd < 0) {
+ ret = key_cmd;
+ goto out;
+ }
+
+ dump_payload(payload);
+ dump_options(options);
+
+ switch (key_cmd) {
+ case Opt_load:
+ ret = key_unseal(payload, options);
+ dump_payload(payload);
+ dump_options(options);
+ if (ret < 0)
+ pr_info("trusted_key: key_unseal failed (%d)\n", ret);
+ break;
+ case Opt_new:
+ key_len = payload->key_len;
+ ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len);
+ if (ret != key_len) {
+ pr_info("trusted_key: key_create failed (%d)\n", ret);
+ goto out;
+ }
+ ret = key_seal(payload, options);
+ if (ret < 0)
+ pr_info("trusted_key: key_seal failed (%d)\n", ret);
+ break;
+ default:
+ ret = -EINVAL;
+ goto out;
+ }
+ if (!ret && options->pcrlock)
+ ret = pcrlock(options->pcrlock);
+out:
+ kfree(datablob);
+ kfree(options);
+ if (!ret)
+ rcu_assign_keypointer(key, payload);
+ else
+ kfree(payload);
+ return ret;
+}
+
+static void trusted_rcu_free(struct rcu_head *rcu)
+{
+ struct trusted_key_payload *p;
+
+ p = container_of(rcu, struct trusted_key_payload, rcu);
+ memset(p->key, 0, p->key_len);
+ kfree(p);
+}
+
+/*
+ * trusted_update - reseal an existing key with new PCR values
+ */
+static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct trusted_key_payload *p = key->payload.data;
+ struct trusted_key_payload *new_p;
+ struct trusted_key_options *new_o;
+ size_t datalen = prep->datalen;
+ char *datablob;
+ int ret = 0;
+
+ if (!p->migratable)
+ return -EPERM;
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
+ return -EINVAL;
+
+ datablob = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!datablob)
+ return -ENOMEM;
+ new_o = trusted_options_alloc();
+ if (!new_o) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ new_p = trusted_payload_alloc(key);
+ if (!new_p) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(datablob, prep->data, datalen);
+ datablob[datalen] = '\0';
+ ret = datablob_parse(datablob, new_p, new_o);
+ if (ret != Opt_update) {
+ ret = -EINVAL;
+ kfree(new_p);
+ goto out;
+ }
+ /* copy old key values, and reseal with new pcrs */
+ new_p->migratable = p->migratable;
+ new_p->key_len = p->key_len;
+ memcpy(new_p->key, p->key, p->key_len);
+ dump_payload(p);
+ dump_payload(new_p);
+
+ ret = key_seal(new_p, new_o);
+ if (ret < 0) {
+ pr_info("trusted_key: key_seal failed (%d)\n", ret);
+ kfree(new_p);
+ goto out;
+ }
+ if (new_o->pcrlock) {
+ ret = pcrlock(new_o->pcrlock);
+ if (ret < 0) {
+ pr_info("trusted_key: pcrlock failed (%d)\n", ret);
+ kfree(new_p);
+ goto out;
+ }
+ }
+ rcu_assign_keypointer(key, new_p);
+ call_rcu(&p->rcu, trusted_rcu_free);
+out:
+ kfree(datablob);
+ kfree(new_o);
+ return ret;
+}
+
+/*
+ * trusted_read - copy the sealed blob data to userspace in hex.
+ * On success, return to userspace the trusted key datablob size.
+ */
+static long trusted_read(const struct key *key, char __user *buffer,
+ size_t buflen)
+{
+ struct trusted_key_payload *p;
+ char *ascii_buf;
+ char *bufp;
+ int i;
+
+ p = rcu_dereference_key(key);
+ if (!p)
+ return -EINVAL;
+ if (!buffer || buflen <= 0)
+ return 2 * p->blob_len;
+ ascii_buf = kmalloc(2 * p->blob_len, GFP_KERNEL);
+ if (!ascii_buf)
+ return -ENOMEM;
+
+ bufp = ascii_buf;
+ for (i = 0; i < p->blob_len; i++)
+ bufp = hex_byte_pack(bufp, p->blob[i]);
+ if ((copy_to_user(buffer, ascii_buf, 2 * p->blob_len)) != 0) {
+ kfree(ascii_buf);
+ return -EFAULT;
+ }
+ kfree(ascii_buf);
+ return 2 * p->blob_len;
+}
+
+/*
+ * trusted_destroy - before freeing the key, clear the decrypted data
+ */
+static void trusted_destroy(struct key *key)
+{
+ struct trusted_key_payload *p = key->payload.data;
+
+ if (!p)
+ return;
+ memset(p->key, 0, p->key_len);
+ kfree(key->payload.data);
+}
+
+struct key_type key_type_trusted = {
+ .name = "trusted",
+ .instantiate = trusted_instantiate,
+ .update = trusted_update,
+ .match = user_match,
+ .destroy = trusted_destroy,
+ .describe = user_describe,
+ .read = trusted_read,
+};
+
+EXPORT_SYMBOL_GPL(key_type_trusted);
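key_type_trusted is exported so that other parts of the kernel (the encrypted-keys type is the usual consumer) can look a trusted key up by description and use its unsealed payload as master-key material. A rough sketch of that consumer side follows; the helper name, locking and copy are assumptions for illustration, not taken from this patch.

/* Illustrative sketch only: copy out the unsealed key of a trusted key. */
static int example_copy_trusted_key(const char *desc, u8 *buf, size_t buflen)
{
	const struct trusted_key_payload *tp;
	struct key *tkey;
	int len;

	tkey = request_key(&key_type_trusted, desc, NULL);
	if (IS_ERR(tkey))
		return PTR_ERR(tkey);

	down_read(&tkey->sem);
	tp = tkey->payload.data;
	len = min_t(size_t, tp->key_len, buflen);
	memcpy(buf, tp->key, len);
	up_read(&tkey->sem);

	key_put(tkey);
	return len;
}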
+
+static void trusted_shash_release(void)
+{
+ if (hashalg)
+ crypto_free_shash(hashalg);
+ if (hmacalg)
+ crypto_free_shash(hmacalg);
+}
+
+static int __init trusted_shash_alloc(void)
+{
+ int ret;
+
+ hmacalg = crypto_alloc_shash(hmac_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hmacalg)) {
+ pr_info("trusted_key: could not allocate crypto %s\n",
+ hmac_alg);
+ return PTR_ERR(hmacalg);
+ }
+
+ hashalg = crypto_alloc_shash(hash_alg, 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(hashalg)) {
+ pr_info("trusted_key: could not allocate crypto %s\n",
+ hash_alg);
+ ret = PTR_ERR(hashalg);
+ goto hashalg_fail;
+ }
+
+ return 0;
+
+hashalg_fail:
+ crypto_free_shash(hmacalg);
+ return ret;
+}
+
+static int __init init_trusted(void)
+{
+ int ret;
+
+ ret = trusted_shash_alloc();
+ if (ret < 0)
+ return ret;
+ ret = register_key_type(&key_type_trusted);
+ if (ret < 0)
+ trusted_shash_release();
+ return ret;
+}
+
+static void __exit cleanup_trusted(void)
+{
+ trusted_shash_release();
+ unregister_key_type(&key_type_trusted);
+}
+
+late_initcall(init_trusted);
+module_exit(cleanup_trusted);
+
+MODULE_LICENSE("GPL");
diff --git a/security/keys/trusted.h b/security/keys/trusted.h
new file mode 100644
index 00000000000..3249fbd2b65
--- /dev/null
+++ b/security/keys/trusted.h
@@ -0,0 +1,134 @@
+#ifndef __TRUSTED_KEY_H
+#define __TRUSTED_KEY_H
+
+/* implementation specific TPM constants */
+#define MAX_PCRINFO_SIZE 64
+#define MAX_BUF_SIZE 512
+#define TPM_GETRANDOM_SIZE 14
+#define TPM_OSAP_SIZE 36
+#define TPM_OIAP_SIZE 10
+#define TPM_SEAL_SIZE 87
+#define TPM_UNSEAL_SIZE 104
+#define TPM_SIZE_OFFSET 2
+#define TPM_RETURN_OFFSET 6
+#define TPM_DATA_OFFSET 10
+
+#define LOAD32(buffer, offset) (ntohl(*(uint32_t *)&buffer[offset]))
+#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
+#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
+
+struct tpm_buf {
+ int len;
+ unsigned char data[MAX_BUF_SIZE];
+};
+
+#define INIT_BUF(tb) (tb->len = 0)
+
+struct osapsess {
+ uint32_t handle;
+ unsigned char secret[SHA1_DIGEST_SIZE];
+ unsigned char enonce[TPM_NONCE_SIZE];
+};
+
+/* discrete values, but have to store in uint16_t for TPM use */
+enum {
+ SEAL_keytype = 1,
+ SRK_keytype = 4
+};
+
+struct trusted_key_options {
+ uint16_t keytype;
+ uint32_t keyhandle;
+ unsigned char keyauth[SHA1_DIGEST_SIZE];
+ unsigned char blobauth[SHA1_DIGEST_SIZE];
+ uint32_t pcrinfo_len;
+ unsigned char pcrinfo[MAX_PCRINFO_SIZE];
+ int pcrlock;
+};
+
+#define TPM_DEBUG 0
+
+#if TPM_DEBUG
+static inline void dump_options(struct trusted_key_options *o)
+{
+ pr_info("trusted_key: sealing key type %d\n", o->keytype);
+ pr_info("trusted_key: sealing key handle %0X\n", o->keyhandle);
+ pr_info("trusted_key: pcrlock %d\n", o->pcrlock);
+ pr_info("trusted_key: pcrinfo %d\n", o->pcrinfo_len);
+ print_hex_dump(KERN_INFO, "pcrinfo ", DUMP_PREFIX_NONE,
+ 16, 1, o->pcrinfo, o->pcrinfo_len, 0);
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+ pr_info("trusted_key: key_len %d\n", p->key_len);
+ print_hex_dump(KERN_INFO, "key ", DUMP_PREFIX_NONE,
+ 16, 1, p->key, p->key_len, 0);
+ pr_info("trusted_key: bloblen %d\n", p->blob_len);
+ print_hex_dump(KERN_INFO, "blob ", DUMP_PREFIX_NONE,
+ 16, 1, p->blob, p->blob_len, 0);
+ pr_info("trusted_key: migratable %d\n", p->migratable);
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+ print_hex_dump(KERN_INFO, "trusted-key: handle ", DUMP_PREFIX_NONE,
+ 16, 1, &s->handle, 4, 0);
+ pr_info("trusted-key: secret:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+ 16, 1, &s->secret, SHA1_DIGEST_SIZE, 0);
+ pr_info("trusted-key: enonce:\n");
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE,
+ 16, 1, &s->enonce, SHA1_DIGEST_SIZE, 0);
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+ int len;
+
+ pr_info("\ntrusted-key: tpm buffer\n");
+ len = LOAD32(buf, TPM_SIZE_OFFSET);
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_NONE, 16, 1, buf, len, 0);
+}
+#else
+static inline void dump_options(struct trusted_key_options *o)
+{
+}
+
+static inline void dump_payload(struct trusted_key_payload *p)
+{
+}
+
+static inline void dump_sess(struct osapsess *s)
+{
+}
+
+static inline void dump_tpm_buf(unsigned char *buf)
+{
+}
+#endif
+
+static inline void store8(struct tpm_buf *buf, const unsigned char value)
+{
+ buf->data[buf->len++] = value;
+}
+
+static inline void store16(struct tpm_buf *buf, const uint16_t value)
+{
+ *(uint16_t *) & buf->data[buf->len] = htons(value);
+ buf->len += sizeof value;
+}
+
+static inline void store32(struct tpm_buf *buf, const uint32_t value)
+{
+ *(uint32_t *) & buf->data[buf->len] = htonl(value);
+ buf->len += sizeof value;
+}
+
+static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
+ const int len)
+{
+ memcpy(buf->data + buf->len, in, len);
+ buf->len += len;
+}
+#endif
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index 7c687d56822..faa2caeb593 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -18,34 +18,56 @@
#include <asm/uaccess.h>
#include "internal.h"
+static int logon_vet_description(const char *desc);
+
/*
* user defined keys take an arbitrary string as the description and an
* arbitrary blob of data as the payload
*/
struct key_type key_type_user = {
- .name = "user",
- .instantiate = user_instantiate,
- .update = user_update,
- .match = user_match,
- .revoke = user_revoke,
- .destroy = user_destroy,
- .describe = user_describe,
- .read = user_read,
+ .name = "user",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = user_instantiate,
+ .update = user_update,
+ .match = user_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
};
EXPORT_SYMBOL_GPL(key_type_user);
-/*****************************************************************************/
+/*
+ * This key type is essentially the same as key_type_user, but it does
+ * not define a .read op. This is suitable for storing username and
+ * password pairs in the keyring that you do not want to be readable
+ * from userspace.
+ */
+struct key_type key_type_logon = {
+ .name = "logon",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = user_instantiate,
+ .update = user_update,
+ .match = user_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .vet_description = logon_vet_description,
+};
+EXPORT_SYMBOL_GPL(key_type_logon);
+
/*
* instantiate a user defined key
*/
-int user_instantiate(struct key *key, const void *data, size_t datalen)
+int user_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
struct user_key_payload *upayload;
+ size_t datalen = prep->datalen;
int ret;
ret = -EINVAL;
- if (datalen <= 0 || datalen > 32767 || !data)
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
goto error;
ret = key_payload_reserve(key, datalen);
@@ -59,43 +81,28 @@ int user_instantiate(struct key *key, const void *data, size_t datalen)
/* attach the data */
upayload->datalen = datalen;
- memcpy(upayload->data, data, datalen);
- rcu_assign_pointer(key->payload.data, upayload);
+ memcpy(upayload->data, prep->data, datalen);
+ rcu_assign_keypointer(key, upayload);
ret = 0;
error:
return ret;
-
-} /* end user_instantiate() */
+}
EXPORT_SYMBOL_GPL(user_instantiate);
-/*****************************************************************************/
-/*
- * dispose of the old data from an updated user defined key
- */
-static void user_update_rcu_disposal(struct rcu_head *rcu)
-{
- struct user_key_payload *upayload;
-
- upayload = container_of(rcu, struct user_key_payload, rcu);
-
- kfree(upayload);
-
-} /* end user_update_rcu_disposal() */
-
-/*****************************************************************************/
/*
* update a user defined key
* - the key's semaphore is write-locked
*/
-int user_update(struct key *key, const void *data, size_t datalen)
+int user_update(struct key *key, struct key_preparsed_payload *prep)
{
struct user_key_payload *upayload, *zap;
+ size_t datalen = prep->datalen;
int ret;
ret = -EINVAL;
- if (datalen <= 0 || datalen > 32767 || !data)
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
goto error;
/* construct a replacement payload */
@@ -105,7 +112,7 @@ int user_update(struct key *key, const void *data, size_t datalen)
goto error;
upayload->datalen = datalen;
- memcpy(upayload->data, data, datalen);
+ memcpy(upayload->data, prep->data, datalen);
/* check the quota and attach the new data */
zap = upayload;
@@ -115,32 +122,29 @@ int user_update(struct key *key, const void *data, size_t datalen)
if (ret == 0) {
/* attach the new data, displacing the old */
zap = key->payload.data;
- rcu_assign_pointer(key->payload.data, upayload);
+ rcu_assign_keypointer(key, upayload);
key->expiry = 0;
}
- call_rcu(&zap->rcu, user_update_rcu_disposal);
+ if (zap)
+ kfree_rcu(zap, rcu);
error:
return ret;
-
-} /* end user_update() */
+}
EXPORT_SYMBOL_GPL(user_update);
-/*****************************************************************************/
/*
* match users on their name
*/
int user_match(const struct key *key, const void *description)
{
return strcmp(key->description, description) == 0;
-
-} /* end user_match() */
+}
EXPORT_SYMBOL_GPL(user_match);
-/*****************************************************************************/
/*
* dispose of the links from a revoked keyring
* - called with the key sem write-locked
@@ -153,15 +157,13 @@ void user_revoke(struct key *key)
key_payload_reserve(key, 0);
if (upayload) {
- rcu_assign_pointer(key->payload.data, NULL);
- call_rcu(&upayload->rcu, user_update_rcu_disposal);
+ rcu_assign_keypointer(key, NULL);
+ kfree_rcu(upayload, rcu);
}
-
-} /* end user_revoke() */
+}
EXPORT_SYMBOL(user_revoke);
-/*****************************************************************************/
/*
* dispose of the data dangling from the corpse of a user key
*/
@@ -170,26 +172,22 @@ void user_destroy(struct key *key)
struct user_key_payload *upayload = key->payload.data;
kfree(upayload);
-
-} /* end user_destroy() */
+}
EXPORT_SYMBOL_GPL(user_destroy);
-/*****************************************************************************/
/*
* describe the user key
*/
void user_describe(const struct key *key, struct seq_file *m)
{
seq_puts(m, key->description);
-
- seq_printf(m, ": %u", key->datalen);
-
-} /* end user_describe() */
+ if (key_is_instantiated(key))
+ seq_printf(m, ": %u", key->datalen);
+}
EXPORT_SYMBOL_GPL(user_describe);
-/*****************************************************************************/
/*
* read the key data
* - the key's semaphore is read-locked
@@ -199,7 +197,7 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen)
struct user_key_payload *upayload;
long ret;
- upayload = rcu_dereference(key->payload.data);
+ upayload = rcu_dereference_key(key);
ret = upayload->datalen;
/* we can return the data as is */
@@ -212,7 +210,23 @@ long user_read(const struct key *key, char __user *buffer, size_t buflen)
}
return ret;
-
-} /* end user_read() */
+}
EXPORT_SYMBOL_GPL(user_read);
+
+/* Vet the description for a "logon" key */
+static int logon_vet_description(const char *desc)
+{
+ char *p;
+
+ /* require a "qualified" description string */
+ p = strchr(desc, ':');
+ if (!p)
+ return -EINVAL;
+
+ /* also reject description with ':' as first char */
+ if (p == desc)
+ return -EINVAL;
+
+ return 0;
+}
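The description for a "logon" key must be qualified as "service:identifier"; a missing or leading ':' is rejected. The same rule, exercised as an ordinary C program:

	#include <stdio.h>
	#include <string.h>

	/* Mirror of the vetting rule above: a ':' must be present and not first. */
	static int vet_description(const char *desc)
	{
		const char *p = strchr(desc, ':');

		return (p && p != desc) ? 0 : -1;
	}

	int main(void)
	{
		const char *samples[] = { "krb5:user@EXAMPLE.COM", ":missing-prefix", "noseparator" };

		for (size_t i = 0; i < 3; i++)
			printf("%-24s -> %s\n", samples[i],
			       vet_description(samples[i]) ? "rejected" : "accepted");
		return 0;
	}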
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index acba3dfc8d2..69fdf3bc765 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
+#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <net/sock.h>
@@ -48,8 +49,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (ih == NULL)
return -EINVAL;
- ad->u.net.v4info.saddr = ih->saddr;
- ad->u.net.v4info.daddr = ih->daddr;
+ ad->u.net->v4info.saddr = ih->saddr;
+ ad->u.net->v4info.daddr = ih->daddr;
if (proto)
*proto = ih->protocol;
@@ -63,8 +64,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
@@ -72,8 +73,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
@@ -81,16 +82,16 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
case IPPROTO_SCTP: {
struct sctphdr *sh = sctp_hdr(skb);
if (sh == NULL)
break;
- ad->u.net.sport = sh->source;
- ad->u.net.dport = sh->dest;
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
break;
}
default:
@@ -113,19 +114,20 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
int offset, ret = 0;
struct ipv6hdr *ip6;
u8 nexthdr;
+ __be16 frag_off;
ip6 = ipv6_hdr(skb);
if (ip6 == NULL)
return -EINVAL;
- ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
- ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+ ad->u.net->v6info.saddr = ip6->saddr;
+ ad->u.net->v6info.daddr = ip6->daddr;
ret = 0;
/* IPv6 can have several extension header before the Transport header
* skip them */
offset = skb_network_offset(skb);
offset += sizeof(*ip6);
nexthdr = ip6->nexthdr;
- offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
if (offset < 0)
return 0;
if (proto)
@@ -138,8 +140,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
@@ -149,8 +151,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
@@ -160,8 +162,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
case IPPROTO_SCTP: {
@@ -170,8 +172,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
if (sh == NULL)
break;
- ad->u.net.sport = sh->source;
- ad->u.net.dport = sh->dest;
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
break;
}
default:
@@ -209,18 +211,20 @@ static inline void print_ipv4_addr(struct audit_buffer *ab, __be32 addr,
static void dump_common_audit_data(struct audit_buffer *ab,
struct common_audit_data *a)
{
- struct inode *inode = NULL;
struct task_struct *tsk = current;
- if (a->tsk)
- tsk = a->tsk;
- if (tsk && tsk->pid) {
- audit_log_format(ab, " pid=%d comm=", tsk->pid);
- audit_log_untrustedstring(ab, tsk->comm);
- }
+ /*
+ * To keep stack sizes in check, force programmers to notice if they
+ * start making this union too large! See struct lsm_network_audit
+ * as an example of how to deal with large data.
+ */
+ BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
+
+ audit_log_format(ab, " pid=%d comm=", task_pid_nr(tsk));
+ audit_log_untrustedstring(ab, tsk->comm);
switch (a->type) {
- case LSM_AUDIT_NO_AUDIT:
+ case LSM_AUDIT_DATA_NONE:
return;
case LSM_AUDIT_DATA_IPC:
audit_log_format(ab, " key=%d ", a->u.ipc_id);
@@ -228,43 +232,63 @@ static void dump_common_audit_data(struct audit_buffer *ab,
case LSM_AUDIT_DATA_CAP:
audit_log_format(ab, " capability=%d ", a->u.cap);
break;
- case LSM_AUDIT_DATA_FS:
- if (a->u.fs.path.dentry) {
- struct dentry *dentry = a->u.fs.path.dentry;
- if (a->u.fs.path.mnt) {
- audit_log_d_path(ab, "path=", &a->u.fs.path);
- } else {
- audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab,
- dentry->d_name.name);
- }
- inode = dentry->d_inode;
- } else if (a->u.fs.inode) {
- struct dentry *dentry;
- inode = a->u.fs.inode;
- dentry = d_find_alias(inode);
- if (dentry) {
- audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab,
- dentry->d_name.name);
- dput(dentry);
- }
+ case LSM_AUDIT_DATA_PATH: {
+ struct inode *inode;
+
+ audit_log_d_path(ab, " path=", &a->u.path);
+
+ inode = a->u.path.dentry->d_inode;
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
}
- if (inode)
- audit_log_format(ab, " dev=%s ino=%lu",
- inode->i_sb->s_id,
- inode->i_ino);
break;
+ }
+ case LSM_AUDIT_DATA_DENTRY: {
+ struct inode *inode;
+
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab, a->u.dentry->d_name.name);
+
+ inode = a->u.dentry->d_inode;
+ if (inode) {
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ }
+ break;
+ }
+ case LSM_AUDIT_DATA_INODE: {
+ struct dentry *dentry;
+ struct inode *inode;
+
+ inode = a->u.inode;
+ dentry = d_find_alias(inode);
+ if (dentry) {
+ audit_log_format(ab, " name=");
+ audit_log_untrustedstring(ab,
+ dentry->d_name.name);
+ dput(dentry);
+ }
+ audit_log_format(ab, " dev=");
+ audit_log_untrustedstring(ab, inode->i_sb->s_id);
+ audit_log_format(ab, " ino=%lu", inode->i_ino);
+ break;
+ }
case LSM_AUDIT_DATA_TASK:
tsk = a->u.tsk;
- if (tsk && tsk->pid) {
- audit_log_format(ab, " pid=%d comm=", tsk->pid);
- audit_log_untrustedstring(ab, tsk->comm);
+ if (tsk) {
+ pid_t pid = task_pid_nr(tsk);
+ if (pid) {
+ audit_log_format(ab, " pid=%d comm=", pid);
+ audit_log_untrustedstring(ab, tsk->comm);
+ }
}
break;
case LSM_AUDIT_DATA_NET:
- if (a->u.net.sk) {
- struct sock *sk = a->u.net.sk;
+ if (a->u.net->sk) {
+ struct sock *sk = a->u.net->sk;
struct unix_sock *u;
int len = 0;
char *p = NULL;
@@ -281,26 +305,23 @@ static void dump_common_audit_data(struct audit_buffer *ab,
"faddr", "fport");
break;
}
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *inet6 = inet6_sk(sk);
- print_ipv6_addr(ab, &inet6->rcv_saddr,
+ print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
inet->inet_sport,
"laddr", "lport");
- print_ipv6_addr(ab, &inet6->daddr,
+ print_ipv6_addr(ab, &sk->sk_v6_daddr,
inet->inet_dport,
"faddr", "fport");
break;
}
+#endif
case AF_UNIX:
u = unix_sk(sk);
- if (u->dentry) {
- struct path path = {
- .dentry = u->dentry,
- .mnt = u->mnt
- };
- audit_log_d_path(ab, "path=", &path);
+ if (u->path.dentry) {
+ audit_log_d_path(ab, " path=", &u->path);
break;
}
if (!u->addr)
@@ -316,29 +337,29 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
}
- switch (a->u.net.family) {
+ switch (a->u.net->family) {
case AF_INET:
- print_ipv4_addr(ab, a->u.net.v4info.saddr,
- a->u.net.sport,
+ print_ipv4_addr(ab, a->u.net->v4info.saddr,
+ a->u.net->sport,
"saddr", "src");
- print_ipv4_addr(ab, a->u.net.v4info.daddr,
- a->u.net.dport,
+ print_ipv4_addr(ab, a->u.net->v4info.daddr,
+ a->u.net->dport,
"daddr", "dest");
break;
case AF_INET6:
- print_ipv6_addr(ab, &a->u.net.v6info.saddr,
- a->u.net.sport,
+ print_ipv6_addr(ab, &a->u.net->v6info.saddr,
+ a->u.net->sport,
"saddr", "src");
- print_ipv6_addr(ab, &a->u.net.v6info.daddr,
- a->u.net.dport,
+ print_ipv6_addr(ab, &a->u.net->v6info.daddr,
+ a->u.net->dport,
"daddr", "dest");
break;
}
- if (a->u.net.netif > 0) {
+ if (a->u.net->netif > 0) {
struct net_device *dev;
/* NOTE: we always use init's namespace */
- dev = dev_get_by_index(&init_net, a->u.net.netif);
+ dev = dev_get_by_index(&init_net, a->u.net->netif);
if (dev) {
audit_log_format(ab, " netif=%s", dev->name);
dev_put(dev);
@@ -364,29 +385,34 @@ static void dump_common_audit_data(struct audit_buffer *ab,
/**
* common_lsm_audit - generic LSM auditing function
* @a: auxiliary audit data
+ * @pre_audit: lsm-specific pre-audit callback
+ * @post_audit: lsm-specific post-audit callback
*
* setup the audit buffer for common security information
* uses callback to print LSM specific information
*/
-void common_lsm_audit(struct common_audit_data *a)
+void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *))
{
struct audit_buffer *ab;
if (a == NULL)
return;
/* we use GFP_ATOMIC so we won't sleep */
- ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC);
+ ab = audit_log_start(current->audit_context, GFP_ATOMIC | __GFP_NOWARN,
+ AUDIT_AVC);
if (ab == NULL)
return;
- if (a->lsm_pre_audit)
- a->lsm_pre_audit(ab, a);
+ if (pre_audit)
+ pre_audit(ab, a);
dump_common_audit_data(ab, a);
- if (a->lsm_post_audit)
- a->lsm_post_audit(ab, a);
+ if (post_audit)
+ post_audit(ab, a);
audit_log_end(ab);
}
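The LSM-specific callbacks are now passed to common_lsm_audit() as explicit function pointers instead of living inside common_audit_data, which keeps the on-stack structure within the size asserted by the BUILD_BUG_ON above. A compact userspace analogue of that calling convention, with hypothetical names:

	#include <stdio.h>

	struct audit_ctx { const char *op; };

	static void dump_common(struct audit_ctx *ctx)
	{
		printf("op=%s", ctx->op);
	}

	/* Callbacks travel as parameters, not as struct members. */
	static void emit_record(struct audit_ctx *ctx,
				void (*pre)(struct audit_ctx *),
				void (*post)(struct audit_ctx *))
	{
		if (pre)
			pre(ctx);
		dump_common(ctx);
		if (post)
			post(ctx);
		printf("\n");
	}

	static void my_pre(struct audit_ctx *ctx)
	{
		(void)ctx;
		printf("avc: denied ");
	}

	static void my_post(struct audit_ctx *ctx)
	{
		(void)ctx;
		printf(" result=0");
	}

	int main(void)
	{
		struct audit_ctx ctx = { .op = "open" };

		emit_record(&ctx, my_pre, my_post);
		return 0;
	}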
diff --git a/security/min_addr.c b/security/min_addr.c
index e86f297522b..f728728f193 100644
--- a/security/min_addr.c
+++ b/security/min_addr.c
@@ -33,7 +33,7 @@ int mmap_min_addr_handler(struct ctl_table *table, int write,
{
int ret;
- if (!capable(CAP_SYS_RAWIO))
+ if (write && !capable(CAP_SYS_RAWIO))
return -EPERM;
ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);
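The handler change restricts the CAP_SYS_RAWIO requirement to writes, so unprivileged readers of mmap_min_addr are no longer refused. A stand-alone sketch of that gate with a stubbed capability check (illustrative only, not the kernel handler):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdio.h>

	static bool capable_sys_rawio = false;		/* stub for capable(CAP_SYS_RAWIO) */
	static unsigned long mmap_min_addr = 65536;

	/* Reads always succeed; only writes require the capability. */
	static int min_addr_handler(bool write, unsigned long *val)
	{
		if (write && !capable_sys_rawio)
			return -EPERM;
		if (write)
			mmap_min_addr = *val;
		else
			*val = mmap_min_addr;
		return 0;
	}

	int main(void)
	{
		unsigned long v = 4096;

		printf("read  -> %d\n", min_addr_handler(false, &v));	/* 0 */
		printf("write -> %d\n", min_addr_handler(true, &v));	/* -EPERM */
		return 0;
	}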
diff --git a/security/security.c b/security/security.c
index 24e060be9fa..31614e9e96e 100644
--- a/security/security.c
+++ b/security/security.c
@@ -12,23 +12,33 @@
*/
#include <linux/capability.h>
+#include <linux/dcache.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/security.h>
+#include <linux/integrity.h>
#include <linux/ima.h>
+#include <linux/evm.h>
+#include <linux/fsnotify.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/personality.h>
+#include <linux/backing-dev.h>
+#include <net/flow.h>
+
+#define MAX_LSM_EVM_XATTR 2
/* Boot-time LSM user choice */
static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] =
CONFIG_DEFAULT_SECURITY;
-/* things that live in capability.c */
-extern struct security_operations default_security_ops;
-extern void security_fixup_ops(struct security_operations *ops);
-
-struct security_operations *security_ops; /* Initialized to NULL */
+static struct security_operations *security_ops;
+static struct security_operations default_security_ops = {
+ .name = "default",
+};
-static inline int verify(struct security_operations *ops)
+static inline int __init verify(struct security_operations *ops)
{
/* verify the security_operations structure exists */
if (!ops)
@@ -63,6 +73,11 @@ int __init security_init(void)
return 0;
}
+void reset_security_ops(void)
+{
+ security_ops = &default_security_ops;
+}
+
/* Save user chosen LSM */
static int __init choose_lsm(char *str)
{
@@ -82,20 +97,12 @@ __setup("security=", choose_lsm);
* Return true if:
* -The passed LSM is the one chosen by user at boot time,
* -or the passed LSM is configured as the default and the user did not
- * choose an alternate LSM at boot time,
- * -or there is no default LSM set and the user didn't specify a
- * specific LSM and we're the first to ask for registration permission,
- * -or the passed LSM is currently loaded.
+ * choose an alternate LSM at boot time.
* Otherwise, return false.
*/
int __init security_module_enable(struct security_operations *ops)
{
- if (!*chosen_lsm)
- strncpy(chosen_lsm, ops->name, SECURITY_NAME_MAX);
- else if (strncmp(ops->name, chosen_lsm, SECURITY_NAME_MAX))
- return 0;
-
- return 1;
+ return !strcmp(ops->name, chosen_lsm);
}
/**
@@ -110,7 +117,7 @@ int __init security_module_enable(struct security_operations *ops)
* If there is already a security module registered with the kernel,
* an error will be returned. Otherwise %0 is returned on success.
*/
-int register_security(struct security_operations *ops)
+int __init register_security(struct security_operations *ops)
{
if (verify(ops)) {
printk(KERN_DEBUG "%s could not verify "
@@ -130,11 +137,23 @@ int register_security(struct security_operations *ops)
int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+ int rc;
+ rc = yama_ptrace_access_check(child, mode);
+ if (rc)
+ return rc;
+#endif
return security_ops->ptrace_access_check(child, mode);
}
int security_ptrace_traceme(struct task_struct *parent)
{
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+ int rc;
+ rc = yama_ptrace_traceme(parent);
+ if (rc)
+ return rc;
+#endif
return security_ops->ptrace_traceme(parent);
}
@@ -155,42 +174,16 @@ int security_capset(struct cred *new, const struct cred *old,
effective, inheritable, permitted);
}
-int security_capable(int cap)
+int security_capable(const struct cred *cred, struct user_namespace *ns,
+ int cap)
{
- return security_ops->capable(current, current_cred(), cap,
- SECURITY_CAP_AUDIT);
+ return security_ops->capable(cred, ns, cap, SECURITY_CAP_AUDIT);
}
-int security_real_capable(struct task_struct *tsk, int cap)
+int security_capable_noaudit(const struct cred *cred, struct user_namespace *ns,
+ int cap)
{
- const struct cred *cred;
- int ret;
-
- cred = get_task_cred(tsk);
- ret = security_ops->capable(tsk, cred, cap, SECURITY_CAP_AUDIT);
- put_cred(cred);
- return ret;
-}
-
-int security_real_capable_noaudit(struct task_struct *tsk, int cap)
-{
- const struct cred *cred;
- int ret;
-
- cred = get_task_cred(tsk);
- ret = security_ops->capable(tsk, cred, cap, SECURITY_CAP_NOAUDIT);
- put_cred(cred);
- return ret;
-}
-
-int security_acct(struct file *file)
-{
- return security_ops->acct(file);
-}
-
-int security_sysctl(struct ctl_table *table, int op)
-{
- return security_ops->sysctl(table, op);
+ return security_ops->capable(cred, ns, cap, SECURITY_CAP_NOAUDIT);
}
int security_quotactl(int cmds, int type, int id, struct super_block *sb)
@@ -208,30 +201,16 @@ int security_syslog(int type)
return security_ops->syslog(type);
}
-int security_settime(struct timespec *ts, struct timezone *tz)
+int security_settime(const struct timespec *ts, const struct timezone *tz)
{
return security_ops->settime(ts, tz);
}
-int security_vm_enough_memory(long pages)
-{
- WARN_ON(current->mm == NULL);
- return security_ops->vm_enough_memory(current->mm, pages);
-}
-
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- WARN_ON(mm == NULL);
return security_ops->vm_enough_memory(mm, pages);
}
-int security_vm_enough_memory_kern(long pages)
-{
- /* If current->mm is a kernel thread then we will pass NULL,
- for this specific case that is fine */
- return security_ops->vm_enough_memory(current->mm, pages);
-}
-
int security_bprm_set_creds(struct linux_binprm *bprm)
{
return security_ops->bprm_set_creds(bprm);
@@ -278,6 +257,11 @@ int security_sb_copy_data(char *orig, char *copy)
}
EXPORT_SYMBOL(security_sb_copy_data);
+int security_sb_remount(struct super_block *sb, void *data)
+{
+ return security_ops->sb_remount(sb, data);
+}
+
int security_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
return security_ops->sb_kern_mount(sb, flags, data);
@@ -293,63 +277,36 @@ int security_sb_statfs(struct dentry *dentry)
return security_ops->sb_statfs(dentry);
}
-int security_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data)
+int security_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
return security_ops->sb_mount(dev_name, path, type, flags, data);
}
-int security_sb_check_sb(struct vfsmount *mnt, struct path *path)
-{
- return security_ops->sb_check_sb(mnt, path);
-}
-
int security_sb_umount(struct vfsmount *mnt, int flags)
{
return security_ops->sb_umount(mnt, flags);
}
-void security_sb_umount_close(struct vfsmount *mnt)
-{
- security_ops->sb_umount_close(mnt);
-}
-
-void security_sb_umount_busy(struct vfsmount *mnt)
-{
- security_ops->sb_umount_busy(mnt);
-}
-
-void security_sb_post_remount(struct vfsmount *mnt, unsigned long flags, void *data)
-{
- security_ops->sb_post_remount(mnt, flags, data);
-}
-
-void security_sb_post_addmount(struct vfsmount *mnt, struct path *mountpoint)
-{
- security_ops->sb_post_addmount(mnt, mountpoint);
-}
-
int security_sb_pivotroot(struct path *old_path, struct path *new_path)
{
return security_ops->sb_pivotroot(old_path, new_path);
}
-void security_sb_post_pivotroot(struct path *old_path, struct path *new_path)
-{
- security_ops->sb_post_pivotroot(old_path, new_path);
-}
-
int security_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts)
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
{
- return security_ops->sb_set_mnt_opts(sb, opts);
+ return security_ops->sb_set_mnt_opts(sb, opts, kern_flags,
+ set_kern_flags);
}
EXPORT_SYMBOL(security_sb_set_mnt_opts);
-void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{
- security_ops->sb_clone_mnt_opts(oldsb, newsb);
+ return security_ops->sb_clone_mnt_opts(oldsb, newsb);
}
EXPORT_SYMBOL(security_sb_clone_mnt_opts);
@@ -361,70 +318,110 @@ EXPORT_SYMBOL(security_sb_parse_opts_str);
int security_inode_alloc(struct inode *inode)
{
- int ret;
-
inode->i_security = NULL;
- ret = security_ops->inode_alloc_security(inode);
- if (ret)
- return ret;
- ret = ima_inode_alloc(inode);
- if (ret)
- security_inode_free(inode);
- return ret;
+ return security_ops->inode_alloc_security(inode);
}
void security_inode_free(struct inode *inode)
{
- ima_inode_free(inode);
+ integrity_inode_free(inode);
security_ops->inode_free_security(inode);
}
+int security_dentry_init_security(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen)
+{
+ return security_ops->dentry_init_security(dentry, mode, name,
+ ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_dentry_init_security);
+
int security_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value, size_t *len)
+ const struct qstr *qstr,
+ const initxattrs initxattrs, void *fs_data)
{
+ struct xattr new_xattrs[MAX_LSM_EVM_XATTR + 1];
+ struct xattr *lsm_xattr, *evm_xattr, *xattr;
+ int ret;
+
if (unlikely(IS_PRIVATE(inode)))
- return -EOPNOTSUPP;
- return security_ops->inode_init_security(inode, dir, name, value, len);
+ return 0;
+
+ if (!initxattrs)
+ return security_ops->inode_init_security(inode, dir, qstr,
+ NULL, NULL, NULL);
+ memset(new_xattrs, 0, sizeof(new_xattrs));
+ lsm_xattr = new_xattrs;
+ ret = security_ops->inode_init_security(inode, dir, qstr,
+ &lsm_xattr->name,
+ &lsm_xattr->value,
+ &lsm_xattr->value_len);
+ if (ret)
+ goto out;
+
+ evm_xattr = lsm_xattr + 1;
+ ret = evm_inode_init_security(inode, lsm_xattr, evm_xattr);
+ if (ret)
+ goto out;
+ ret = initxattrs(inode, new_xattrs, fs_data);
+out:
+ for (xattr = new_xattrs; xattr->value != NULL; xattr++)
+ kfree(xattr->value);
+ return (ret == -EOPNOTSUPP) ? 0 : ret;
}
EXPORT_SYMBOL(security_inode_init_security);
+int security_old_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr, const char **name,
+ void **value, size_t *len)
+{
+ if (unlikely(IS_PRIVATE(inode)))
+ return -EOPNOTSUPP;
+ return security_ops->inode_init_security(inode, dir, qstr, name, value,
+ len);
+}
+EXPORT_SYMBOL(security_old_inode_init_security);
+
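After this rework a filesystem receives the LSM xattr and an optional EVM xattr as one array whose end is marked by a NULL value, writes them through its initxattrs callback in a single operation, and the caller frees the values afterwards. A reduced userspace model of that contract, using hypothetical names:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct xattr_entry { const char *name; void *value; size_t value_len; };

	/* Stand-in for a filesystem's initxattrs callback: consume until value == NULL. */
	static int write_xattrs(const struct xattr_entry *xattrs)
	{
		for (const struct xattr_entry *x = xattrs; x->value; x++)
			printf("set %s (%zu bytes)\n", x->name, x->value_len);
		return 0;
	}

	int main(void)
	{
		struct xattr_entry xattrs[3];	/* LSM + EVM + terminator */

		memset(xattrs, 0, sizeof(xattrs));
		xattrs[0] = (struct xattr_entry){ "security.selinux", strdup("label"), 6 };
		xattrs[1] = (struct xattr_entry){ "security.evm", strdup("hmac"), 5 };

		write_xattrs(xattrs);

		for (struct xattr_entry *x = xattrs; x->value; x++)
			free(x->value);		/* caller frees the values, as above */
		return 0;
	}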
#ifdef CONFIG_SECURITY_PATH
-int security_path_mknod(struct path *path, struct dentry *dentry, int mode,
+int security_path_mknod(struct path *dir, struct dentry *dentry, umode_t mode,
unsigned int dev)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_mknod(path, dentry, mode, dev);
+ return security_ops->path_mknod(dir, dentry, mode, dev);
}
EXPORT_SYMBOL(security_path_mknod);
-int security_path_mkdir(struct path *path, struct dentry *dentry, int mode)
+int security_path_mkdir(struct path *dir, struct dentry *dentry, umode_t mode)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_mkdir(path, dentry, mode);
+ return security_ops->path_mkdir(dir, dentry, mode);
}
+EXPORT_SYMBOL(security_path_mkdir);
-int security_path_rmdir(struct path *path, struct dentry *dentry)
+int security_path_rmdir(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_rmdir(path, dentry);
+ return security_ops->path_rmdir(dir, dentry);
}
-int security_path_unlink(struct path *path, struct dentry *dentry)
+int security_path_unlink(struct path *dir, struct dentry *dentry)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_unlink(path, dentry);
+ return security_ops->path_unlink(dir, dentry);
}
+EXPORT_SYMBOL(security_path_unlink);
-int security_path_symlink(struct path *path, struct dentry *dentry,
+int security_path_symlink(struct path *dir, struct dentry *dentry,
const char *old_name)
{
- if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(dir->dentry->d_inode)))
return 0;
- return security_ops->path_symlink(path, dentry, old_name);
+ return security_ops->path_symlink(dir, dentry, old_name);
}
int security_path_link(struct dentry *old_dentry, struct path *new_dir,
@@ -436,32 +433,40 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
}
int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir, struct dentry *new_dentry)
+ struct path *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
(new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
return 0;
+
+ if (flags & RENAME_EXCHANGE) {
+ int err = security_ops->path_rename(new_dir, new_dentry,
+ old_dir, old_dentry);
+ if (err)
+ return err;
+ }
+
return security_ops->path_rename(old_dir, old_dentry, new_dir,
new_dentry);
}
+EXPORT_SYMBOL(security_path_rename);
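For RENAME_EXCHANGE both objects move, so the hook is evaluated once per direction and the first refusal wins. A generic sketch of that symmetric check with a stubbed permission function (hypothetical names):

	#include <errno.h>
	#include <stdio.h>
	#include <string.h>

	#define RENAME_EXCHANGE 0x2	/* matches the uapi flag value */

	/* Stub policy: only renames out of "protected/" are refused. */
	static int rename_allowed(const char *from, const char *to)
	{
		(void)to;
		return strncmp(from, "protected/", 10) ? 0 : -EACCES;
	}

	static int check_rename(const char *a, const char *b, unsigned int flags)
	{
		if (flags & RENAME_EXCHANGE) {
			int err = rename_allowed(b, a);	/* reverse direction first */

			if (err)
				return err;
		}
		return rename_allowed(a, b);
	}

	int main(void)
	{
		printf("%d\n", check_rename("tmp/x", "protected/y", 0));		/* 0 */
		printf("%d\n", check_rename("tmp/x", "protected/y", RENAME_EXCHANGE));	/* -EACCES */
		return 0;
	}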
-int security_path_truncate(struct path *path, loff_t length,
- unsigned int time_attrs)
+int security_path_truncate(struct path *path)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
return 0;
- return security_ops->path_truncate(path, length, time_attrs);
+ return security_ops->path_truncate(path);
}
-int security_path_chmod(struct dentry *dentry, struct vfsmount *mnt,
- mode_t mode)
+int security_path_chmod(struct path *path, umode_t mode)
{
- if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
return 0;
- return security_ops->path_chmod(dentry, mnt, mode);
+ return security_ops->path_chmod(path, mode);
}
-int security_path_chown(struct path *path, uid_t uid, gid_t gid)
+int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
return 0;
@@ -474,7 +479,7 @@ int security_path_chroot(struct path *path)
}
#endif
-int security_inode_create(struct inode *dir, struct dentry *dentry, int mode)
+int security_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
@@ -505,7 +510,7 @@ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
return security_ops->inode_symlink(dir, dentry, old_name);
}
-int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+int security_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
@@ -520,7 +525,7 @@ int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
return security_ops->inode_rmdir(dir, dentry);
}
-int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
if (unlikely(IS_PRIVATE(dir)))
return 0;
@@ -528,11 +533,20 @@ int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev
}
int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
(new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
return 0;
+
+ if (flags & RENAME_EXCHANGE) {
+ int err = security_ops->inode_rename(new_dir, new_dentry,
+ old_dir, old_dentry);
+ if (err)
+ return err;
+ }
+
return security_ops->inode_rename(old_dir, old_dentry,
new_dir, new_dentry);
}
@@ -560,9 +574,14 @@ int security_inode_permission(struct inode *inode, int mask)
int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_setattr(dentry, attr);
+ ret = security_ops->inode_setattr(dentry, attr);
+ if (ret)
+ return ret;
+ return evm_inode_setattr(dentry, attr);
}
EXPORT_SYMBOL_GPL(security_inode_setattr);
@@ -573,19 +592,20 @@ int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
return security_ops->inode_getattr(mnt, dentry);
}
-void security_inode_delete(struct inode *inode)
-{
- if (unlikely(IS_PRIVATE(inode)))
- return;
- security_ops->inode_delete(inode);
-}
-
int security_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_setxattr(dentry, name, value, size, flags);
+ ret = security_ops->inode_setxattr(dentry, name, value, size, flags);
+ if (ret)
+ return ret;
+ ret = ima_inode_setxattr(dentry, name, value, size);
+ if (ret)
+ return ret;
+ return evm_inode_setxattr(dentry, name, value, size);
}
void security_inode_post_setxattr(struct dentry *dentry, const char *name,
@@ -594,6 +614,7 @@ void security_inode_post_setxattr(struct dentry *dentry, const char *name,
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return;
security_ops->inode_post_setxattr(dentry, name, value, size, flags);
+ evm_inode_post_setxattr(dentry, name, value, size);
}
int security_inode_getxattr(struct dentry *dentry, const char *name)
@@ -612,9 +633,17 @@ int security_inode_listxattr(struct dentry *dentry)
int security_inode_removexattr(struct dentry *dentry, const char *name)
{
+ int ret;
+
if (unlikely(IS_PRIVATE(dentry->d_inode)))
return 0;
- return security_ops->inode_removexattr(dentry, name);
+ ret = security_ops->inode_removexattr(dentry, name);
+ if (ret)
+ return ret;
+ ret = ima_inode_removexattr(dentry, name);
+ if (ret)
+ return ret;
+ return evm_inode_removexattr(dentry, name);
}
int security_inode_need_killpriv(struct dentry *dentry)
@@ -630,14 +659,14 @@ int security_inode_killpriv(struct dentry *dentry)
int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc)
{
if (unlikely(IS_PRIVATE(inode)))
- return 0;
+ return -EOPNOTSUPP;
return security_ops->inode_getsecurity(inode, name, buffer, alloc);
}
int security_inode_setsecurity(struct inode *inode, const char *name, const void *value, size_t size, int flags)
{
if (unlikely(IS_PRIVATE(inode)))
- return 0;
+ return -EOPNOTSUPP;
return security_ops->inode_setsecurity(inode, name, value, size, flags);
}
@@ -647,6 +676,7 @@ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer
return 0;
return security_ops->inode_listsecurity(inode, buffer, buffer_size);
}
+EXPORT_SYMBOL(security_inode_listsecurity);
void security_inode_getsecid(const struct inode *inode, u32 *secid)
{
@@ -655,7 +685,13 @@ void security_inode_getsecid(const struct inode *inode, u32 *secid)
int security_file_permission(struct file *file, int mask)
{
- return security_ops->file_permission(file, mask);
+ int ret;
+
+ ret = security_ops->file_permission(file, mask);
+ if (ret)
+ return ret;
+
+ return fsnotify_perm(file, mask);
}
int security_file_alloc(struct file *file)
@@ -666,8 +702,6 @@ int security_file_alloc(struct file *file)
void security_file_free(struct file *file)
{
security_ops->file_free_security(file);
- if (file->f_dentry)
- ima_file_free(file);
}
int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
@@ -675,18 +709,56 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return security_ops->file_ioctl(file, cmd, arg);
}
-int security_file_mmap(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags,
- unsigned long addr, unsigned long addr_only)
+static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
{
- int ret;
+ /*
+ * Do we have PROT_READ and does the application expect
+ * it to imply PROT_EXEC? If not, nothing to talk about...
+ */
+ if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
+ return prot;
+ if (!(current->personality & READ_IMPLIES_EXEC))
+ return prot;
+ /*
+ * if that's an anonymous mapping, let it.
+ */
+ if (!file)
+ return prot | PROT_EXEC;
+ /*
+ * ditto if it's not on noexec mount, except that on !MMU we need
+ * BDI_CAP_EXEC_MAP (== VM_MAYEXEC) in this case
+ */
+ if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
+#ifndef CONFIG_MMU
+ unsigned long caps = 0;
+ struct address_space *mapping = file->f_mapping;
+ if (mapping && mapping->backing_dev_info)
+ caps = mapping->backing_dev_info->capabilities;
+ if (!(caps & BDI_CAP_EXEC_MAP))
+ return prot;
+#endif
+ return prot | PROT_EXEC;
+ }
+ /* anything on noexec mount won't get PROT_EXEC */
+ return prot;
+}
- ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only);
+int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags)
+{
+ int ret;
+ ret = security_ops->mmap_file(file, prot,
+ mmap_prot(file, prot), flags);
if (ret)
return ret;
return ima_file_mmap(file, prot);
}
+int security_mmap_addr(unsigned long addr)
+{
+ return security_ops->mmap_addr(addr);
+}
+
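mmap_prot() reports the protection the kernel will actually apply: for tasks whose personality sets READ_IMPLIES_EXEC, a plain PROT_READ mapping is upgraded to include PROT_EXEC unless it comes from a noexec mount. A reduced userspace model of that decision (it omits the !MMU backing-dev case from the hunk above):

	#include <stdbool.h>
	#include <stdio.h>
	#include <sys/mman.h>

	static unsigned long adjust_prot(unsigned long prot, bool read_implies_exec,
					 bool anonymous, bool noexec_mount)
	{
		if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
			return prot;			/* no PROT_READ, or already exec */
		if (!read_implies_exec)
			return prot;
		if (anonymous || !noexec_mount)
			return prot | PROT_EXEC;	/* legacy binaries expect this */
		return prot;				/* noexec mounts never gain exec */
	}

	int main(void)
	{
		printf("%#lx\n", adjust_prot(PROT_READ, true, false, false));	/* 0x5 */
		printf("%#lx\n", adjust_prot(PROT_READ, true, false, true));	/* 0x1 */
		return 0;
	}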
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot)
{
@@ -719,9 +791,15 @@ int security_file_receive(struct file *file)
return security_ops->file_receive(file);
}
-int security_dentry_open(struct file *file, const struct cred *cred)
+int security_file_open(struct file *file, const struct cred *cred)
{
- return security_ops->dentry_open(file, cred);
+ int ret;
+
+ ret = security_ops->file_open(file, cred);
+ if (ret)
+ return ret;
+
+ return fsnotify_perm(file, MAY_OPEN);
}
int security_task_create(unsigned long clone_flags)
@@ -729,6 +807,14 @@ int security_task_create(unsigned long clone_flags)
return security_ops->task_create(clone_flags);
}
+void security_task_free(struct task_struct *task)
+{
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+ yama_task_free(task);
+#endif
+ security_ops->task_free(task);
+}
+
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
return security_ops->cred_alloc_blank(cred, gfp);
@@ -744,11 +830,6 @@ int security_prepare_creds(struct cred *new, const struct cred *old, gfp_t gfp)
return security_ops->cred_prepare(new, old, gfp);
}
-void security_commit_creds(struct cred *new, const struct cred *old)
-{
- security_ops->cred_commit(new, old);
-}
-
void security_transfer_creds(struct cred *new, const struct cred *old)
{
security_ops->cred_transfer(new, old);
@@ -769,9 +850,14 @@ int security_kernel_module_request(char *kmod_name)
return security_ops->kernel_module_request(kmod_name);
}
-int security_task_setuid(uid_t id0, uid_t id1, uid_t id2, int flags)
+int security_kernel_module_from_file(struct file *file)
{
- return security_ops->task_setuid(id0, id1, id2, flags);
+ int ret;
+
+ ret = security_ops->kernel_module_from_file(file);
+ if (ret)
+ return ret;
+ return ima_module_check(file);
}
int security_task_fix_setuid(struct cred *new, const struct cred *old,
@@ -780,11 +866,6 @@ int security_task_fix_setuid(struct cred *new, const struct cred *old,
return security_ops->task_fix_setuid(new, old, flags);
}
-int security_task_setgid(gid_t id0, gid_t id1, gid_t id2, int flags)
-{
- return security_ops->task_setgid(id0, id1, id2, flags);
-}
-
int security_task_setpgid(struct task_struct *p, pid_t pgid)
{
return security_ops->task_setpgid(p, pgid);
@@ -806,11 +887,6 @@ void security_task_getsecid(struct task_struct *p, u32 *secid)
}
EXPORT_SYMBOL(security_task_getsecid);
-int security_task_setgroups(struct group_info *group_info)
-{
- return security_ops->task_setgroups(group_info);
-}
-
int security_task_setnice(struct task_struct *p, int nice)
{
return security_ops->task_setnice(p, nice);
@@ -826,15 +902,15 @@ int security_task_getioprio(struct task_struct *p)
return security_ops->task_getioprio(p);
}
-int security_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+int security_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
{
- return security_ops->task_setrlimit(resource, new_rlim);
+ return security_ops->task_setrlimit(p, resource, new_rlim);
}
-int security_task_setscheduler(struct task_struct *p,
- int policy, struct sched_param *lp)
+int security_task_setscheduler(struct task_struct *p)
{
- return security_ops->task_setscheduler(p, policy, lp);
+ return security_ops->task_setscheduler(p);
}
int security_task_getscheduler(struct task_struct *p)
@@ -861,6 +937,12 @@ int security_task_wait(struct task_struct *p)
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+ int rc;
+ rc = yama_task_prctl(option, arg2, arg3, arg4, arg5);
+ if (rc != -ENOSYS)
+ return rc;
+#endif
return security_ops->task_prctl(option, arg2, arg3, arg4, arg5);
}
@@ -995,11 +1077,11 @@ int security_netlink_send(struct sock *sk, struct sk_buff *skb)
return security_ops->netlink_send(sk, skb);
}
-int security_netlink_recv(struct sk_buff *skb, int cap)
+int security_ismaclabel(const char *name)
{
- return security_ops->netlink_recv(skb, cap);
+ return security_ops->ismaclabel(name);
}
-EXPORT_SYMBOL(security_netlink_recv);
+EXPORT_SYMBOL(security_ismaclabel);
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
@@ -1039,8 +1121,7 @@ EXPORT_SYMBOL(security_inode_getsecctx);
#ifdef CONFIG_SECURITY_NETWORK
-int security_unix_stream_connect(struct socket *sock, struct socket *other,
- struct sock *newsk)
+int security_unix_stream_connect(struct sock *sock, struct sock *other, struct sock *newsk)
{
return security_ops->unix_stream_connect(sock, other, newsk);
}
@@ -1152,10 +1233,11 @@ void security_sk_clone(const struct sock *sk, struct sock *newsk)
{
security_ops->sk_clone_security(sk, newsk);
}
+EXPORT_SYMBOL(security_sk_clone);
void security_sk_classify_flow(struct sock *sk, struct flowi *fl)
{
- security_ops->sk_getsecid(sk, &fl->secid);
+ security_ops->sk_getsecid(sk, &fl->flowi_secid);
}
EXPORT_SYMBOL(security_sk_classify_flow);
@@ -1190,31 +1272,74 @@ void security_inet_conn_established(struct sock *sk,
security_ops->inet_conn_established(sk, skb);
}
+int security_secmark_relabel_packet(u32 secid)
+{
+ return security_ops->secmark_relabel_packet(secid);
+}
+EXPORT_SYMBOL(security_secmark_relabel_packet);
+
+void security_secmark_refcount_inc(void)
+{
+ security_ops->secmark_refcount_inc();
+}
+EXPORT_SYMBOL(security_secmark_refcount_inc);
+
+void security_secmark_refcount_dec(void)
+{
+ security_ops->secmark_refcount_dec();
+}
+EXPORT_SYMBOL(security_secmark_refcount_dec);
+
+int security_tun_dev_alloc_security(void **security)
+{
+ return security_ops->tun_dev_alloc_security(security);
+}
+EXPORT_SYMBOL(security_tun_dev_alloc_security);
+
+void security_tun_dev_free_security(void *security)
+{
+ security_ops->tun_dev_free_security(security);
+}
+EXPORT_SYMBOL(security_tun_dev_free_security);
+
int security_tun_dev_create(void)
{
return security_ops->tun_dev_create();
}
EXPORT_SYMBOL(security_tun_dev_create);
-void security_tun_dev_post_create(struct sock *sk)
+int security_tun_dev_attach_queue(void *security)
{
- return security_ops->tun_dev_post_create(sk);
+ return security_ops->tun_dev_attach_queue(security);
}
-EXPORT_SYMBOL(security_tun_dev_post_create);
+EXPORT_SYMBOL(security_tun_dev_attach_queue);
-int security_tun_dev_attach(struct sock *sk)
+int security_tun_dev_attach(struct sock *sk, void *security)
{
- return security_ops->tun_dev_attach(sk);
+ return security_ops->tun_dev_attach(sk, security);
}
EXPORT_SYMBOL(security_tun_dev_attach);
+int security_tun_dev_open(void *security)
+{
+ return security_ops->tun_dev_open(security);
+}
+EXPORT_SYMBOL(security_tun_dev_open);
+
+void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+ security_ops->skb_owned_by(skb, sk);
+}
+
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
-int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
- return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx);
+ return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);
@@ -1235,22 +1360,17 @@ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
return security_ops->xfrm_policy_delete_security(ctx);
}
-int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
{
- return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0);
+ return security_ops->xfrm_state_alloc(x, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);
int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
- if (!polsec)
- return 0;
- /*
- * We want the context to be taken from secid which is usually
- * from the sock.
- */
- return security_ops->xfrm_state_alloc_security(x, NULL, secid);
+ return security_ops->xfrm_state_alloc_acquire(x, polsec, secid);
}
int security_xfrm_state_delete(struct xfrm_state *x)
@@ -1270,7 +1390,8 @@ int security_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
}
int security_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, struct flowi *fl)
+ struct xfrm_policy *xp,
+ const struct flowi *fl)
{
return security_ops->xfrm_state_pol_flow_match(x, xp, fl);
}
@@ -1282,7 +1403,7 @@ int security_xfrm_decode_session(struct sk_buff *skb, u32 *secid)
void security_skb_classify_flow(struct sk_buff *skb, struct flowi *fl)
{
- int rc = security_ops->xfrm_decode_session(skb, &fl->secid, 0);
+ int rc = security_ops->xfrm_decode_session(skb, &fl->flowi_secid, 0);
BUG_ON(rc);
}
@@ -1304,7 +1425,7 @@ void security_key_free(struct key *key)
}
int security_key_permission(key_ref_t key_ref,
- const struct cred *cred, key_perm_t perm)
+ const struct cred *cred, unsigned perm)
{
return security_ops->key_permission(key_ref, cred, perm);
}
@@ -1314,13 +1435,6 @@ int security_key_getsecurity(struct key *key, char **_buffer)
return security_ops->key_getsecurity(key, _buffer);
}
-int security_key_session_to_parent(const struct cred *cred,
- const struct cred *parent_cred,
- struct key *key)
-{
- return security_ops->key_session_to_parent(cred, parent_cred, key);
-}
-
#endif /* CONFIG_KEYS */
#ifdef CONFIG_AUDIT
diff --git a/security/selinux/Makefile b/security/selinux/Makefile
index f013982df41..ad5cd76ec23 100644
--- a/security/selinux/Makefile
+++ b/security/selinux/Makefile
@@ -2,29 +2,24 @@
# Makefile for building the SELinux module as part of the kernel tree.
#
-obj-$(CONFIG_SECURITY_SELINUX) := selinux.o ss/
-
-selinux-y := avc.o \
- hooks.o \
- selinuxfs.o \
- netlink.o \
- nlmsgtab.o \
- netif.o \
- netnode.o \
- netport.o \
- exports.o
+obj-$(CONFIG_SECURITY_SELINUX) := selinux.o
+
+selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \
+ netnode.o netport.o exports.o \
+ ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \
+ ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o
selinux-$(CONFIG_SECURITY_NETWORK_XFRM) += xfrm.o
selinux-$(CONFIG_NETLABEL) += netlabel.o
-EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
+ccflags-y := -Isecurity/selinux -Isecurity/selinux/include
-$(obj)/avc.o: $(obj)/flask.h
+$(addprefix $(obj)/,$(selinux-y)): $(obj)/flask.h
quiet_cmd_flask = GEN $(obj)/flask.h $(obj)/av_permissions.h
cmd_flask = scripts/selinux/genheaders/genheaders $(obj)/flask.h $(obj)/av_permissions.h
-targets += flask.h
+targets += flask.h av_permissions.h
$(obj)/flask.h: $(src)/include/classmap.h FORCE
$(call if_changed,flask)
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index f2dde268165..a18f1fa6440 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -38,11 +38,7 @@
#define AVC_CACHE_RECLAIM 16
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
-#define avc_cache_stats_incr(field) \
-do { \
- per_cpu(avc_cache_stats, get_cpu()).field++; \
- put_cpu(); \
-} while (0)
+#define avc_cache_stats_incr(field) this_cpu_inc(avc_cache_stats.field)
#else
#define avc_cache_stats_incr(field) do {} while (0)
#endif
@@ -69,14 +65,8 @@ struct avc_cache {
};
struct avc_callback_node {
- int (*callback) (u32 event, u32 ssid, u32 tsid,
- u16 tclass, u32 perms,
- u32 *out_retained);
+ int (*callback) (u32 event);
u32 events;
- u32 ssid;
- u32 tsid;
- u16 tclass;
- u32 perms;
struct avc_callback_node *next;
};
@@ -198,11 +188,9 @@ int avc_get_hash_stats(char *page)
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
head = &avc_cache.slots[i];
if (!hlist_empty(head)) {
- struct hlist_node *next;
-
slots_used++;
chain_len = 0;
- hlist_for_each_entry_rcu(node, next, head, list)
+ hlist_for_each_entry_rcu(node, head, list)
chain_len++;
if (chain_len > max_chain_len)
max_chain_len = chain_len;
@@ -251,7 +239,6 @@ static inline int avc_reclaim_node(void)
int hvalue, try, ecx;
unsigned long flags;
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
@@ -263,7 +250,7 @@ static inline int avc_reclaim_node(void)
continue;
rcu_read_lock();
- hlist_for_each_entry(node, next, head, list) {
+ hlist_for_each_entry(node, head, list) {
avc_node_delete(node);
avc_cache_stats_incr(reclaims);
ecx++;
@@ -284,11 +271,10 @@ static struct avc_node *avc_alloc_node(void)
{
struct avc_node *node;
- node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
if (!node)
goto out;
- INIT_RCU_HEAD(&node->rhead);
INIT_HLIST_NODE(&node->list);
avc_cache_stats_incr(allocations);
@@ -312,11 +298,10 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
struct avc_node *node, *ret = NULL;
int hvalue;
struct hlist_head *head;
- struct hlist_node *next;
hvalue = avc_hash(ssid, tsid, tclass);
head = &avc_cache.slots[hvalue];
- hlist_for_each_entry_rcu(node, next, head, list) {
+ hlist_for_each_entry_rcu(node, head, list) {
if (ssid == node->ae.ssid &&
tclass == node->ae.tclass &&
tsid == node->ae.tsid) {
@@ -337,7 +322,7 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
* Look up an AVC entry that is valid for the
* (@ssid, @tsid), interpreting the permissions
* based on @tclass. If a valid AVC entry exists,
- * then this function return the avc_node.
+ * then this function returns the avc_node.
* Otherwise, this function returns NULL.
*/
static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
@@ -348,11 +333,10 @@ static struct avc_node *avc_lookup(u32 ssid, u32 tsid, u16 tclass)
node = avc_search_node(ssid, tsid, tclass);
if (node)
- avc_cache_stats_incr(hits);
- else
- avc_cache_stats_incr(misses);
+ return node;
- return node;
+ avc_cache_stats_incr(misses);
+ return NULL;
}
static int avc_latest_notif_update(int seqno, int is_insert)
@@ -406,7 +390,6 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
node = avc_alloc_node();
if (node) {
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
hvalue = avc_hash(ssid, tsid, tclass);
@@ -416,7 +399,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
- hlist_for_each_entry(pos, next, head, list) {
+ hlist_for_each_entry(pos, head, list) {
if (pos->ae.ssid == ssid &&
pos->ae.tsid == tsid &&
pos->ae.tclass == tclass) {
@@ -442,9 +425,9 @@ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, "avc: %s ",
- ad->selinux_audit_data.denied ? "denied" : "granted");
- avc_dump_av(ab, ad->selinux_audit_data.tclass,
- ad->selinux_audit_data.audited);
+ ad->selinux_audit_data->denied ? "denied" : "granted");
+ avc_dump_av(ab, ad->selinux_audit_data->tclass,
+ ad->selinux_audit_data->audited);
audit_log_format(ab, " for ");
}
@@ -458,89 +441,69 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, " ");
- avc_dump_query(ab, ad->selinux_audit_data.ssid,
- ad->selinux_audit_data.tsid,
- ad->selinux_audit_data.tclass);
+ avc_dump_query(ab, ad->selinux_audit_data->ssid,
+ ad->selinux_audit_data->tsid,
+ ad->selinux_audit_data->tclass);
+ if (ad->selinux_audit_data->denied) {
+ audit_log_format(ab, " permissive=%u",
+ ad->selinux_audit_data->result ? 0 : 1);
+ }
}
-/**
- * avc_audit - Audit the granting or denial of permissions.
- * @ssid: source security identifier
- * @tsid: target security identifier
- * @tclass: target security class
- * @requested: requested permissions
- * @avd: access vector decisions
- * @result: result from avc_has_perm_noaudit
- * @a: auxiliary audit data
- *
- * Audit the granting or denial of permissions in accordance
- * with the policy. This function is typically called by
- * avc_has_perm() after a permission check, but can also be
- * called directly by callers who use avc_has_perm_noaudit()
- * in order to separate the permission check from the auditing.
- * For example, this separation is useful when the permission check must
- * be performed under a lock, to allow the lock to be released
- * before calling the auditing code.
- */
-void avc_audit(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd, int result, struct common_audit_data *a)
+/* This is the slow part of avc audit with big stack footprint */
+noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+ struct common_audit_data *a,
+ unsigned flags)
{
struct common_audit_data stack_data;
- u32 denied, audited;
- denied = requested & ~avd->allowed;
- if (denied) {
- audited = denied;
- if (!(audited & avd->auditdeny))
- return;
- } else if (result) {
- audited = denied = requested;
- } else {
- audited = requested;
- if (!(audited & avd->auditallow))
- return;
- }
+ struct selinux_audit_data sad;
+
if (!a) {
a = &stack_data;
- memset(a, 0, sizeof(*a));
- a->type = LSM_AUDIT_NO_AUDIT;
+ a->type = LSM_AUDIT_DATA_NONE;
}
- a->selinux_audit_data.tclass = tclass;
- a->selinux_audit_data.requested = requested;
- a->selinux_audit_data.ssid = ssid;
- a->selinux_audit_data.tsid = tsid;
- a->selinux_audit_data.audited = audited;
- a->selinux_audit_data.denied = denied;
- a->lsm_pre_audit = avc_audit_pre_callback;
- a->lsm_post_audit = avc_audit_post_callback;
- common_lsm_audit(a);
+
+ /*
+ * When in a RCU walk do the audit on the RCU retry. This is because
+ * the collection of the dname in an inode audit message is not RCU
+ * safe. Note this may drop some audits when the situation changes
+ * during retry. However this is logically just as if the operation
+ * happened a little later.
+ */
+ if ((a->type == LSM_AUDIT_DATA_INODE) &&
+ (flags & MAY_NOT_BLOCK))
+ return -ECHILD;
+
+ sad.tclass = tclass;
+ sad.requested = requested;
+ sad.ssid = ssid;
+ sad.tsid = tsid;
+ sad.audited = audited;
+ sad.denied = denied;
+ sad.result = result;
+
+ a->selinux_audit_data = &sad;
+
+ common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
+ return 0;
}
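slow_avc_audit() exists so the common permission-check path keeps a small stack frame; the large audit structures are only materialised in a noinline function reached when a record is actually emitted. The same trick in ordinary C, with hypothetical names and a GCC/Clang attribute:

	#include <stdio.h>

	/* Big buffers live only in the slow path's frame. */
	__attribute__((noinline))
	static int slow_emit(const char *what)
	{
		char record[512];

		snprintf(record, sizeof(record), "audit: %s", what);
		puts(record);
		return 0;
	}

	/* Fast path: tiny frame, bails out before touching the big buffer. */
	static int maybe_audit(int audited, const char *what)
	{
		if (!audited)
			return 0;
		return slow_emit(what);
	}

	int main(void)
	{
		maybe_audit(0, "ignored");	/* cheap, no 512-byte frame */
		maybe_audit(1, "denied open");
		return 0;
	}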
/**
* avc_add_callback - Register a callback for security events.
* @callback: callback function
* @events: security events
- * @ssid: source security identifier or %SECSID_WILD
- * @tsid: target security identifier or %SECSID_WILD
- * @tclass: target security class
- * @perms: permissions
*
- * Register a callback function for events in the set @events
- * related to the SID pair (@ssid, @tsid) and
- * and the permissions @perms, interpreting
- * @perms based on @tclass. Returns %0 on success or
- * -%ENOMEM if insufficient memory exists to add the callback.
+ * Register a callback function for events in the set @events.
+ * Returns %0 on success or -%ENOMEM if insufficient memory
+ * exists to add the callback.
*/
-int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
- u16 tclass, u32 perms,
- u32 *out_retained),
- u32 events, u32 ssid, u32 tsid,
- u16 tclass, u32 perms)
+int __init avc_add_callback(int (*callback)(u32 event), u32 events)
{
struct avc_callback_node *c;
int rc = 0;
- c = kmalloc(sizeof(*c), GFP_ATOMIC);
+ c = kmalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
rc = -ENOMEM;
goto out;
@@ -548,9 +511,6 @@ int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
c->callback = callback;
c->events = events;
- c->ssid = ssid;
- c->tsid = tsid;
- c->perms = perms;
c->next = avc_callbacks;
avc_callbacks = c;
out:
@@ -571,7 +531,7 @@ static inline int avc_sidcmp(u32 x, u32 y)
*
* if a valid AVC entry doesn't exist, this function returns -ENOENT.
* if the internal kmalloc() returns NULL, this function returns -ENOMEM.
- * otherwise, this function update the AVC entry. The original AVC-entry object
+ * otherwise, this function updates the AVC entry. The original AVC-entry object
will be released later by RCU.
*/
static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
@@ -581,7 +541,6 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
unsigned long flag;
struct avc_node *pos, *node, *orig = NULL;
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
node = avc_alloc_node();
@@ -598,7 +557,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
spin_lock_irqsave(lock, flag);
- hlist_for_each_entry(pos, next, head, list) {
+ hlist_for_each_entry(pos, head, list) {
if (ssid == pos->ae.ssid &&
tsid == pos->ae.tsid &&
tclass == pos->ae.tclass &&
@@ -654,7 +613,6 @@ out:
static void avc_flush(void)
{
struct hlist_head *head;
- struct hlist_node *next;
struct avc_node *node;
spinlock_t *lock;
unsigned long flag;
@@ -670,7 +628,7 @@ static void avc_flush(void)
* prevent RCU grace periods from ending.
*/
rcu_read_lock();
- hlist_for_each_entry(node, next, head, list)
+ hlist_for_each_entry(node, head, list)
avc_node_delete(node);
rcu_read_unlock();
spin_unlock_irqrestore(lock, flag);
@@ -690,8 +648,7 @@ int avc_ss_reset(u32 seqno)
for (c = avc_callbacks; c; c = c->next) {
if (c->events & AVC_CALLBACK_RESET) {
- tmprc = c->callback(AVC_CALLBACK_RESET,
- 0, 0, 0, 0, NULL);
+ tmprc = c->callback(AVC_CALLBACK_RESET);
/* save the first error encountered for the return
value and continue processing the callbacks */
if (!rc)
@@ -703,6 +660,41 @@ int avc_ss_reset(u32 seqno)
return rc;
}
+/*
+ * Slow-path helper function for avc_has_perm_noaudit,
+ * when the avc_node lookup fails. We get called with
+ * the RCU read lock held, and need to return with it
+ * still held, but drop it for the security compute.
+ *
+ * Don't inline this, since it's the slow-path and just
+ * results in a bigger stack frame.
+ */
+static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd)
+{
+ rcu_read_unlock();
+ security_compute_av(ssid, tsid, tclass, avd);
+ rcu_read_lock();
+ return avc_insert(ssid, tsid, tclass, avd);
+}
+
+static noinline int avc_denied(u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ unsigned flags,
+ struct av_decision *avd)
+{
+ if (flags & AVC_STRICT)
+ return -EACCES;
+
+ if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
+ return -EACCES;
+
+ avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
+ tsid, tclass, avd->seqno);
+ return 0;
+}
+
+
/**
* avc_has_perm_noaudit - Check permissions but perform no auditing.
* @ssid: source security identifier
@@ -723,13 +715,12 @@ int avc_ss_reset(u32 seqno)
* auditing, e.g. in cases where a lock must be held for the check but
* should be released for the auditing.
*/
-int avc_has_perm_noaudit(u32 ssid, u32 tsid,
+inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
unsigned flags,
- struct av_decision *in_avd)
+ struct av_decision *avd)
{
struct avc_node *node;
- struct av_decision avd_entry, *avd;
int rc = 0;
u32 denied;
@@ -738,39 +729,18 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
rcu_read_lock();
node = avc_lookup(ssid, tsid, tclass);
- if (!node) {
- rcu_read_unlock();
-
- if (in_avd)
- avd = in_avd;
- else
- avd = &avd_entry;
-
- rc = security_compute_av(ssid, tsid, tclass, requested, avd);
- if (rc)
- goto out;
- rcu_read_lock();
- node = avc_insert(ssid, tsid, tclass, avd);
+ if (unlikely(!node)) {
+ node = avc_compute_av(ssid, tsid, tclass, avd);
} else {
- if (in_avd)
- memcpy(in_avd, &node->ae.avd, sizeof(*in_avd));
+ memcpy(avd, &node->ae.avd, sizeof(*avd));
avd = &node->ae.avd;
}
denied = requested & ~(avd->allowed);
-
- if (denied) {
- if (flags & AVC_STRICT)
- rc = -EACCES;
- else if (!selinux_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE))
- avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
- tsid, tclass, avd->seqno);
- else
- rc = -EACCES;
- }
+ if (unlikely(denied))
+ rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
rcu_read_unlock();
-out:
return rc;
}
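On a cache miss, avc_compute_av() drops the RCU read lock for the expensive security-server computation, retakes it, and inserts the fresh node, while the hit path stays a plain lookup guarded by unlikely(). A loose userspace analogue of that drop-compute-reacquire shape built on a pthread rwlock (the real AVC uses RCU, not a reader-writer lock):

	#include <pthread.h>
	#include <stdio.h>

	static pthread_rwlock_t cache_lock = PTHREAD_RWLOCK_INITIALIZER;
	static int cached_value = -1;	/* -1 means "not cached yet" */

	/* Expensive recomputation done outside the read lock, then re-published. */
	static int compute_and_insert(int key)
	{
		pthread_rwlock_unlock(&cache_lock);	/* drop the read side for the compute */
		int value = key * key;			/* stand-in for the security server */
		pthread_rwlock_wrlock(&cache_lock);
		cached_value = value;
		pthread_rwlock_unlock(&cache_lock);
		pthread_rwlock_rdlock(&cache_lock);	/* caller expects the read lock back */
		return value;
	}

	static int lookup(int key)
	{
		pthread_rwlock_rdlock(&cache_lock);
		int value = cached_value;
		if (__builtin_expect(value < 0, 0))	/* unlikely(): cache miss */
			value = compute_and_insert(key);
		pthread_rwlock_unlock(&cache_lock);
		return value;
	}

	int main(void)
	{
		printf("%d %d\n", lookup(7), lookup(7));	/* miss, then hit */
		return 0;
	}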
@@ -794,10 +764,13 @@ int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
u32 requested, struct common_audit_data *auditdata)
{
struct av_decision avd;
- int rc;
+ int rc, rc2;
rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
- avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
+
+ rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
+ if (rc2)
+ return rc2;
return rc;
}
diff --git a/security/selinux/exports.c b/security/selinux/exports.c
index c0a454aee1e..e75dd94e2d2 100644
--- a/security/selinux/exports.c
+++ b/security/selinux/exports.c
@@ -11,58 +11,10 @@
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*/
-#include <linux/types.h>
-#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/selinux.h>
-#include <linux/fs.h>
-#include <linux/ipc.h>
-#include <asm/atomic.h>
#include "security.h"
-#include "objsec.h"
-
-/* SECMARK reference count */
-extern atomic_t selinux_secmark_refcount;
-
-int selinux_string_to_sid(char *str, u32 *sid)
-{
- if (selinux_enabled)
- return security_context_to_sid(str, strlen(str), sid);
- else {
- *sid = 0;
- return 0;
- }
-}
-EXPORT_SYMBOL_GPL(selinux_string_to_sid);
-
-int selinux_secmark_relabel_packet_permission(u32 sid)
-{
- if (selinux_enabled) {
- const struct task_security_struct *__tsec;
- u32 tsid;
-
- __tsec = current_security();
- tsid = __tsec->sid;
-
- return avc_has_perm(tsid, sid, SECCLASS_PACKET,
- PACKET__RELABELTO, NULL);
- }
- return 0;
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_relabel_packet_permission);
-
-void selinux_secmark_refcount_inc(void)
-{
- atomic_inc(&selinux_secmark_refcount);
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_refcount_inc);
-
-void selinux_secmark_refcount_dec(void)
-{
- atomic_dec(&selinux_secmark_refcount);
-}
-EXPORT_SYMBOL_GPL(selinux_secmark_refcount_dec);
bool selinux_is_enabled(void)
{
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 9a2ee845e9d..83d06db34d0 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -14,7 +14,7 @@
* Copyright (C) 2004-2005 Trusted Computer Solutions, Inc.
* <dgoeddel@trustedcs.com>
* Copyright (C) 2006, 2007, 2009 Hewlett-Packard Development Company, L.P.
- * Paul Moore <paul.moore@hp.com>
+ * Paul Moore <paul@paul-moore.com>
* Copyright (C) 2007 Hitachi Software Engineering Co., Ltd.
* Yuichi Nakamura <ynakam@hitachisoft.jp>
*
@@ -24,6 +24,7 @@
*/
#include <linux/init.h>
+#include <linux/kd.h>
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/errno.h>
@@ -36,29 +37,32 @@
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
+#include <linux/proc_fs.h>
#include <linux/swap.h>
#include <linux/spinlock.h>
#include <linux/syscalls.h>
+#include <linux/dcache.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/namei.h>
#include <linux/mount.h>
-#include <linux/proc_fs.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
#include <linux/tty.h>
#include <net/icmp.h>
#include <net/ip.h> /* for local_port_range[] */
+#include <net/sock.h>
#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
+#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/netlabel.h>
#include <linux/uaccess.h>
#include <asm/ioctls.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h> /* for network interface checks */
-#include <linux/netlink.h>
+#include <net/netlink.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dccp.h>
@@ -70,12 +74,16 @@
#include <net/ipv6.h>
#include <linux/hugetlb.h>
#include <linux/personality.h>
-#include <linux/sysctl.h>
#include <linux/audit.h>
#include <linux/string.h>
#include <linux/selinux.h>
#include <linux/mutex.h>
#include <linux/posix-timers.h>
+#include <linux/syslog.h>
+#include <linux/user_namespace.h>
+#include <linux/export.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
#include "avc.h"
#include "objsec.h"
@@ -85,17 +93,12 @@
#include "xfrm.h"
#include "netlabel.h"
#include "audit.h"
+#include "avc_ss.h"
-#define XATTR_SELINUX_SUFFIX "selinux"
-#define XATTR_NAME_SELINUX XATTR_SECURITY_PREFIX XATTR_SELINUX_SUFFIX
-
-#define NUM_SEL_MNT_OPTS 5
-
-extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
extern struct security_operations *security_ops;
/* SECMARK reference count */
-atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
+static atomic_t selinux_secmark_refcount = ATOMIC_INIT(0);
#ifdef CONFIG_SECURITY_SELINUX_DEVELOP
int selinux_enforcing;
@@ -103,7 +106,7 @@ int selinux_enforcing;
static int __init enforcing_setup(char *str)
{
unsigned long enforcing;
- if (!strict_strtoul(str, 0, &enforcing))
+ if (!kstrtoul(str, 0, &enforcing))
selinux_enforcing = enforcing ? 1 : 0;
return 1;
}
@@ -116,7 +119,7 @@ int selinux_enabled = CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE;
static int __init selinux_enabled_setup(char *str)
{
unsigned long enabled;
- if (!strict_strtoul(str, 0, &enabled))
+ if (!kstrtoul(str, 0, &enabled))
selinux_enabled = enabled ? 1 : 0;
return 1;
}
@@ -125,18 +128,6 @@ __setup("selinux=", selinux_enabled_setup);
int selinux_enabled = 1;
#endif
-
-/*
- * Minimal support for a secondary security module,
- * just to allow the use of the capability module.
- */
-static struct security_operations *secondary_ops;
-
-/* Lists of inode and superblock security structures initialized
- before the policy was loaded. */
-static LIST_HEAD(superblock_security_head);
-static DEFINE_SPINLOCK(sb_security_lock);
-
static struct kmem_cache *sel_inode_cache;
/**
@@ -146,12 +137,28 @@ static struct kmem_cache *sel_inode_cache;
* This function checks the SECMARK reference counter to see if any SECMARK
* targets are currently configured, if the reference counter is greater than
* zero SECMARK is considered to be enabled. Returns true (1) if SECMARK is
- * enabled, false (0) if SECMARK is disabled.
+ * enabled, false (0) if SECMARK is disabled. If the always_check_network
+ * policy capability is enabled, SECMARK is always considered enabled.
*
*/
static int selinux_secmark_enabled(void)
{
- return (atomic_read(&selinux_secmark_refcount) > 0);
+ return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount));
+}
+
+/**
+ * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled
+ *
+ * Description:
+ * This function checks if NetLabel or labeled IPSEC is enabled. Returns true
+ * (1) if either is enabled or false (0) if neither is enabled. If the
+ * always_check_network policy capability is enabled, peer labeling
+ * is always considered enabled.
+ *
+ */
+static int selinux_peerlbl_enabled(void)
+{
+ return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled());
}
/*
@@ -199,7 +206,7 @@ static inline u32 task_sid(const struct task_struct *task)
*/
static inline u32 current_sid(void)
{
- const struct task_security_struct *tsec = current_cred()->security;
+ const struct task_security_struct *tsec = current_security();
return tsec->sid;
}
@@ -226,6 +233,14 @@ static int inode_alloc_security(struct inode *inode)
return 0;
}
+static void inode_free_rcu(struct rcu_head *head)
+{
+ struct inode_security_struct *isec;
+
+ isec = container_of(head, struct inode_security_struct, rcu);
+ kmem_cache_free(sel_inode_cache, isec);
+}
+
static void inode_free_security(struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
@@ -236,8 +251,16 @@ static void inode_free_security(struct inode *inode)
list_del_init(&isec->list);
spin_unlock(&sbsec->isec_lock);
- inode->i_security = NULL;
- kmem_cache_free(sel_inode_cache, isec);
+ /*
+ * The inode may still be referenced in a path walk and
+ * a call to selinux_inode_permission() can be made
+ * after inode_free_security() is called. Ideally, the VFS
+ * wouldn't do this, but fixing that is a much harder
+ * job. For now, simply free the i_security via RCU, and
+ * leave the current inode->i_security pointer intact.
+ * The inode will be freed after the RCU grace period too.
+ */
+ call_rcu(&isec->rcu, inode_free_rcu);
}
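The comment above is the whole constraint: lockless path walks may still dereference inode->i_security after this hook runs, so the blob has to outlive the call by one RCU grace period. The same call_rcu() idiom for a generic security blob, as a sketch; struct my_blob is hypothetical and kfree() stands in for the kmem_cache free used by the real code:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_blob {
	u32 sid;
	struct rcu_head rcu;		/* used only to defer the free */
};

static void my_blob_free_rcu(struct rcu_head *head)
{
	struct my_blob *blob = container_of(head, struct my_blob, rcu);

	kfree(blob);
}

static void my_blob_free(struct my_blob *blob)
{
	/* RCU readers may still hold a pointer to blob, so defer the
	 * actual free until after a grace period has elapsed. */
	call_rcu(&blob->rcu, my_blob_free_rcu);
}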
static int file_alloc_security(struct file *file)
@@ -272,7 +295,6 @@ static int superblock_alloc_security(struct super_block *sb)
return -ENOMEM;
mutex_init(&sbsec->lock);
- INIT_LIST_HEAD(&sbsec->list);
INIT_LIST_HEAD(&sbsec->isec_head);
spin_lock_init(&sbsec->isec_lock);
sbsec->sb = sb;
@@ -287,55 +309,20 @@ static int superblock_alloc_security(struct super_block *sb)
static void superblock_free_security(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
-
- spin_lock(&sb_security_lock);
- if (!list_empty(&sbsec->list))
- list_del_init(&sbsec->list);
- spin_unlock(&sb_security_lock);
-
sb->s_security = NULL;
kfree(sbsec);
}
-static int sk_alloc_security(struct sock *sk, int family, gfp_t priority)
-{
- struct sk_security_struct *ssec;
-
- ssec = kzalloc(sizeof(*ssec), priority);
- if (!ssec)
- return -ENOMEM;
-
- ssec->peer_sid = SECINITSID_UNLABELED;
- ssec->sid = SECINITSID_UNLABELED;
- sk->sk_security = ssec;
-
- selinux_netlbl_sk_security_reset(ssec);
-
- return 0;
-}
-
-static void sk_free_security(struct sock *sk)
-{
- struct sk_security_struct *ssec = sk->sk_security;
-
- sk->sk_security = NULL;
- selinux_netlbl_sk_security_free(ssec);
- kfree(ssec);
-}
-
-/* The security server must be initialized before
- any labeling or access decisions can be provided. */
-extern int ss_initialized;
-
/* The file system's label must be initialized prior to use. */
-static char *labeling_behaviors[6] = {
+static const char *labeling_behaviors[7] = {
"uses xattr",
"uses transition SIDs",
"uses task SIDs",
"uses genfs_contexts",
"not configured for labeling",
"uses mountpoint labeling",
+ "uses native labeling",
};
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
@@ -352,8 +339,11 @@ enum {
Opt_defcontext = 3,
Opt_rootcontext = 4,
Opt_labelsupport = 5,
+ Opt_nextmntopt = 6,
};
+#define NUM_SEL_MNT_OPTS (Opt_nextmntopt - 1)
+
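Deriving NUM_SEL_MNT_OPTS from the Opt_nextmntopt sentinel keeps the count in step with the enum when a new mount option is appended. The idiom in isolation, with made-up names:

enum example_opt {
	EX_OPT_FIRST = 1,
	EX_OPT_SECOND,
	EX_OPT_THIRD,
	EX_OPT_NEXT,			/* sentinel, must stay last */
};

#define EX_NUM_OPTS	(EX_OPT_NEXT - 1)	/* == 3, tracks the enum */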
static const match_table_t tokens = {
{Opt_context, CONTEXT_STR "%s"},
{Opt_fscontext, FSCONTEXT_STR "%s"},
@@ -398,6 +388,29 @@ static int may_context_mount_inode_relabel(u32 sid,
return rc;
}
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = sb->s_security;
+
+ if (sbsec->behavior == SECURITY_FS_USE_XATTR ||
+ sbsec->behavior == SECURITY_FS_USE_TRANS ||
+ sbsec->behavior == SECURITY_FS_USE_TASK)
+ return 1;
+
+	/* Special handling for sysfs. Is genfs but also has setxattr handler */
+ if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
+ return 1;
+
+ /*
+ * Special handling for rootfs. Is genfs but supports
+ * setting SELinux context on in-core inodes.
+ */
+ if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
+ return 1;
+
+ return 0;
+}
+
static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
@@ -431,8 +444,6 @@ static int sb_finish_set_opts(struct super_block *sb)
}
}
- sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP);
-
if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
sb->s_id, sb->s_type->name);
@@ -441,15 +452,9 @@ static int sb_finish_set_opts(struct super_block *sb)
sb->s_id, sb->s_type->name,
labeling_behaviors[sbsec->behavior-1]);
- if (sbsec->behavior == SECURITY_FS_USE_GENFS ||
- sbsec->behavior == SECURITY_FS_USE_MNTPOINT ||
- sbsec->behavior == SECURITY_FS_USE_NONE ||
- sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
- sbsec->flags &= ~SE_SBLABELSUPP;
-
- /* Special handling for sysfs. Is genfs but also has setxattr handler*/
- if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
- sbsec->flags |= SE_SBLABELSUPP;
+ sbsec->flags |= SE_SBINITIALIZED;
+ if (selinux_is_sblabel_mnt(sb))
+ sbsec->flags |= SBLABEL_MNT;
/* Initialize the root inode. */
rc = inode_doinit_with_dentry(root_inode, root);
@@ -503,15 +508,18 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
if (!ss_initialized)
return -EINVAL;
+ /* make sure we always check enough bits to cover the mask */
+ BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS));
+
tmp = sbsec->flags & SE_MNTMASK;
/* count the number of mount options for this sb */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < NUM_SEL_MNT_OPTS; i++) {
if (tmp & 0x01)
opts->num_mnt_opts++;
tmp >>= 1;
}
/* Check if the Label support flag is set */
- if (sbsec->flags & SE_SBLABELSUPP)
+ if (sbsec->flags & SBLABEL_MNT)
opts->num_mnt_opts++;
opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
@@ -558,9 +566,9 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
}
- if (sbsec->flags & SE_SBLABELSUPP) {
+ if (sbsec->flags & SBLABEL_MNT) {
opts->mnt_opts[i] = NULL;
- opts->mnt_opts_flags[i++] = SE_SBLABELSUPP;
+ opts->mnt_opts_flags[i++] = SBLABEL_MNT;
}
BUG_ON(i != opts->num_mnt_opts);
@@ -597,7 +605,9 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag,
* labeling information.
*/
static int selinux_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts)
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
{
const struct cred *cred = current_cred();
int rc = 0, i;
@@ -618,10 +628,6 @@ static int selinux_set_mnt_opts(struct super_block *sb,
/* Defer initialization until selinux_complete_init,
after the initial policy is loaded and the security
server is ready to handle calls. */
- spin_lock(&sb_security_lock);
- if (list_empty(&sbsec->list))
- list_add(&sbsec->list, &superblock_security_head);
- spin_unlock(&sb_security_lock);
goto out;
}
rc = -EINVAL;
@@ -629,6 +635,12 @@ static int selinux_set_mnt_opts(struct super_block *sb,
"before the security server is initialized\n");
goto out;
}
+ if (kern_flags && !set_kern_flags) {
+		/* Specifying internal flags without providing a place to
+		 * put the results is not allowed */
+ rc = -EINVAL;
+ goto out;
+ }
/*
* Binary mount data FS will come through this function twice. Once
@@ -653,10 +665,10 @@ static int selinux_set_mnt_opts(struct super_block *sb,
for (i = 0; i < num_opts; i++) {
u32 sid;
- if (flags[i] == SE_SBLABELSUPP)
+ if (flags[i] == SBLABEL_MNT)
continue;
rc = security_context_to_sid(mount_options[i],
- strlen(mount_options[i]), &sid);
+ strlen(mount_options[i]), &sid, GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -719,14 +731,19 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (strcmp(sb->s_type->name, "proc") == 0)
sbsec->flags |= SE_SBPROC;
- /* Determine the labeling behavior to use for this filesystem type. */
- rc = security_fs_use((sbsec->flags & SE_SBPROC) ? "proc" : sb->s_type->name, &sbsec->behavior, &sbsec->sid);
- if (rc) {
- printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n",
- __func__, sb->s_type->name, rc);
- goto out;
+ if (!sbsec->behavior) {
+ /*
+ * Determine the labeling behavior to use for this
+ * filesystem type.
+ */
+ rc = security_fs_use(sb);
+ if (rc) {
+ printk(KERN_WARNING
+ "%s: security_fs_use(%s) returned %d\n",
+ __func__, sb->s_type->name, rc);
+ goto out;
+ }
}
-
/* sets the context of the superblock for the fs being mounted. */
if (fscontext_sid) {
rc = may_context_mount_sb_relabel(fscontext_sid, sbsec, cred);
@@ -741,6 +758,11 @@ static int selinux_set_mnt_opts(struct super_block *sb,
* sets the label used on all file below the mountpoint, and will set
* the superblock context if not already set.
*/
+ if (kern_flags & SECURITY_LSM_NATIVE_LABELS && !context_sid) {
+ sbsec->behavior = SECURITY_FS_USE_NATIVE;
+ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
+ }
+
if (context_sid) {
if (!fscontext_sid) {
rc = may_context_mount_sb_relabel(context_sid, sbsec,
@@ -772,7 +794,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
}
if (defcontext_sid) {
- if (sbsec->behavior != SECURITY_FS_USE_XATTR) {
+ if (sbsec->behavior != SECURITY_FS_USE_XATTR &&
+ sbsec->behavior != SECURITY_FS_USE_NATIVE) {
rc = -EINVAL;
printk(KERN_WARNING "SELinux: defcontext option is "
"invalid for this filesystem type\n");
@@ -800,7 +823,37 @@ out_double_mount:
goto out;
}
-static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
+static int selinux_cmp_sb_context(const struct super_block *oldsb,
+ const struct super_block *newsb)
+{
+ struct superblock_security_struct *old = oldsb->s_security;
+ struct superblock_security_struct *new = newsb->s_security;
+ char oldflags = old->flags & SE_MNTMASK;
+ char newflags = new->flags & SE_MNTMASK;
+
+ if (oldflags != newflags)
+ goto mismatch;
+ if ((oldflags & FSCONTEXT_MNT) && old->sid != new->sid)
+ goto mismatch;
+ if ((oldflags & CONTEXT_MNT) && old->mntpoint_sid != new->mntpoint_sid)
+ goto mismatch;
+ if ((oldflags & DEFCONTEXT_MNT) && old->def_sid != new->def_sid)
+ goto mismatch;
+ if (oldflags & ROOTCONTEXT_MNT) {
+ struct inode_security_struct *oldroot = oldsb->s_root->d_inode->i_security;
+ struct inode_security_struct *newroot = newsb->s_root->d_inode->i_security;
+ if (oldroot->sid != newroot->sid)
+ goto mismatch;
+ }
+ return 0;
+mismatch:
+ printk(KERN_WARNING "SELinux: mount invalid. Same superblock, "
+ "different security settings for (dev %s, "
+ "type %s)\n", newsb->s_id, newsb->s_type->name);
+ return -EBUSY;
+}
+
+static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{
const struct superblock_security_struct *oldsbsec = oldsb->s_security;
@@ -812,23 +865,17 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
/*
* if the parent was able to be mounted it clearly had no special lsm
- * mount options. thus we can safely put this sb on the list and deal
- * with it later
+ * mount options. thus we can safely deal with this superblock later
*/
- if (!ss_initialized) {
- spin_lock(&sb_security_lock);
- if (list_empty(&newsbsec->list))
- list_add(&newsbsec->list, &superblock_security_head);
- spin_unlock(&sb_security_lock);
- return;
- }
+ if (!ss_initialized)
+ return 0;
/* how can we clone if the old one wasn't set up?? */
BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
- /* if fs is reusing a sb, just let its options stand... */
+ /* if fs is reusing a sb, make sure that the contexts match */
if (newsbsec->flags & SE_SBINITIALIZED)
- return;
+ return selinux_cmp_sb_context(oldsb, newsb);
mutex_lock(&newsbsec->lock);
@@ -861,6 +908,7 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
sb_finish_set_opts(newsb);
mutex_unlock(&newsbsec->lock);
+ return 0;
}
static int selinux_parse_opts_str(char *options,
@@ -1004,7 +1052,7 @@ static int superblock_doinit(struct super_block *sb, void *data)
goto out_err;
out:
- rc = selinux_set_mnt_opts(sb, &opts);
+ rc = selinux_set_mnt_opts(sb, &opts, 0, NULL);
out_err:
security_free_mnt_opts(&opts);
@@ -1038,12 +1086,13 @@ static void selinux_write_opts(struct seq_file *m,
case DEFCONTEXT_MNT:
prefix = DEFCONTEXT_STR;
break;
- case SE_SBLABELSUPP:
+ case SBLABEL_MNT:
seq_putc(m, ',');
seq_puts(m, LABELSUPP_STR);
continue;
default:
BUG();
+ return;
};
/* we need a comma before each option */
seq_putc(m, ',');
@@ -1146,7 +1195,7 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
return SECCLASS_NETLINK_ROUTE_SOCKET;
case NETLINK_FIREWALL:
return SECCLASS_NETLINK_FIREWALL_SOCKET;
- case NETLINK_INET_DIAG:
+ case NETLINK_SOCK_DIAG:
return SECCLASS_NETLINK_TCPDIAG_SOCKET;
case NETLINK_NFLOG:
return SECCLASS_NETLINK_NFLOG_SOCKET;
@@ -1177,39 +1226,35 @@ static inline u16 socket_type_to_security_class(int family, int type, int protoc
}
#ifdef CONFIG_PROC_FS
-static int selinux_proc_get_sid(struct proc_dir_entry *de,
+static int selinux_proc_get_sid(struct dentry *dentry,
u16 tclass,
u32 *sid)
{
- int buflen, rc;
- char *buffer, *path, *end;
+ int rc;
+ char *buffer, *path;
buffer = (char *)__get_free_page(GFP_KERNEL);
if (!buffer)
return -ENOMEM;
- buflen = PAGE_SIZE;
- end = buffer+buflen;
- *--end = '\0';
- buflen--;
- path = end-1;
- *path = '/';
- while (de && de != de->parent) {
- buflen -= de->namelen + 1;
- if (buflen < 0)
- break;
- end -= de->namelen;
- memcpy(end, de->name, de->namelen);
- *--end = '/';
- path = end;
- de = de->parent;
+ path = dentry_path_raw(dentry, buffer, PAGE_SIZE);
+ if (IS_ERR(path))
+ rc = PTR_ERR(path);
+ else {
+ /* each process gets a /proc/PID/ entry. Strip off the
+ * PID part to get a valid selinux labeling.
+ * e.g. /proc/1/net/rpc/nfs -> /net/rpc/nfs */
+ while (path[1] >= '0' && path[1] <= '9') {
+ path[1] = '/';
+ path++;
+ }
+ rc = security_genfs_sid("proc", path, tclass, sid);
}
- rc = security_genfs_sid("proc", path, tclass, sid);
free_page((unsigned long)buffer);
return rc;
}
#else
-static int selinux_proc_get_sid(struct proc_dir_entry *de,
+static int selinux_proc_get_sid(struct dentry *dentry,
u16 tclass,
u32 *sid)
{
@@ -1249,6 +1294,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
}
switch (sbsec->behavior) {
+ case SECURITY_FS_USE_NATIVE:
+ break;
case SECURITY_FS_USE_XATTR:
if (!inode->i_op->getxattr) {
isec->sid = sbsec->def_sid;
@@ -1357,10 +1404,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
/* Try to obtain a transition SID. */
isec->sclass = inode_mode_to_security_class(inode->i_mode);
- rc = security_transition_sid(isec->task_sid,
- sbsec->sid,
- isec->sclass,
- &sid);
+ rc = security_transition_sid(isec->task_sid, sbsec->sid,
+ isec->sclass, NULL, &sid);
if (rc)
goto out_unlock;
isec->sid = sid;
@@ -1373,16 +1418,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
isec->sid = sbsec->sid;
if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
- struct proc_inode *proci = PROC_I(inode);
- if (proci->pde) {
- isec->sclass = inode_mode_to_security_class(inode->i_mode);
- rc = selinux_proc_get_sid(proci->pde,
- isec->sclass,
- &sid);
- if (rc)
- goto out_unlock;
- isec->sid = sid;
- }
+ /* We must have a dentry to determine the label on
+ * procfs inodes */
+ if (opt_dentry)
+ /* Called from d_instantiate or
+ * d_splice_alias. */
+ dentry = dget(opt_dentry);
+ else
+ /* Called from selinux_complete_init, try to
+ * find a dentry. */
+ dentry = d_find_alias(inode);
+ /*
+ * This can be hit on boot when a file is accessed
+ * before the policy is loaded. When we load policy we
+ * may find inodes that have no dentry on the
+ * sbsec->isec_head list. No reason to complain as
+ * these will get fixed up the next time we go through
+ * inode_doinit() with a dentry, before these inodes
+ * could be used again by userspace.
+ */
+ if (!dentry)
+ goto out_unlock;
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
+ rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
+ dput(dentry);
+ if (rc)
+ goto out_unlock;
+ isec->sid = sid;
}
break;
}
@@ -1478,8 +1540,7 @@ static int current_has_perm(const struct task_struct *tsk,
#endif
/* Check whether a task is allowed to use a capability. */
-static int task_has_capability(struct task_struct *tsk,
- const struct cred *cred,
+static int cred_has_capability(const struct cred *cred,
int cap, int audit)
{
struct common_audit_data ad;
@@ -1489,8 +1550,7 @@ static int task_has_capability(struct task_struct *tsk,
u32 av = CAP_TO_MASK(cap);
int rc;
- COMMON_AUDIT_DATA_INIT(&ad, CAP);
- ad.tsk = tsk;
+ ad.type = LSM_AUDIT_DATA_CAP;
ad.u.cap = cap;
switch (CAP_TO_INDEX(cap)) {
@@ -1504,11 +1564,15 @@ static int task_has_capability(struct task_struct *tsk,
printk(KERN_ERR
"SELinux: out of range capability %d\n", cap);
BUG();
+ return -EINVAL;
}
rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
- if (audit == SECURITY_CAP_AUDIT)
- avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
+ if (audit == SECURITY_CAP_AUDIT) {
+ int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
+ if (rc2)
+ return rc2;
+ }
return rc;
}
@@ -1531,7 +1595,6 @@ static int inode_has_perm(const struct cred *cred,
struct common_audit_data *adp)
{
struct inode_security_struct *isec;
- struct common_audit_data ad;
u32 sid;
validate_creds(cred);
@@ -1542,12 +1605,6 @@ static int inode_has_perm(const struct cred *cred,
sid = cred_sid(cred);
isec = inode->i_security;
- if (!adp) {
- adp = &ad;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.inode = inode;
- }
-
return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
}
@@ -1555,19 +1612,44 @@ static int inode_has_perm(const struct cred *cred,
the dentry to help the auditing code to more easily generate the
pathname if needed. */
static inline int dentry_has_perm(const struct cred *cred,
- struct vfsmount *mnt,
struct dentry *dentry,
u32 av)
{
struct inode *inode = dentry->d_inode;
struct common_audit_data ad;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.path.mnt = mnt;
- ad.u.fs.path.dentry = dentry;
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
return inode_has_perm(cred, inode, av, &ad);
}
+/* Same as inode_has_perm, but pass explicit audit data containing
+ the path to help the auditing code to more easily generate the
+ pathname if needed. */
+static inline int path_has_perm(const struct cred *cred,
+ struct path *path,
+ u32 av)
+{
+ struct inode *inode = path->dentry->d_inode;
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = *path;
+ return inode_has_perm(cred, inode, av, &ad);
+}
+
+/* Same as path_has_perm, but uses the inode from the file struct. */
+static inline int file_path_has_perm(const struct cred *cred,
+ struct file *file,
+ u32 av)
+{
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = file->f_path;
+ return inode_has_perm(cred, file_inode(file), av, &ad);
+}
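All three wrappers above end up in inode_has_perm(); they differ only in the common_audit_data they pass, so the audit record can carry the richest name the caller actually has (a bare dentry, a vfsmount plus dentry, or an open file's f_path). A hedged sketch of choosing the matching helper; my_triple_check() and its argument mix are purely illustrative:

static int my_triple_check(const struct cred *cred, struct file *file,
			   struct path *path, struct dentry *dentry)
{
	int rc;

	rc = file_path_has_perm(cred, file, FILE__READ);  /* open file in hand */
	if (rc)
		return rc;

	rc = path_has_perm(cred, path, FILE__GETATTR);    /* vfsmount + dentry */
	if (rc)
		return rc;

	return dentry_has_perm(cred, dentry, FILE__READ); /* dentry only */
}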
+
/* Check whether a task can use an open file descriptor to
access an inode in a given way. Check access to the
descriptor itself, and then use dentry_has_perm to
@@ -1581,13 +1663,13 @@ static int file_has_perm(const struct cred *cred,
u32 av)
{
struct file_security_struct *fsec = file->f_security;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct common_audit_data ad;
u32 sid = cred_sid(cred);
int rc;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.path = file->f_path;
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = file->f_path;
if (sid != fsec->sid) {
rc = avc_has_perm(sid, fsec->sid,
@@ -1612,8 +1694,7 @@ static int may_create(struct inode *dir,
struct dentry *dentry,
u16 tclass)
{
- const struct cred *cred = current_cred();
- const struct task_security_struct *tsec = cred->security;
+ const struct task_security_struct *tsec = current_security();
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
u32 sid, newsid;
@@ -1626,8 +1707,8 @@ static int may_create(struct inode *dir,
sid = tsec->sid;
newsid = tsec->create_sid;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.path.dentry = dentry;
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
DIR__ADD_NAME | DIR__SEARCH,
@@ -1635,8 +1716,9 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
- rc = security_transition_sid(sid, dsec->sid, tclass, &newsid);
+ if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
+ rc = security_transition_sid(sid, dsec->sid, tclass,
+ &dentry->d_name, &newsid);
if (rc)
return rc;
}
@@ -1678,8 +1760,8 @@ static int may_link(struct inode *dir,
dsec = dir->i_security;
isec = dentry->d_inode->i_security;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.path.dentry = dentry;
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
av = DIR__SEARCH;
av |= (kind ? DIR__REMOVE_NAME : DIR__ADD_NAME);
@@ -1724,9 +1806,9 @@ static inline int may_rename(struct inode *old_dir,
old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
new_dsec = new_dir->i_security;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
+ ad.type = LSM_AUDIT_DATA_DENTRY;
- ad.u.fs.path.dentry = old_dentry;
+ ad.u.dentry = old_dentry;
rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
DIR__REMOVE_NAME | DIR__SEARCH, &ad);
if (rc)
@@ -1742,7 +1824,7 @@ static inline int may_rename(struct inode *old_dir,
return rc;
}
- ad.u.fs.path.dentry = new_dentry;
+ ad.u.dentry = new_dentry;
av = DIR__ADD_NAME | DIR__SEARCH;
if (new_dentry->d_inode)
av |= DIR__REMOVE_NAME;
@@ -1780,7 +1862,7 @@ static inline u32 file_mask_to_av(int mode, int mask)
{
u32 av = 0;
- if ((mode & S_IFMT) != S_IFDIR) {
+ if (!S_ISDIR(mode)) {
if (mask & MAY_EXEC)
av |= FILE__EXECUTE;
if (mask & MAY_READ)
@@ -1834,27 +1916,9 @@ static inline u32 open_file_to_av(struct file *file)
{
u32 av = file_to_av(file);
- if (selinux_policycap_openperm) {
- mode_t mode = file->f_path.dentry->d_inode->i_mode;
- /*
- * lnk files and socks do not really have an 'open'
- */
- if (S_ISREG(mode))
- av |= FILE__OPEN;
- else if (S_ISCHR(mode))
- av |= CHR_FILE__OPEN;
- else if (S_ISBLK(mode))
- av |= BLK_FILE__OPEN;
- else if (S_ISFIFO(mode))
- av |= FIFO_FILE__OPEN;
- else if (S_ISDIR(mode))
- av |= DIR__OPEN;
- else if (S_ISSOCK(mode))
- av |= SOCK_FILE__OPEN;
- else
- printk(KERN_ERR "SELinux: WARNING: inside %s with "
- "unknown mode:%o\n", __func__, mode);
- }
+ if (selinux_policycap_openperm)
+ av |= FILE__OPEN;
+
return av;
}
@@ -1869,7 +1933,7 @@ static int selinux_ptrace_access_check(struct task_struct *child,
if (rc)
return rc;
- if (mode == PTRACE_MODE_READ) {
+ if (mode & PTRACE_MODE_READ) {
u32 sid = current_sid();
u32 csid = task_sid(child);
return avc_has_perm(sid, csid, SECCLASS_FILE, FILE__READ, NULL);
@@ -1926,92 +1990,16 @@ static int selinux_capset(struct cred *new, const struct cred *old,
* the CAP_SETUID and CAP_SETGID capabilities using the capable hook.
*/
-static int selinux_capable(struct task_struct *tsk, const struct cred *cred,
+static int selinux_capable(const struct cred *cred, struct user_namespace *ns,
int cap, int audit)
{
int rc;
- rc = cap_capable(tsk, cred, cap, audit);
+ rc = cap_capable(cred, ns, cap, audit);
if (rc)
return rc;
- return task_has_capability(tsk, cred, cap, audit);
-}
-
-static int selinux_sysctl_get_sid(ctl_table *table, u16 tclass, u32 *sid)
-{
- int buflen, rc;
- char *buffer, *path, *end;
-
- rc = -ENOMEM;
- buffer = (char *)__get_free_page(GFP_KERNEL);
- if (!buffer)
- goto out;
-
- buflen = PAGE_SIZE;
- end = buffer+buflen;
- *--end = '\0';
- buflen--;
- path = end-1;
- *path = '/';
- while (table) {
- const char *name = table->procname;
- size_t namelen = strlen(name);
- buflen -= namelen + 1;
- if (buflen < 0)
- goto out_free;
- end -= namelen;
- memcpy(end, name, namelen);
- *--end = '/';
- path = end;
- table = table->parent;
- }
- buflen -= 4;
- if (buflen < 0)
- goto out_free;
- end -= 4;
- memcpy(end, "/sys", 4);
- path = end;
- rc = security_genfs_sid("proc", path, tclass, sid);
-out_free:
- free_page((unsigned long)buffer);
-out:
- return rc;
-}
-
-static int selinux_sysctl(ctl_table *table, int op)
-{
- int error = 0;
- u32 av;
- u32 tsid, sid;
- int rc;
-
- sid = current_sid();
-
- rc = selinux_sysctl_get_sid(table, (op == 0001) ?
- SECCLASS_DIR : SECCLASS_FILE, &tsid);
- if (rc) {
- /* Default to the well-defined sysctl SID. */
- tsid = SECINITSID_SYSCTL;
- }
-
- /* The op values are "defined" in sysctl.c, thereby creating
- * a bad coupling between this module and sysctl.c */
- if (op == 001) {
- error = avc_has_perm(sid, tsid,
- SECCLASS_DIR, DIR__SEARCH, NULL);
- } else {
- av = 0;
- if (op & 004)
- av |= FILE__READ;
- if (op & 002)
- av |= FILE__WRITE;
- if (av)
- error = avc_has_perm(sid, tsid,
- SECCLASS_FILE, av, NULL);
- }
-
- return error;
+ return cred_has_capability(cred, cap, audit);
}
static int selinux_quotactl(int cmds, int type, int id, struct super_block *sb)
@@ -2046,32 +2034,29 @@ static int selinux_quota_on(struct dentry *dentry)
{
const struct cred *cred = current_cred();
- return dentry_has_perm(cred, NULL, dentry, FILE__QUOTAON);
+ return dentry_has_perm(cred, dentry, FILE__QUOTAON);
}
static int selinux_syslog(int type)
{
int rc;
- rc = cap_syslog(type);
- if (rc)
- return rc;
-
switch (type) {
- case 3: /* Read last kernel messages */
- case 10: /* Return size of the log buffer */
+ case SYSLOG_ACTION_READ_ALL: /* Read last kernel messages */
+ case SYSLOG_ACTION_SIZE_BUFFER: /* Return size of the log buffer */
rc = task_has_system(current, SYSTEM__SYSLOG_READ);
break;
- case 6: /* Disable logging to console */
- case 7: /* Enable logging to console */
- case 8: /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_OFF: /* Disable logging to console */
+ case SYSLOG_ACTION_CONSOLE_ON: /* Enable logging to console */
+ /* Set level of messages printed to console */
+ case SYSLOG_ACTION_CONSOLE_LEVEL:
rc = task_has_system(current, SYSTEM__SYSLOG_CONSOLE);
break;
- case 0: /* Close log */
- case 1: /* Open log */
- case 2: /* Read from log */
- case 4: /* Read/clear last kernel messages */
- case 5: /* Clear ring buffer */
+ case SYSLOG_ACTION_CLOSE: /* Close log */
+ case SYSLOG_ACTION_OPEN: /* Open log */
+ case SYSLOG_ACTION_READ: /* Read from log */
+ case SYSLOG_ACTION_READ_CLEAR: /* Read/clear last kernel messages */
+ case SYSLOG_ACTION_CLEAR: /* Clear ring buffer */
default:
rc = task_has_system(current, SYSTEM__SYSLOG_MOD);
break;
@@ -2091,7 +2076,7 @@ static int selinux_vm_enough_memory(struct mm_struct *mm, long pages)
{
int rc, cap_sys_admin = 0;
- rc = selinux_capable(current, current_cred(), CAP_SYS_ADMIN,
+ rc = selinux_capable(current_cred(), &init_user_ns, CAP_SYS_ADMIN,
SECURITY_CAP_NOAUDIT);
if (rc == 0)
cap_sys_admin = 1;
@@ -2107,7 +2092,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
struct task_security_struct *new_tsec;
struct inode_security_struct *isec;
struct common_audit_data ad;
- struct inode *inode = bprm->file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(bprm->file);
int rc;
rc = cap_bprm_set_creds(bprm);
@@ -2136,18 +2121,29 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
new_tsec->sid = old_tsec->exec_sid;
/* Reset exec SID on execve. */
new_tsec->exec_sid = 0;
+
+ /*
+ * Minimize confusion: if no_new_privs or nosuid and a
+ * transition is explicitly requested, then fail the exec.
+ */
+ if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)
+ return -EPERM;
+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
+ return -EACCES;
} else {
/* Check for a default transition on this program. */
rc = security_transition_sid(old_tsec->sid, isec->sid,
- SECCLASS_PROCESS, &new_tsec->sid);
+ SECCLASS_PROCESS, NULL,
+ &new_tsec->sid);
if (rc)
return rc;
}
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.path = bprm->file->f_path;
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = bprm->file->f_path;
- if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
+ if ((bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) ||
+ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS))
new_tsec->sid = old_tsec->sid;
if (new_tsec->sid == old_tsec->sid) {
@@ -2185,7 +2181,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
u32 ptsid = 0;
rcu_read_lock();
- tracer = tracehook_tracer_task(current);
+ tracer = ptrace_parent(current);
if (likely(tracer != NULL)) {
sec = __task_cred(tracer)->security;
ptsid = sec->sid;
@@ -2210,8 +2206,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
static int selinux_bprm_secureexec(struct linux_binprm *bprm)
{
- const struct cred *cred = current_cred();
- const struct task_security_struct *tsec = cred->security;
+ const struct task_security_struct *tsec = current_security();
u32 sid, osid;
int atsecure = 0;
@@ -2230,39 +2225,38 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm)
return (atsecure || cap_bprm_secureexec(bprm));
}
-extern struct vfsmount *selinuxfs_mount;
-extern struct dentry *selinux_null;
+static int match_file(const void *p, struct file *file, unsigned fd)
+{
+ return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0;
+}
/* Derived from fs/exec.c:flush_old_files. */
static inline void flush_unauthorized_files(const struct cred *cred,
struct files_struct *files)
{
- struct common_audit_data ad;
struct file *file, *devnull = NULL;
struct tty_struct *tty;
- struct fdtable *fdt;
- long j = -1;
int drop_tty = 0;
+ unsigned n;
tty = get_current_tty();
if (tty) {
- file_list_lock();
+ spin_lock(&tty_files_lock);
if (!list_empty(&tty->tty_files)) {
- struct inode *inode;
+ struct tty_file_private *file_priv;
/* Revalidate access to controlling tty.
- Use inode_has_perm on the tty inode directly rather
- than using file_has_perm, as this particular open
- file may belong to another process and we are only
- interested in the inode-based check here. */
- file = list_first_entry(&tty->tty_files, struct file, f_u.fu_list);
- inode = file->f_path.dentry->d_inode;
- if (inode_has_perm(cred, inode,
- FILE__READ | FILE__WRITE, NULL)) {
+ Use file_path_has_perm on the tty path directly
+ rather than using file_has_perm, as this particular
+ open file may belong to another process and we are
+ only interested in the inode-based check here. */
+ file_priv = list_first_entry(&tty->tty_files,
+ struct tty_file_private, list);
+ file = file_priv->file;
+ if (file_path_has_perm(cred, file, FILE__READ | FILE__WRITE))
drop_tty = 1;
- }
}
- file_list_unlock();
+ spin_unlock(&tty_files_lock);
tty_kref_put(tty);
}
/* Reset controlling tty. */
@@ -2270,62 +2264,19 @@ static inline void flush_unauthorized_files(const struct cred *cred,
no_tty();
/* Revalidate access to inherited open files. */
+ n = iterate_fd(files, 0, match_file, cred);
+ if (!n) /* none found? */
+ return;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
-
- spin_lock(&files->file_lock);
- for (;;) {
- unsigned long set, i;
- int fd;
-
- j++;
- i = j * __NFDBITS;
- fdt = files_fdtable(files);
- if (i >= fdt->max_fds)
- break;
- set = fdt->open_fds->fds_bits[j];
- if (!set)
- continue;
- spin_unlock(&files->file_lock);
- for ( ; set ; i++, set >>= 1) {
- if (set & 1) {
- file = fget(i);
- if (!file)
- continue;
- if (file_has_perm(cred,
- file,
- file_to_av(file))) {
- sys_close(i);
- fd = get_unused_fd();
- if (fd != i) {
- if (fd >= 0)
- put_unused_fd(fd);
- fput(file);
- continue;
- }
- if (devnull) {
- get_file(devnull);
- } else {
- devnull = dentry_open(
- dget(selinux_null),
- mntget(selinuxfs_mount),
- O_RDWR, cred);
- if (IS_ERR(devnull)) {
- devnull = NULL;
- put_unused_fd(fd);
- fput(file);
- continue;
- }
- }
- fd_install(fd, devnull);
- }
- fput(file);
- }
- }
- spin_lock(&files->file_lock);
-
- }
- spin_unlock(&files->file_lock);
+ devnull = dentry_open(&selinux_null, O_RDWR, cred);
+ if (IS_ERR(devnull))
+ devnull = NULL;
+ /* replace all the matching ones with this */
+ do {
+ replace_fd(n - 1, devnull, 0);
+ } while ((n = iterate_fd(files, n, match_file, cred)) != 0);
+ if (devnull)
+ fput(devnull);
}
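The rewrite above leans on the iterate_fd() convention: the callback returns 0 to keep scanning and any non-zero value to stop the walk, with that value handed back to the caller; match_file() therefore returns fd + 1 so that 0 stays free to mean "nothing matched". A small standalone sketch of the same convention; nonblock_fd() and find_nonblock_fd() are hypothetical examples, not kernel helpers:

#include <linux/fdtable.h>	/* iterate_fd(), struct files_struct */
#include <linux/fcntl.h>	/* O_NONBLOCK */

static int nonblock_fd(const void *p, struct file *file, unsigned fd)
{
	/* non-zero stops iterate_fd(); fd + 1 keeps 0 free for "no match" */
	return (file->f_flags & O_NONBLOCK) ? fd + 1 : 0;
}

static int find_nonblock_fd(struct files_struct *files)
{
	unsigned n = iterate_fd(files, 0, nonblock_fd, NULL);

	return n ? (int)(n - 1) : -1;	/* -1 when no descriptor matched */
}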
/*
@@ -2360,12 +2311,15 @@ static void selinux_bprm_committing_creds(struct linux_binprm *bprm)
rc = avc_has_perm(new_tsec->osid, new_tsec->sid, SECCLASS_PROCESS,
PROCESS__RLIMITINH, NULL);
if (rc) {
+ /* protect against do_prlimit() */
+ task_lock(current);
for (i = 0; i < RLIM_NLIMITS; i++) {
rlim = current->signal->rlim + i;
initrlim = init_task.signal->rlim + i;
rlim->rlim_cur = min(rlim->rlim_max, initrlim->rlim_cur);
}
- update_rlimit_cpu(current->signal->rlim[RLIMIT_CPU].rlim_cur);
+ task_unlock(current);
+ update_rlimit_cpu(current, rlimit(RLIMIT_CPU));
}
}
@@ -2517,6 +2471,92 @@ out:
return rc;
}
+static int selinux_sb_remount(struct super_block *sb, void *data)
+{
+ int rc, i, *flags;
+ struct security_mnt_opts opts;
+ char *secdata, **mount_options;
+ struct superblock_security_struct *sbsec = sb->s_security;
+
+ if (!(sbsec->flags & SE_SBINITIALIZED))
+ return 0;
+
+ if (!data)
+ return 0;
+
+ if (sb->s_type->fs_flags & FS_BINARY_MOUNTDATA)
+ return 0;
+
+ security_init_mnt_opts(&opts);
+ secdata = alloc_secdata();
+ if (!secdata)
+ return -ENOMEM;
+ rc = selinux_sb_copy_data(data, secdata);
+ if (rc)
+ goto out_free_secdata;
+
+ rc = selinux_parse_opts_str(secdata, &opts);
+ if (rc)
+ goto out_free_secdata;
+
+ mount_options = opts.mnt_opts;
+ flags = opts.mnt_opts_flags;
+
+ for (i = 0; i < opts.num_mnt_opts; i++) {
+ u32 sid;
+ size_t len;
+
+ if (flags[i] == SBLABEL_MNT)
+ continue;
+ len = strlen(mount_options[i]);
+ rc = security_context_to_sid(mount_options[i], len, &sid,
+ GFP_KERNEL);
+ if (rc) {
+ printk(KERN_WARNING "SELinux: security_context_to_sid"
+ "(%s) failed for (dev %s, type %s) errno=%d\n",
+ mount_options[i], sb->s_id, sb->s_type->name, rc);
+ goto out_free_opts;
+ }
+ rc = -EINVAL;
+ switch (flags[i]) {
+ case FSCONTEXT_MNT:
+ if (bad_option(sbsec, FSCONTEXT_MNT, sbsec->sid, sid))
+ goto out_bad_option;
+ break;
+ case CONTEXT_MNT:
+ if (bad_option(sbsec, CONTEXT_MNT, sbsec->mntpoint_sid, sid))
+ goto out_bad_option;
+ break;
+ case ROOTCONTEXT_MNT: {
+ struct inode_security_struct *root_isec;
+ root_isec = sb->s_root->d_inode->i_security;
+
+ if (bad_option(sbsec, ROOTCONTEXT_MNT, root_isec->sid, sid))
+ goto out_bad_option;
+ break;
+ }
+ case DEFCONTEXT_MNT:
+ if (bad_option(sbsec, DEFCONTEXT_MNT, sbsec->def_sid, sid))
+ goto out_bad_option;
+ break;
+ default:
+ goto out_free_opts;
+ }
+ }
+
+ rc = 0;
+out_free_opts:
+ security_free_mnt_opts(&opts);
+out_free_secdata:
+ free_secdata(secdata);
+ return rc;
+out_bad_option:
+ printk(KERN_WARNING "SELinux: unable to change security options "
+ "during remount (dev %s, type=%s)\n", sb->s_id,
+ sb->s_type->name);
+ goto out_free_opts;
+}
+
static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
{
const struct cred *cred = current_cred();
@@ -2531,8 +2571,8 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
if (flags & MS_KERNMOUNT)
return 0;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.path.dentry = sb->s_root;
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = sb->s_root;
return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
}
@@ -2541,25 +2581,24 @@ static int selinux_sb_statfs(struct dentry *dentry)
const struct cred *cred = current_cred();
struct common_audit_data ad;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.path.dentry = dentry->d_sb->s_root;
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry->d_sb->s_root;
return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
-static int selinux_mount(char *dev_name,
+static int selinux_mount(const char *dev_name,
struct path *path,
- char *type,
+ const char *type,
unsigned long flags,
void *data)
{
const struct cred *cred = current_cred();
if (flags & MS_REMOUNT)
- return superblock_has_perm(cred, path->mnt->mnt_sb,
+ return superblock_has_perm(cred, path->dentry->d_sb,
FILESYSTEM__REMOUNT, NULL);
else
- return dentry_has_perm(cred, path->mnt, path->dentry,
- FILE__MOUNTON);
+ return path_has_perm(cred, path, FILE__MOUNTON);
}
static int selinux_umount(struct vfsmount *mnt, int flags)
@@ -2582,17 +2621,51 @@ static void selinux_inode_free_security(struct inode *inode)
inode_free_security(inode);
}
-static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value,
- size_t *len)
+static int selinux_dentry_init_security(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen)
{
const struct cred *cred = current_cred();
- const struct task_security_struct *tsec = cred->security;
+ struct task_security_struct *tsec;
+ struct inode_security_struct *dsec;
+ struct superblock_security_struct *sbsec;
+ struct inode *dir = dentry->d_parent->d_inode;
+ u32 newsid;
+ int rc;
+
+ tsec = cred->security;
+ dsec = dir->i_security;
+ sbsec = dir->i_sb->s_security;
+
+ if (tsec->create_sid && sbsec->behavior != SECURITY_FS_USE_MNTPOINT) {
+ newsid = tsec->create_sid;
+ } else {
+ rc = security_transition_sid(tsec->sid, dsec->sid,
+ inode_mode_to_security_class(mode),
+ name,
+ &newsid);
+ if (rc) {
+ printk(KERN_WARNING
+ "%s: security_transition_sid failed, rc=%d\n",
+ __func__, -rc);
+ return rc;
+ }
+ }
+
+ return security_sid_to_context(newsid, (char **)ctx, ctxlen);
+}
+
+static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
+ const struct qstr *qstr,
+ const char **name,
+ void **value, size_t *len)
+{
+ const struct task_security_struct *tsec = current_security();
struct inode_security_struct *dsec;
struct superblock_security_struct *sbsec;
u32 sid, newsid, clen;
int rc;
- char *namep = NULL, *context;
+ char *context;
dsec = dir->i_security;
sbsec = dir->i_sb->s_security;
@@ -2600,10 +2673,13 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
sid = tsec->sid;
newsid = tsec->create_sid;
- if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+ if ((sbsec->flags & SE_SBINITIALIZED) &&
+ (sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
+ newsid = sbsec->mntpoint_sid;
+ else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
- &newsid);
+ qstr, &newsid);
if (rc) {
printk(KERN_WARNING "%s: "
"security_transition_sid failed, rc=%d (dev=%s "
@@ -2622,22 +2698,16 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
isec->initialized = 1;
}
- if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP))
+ if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
- if (name) {
- namep = kstrdup(XATTR_SELINUX_SUFFIX, GFP_NOFS);
- if (!namep)
- return -ENOMEM;
- *name = namep;
- }
+ if (name)
+ *name = XATTR_SELINUX_SUFFIX;
if (value && len) {
rc = security_sid_to_context_force(newsid, &context, &clen);
- if (rc) {
- kfree(namep);
+ if (rc)
return rc;
- }
*value = context;
*len = clen;
}
@@ -2645,7 +2715,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
return 0;
}
-static int selinux_inode_create(struct inode *dir, struct dentry *dentry, int mask)
+static int selinux_inode_create(struct inode *dir, struct dentry *dentry, umode_t mode)
{
return may_create(dir, dentry, SECCLASS_FILE);
}
@@ -2665,7 +2735,7 @@ static int selinux_inode_symlink(struct inode *dir, struct dentry *dentry, const
return may_create(dir, dentry, SECCLASS_LNK_FILE);
}
-static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, int mask)
+static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, umode_t mask)
{
return may_create(dir, dentry, SECCLASS_DIR);
}
@@ -2675,7 +2745,7 @@ static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry)
return may_link(dir, dentry, MAY_RMDIR);
}
-static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
{
return may_create(dir, dentry, inode_mode_to_security_class(mode));
}
@@ -2690,33 +2760,82 @@ static int selinux_inode_readlink(struct dentry *dentry)
{
const struct cred *cred = current_cred();
- return dentry_has_perm(cred, NULL, dentry, FILE__READ);
+ return dentry_has_perm(cred, dentry, FILE__READ);
}
static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *nameidata)
{
const struct cred *cred = current_cred();
- return dentry_has_perm(cred, NULL, dentry, FILE__READ);
+ return dentry_has_perm(cred, dentry, FILE__READ);
+}
+
+static noinline int audit_inode_permission(struct inode *inode,
+ u32 perms, u32 audited, u32 denied,
+ int result,
+ unsigned flags)
+{
+ struct common_audit_data ad;
+ struct inode_security_struct *isec = inode->i_security;
+ int rc;
+
+ ad.type = LSM_AUDIT_DATA_INODE;
+ ad.u.inode = inode;
+
+ rc = slow_avc_audit(current_sid(), isec->sid, isec->sclass, perms,
+ audited, denied, result, &ad, flags);
+ if (rc)
+ return rc;
+ return 0;
}
static int selinux_inode_permission(struct inode *inode, int mask)
{
const struct cred *cred = current_cred();
+ u32 perms;
+ bool from_access;
+ unsigned flags = mask & MAY_NOT_BLOCK;
+ struct inode_security_struct *isec;
+ u32 sid;
+ struct av_decision avd;
+ int rc, rc2;
+ u32 audited, denied;
- if (!mask) {
- /* No permission to check. Existence test. */
+ from_access = mask & MAY_ACCESS;
+ mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
+
+ /* No permission to check. Existence test. */
+ if (!mask)
return 0;
- }
- return inode_has_perm(cred, inode,
- file_mask_to_av(inode->i_mode, mask), NULL);
+ validate_creds(cred);
+
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
+
+ perms = file_mask_to_av(inode->i_mode, mask);
+
+ sid = cred_sid(cred);
+ isec = inode->i_security;
+
+ rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd);
+ audited = avc_audit_required(perms, &avd, rc,
+ from_access ? FILE__AUDIT_ACCESS : 0,
+ &denied);
+ if (likely(!audited))
+ return rc;
+
+ rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
+ if (rc2)
+ return rc2;
+ return rc;
}
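selinux_inode_permission() now open-codes the check so the common case never touches the audit machinery: avc_audit_required() is a cheap mask computation, and slow_avc_audit(), which may have to build a pathname, only runs when a record will actually be emitted. A hedged sketch of the same fast-path shape for an arbitrary check, using the signatures visible above; the caller-supplied ad and flags are assumptions:

static int my_fastpath_check(u32 ssid, u32 tsid, u16 tclass, u32 requested,
			     struct common_audit_data *ad, unsigned flags)
{
	struct av_decision avd;
	u32 audited, denied;
	int rc, rc2;

	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);

	/* cheap: decide whether an audit record is needed at all */
	audited = avc_audit_required(requested, &avd, rc, 0, &denied);
	if (likely(!audited))
		return rc;

	/* expensive path only when something will really be logged */
	rc2 = slow_avc_audit(ssid, tsid, tclass, requested,
			     audited, denied, rc, ad, flags);
	if (rc2)
		return rc2;
	return rc;
}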
static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
const struct cred *cred = current_cred();
unsigned int ia_valid = iattr->ia_valid;
+ __u32 av = FILE__WRITE;
/* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */
if (ia_valid & ATTR_FORCE) {
@@ -2728,16 +2847,23 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
if (ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID |
ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
- return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR);
+ return dentry_has_perm(cred, dentry, FILE__SETATTR);
+
+ if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
+ av |= FILE__OPEN;
- return dentry_has_perm(cred, NULL, dentry, FILE__WRITE);
+ return dentry_has_perm(cred, dentry, av);
}
static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
{
const struct cred *cred = current_cred();
+ struct path path;
- return dentry_has_perm(cred, mnt, dentry, FILE__GETATTR);
+ path.dentry = dentry;
+ path.mnt = mnt;
+
+ return path_has_perm(cred, &path, FILE__GETATTR);
}
static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
@@ -2758,7 +2884,7 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name)
/* Not an attribute we recognize, so just check the
ordinary setattr permission. */
- return dentry_has_perm(cred, NULL, dentry, FILE__SETATTR);
+ return dentry_has_perm(cred, dentry, FILE__SETATTR);
}
static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
@@ -2775,24 +2901,46 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return selinux_inode_setotherxattr(dentry, name);
sbsec = inode->i_sb->s_security;
- if (!(sbsec->flags & SE_SBLABELSUPP))
+ if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
- if (!is_owner_or_cap(inode))
+ if (!inode_owner_or_capable(inode))
return -EPERM;
- COMMON_AUDIT_DATA_INIT(&ad, FS);
- ad.u.fs.path.dentry = dentry;
+ ad.type = LSM_AUDIT_DATA_DENTRY;
+ ad.u.dentry = dentry;
rc = avc_has_perm(sid, isec->sid, isec->sclass,
FILE__RELABELFROM, &ad);
if (rc)
return rc;
- rc = security_context_to_sid(value, size, &newsid);
+ rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
if (rc == -EINVAL) {
- if (!capable(CAP_MAC_ADMIN))
+ if (!capable(CAP_MAC_ADMIN)) {
+ struct audit_buffer *ab;
+ size_t audit_size;
+ const char *str;
+
+ /* We strip a nul only if it is at the end, otherwise the
+ * context contains a nul and we should audit that */
+ if (value) {
+ str = value;
+ if (str[size - 1] == '\0')
+ audit_size = size - 1;
+ else
+ audit_size = size;
+ } else {
+ str = "";
+ audit_size = 0;
+ }
+ ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ audit_log_format(ab, "op=setxattr invalid_context=");
+ audit_log_n_untrustedstring(ab, value, audit_size);
+ audit_log_end(ab);
+
return rc;
+ }
rc = security_context_to_sid_force(value, size, &newsid);
}
if (rc)
@@ -2837,7 +2985,10 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
+ isec->initialized = 1;
+
return;
}
@@ -2845,14 +2996,14 @@ static int selinux_inode_getxattr(struct dentry *dentry, const char *name)
{
const struct cred *cred = current_cred();
- return dentry_has_perm(cred, NULL, dentry, FILE__GETATTR);
+ return dentry_has_perm(cred, dentry, FILE__GETATTR);
}
static int selinux_inode_listxattr(struct dentry *dentry)
{
const struct cred *cred = current_cred();
- return dentry_has_perm(cred, NULL, dentry, FILE__GETATTR);
+ return dentry_has_perm(cred, dentry, FILE__GETATTR);
}
static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
@@ -2889,7 +3040,7 @@ static int selinux_inode_getsecurity(const struct inode *inode, const char *name
* and lack of permission just means that we fall back to the
* in-core context value, not a denial.
*/
- error = selinux_capable(current, current_cred(), CAP_MAC_ADMIN,
+ error = selinux_capable(current_cred(), &init_user_ns, CAP_MAC_ADMIN,
SECURITY_CAP_NOAUDIT);
if (!error)
error = security_sid_to_context_force(isec->sid, &context,
@@ -2921,10 +3072,11 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
if (!value || !size)
return -EACCES;
- rc = security_context_to_sid((void *)value, size, &newsid);
+ rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL);
if (rc)
return rc;
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = 1;
return 0;
@@ -2949,7 +3101,7 @@ static void selinux_inode_getsecid(const struct inode *inode, u32 *secid)
static int selinux_revalidate_file_permission(struct file *file, int mask)
{
const struct cred *cred = current_cred();
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
/* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
@@ -2961,7 +3113,7 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
static int selinux_file_permission(struct file *file, int mask)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct file_security_struct *fsec = file->f_security;
struct inode_security_struct *isec = inode->i_security;
u32 sid = current_sid();
@@ -2972,7 +3124,7 @@ static int selinux_file_permission(struct file *file, int mask)
if (sid == fsec->sid && fsec->isid == isec->sid &&
fsec->pseqno == avc_policy_seqno())
- /* No change since dentry_open check. */
+ /* No change since file_open check. */
return 0;
return selinux_revalidate_file_permission(file, mask);
@@ -2992,25 +3144,58 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
const struct cred *cred = current_cred();
- u32 av = 0;
+ int error = 0;
- if (_IOC_DIR(cmd) & _IOC_WRITE)
- av |= FILE__WRITE;
- if (_IOC_DIR(cmd) & _IOC_READ)
- av |= FILE__READ;
- if (!av)
- av = FILE__IOCTL;
+ switch (cmd) {
+ case FIONREAD:
+ /* fall through */
+ case FIBMAP:
+ /* fall through */
+ case FIGETBSZ:
+ /* fall through */
+ case FS_IOC_GETFLAGS:
+ /* fall through */
+ case FS_IOC_GETVERSION:
+ error = file_has_perm(cred, file, FILE__GETATTR);
+ break;
+
+ case FS_IOC_SETFLAGS:
+ /* fall through */
+ case FS_IOC_SETVERSION:
+ error = file_has_perm(cred, file, FILE__SETATTR);
+ break;
- return file_has_perm(cred, file, av);
+ /* sys_ioctl() checks */
+ case FIONBIO:
+ /* fall through */
+ case FIOASYNC:
+ error = file_has_perm(cred, file, 0);
+ break;
+
+ case KDSKBENT:
+ case KDSKBSENT:
+ error = cred_has_capability(cred, CAP_SYS_TTY_CONFIG,
+ SECURITY_CAP_AUDIT);
+ break;
+
+ /* default case assumes that the command will go
+ * to the file's ioctl() function.
+ */
+ default:
+ error = file_has_perm(cred, file, FILE__IOCTL);
+ }
+ return error;
}
+static int default_noexec;
+
static int file_map_prot_check(struct file *file, unsigned long prot, int shared)
{
const struct cred *cred = current_cred();
int rc = 0;
-#ifndef CONFIG_PPC32
- if ((prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
+ if (default_noexec &&
+ (prot & PROT_EXEC) && (!file || (!shared && (prot & PROT_WRITE)))) {
/*
* We are making executable an anonymous mapping or a
* private file mapping that will also be writable.
@@ -3020,7 +3205,6 @@ static int file_map_prot_check(struct file *file, unsigned long prot, int shared
if (rc)
goto error;
}
-#endif
if (file) {
/* read access is always possible with a mapping */
@@ -3040,31 +3224,27 @@ error:
return rc;
}
-static int selinux_file_mmap(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags,
- unsigned long addr, unsigned long addr_only)
+static int selinux_mmap_addr(unsigned long addr)
{
- int rc = 0;
- u32 sid = current_sid();
+ int rc;
+
+ /* do DAC check on address space usage */
+ rc = cap_mmap_addr(addr);
+ if (rc)
+ return rc;
- /*
- * notice that we are intentionally putting the SELinux check before
- * the secondary cap_file_mmap check. This is such a likely attempt
- * at bad behaviour/exploit that we always want to get the AVC, even
- * if DAC would have also denied the operation.
- */
if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
+ u32 sid = current_sid();
rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT,
MEMPROTECT__MMAP_ZERO, NULL);
- if (rc)
- return rc;
}
- /* do DAC check on address space usage */
- rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
- if (rc || addr_only)
- return rc;
+ return rc;
+}
+static int selinux_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
if (selinux_checkreqprot)
prot = reqprot;
@@ -3081,8 +3261,8 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
if (selinux_checkreqprot)
prot = reqprot;
-#ifndef CONFIG_PPC32
- if ((prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
+ if (default_noexec &&
+ (prot & PROT_EXEC) && !(vma->vm_flags & VM_EXEC)) {
int rc = 0;
if (vma->vm_start >= vma->vm_mm->start_brk &&
vma->vm_end <= vma->vm_mm->brk) {
@@ -3104,7 +3284,6 @@ static int selinux_file_mprotect(struct vm_area_struct *vma,
if (rc)
return rc;
}
-#endif
return file_map_prot_check(vma->vm_file, prot, vma->vm_flags&VM_SHARED);
}
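With default_noexec computed once at init time, the PROT_EXEC checks above apply on any architecture whose default VM flags are non-executable, instead of being compiled out only on PPC32. A minimal user-space sketch of the operation being mediated, turning a writable private anonymous mapping executable, which is what the process execmem permission covers:

/* Make an anonymous, writable mapping executable via mprotect(). */
#define _DEFAULT_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 0xc3;	/* x86 "ret"; the payload itself is irrelevant here */

	/* This PROT_EXEC upgrade is what gets checked as process:execmem
	 * on architectures where default_noexec is set. */
	if (mprotect(p, len, PROT_READ | PROT_EXEC) < 0) {
		perror("mprotect(PROT_EXEC)");	/* EACCES on an execmem denial */
		munmap(p, len);
		return 1;
	}
	puts("anonymous mapping is now executable");
	munmap(p, len);
	return 0;
}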
@@ -3124,11 +3303,6 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
switch (cmd) {
case F_SETFL:
- if (!file->f_path.dentry || !file->f_path.dentry->d_inode) {
- err = -EINVAL;
- break;
- }
-
if ((file->f_flags & O_APPEND) && !(arg & O_APPEND)) {
err = file_has_perm(cred, file, FILE__WRITE);
break;
@@ -3139,21 +3313,21 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
case F_GETFL:
case F_GETOWN:
case F_GETSIG:
+ case F_GETOWNER_UIDS:
/* Just check FD__USE permission */
err = file_has_perm(cred, file, 0);
break;
case F_GETLK:
case F_SETLK:
case F_SETLKW:
+ case F_OFD_GETLK:
+ case F_OFD_SETLK:
+ case F_OFD_SETLKW:
#if BITS_PER_LONG == 32
case F_GETLK64:
case F_SETLK64:
case F_SETLKW64:
#endif
- if (!file->f_path.dentry || !file->f_path.dentry->d_inode) {
- err = -EINVAL;
- break;
- }
err = file_has_perm(cred, file, FILE__LOCK);
break;
}
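The hunk above folds the new open-file-description lock commands into the same FILE__LOCK check as the classic POSIX lock commands and drops the redundant dentry/inode NULL tests. A small user-space sketch of an F_OFD_SETLK request, assuming a glibc recent enough to expose the F_OFD_* constants:

/* Take a whole-file OFD write lock; checked as fd:use + file:lock. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/tmp/ofd-lock-demo", O_RDWR | O_CREAT, 0600);
	struct flock fl;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	memset(&fl, 0, sizeof(fl));	/* OFD locks require l_pid == 0 */
	fl.l_type = F_WRLCK;
	fl.l_whence = SEEK_SET;		/* l_start = l_len = 0: whole file */

	if (fcntl(fd, F_OFD_SETLK, &fl) < 0) {
		perror("F_OFD_SETLK");
		return 1;
	}
	puts("got an OFD write lock");
	close(fd);	/* OFD locks die with the open file description */
	return 0;
}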
@@ -3200,15 +3374,13 @@ static int selinux_file_receive(struct file *file)
return file_has_perm(cred, file, file_to_av(file));
}
-static int selinux_dentry_open(struct file *file, const struct cred *cred)
+static int selinux_file_open(struct file *file, const struct cred *cred)
{
struct file_security_struct *fsec;
- struct inode *inode;
struct inode_security_struct *isec;
- inode = file->f_path.dentry->d_inode;
fsec = file->f_security;
- isec = inode->i_security;
+ isec = file_inode(file)->i_security;
/*
* Save inode label and policy sequence number
* at open-time so that selinux_file_permission
@@ -3226,7 +3398,7 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred)
* new inode label or new policy.
* This check is not redundant - do not remove.
*/
- return inode_has_perm(cred, inode, open_file_to_av(file), NULL);
+ return file_path_has_perm(cred, file, open_file_to_av(file));
}
/* task security operations */
@@ -3258,7 +3430,11 @@ static void selinux_cred_free(struct cred *cred)
{
struct task_security_struct *tsec = cred->security;
- BUG_ON((unsigned long) cred->security < PAGE_SIZE);
+ /*
+ * cred->security == NULL if security_cred_alloc_blank() or
+ * security_prepare_creds() returned an error.
+ */
+ BUG_ON(cred->security && (unsigned long) cred->security < PAGE_SIZE);
cred->security = (void *) 0x7UL;
kfree(tsec);
}
@@ -3334,7 +3510,7 @@ static int selinux_kernel_create_files_as(struct cred *new, struct inode *inode)
if (ret == 0)
tsec->create_sid = isec->sid;
- return 0;
+ return ret;
}
static int selinux_kernel_module_request(char *kmod_name)
@@ -3344,7 +3520,7 @@ static int selinux_kernel_module_request(char *kmod_name)
sid = task_sid(current);
- COMMON_AUDIT_DATA_INIT(&ad, KMOD);
+ ad.type = LSM_AUDIT_DATA_KMOD;
ad.u.kmod_name = kmod_name;
return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM,
@@ -3398,25 +3574,26 @@ static int selinux_task_getioprio(struct task_struct *p)
return current_has_perm(p, PROCESS__GETSCHED);
}
-static int selinux_task_setrlimit(unsigned int resource, struct rlimit *new_rlim)
+static int selinux_task_setrlimit(struct task_struct *p, unsigned int resource,
+ struct rlimit *new_rlim)
{
- struct rlimit *old_rlim = current->signal->rlim + resource;
+ struct rlimit *old_rlim = p->signal->rlim + resource;
/* Control the ability to change the hard limit (whether
lowering or raising it), so that the hard limit can
later be used as a safe reset point for the soft limit
upon context transitions. See selinux_bprm_committing_creds. */
if (old_rlim->rlim_max != new_rlim->rlim_max)
- return current_has_perm(current, PROCESS__SETRLIMIT);
+ return current_has_perm(p, PROCESS__SETRLIMIT);
return 0;
}
-static int selinux_task_setscheduler(struct task_struct *p, int policy, struct sched_param *lp)
+static int selinux_task_setscheduler(struct task_struct *p)
{
int rc;
- rc = cap_task_setscheduler(p, policy, lp);
+ rc = cap_task_setscheduler(p);
if (rc)
return rc;
@@ -3482,8 +3659,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (ihlen < sizeof(_iph))
goto out;
- ad->u.net.v4info.saddr = ih->saddr;
- ad->u.net.v4info.daddr = ih->daddr;
+ ad->u.net->v4info.saddr = ih->saddr;
+ ad->u.net->v4info.daddr = ih->daddr;
ret = 0;
if (proto)
@@ -3501,8 +3678,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
@@ -3517,8 +3694,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
@@ -3533,8 +3710,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
@@ -3554,19 +3731,20 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
u8 nexthdr;
int ret = -EINVAL, offset;
struct ipv6hdr _ipv6h, *ip6;
+ __be16 frag_off;
offset = skb_network_offset(skb);
ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
if (ip6 == NULL)
goto out;
- ipv6_addr_copy(&ad->u.net.v6info.saddr, &ip6->saddr);
- ipv6_addr_copy(&ad->u.net.v6info.daddr, &ip6->daddr);
+ ad->u.net->v6info.saddr = ip6->saddr;
+ ad->u.net->v6info.daddr = ip6->daddr;
ret = 0;
nexthdr = ip6->nexthdr;
offset += sizeof(_ipv6h);
- offset = ipv6_skip_exthdr(skb, offset, &nexthdr);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
if (offset < 0)
goto out;
@@ -3581,8 +3759,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
@@ -3593,8 +3771,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
@@ -3605,8 +3783,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
@@ -3626,13 +3804,13 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
char *addrp;
int ret;
- switch (ad->u.net.family) {
+ switch (ad->u.net->family) {
case PF_INET:
ret = selinux_parse_skb_ipv4(skb, ad, proto);
if (ret)
goto parse_error;
- addrp = (char *)(src ? &ad->u.net.v4info.saddr :
- &ad->u.net.v4info.daddr);
+ addrp = (char *)(src ? &ad->u.net->v4info.saddr :
+ &ad->u.net->v4info.daddr);
goto okay;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -3640,8 +3818,8 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
ret = selinux_parse_skb_ipv6(skb, ad, proto);
if (ret)
goto parse_error;
- addrp = (char *)(src ? &ad->u.net.v6info.saddr :
- &ad->u.net.v6info.daddr);
+ addrp = (char *)(src ? &ad->u.net->v6info.saddr :
+ &ad->u.net->v6info.daddr);
goto okay;
#endif /* IPV6 */
default:
@@ -3683,8 +3861,12 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
u32 nlbl_sid;
u32 nlbl_type;
- selinux_skb_xfrm_sid(skb, &xfrm_sid);
- selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
+ if (unlikely(err))
+ return -EACCES;
+ err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ if (unlikely(err))
+ return -EACCES;
err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
if (unlikely(err)) {
@@ -3697,74 +3879,98 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
return 0;
}
-/* socket security operations */
-static int socket_has_perm(struct task_struct *task, struct socket *sock,
- u32 perms)
+/**
+ * selinux_conn_sid - Determine the child socket label for a connection
+ * @sk_sid: the parent socket's SID
+ * @skb_sid: the packet's SID
+ * @conn_sid: the resulting connection SID
+ *
+ * If @skb_sid is valid then the user:role:type information from @sk_sid is
+ * combined with the MLS information from @skb_sid in order to create
+ * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
+ * of @sk_sid. Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
{
- struct inode_security_struct *isec;
- struct common_audit_data ad;
- u32 sid;
int err = 0;
- isec = SOCK_INODE(sock)->i_security;
+ if (skb_sid != SECSID_NULL)
+ err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
+ else
+ *conn_sid = sk_sid;
- if (isec->sid == SECINITSID_KERNEL)
- goto out;
- sid = task_sid(task);
+ return err;
+}
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = sock->sk;
- err = avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
+/* socket security operations */
-out:
- return err;
+static int socket_sockcreate_sid(const struct task_security_struct *tsec,
+ u16 secclass, u32 *socksid)
+{
+ if (tsec->sockcreate_sid > SECSID_NULL) {
+ *socksid = tsec->sockcreate_sid;
+ return 0;
+ }
+
+ return security_transition_sid(tsec->sid, tsec->sid, secclass, NULL,
+ socksid);
+}
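socket_sockcreate_sid() prefers the per-task sockcreate SID when one has been set and otherwise falls back to a type transition from the task SID. A user-space sketch of where that SID comes from, writing a context to /proc/self/attr/sockcreate before socket(); the context string is a placeholder and the write itself is subject to policy (process:setsockcreate):

/* Seed the sockcreate SID, then create a socket labeled from it. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	const char *ctx = "system_u:object_r:my_sock_t:s0";	/* hypothetical */
	int attr = open("/proc/self/attr/sockcreate", O_WRONLY);
	int sk;

	if (attr >= 0) {
		if (write(attr, ctx, strlen(ctx)) < 0)
			perror("set sockcreate");	/* denied by policy? */
		close(attr);
	}
	sk = socket(AF_INET, SOCK_STREAM, 0);	/* labeled from sockcreate_sid */
	if (sk < 0) {
		perror("socket");
		return 1;
	}
	close(sk);
	return 0;
}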
+
+static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
+ u32 tsid = task_sid(task);
+
+ if (sksec->sid == SECINITSID_KERNEL)
+ return 0;
+
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = sk;
+
+ return avc_has_perm(tsid, sksec->sid, sksec->sclass, perms, &ad);
}
static int selinux_socket_create(int family, int type,
int protocol, int kern)
{
- const struct cred *cred = current_cred();
- const struct task_security_struct *tsec = cred->security;
- u32 sid, newsid;
+ const struct task_security_struct *tsec = current_security();
+ u32 newsid;
u16 secclass;
- int err = 0;
+ int rc;
if (kern)
- goto out;
-
- sid = tsec->sid;
- newsid = tsec->sockcreate_sid ?: sid;
+ return 0;
secclass = socket_type_to_security_class(family, type, protocol);
- err = avc_has_perm(sid, newsid, secclass, SOCKET__CREATE, NULL);
+ rc = socket_sockcreate_sid(tsec, secclass, &newsid);
+ if (rc)
+ return rc;
-out:
- return err;
+ return avc_has_perm(tsec->sid, newsid, secclass, SOCKET__CREATE, NULL);
}
static int selinux_socket_post_create(struct socket *sock, int family,
int type, int protocol, int kern)
{
- const struct cred *cred = current_cred();
- const struct task_security_struct *tsec = cred->security;
- struct inode_security_struct *isec;
+ const struct task_security_struct *tsec = current_security();
+ struct inode_security_struct *isec = SOCK_INODE(sock)->i_security;
struct sk_security_struct *sksec;
- u32 sid, newsid;
int err = 0;
- sid = tsec->sid;
- newsid = tsec->sockcreate_sid;
-
- isec = SOCK_INODE(sock)->i_security;
+ isec->sclass = socket_type_to_security_class(family, type, protocol);
if (kern)
isec->sid = SECINITSID_KERNEL;
- else if (newsid)
- isec->sid = newsid;
- else
- isec->sid = sid;
+ else {
+ err = socket_sockcreate_sid(tsec, isec->sclass, &(isec->sid));
+ if (err)
+ return err;
+ }
- isec->sclass = socket_type_to_security_class(family, type, protocol);
isec->initialized = 1;
if (sock->sk) {
@@ -3783,10 +3989,11 @@ static int selinux_socket_post_create(struct socket *sock, int family,
static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
{
+ struct sock *sk = sock->sk;
u16 family;
int err;
- err = socket_has_perm(current, sock, SOCKET__BIND);
+ err = sock_has_perm(current, sk, SOCKET__BIND);
if (err)
goto out;
@@ -3795,19 +4002,17 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
* Multiple address binding for SCTP is not supported yet: we just
* check the first address now.
*/
- family = sock->sk->sk_family;
+ family = sk->sk_family;
if (family == PF_INET || family == PF_INET6) {
char *addrp;
- struct inode_security_struct *isec;
+ struct sk_security_struct *sksec = sk->sk_security;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
- struct sock *sk = sock->sk;
u32 sid, node_perm;
- isec = SOCK_INODE(sock)->i_security;
-
if (family == PF_INET) {
addr4 = (struct sockaddr_in *)address;
snum = ntohs(addr4->sin_port);
@@ -3821,25 +4026,26 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (snum) {
int low, high;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(sock_net(sk), &low, &high);
if (snum < max(PROT_SOCK, low) || snum > high) {
err = sel_netport_sid(sk->sk_protocol,
snum, &sid);
if (err)
goto out;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sport = htons(snum);
- ad.u.net.family = family;
- err = avc_has_perm(isec->sid, sid,
- isec->sclass,
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sport = htons(snum);
+ ad.u.net->family = family;
+ err = avc_has_perm(sksec->sid, sid,
+ sksec->sclass,
SOCKET__NAME_BIND, &ad);
if (err)
goto out;
}
}
- switch (isec->sclass) {
+ switch (sksec->sclass) {
case SECCLASS_TCP_SOCKET:
node_perm = TCP_SOCKET__NODE_BIND;
break;
@@ -3861,17 +4067,18 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (err)
goto out;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sport = htons(snum);
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sport = htons(snum);
+ ad.u.net->family = family;
if (family == PF_INET)
- ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
+ ad.u.net->v4info.saddr = addr4->sin_addr.s_addr;
else
- ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr);
+ ad.u.net->v6info.saddr = addr6->sin6_addr;
- err = avc_has_perm(isec->sid, sid,
- isec->sclass, node_perm, &ad);
+ err = avc_has_perm(sksec->sid, sid,
+ sksec->sclass, node_perm, &ad);
if (err)
goto out;
}
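As the bind path above shows, an explicit port outside the local ephemeral range triggers a name_bind check against the port's SID, followed by a node_bind check against the bound address. A user-space sketch of such a bind; port 8080 and the loopback address are just example values:

/* Bind to an explicit, non-ephemeral port on the loopback node. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int sk = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in sin;

	if (sk < 0) {
		perror("socket");
		return 1;
	}
	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(8080);			/* name_bind target */
	sin.sin_addr.s_addr = htonl(INADDR_LOOPBACK);	/* node_bind target */

	if (bind(sk, (struct sockaddr *)&sin, sizeof(sin)) < 0)
		perror("bind");		/* EACCES here can be a name_bind denial */
	else
		puts("bound to 127.0.0.1:8080");
	close(sk);
	return 0;
}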
@@ -3882,20 +4089,20 @@ out:
static int selinux_socket_connect(struct socket *sock, struct sockaddr *address, int addrlen)
{
struct sock *sk = sock->sk;
- struct inode_security_struct *isec;
+ struct sk_security_struct *sksec = sk->sk_security;
int err;
- err = socket_has_perm(current, sock, SOCKET__CONNECT);
+ err = sock_has_perm(current, sk, SOCKET__CONNECT);
if (err)
return err;
/*
* If a TCP or DCCP socket, check name_connect permission for the port.
*/
- isec = SOCK_INODE(sock)->i_security;
- if (isec->sclass == SECCLASS_TCP_SOCKET ||
- isec->sclass == SECCLASS_DCCP_SOCKET) {
+ if (sksec->sclass == SECCLASS_TCP_SOCKET ||
+ sksec->sclass == SECCLASS_DCCP_SOCKET) {
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
@@ -3917,13 +4124,14 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
if (err)
goto out;
- perm = (isec->sclass == SECCLASS_TCP_SOCKET) ?
+ perm = (sksec->sclass == SECCLASS_TCP_SOCKET) ?
TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.dport = htons(snum);
- ad.u.net.family = sk->sk_family;
- err = avc_has_perm(isec->sid, sid, isec->sclass, perm, &ad);
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->dport = htons(snum);
+ ad.u.net->family = sk->sk_family;
+ err = avc_has_perm(sksec->sid, sid, sksec->sclass, perm, &ad);
if (err)
goto out;
}
@@ -3936,7 +4144,7 @@ out:
static int selinux_socket_listen(struct socket *sock, int backlog)
{
- return socket_has_perm(current, sock, SOCKET__LISTEN);
+ return sock_has_perm(current, sock->sk, SOCKET__LISTEN);
}
static int selinux_socket_accept(struct socket *sock, struct socket *newsock)
@@ -3945,7 +4153,7 @@ static int selinux_socket_accept(struct socket *sock, struct socket *newsock)
struct inode_security_struct *isec;
struct inode_security_struct *newisec;
- err = socket_has_perm(current, sock, SOCKET__ACCEPT);
+ err = sock_has_perm(current, sock->sk, SOCKET__ACCEPT);
if (err)
return err;
@@ -3962,30 +4170,30 @@ static int selinux_socket_accept(struct socket *sock, struct socket *newsock)
static int selinux_socket_sendmsg(struct socket *sock, struct msghdr *msg,
int size)
{
- return socket_has_perm(current, sock, SOCKET__WRITE);
+ return sock_has_perm(current, sock->sk, SOCKET__WRITE);
}
static int selinux_socket_recvmsg(struct socket *sock, struct msghdr *msg,
int size, int flags)
{
- return socket_has_perm(current, sock, SOCKET__READ);
+ return sock_has_perm(current, sock->sk, SOCKET__READ);
}
static int selinux_socket_getsockname(struct socket *sock)
{
- return socket_has_perm(current, sock, SOCKET__GETATTR);
+ return sock_has_perm(current, sock->sk, SOCKET__GETATTR);
}
static int selinux_socket_getpeername(struct socket *sock)
{
- return socket_has_perm(current, sock, SOCKET__GETATTR);
+ return sock_has_perm(current, sock->sk, SOCKET__GETATTR);
}
static int selinux_socket_setsockopt(struct socket *sock, int level, int optname)
{
int err;
- err = socket_has_perm(current, sock, SOCKET__SETOPT);
+ err = sock_has_perm(current, sock->sk, SOCKET__SETOPT);
if (err)
return err;
@@ -3995,68 +4203,62 @@ static int selinux_socket_setsockopt(struct socket *sock, int level, int optname
static int selinux_socket_getsockopt(struct socket *sock, int level,
int optname)
{
- return socket_has_perm(current, sock, SOCKET__GETOPT);
+ return sock_has_perm(current, sock->sk, SOCKET__GETOPT);
}
static int selinux_socket_shutdown(struct socket *sock, int how)
{
- return socket_has_perm(current, sock, SOCKET__SHUTDOWN);
+ return sock_has_perm(current, sock->sk, SOCKET__SHUTDOWN);
}
-static int selinux_socket_unix_stream_connect(struct socket *sock,
- struct socket *other,
+static int selinux_socket_unix_stream_connect(struct sock *sock,
+ struct sock *other,
struct sock *newsk)
{
- struct sk_security_struct *ssec;
- struct inode_security_struct *isec;
- struct inode_security_struct *other_isec;
+ struct sk_security_struct *sksec_sock = sock->sk_security;
+ struct sk_security_struct *sksec_other = other->sk_security;
+ struct sk_security_struct *sksec_new = newsk->sk_security;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
int err;
- isec = SOCK_INODE(sock)->i_security;
- other_isec = SOCK_INODE(other)->i_security;
-
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = other->sk;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = other;
- err = avc_has_perm(isec->sid, other_isec->sid,
- isec->sclass,
+ err = avc_has_perm(sksec_sock->sid, sksec_other->sid,
+ sksec_other->sclass,
UNIX_STREAM_SOCKET__CONNECTTO, &ad);
if (err)
return err;
- /* connecting socket */
- ssec = sock->sk->sk_security;
- ssec->peer_sid = other_isec->sid;
-
/* server child socket */
- ssec = newsk->sk_security;
- ssec->peer_sid = isec->sid;
- err = security_sid_mls_copy(other_isec->sid, ssec->peer_sid, &ssec->sid);
+ sksec_new->peer_sid = sksec_sock->sid;
+ err = security_sid_mls_copy(sksec_other->sid, sksec_sock->sid,
+ &sksec_new->sid);
+ if (err)
+ return err;
- return err;
+ /* connecting socket */
+ sksec_sock->peer_sid = sksec_new->sid;
+
+ return 0;
}
static int selinux_socket_unix_may_send(struct socket *sock,
struct socket *other)
{
- struct inode_security_struct *isec;
- struct inode_security_struct *other_isec;
+ struct sk_security_struct *ssec = sock->sk->sk_security;
+ struct sk_security_struct *osec = other->sk->sk_security;
struct common_audit_data ad;
- int err;
-
- isec = SOCK_INODE(sock)->i_security;
- other_isec = SOCK_INODE(other)->i_security;
-
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = other->sk;
+ struct lsm_network_audit net = {0,};
- err = avc_has_perm(isec->sid, other_isec->sid,
- isec->sclass, SOCKET__SENDTO, &ad);
- if (err)
- return err;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = other->sk;
- return 0;
+ return avc_has_perm(ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
+ &ad);
}
static int selinux_inet_sys_rcv_skb(int ifindex, char *addrp, u16 family,
@@ -4087,14 +4289,15 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
{
int err = 0;
struct sk_security_struct *sksec = sk->sk_security;
- u32 peer_sid;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
char *addrp;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = skb->skb_iif;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = skb->skb_iif;
+ ad.u.net->family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
if (err)
return err;
@@ -4106,20 +4309,10 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
return err;
}
- if (selinux_policycap_netpeer) {
- err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
- if (err)
- return err;
- err = avc_has_perm(sk_sid, peer_sid,
- SECCLASS_PEER, PEER__RECV, &ad);
- if (err)
- selinux_netlbl_err(skb, err, 0);
- } else {
- err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad);
- if (err)
- return err;
- err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
- }
+ err = selinux_netlbl_sock_rcv_skb(sksec, skb, family, &ad);
+ if (err)
+ return err;
+ err = selinux_xfrm_sock_rcv_skb(sksec->sid, skb, &ad);
return err;
}
@@ -4131,6 +4324,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
u16 family = sk->sk_family;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
@@ -4150,13 +4344,14 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
return selinux_sock_rcv_skb_compat(sk, skb, family);
secmark_active = selinux_secmark_enabled();
- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return 0;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = skb->skb_iif;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = skb->skb_iif;
+ ad.u.net->family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
if (err)
return err;
@@ -4175,8 +4370,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
PEER__RECV, &ad);
- if (err)
+ if (err) {
selinux_netlbl_err(skb, err, 0);
+ return err;
+ }
}
if (secmark_active) {
@@ -4195,26 +4392,18 @@ static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *op
int err = 0;
char *scontext;
u32 scontext_len;
- struct sk_security_struct *ssec;
- struct inode_security_struct *isec;
+ struct sk_security_struct *sksec = sock->sk->sk_security;
u32 peer_sid = SECSID_NULL;
- isec = SOCK_INODE(sock)->i_security;
-
- if (isec->sclass == SECCLASS_UNIX_STREAM_SOCKET ||
- isec->sclass == SECCLASS_TCP_SOCKET) {
- ssec = sock->sk->sk_security;
- peer_sid = ssec->peer_sid;
- }
- if (peer_sid == SECSID_NULL) {
- err = -ENOPROTOOPT;
- goto out;
- }
+ if (sksec->sclass == SECCLASS_UNIX_STREAM_SOCKET ||
+ sksec->sclass == SECCLASS_TCP_SOCKET)
+ peer_sid = sksec->peer_sid;
+ if (peer_sid == SECSID_NULL)
+ return -ENOPROTOOPT;
err = security_sid_to_context(peer_sid, &scontext, &scontext_len);
-
if (err)
- goto out;
+ return err;
if (scontext_len > len) {
err = -ERANGE;
@@ -4227,9 +4416,7 @@ static int selinux_socket_getpeersec_stream(struct socket *sock, char __user *op
out_len:
if (put_user(scontext_len, optlen))
err = -EFAULT;
-
kfree(scontext);
-out:
return err;
}
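Together with the peer SID bookkeeping in selinux_socket_unix_stream_connect() above, this is what makes SO_PEERSEC work on connected stream sockets. A user-space sketch, with a placeholder socket path:

/* Connect to a unix stream socket and read the peer's context. */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	char ctx[256];
	socklen_t len = sizeof(ctx) - 1;
	int sk = socket(AF_UNIX, SOCK_STREAM, 0);

	if (sk < 0) {
		perror("socket");
		return 1;
	}
	strncpy(sun.sun_path, "/run/demo.sock", sizeof(sun.sun_path) - 1);

	/* connect() is the unix_stream_socket:connectto check */
	if (connect(sk, (struct sockaddr *)&sun, sizeof(sun)) < 0) {
		perror("connect");
		close(sk);
		return 1;
	}
	if (getsockopt(sk, SOL_SOCKET, SO_PEERSEC, ctx, &len) == 0) {
		ctx[len] = '\0';
		printf("peer context: %s\n", ctx);
	}
	close(sk);
	return 0;
}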
@@ -4261,24 +4448,39 @@ out:
static int selinux_sk_alloc_security(struct sock *sk, int family, gfp_t priority)
{
- return sk_alloc_security(sk, family, priority);
+ struct sk_security_struct *sksec;
+
+ sksec = kzalloc(sizeof(*sksec), priority);
+ if (!sksec)
+ return -ENOMEM;
+
+ sksec->peer_sid = SECINITSID_UNLABELED;
+ sksec->sid = SECINITSID_UNLABELED;
+ selinux_netlbl_sk_security_reset(sksec);
+ sk->sk_security = sksec;
+
+ return 0;
}
static void selinux_sk_free_security(struct sock *sk)
{
- sk_free_security(sk);
+ struct sk_security_struct *sksec = sk->sk_security;
+
+ sk->sk_security = NULL;
+ selinux_netlbl_sk_security_free(sksec);
+ kfree(sksec);
}
static void selinux_sk_clone_security(const struct sock *sk, struct sock *newsk)
{
- struct sk_security_struct *ssec = sk->sk_security;
- struct sk_security_struct *newssec = newsk->sk_security;
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct sk_security_struct *newsksec = newsk->sk_security;
- newssec->sid = ssec->sid;
- newssec->peer_sid = ssec->peer_sid;
- newssec->sclass = ssec->sclass;
+ newsksec->sid = sksec->sid;
+ newsksec->peer_sid = sksec->peer_sid;
+ newsksec->sclass = sksec->sclass;
- selinux_netlbl_sk_security_reset(newssec);
+ selinux_netlbl_sk_security_reset(newsksec);
}
static void selinux_sk_getsecid(struct sock *sk, u32 *secid)
@@ -4308,27 +4510,18 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
{
struct sk_security_struct *sksec = sk->sk_security;
int err;
- u16 family = sk->sk_family;
- u32 newsid;
+ u16 family = req->rsk_ops->family;
+ u32 connsid;
u32 peersid;
- /* handle mapped IPv4 packets arriving via IPv6 sockets */
- if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
- family = PF_INET;
-
err = selinux_skb_peerlbl_sid(skb, family, &peersid);
if (err)
return err;
- if (peersid == SECSID_NULL) {
- req->secid = sksec->sid;
- req->peer_secid = SECSID_NULL;
- } else {
- err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
- if (err)
- return err;
- req->secid = newsid;
- req->peer_secid = peersid;
- }
+ err = selinux_conn_sid(sksec->sid, peersid, &connsid);
+ if (err)
+ return err;
+ req->secid = connsid;
+ req->peer_secid = peersid;
return selinux_netlbl_inet_conn_request(req, family);
}
@@ -4362,10 +4555,54 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
}
+static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+ skb_set_owner_w(skb, sk);
+}
+
+static int selinux_secmark_relabel_packet(u32 sid)
+{
+ const struct task_security_struct *__tsec;
+ u32 tsid;
+
+ __tsec = current_security();
+ tsid = __tsec->sid;
+
+ return avc_has_perm(tsid, sid, SECCLASS_PACKET, PACKET__RELABELTO, NULL);
+}
+
+static void selinux_secmark_refcount_inc(void)
+{
+ atomic_inc(&selinux_secmark_refcount);
+}
+
+static void selinux_secmark_refcount_dec(void)
+{
+ atomic_dec(&selinux_secmark_refcount);
+}
+
static void selinux_req_classify_flow(const struct request_sock *req,
struct flowi *fl)
{
- fl->secid = req->secid;
+ fl->flowi_secid = req->secid;
+}
+
+static int selinux_tun_dev_alloc_security(void **security)
+{
+ struct tun_security_struct *tunsec;
+
+ tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL);
+ if (!tunsec)
+ return -ENOMEM;
+ tunsec->sid = current_sid();
+
+ *security = tunsec;
+ return 0;
+}
+
+static void selinux_tun_dev_free_security(void *security)
+{
+ kfree(security);
}
static int selinux_tun_dev_create(void)
@@ -4383,8 +4620,17 @@ static int selinux_tun_dev_create(void)
NULL);
}
-static void selinux_tun_dev_post_create(struct sock *sk)
+static int selinux_tun_dev_attach_queue(void *security)
+{
+ struct tun_security_struct *tunsec = security;
+
+ return avc_has_perm(current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
+ TUN_SOCKET__ATTACH_QUEUE, NULL);
+}
+
+static int selinux_tun_dev_attach(struct sock *sk, void *security)
{
+ struct tun_security_struct *tunsec = security;
struct sk_security_struct *sksec = sk->sk_security;
/* we don't currently perform any NetLabel based labeling here and it
@@ -4394,20 +4640,19 @@ static void selinux_tun_dev_post_create(struct sock *sk)
* cause confusion to the TUN user that had no idea network labeling
* protocols were being used */
- /* see the comments in selinux_tun_dev_create() about why we don't use
- * the sockcreate SID here */
-
- sksec->sid = current_sid();
+ sksec->sid = tunsec->sid;
sksec->sclass = SECCLASS_TUN_SOCKET;
+
+ return 0;
}
-static int selinux_tun_dev_attach(struct sock *sk)
+static int selinux_tun_dev_open(void *security)
{
- struct sk_security_struct *sksec = sk->sk_security;
+ struct tun_security_struct *tunsec = security;
u32 sid = current_sid();
int err;
- err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET,
+ err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET,
TUN_SOCKET__RELABELFROM, NULL);
if (err)
return err;
@@ -4415,8 +4660,7 @@ static int selinux_tun_dev_attach(struct sock *sk)
TUN_SOCKET__RELABELTO, NULL);
if (err)
return err;
-
- sksec->sid = sid;
+ tunsec->sid = sid;
return 0;
}
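The TUN hooks are now split so that opening and relabeling a persistent device (tun_dev_open/tun_dev_attach) is distinct from attaching an extra queue (tun_dev_attach_queue), with the device label carried in a tun_security_struct rather than only on a socket. A user-space sketch of the basic open + TUNSETIFF sequence these hooks sit behind; it needs CAP_NET_ADMIN and the relevant tun_socket permissions, and the interface name is arbitrary:

/* Open the TUN clone device and attach to an interface. */
#include <fcntl.h>
#include <linux/if.h>
#include <linux/if_tun.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	struct ifreq ifr;
	int fd = open("/dev/net/tun", O_RDWR);

	if (fd < 0) {
		perror("open /dev/net/tun");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
	strncpy(ifr.ifr_name, "tun-demo%d", IFNAMSIZ - 1);

	if (ioctl(fd, TUNSETIFF, &ifr) < 0) {	/* relabelfrom/relabelto checks */
		perror("TUNSETIFF");
		close(fd);
		return 1;
	}
	printf("attached to %s\n", ifr.ifr_name);
	close(fd);
	return 0;
}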
@@ -4426,22 +4670,21 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
int err = 0;
u32 perm;
struct nlmsghdr *nlh;
- struct socket *sock = sk->sk_socket;
- struct inode_security_struct *isec = SOCK_INODE(sock)->i_security;
+ struct sk_security_struct *sksec = sk->sk_security;
- if (skb->len < NLMSG_SPACE(0)) {
+ if (skb->len < NLMSG_HDRLEN) {
err = -EINVAL;
goto out;
}
nlh = nlmsg_hdr(skb);
- err = selinux_nlmsg_lookup(isec->sclass, nlh->nlmsg_type, &perm);
+ err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
if (err) {
if (err == -EINVAL) {
audit_log(current->audit_context, GFP_KERNEL, AUDIT_SELINUX_ERR,
"SELinux: unrecognized netlink message"
" type=%hu for sclass=%hu\n",
- nlh->nlmsg_type, isec->sclass);
+ nlh->nlmsg_type, sksec->sclass);
if (!selinux_enforcing || security_get_allow_unknown())
err = 0;
}
@@ -4452,7 +4695,7 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
goto out;
}
- err = socket_has_perm(current, sock, perm);
+ err = sock_has_perm(current, sk, perm);
out:
return err;
}
@@ -4466,6 +4709,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
char *addrp;
u32 peer_sid;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
u8 secmark_active;
u8 netlbl_active;
u8 peerlbl_active;
@@ -4475,16 +4719,17 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
secmark_active = selinux_secmark_enabled();
netlbl_active = netlbl_enabled();
- peerlbl_active = netlbl_active || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return NF_ACCEPT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
return NF_DROP;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
return NF_DROP;
@@ -4513,7 +4758,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_forward(unsigned int hooknum,
+static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4523,7 +4768,7 @@ static unsigned int selinux_ipv4_forward(unsigned int hooknum,
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4536,6 +4781,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum,
static unsigned int selinux_ip_output(struct sk_buff *skb,
u16 family)
{
+ struct sock *sk;
u32 sid;
if (!netlbl_enabled())
@@ -4544,8 +4790,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
* because we want to make sure we apply the necessary labeling
* before IPsec is applied so we can leverage AH protection */
- if (skb->sk) {
- struct sk_security_struct *sksec = skb->sk->sk_security;
+ sk = skb->sk;
+ if (sk) {
+ struct sk_security_struct *sksec;
+
+ if (sk->sk_state == TCP_LISTEN)
+			/* if the socket is in the listening state then this
+ * packet is a SYN-ACK packet which means it needs to
+ * be labeled based on the connection/request_sock and
+ * not the parent socket. unfortunately, we can't
+ * lookup the request_sock yet as it isn't queued on
+ * the parent socket until after the SYN-ACK is sent.
+ * the "solution" is to simply pass the packet as-is
+ * as any IP option based labeling should be copied
+ * from the initial connection request (in the IP
+ * layer). it is far from ideal, but until we get a
+ * security label in the packet itself this is the
+ * best we can do. */
+ return NF_ACCEPT;
+
+ /* standard practice, label using the parent socket */
+ sksec = sk->sk_security;
sid = sksec->sid;
} else
sid = SECINITSID_KERNEL;
@@ -4555,7 +4820,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_output(unsigned int hooknum,
+static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4571,6 +4836,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
struct sock *sk = skb->sk;
struct sk_security_struct *sksec;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 proto;
@@ -4578,20 +4844,20 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
return NF_ACCEPT;
sksec = sk->sk_security;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
return NF_DROP;
if (selinux_secmark_enabled())
if (avc_has_perm(sksec->sid, skb->secmark,
SECCLASS_PACKET, PACKET__SEND, &ad))
- return NF_DROP;
+ return NF_DROP_ERR(-ECONNREFUSED);
- if (selinux_policycap_netpeer)
- if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
- return NF_DROP;
+ if (selinux_xfrm_postroute_last(sksec->sid, skb, &ad, proto))
+ return NF_DROP_ERR(-ECONNREFUSED);
return NF_ACCEPT;
}
@@ -4603,6 +4869,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
u32 peer_sid;
struct sock *sk;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
@@ -4613,64 +4880,99 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
* as fast and as clean as possible. */
if (!selinux_policycap_netpeer)
return selinux_ip_postroute_compat(skb, ifindex, family);
+
+ secmark_active = selinux_secmark_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+ sk = skb->sk;
+
#ifdef CONFIG_XFRM
/* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
* packet transformation so allow the packet to pass without any checks
* since we'll have another chance to perform access control checks
	 * when the packet is on its final way out.
* NOTE: there appear to be some IPv6 multicast cases where skb->dst
- * is NULL, in this case go ahead and apply access control. */
- if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
+ * is NULL, in this case go ahead and apply access control.
+ * NOTE: if this is a local socket (skb->sk != NULL) that is in the
+ * TCP listening state we cannot wait until the XFRM processing
+ * is done as we will miss out on the SA label if we do;
+ * unfortunately, this means more work, but it is only once per
+ * connection. */
+ if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
+ !(sk != NULL && sk->sk_state == TCP_LISTEN))
return NF_ACCEPT;
#endif
- secmark_active = selinux_secmark_enabled();
- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
- if (!secmark_active && !peerlbl_active)
- return NF_ACCEPT;
- /* if the packet is being forwarded then get the peer label from the
- * packet itself; otherwise check to see if it is from a local
- * application or the kernel, if from an application get the peer label
- * from the sending socket, otherwise use the kernel's sid */
- sk = skb->sk;
if (sk == NULL) {
- switch (family) {
- case PF_INET:
- if (IPCB(skb)->flags & IPSKB_FORWARDED)
- secmark_perm = PACKET__FORWARD_OUT;
- else
- secmark_perm = PACKET__SEND;
- break;
- case PF_INET6:
- if (IP6CB(skb)->flags & IP6SKB_FORWARDED)
- secmark_perm = PACKET__FORWARD_OUT;
- else
- secmark_perm = PACKET__SEND;
- break;
- default:
- return NF_DROP;
- }
- if (secmark_perm == PACKET__FORWARD_OUT) {
+ /* Without an associated socket the packet is either coming
+ * from the kernel or it is being forwarded; check the packet
+ * to determine which and if the packet is being forwarded
+ * query the packet directly to determine the security label. */
+ if (skb->skb_iif) {
+ secmark_perm = PACKET__FORWARD_OUT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
return NF_DROP;
- } else
+ } else {
+ secmark_perm = PACKET__SEND;
peer_sid = SECINITSID_KERNEL;
+ }
+ } else if (sk->sk_state == TCP_LISTEN) {
+ /* Locally generated packet but the associated socket is in the
+ * listening state which means this is a SYN-ACK packet. In
+ * this particular case the correct security label is assigned
+ * to the connection/request_sock but unfortunately we can't
+ * query the request_sock as it isn't queued on the parent
+ * socket until after the SYN-ACK packet is sent; the only
+ * viable choice is to regenerate the label like we do in
+ * selinux_inet_conn_request(). See also selinux_ip_output()
+ * for similar problems. */
+ u32 skb_sid;
+ struct sk_security_struct *sksec = sk->sk_security;
+ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
+ return NF_DROP;
+ /* At this point, if the returned skb peerlbl is SECSID_NULL
+ * and the packet has been through at least one XFRM
+ * transformation then we must be dealing with the "final"
+ * form of labeled IPsec packet; since we've already applied
+ * all of our access controls on this packet we can safely
+ * pass the packet. */
+ if (skb_sid == SECSID_NULL) {
+ switch (family) {
+ case PF_INET:
+ if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ break;
+ case PF_INET6:
+ if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+					return NF_ACCEPT;
+				break;
+			default:
+ return NF_DROP_ERR(-ECONNREFUSED);
+ }
+ }
+ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
+ return NF_DROP;
+ secmark_perm = PACKET__SEND;
} else {
+ /* Locally generated packet, fetch the security label from the
+ * associated socket. */
struct sk_security_struct *sksec = sk->sk_security;
peer_sid = sksec->sid;
secmark_perm = PACKET__SEND;
}
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
return NF_DROP;
if (secmark_active)
if (avc_has_perm(peer_sid, skb->secmark,
SECCLASS_PACKET, secmark_perm, &ad))
- return NF_DROP;
+ return NF_DROP_ERR(-ECONNREFUSED);
if (peerlbl_active) {
u32 if_sid;
@@ -4680,19 +4982,19 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
return NF_DROP;
if (avc_has_perm(peer_sid, if_sid,
SECCLASS_NETIF, NETIF__EGRESS, &ad))
- return NF_DROP;
+ return NF_DROP_ERR(-ECONNREFUSED);
if (sel_netnode_sid(addrp, family, &node_sid))
return NF_DROP;
if (avc_has_perm(peer_sid, node_sid,
SECCLASS_NODE, NODE__SENDTO, &ad))
- return NF_DROP;
+ return NF_DROP_ERR(-ECONNREFUSED);
}
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4702,7 +5004,7 @@ static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4725,22 +5027,6 @@ static int selinux_netlink_send(struct sock *sk, struct sk_buff *skb)
return selinux_nlmsg_perm(sk, skb);
}
-static int selinux_netlink_recv(struct sk_buff *skb, int capability)
-{
- int err;
- struct common_audit_data ad;
-
- err = cap_netlink_recv(skb, capability);
- if (err)
- return err;
-
- COMMON_AUDIT_DATA_INIT(&ad, CAP);
- ad.u.cap = capability;
-
- return avc_has_perm(NETLINK_CB(skb).sid, NETLINK_CB(skb).sid,
- SECCLASS_CAPABILITY, CAP_TO_MASK(capability), &ad);
-}
-
static int ipc_alloc_security(struct task_struct *task,
struct kern_ipc_perm *perm,
u16 sclass)
@@ -4798,7 +5084,7 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
isec = ipc_perms->security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = ipc_perms->key;
return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
@@ -4828,7 +5114,7 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
isec = msq->q_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4853,7 +5139,7 @@ static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg)
isec = msq->q_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4908,12 +5194,12 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
* message queue this message will be stored in
*/
rc = security_transition_sid(sid, isec->sid, SECCLASS_MSG,
- &msec->sid);
+ NULL, &msec->sid);
if (rc)
return rc;
}
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
/* Can this process write to the queue? */
@@ -4944,7 +5230,7 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
isec = msq->q_perm.security;
msec = msg->security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid,
@@ -4969,7 +5255,7 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp)
isec = shp->shm_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = shp->shm_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -4994,7 +5280,7 @@ static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg)
isec = shp->shm_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = shp->shm_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -5061,7 +5347,7 @@ static int selinux_sem_alloc_security(struct sem_array *sma)
isec = sma->sem_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = sma->sem_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -5086,7 +5372,7 @@ static int selinux_sem_associate(struct sem_array *sma, int semflg)
isec = sma->sem_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = sma->sem_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -5264,10 +5550,25 @@ static int selinux_setprocattr(struct task_struct *p,
str[size-1] = 0;
size--;
}
- error = security_context_to_sid(value, size, &sid);
+ error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
if (error == -EINVAL && !strcmp(name, "fscreate")) {
- if (!capable(CAP_MAC_ADMIN))
+ if (!capable(CAP_MAC_ADMIN)) {
+ struct audit_buffer *ab;
+ size_t audit_size;
+
+ /* We strip a nul only if it is at the end, otherwise the
+ * context contains a nul and we should audit that */
+ if (str[size - 1] == '\0')
+ audit_size = size - 1;
+ else
+ audit_size = size;
+ ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ audit_log_format(ab, "op=fscreate invalid_context=");
+ audit_log_n_untrustedstring(ab, value, audit_size);
+ audit_log_end(ab);
+
return error;
+ }
error = security_context_to_sid_force(value, size,
&sid);
}
@@ -5319,11 +5620,11 @@ static int selinux_setprocattr(struct task_struct *p,
/* Check for ptracing, and update the task SID if ok.
Otherwise, leave SID unchanged and fail. */
ptsid = 0;
- task_lock(p);
- tracer = tracehook_tracer_task(p);
+ rcu_read_lock();
+ tracer = ptrace_parent(p);
if (tracer)
ptsid = task_sid(tracer);
- task_unlock(p);
+ rcu_read_unlock();
if (tracer) {
error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
@@ -5346,6 +5647,11 @@ abort_change:
return error;
}
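The setprocattr path above also covers the "current" attribute, where a write requests a dynamic domain transition and, if the task is being traced, the tracer is now looked up under RCU via ptrace_parent() instead of the old tracehook helper. A user-space sketch of such a write; the target context is hypothetical and the operation needs the corresponding setcurrent/dyntransition permissions in policy:

/* Request a dynamic domain transition for the current task. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char *newctx = "unconfined_u:unconfined_r:demo_t:s0"; /* hypothetical */
	int fd = open("/proc/self/attr/current", O_WRONLY);

	if (fd < 0) {
		perror("open attr/current");
		return 1;
	}
	if (write(fd, newctx, strlen(newctx)) < 0)
		perror("dyntransition");	/* EACCES/EINVAL on policy denial */
	else
		puts("switched current context");
	close(fd);
	return 0;
}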
+static int selinux_ismaclabel(const char *name)
+{
+ return (strcmp(name, XATTR_SELINUX_SUFFIX) == 0);
+}
+
static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return security_sid_to_context(secid, secdata, seclen);
@@ -5353,7 +5659,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
- return security_context_to_sid(secdata, seclen, secid);
+ return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL);
}
static void selinux_release_secctx(char *secdata, u32 seclen)
@@ -5419,7 +5725,7 @@ static void selinux_key_free(struct key *k)
static int selinux_key_permission(key_ref_t key_ref,
const struct cred *cred,
- key_perm_t perm)
+ unsigned perm)
{
struct key *key;
struct key_security_struct *ksec;
@@ -5462,7 +5768,6 @@ static struct security_operations selinux_ops = {
.ptrace_traceme = selinux_ptrace_traceme,
.capget = selinux_capget,
.capset = selinux_capset,
- .sysctl = selinux_sysctl,
.capable = selinux_capable,
.quotactl = selinux_quotactl,
.quota_on = selinux_quota_on,
@@ -5470,7 +5775,6 @@ static struct security_operations selinux_ops = {
.vm_enough_memory = selinux_vm_enough_memory,
.netlink_send = selinux_netlink_send,
- .netlink_recv = selinux_netlink_recv,
.bprm_set_creds = selinux_bprm_set_creds,
.bprm_committing_creds = selinux_bprm_committing_creds,
@@ -5480,6 +5784,7 @@ static struct security_operations selinux_ops = {
.sb_alloc_security = selinux_sb_alloc_security,
.sb_free_security = selinux_sb_free_security,
.sb_copy_data = selinux_sb_copy_data,
+ .sb_remount = selinux_sb_remount,
.sb_kern_mount = selinux_sb_kern_mount,
.sb_show_options = selinux_sb_show_options,
.sb_statfs = selinux_sb_statfs,
@@ -5489,6 +5794,7 @@ static struct security_operations selinux_ops = {
.sb_clone_mnt_opts = selinux_sb_clone_mnt_opts,
.sb_parse_opts_str = selinux_parse_opts_str,
+ .dentry_init_security = selinux_dentry_init_security,
.inode_alloc_security = selinux_inode_alloc_security,
.inode_free_security = selinux_inode_free_security,
@@ -5520,7 +5826,8 @@ static struct security_operations selinux_ops = {
.file_alloc_security = selinux_file_alloc_security,
.file_free_security = selinux_file_free_security,
.file_ioctl = selinux_file_ioctl,
- .file_mmap = selinux_file_mmap,
+ .mmap_file = selinux_mmap_file,
+ .mmap_addr = selinux_mmap_addr,
.file_mprotect = selinux_file_mprotect,
.file_lock = selinux_file_lock,
.file_fcntl = selinux_file_fcntl,
@@ -5528,7 +5835,7 @@ static struct security_operations selinux_ops = {
.file_send_sigiotask = selinux_file_send_sigiotask,
.file_receive = selinux_file_receive,
- .dentry_open = selinux_dentry_open,
+ .file_open = selinux_file_open,
.task_create = selinux_task_create,
.cred_alloc_blank = selinux_cred_alloc_blank,
@@ -5583,6 +5890,7 @@ static struct security_operations selinux_ops = {
.getprocattr = selinux_getprocattr,
.setprocattr = selinux_setprocattr,
+ .ismaclabel = selinux_ismaclabel,
.secid_to_secctx = selinux_secid_to_secctx,
.secctx_to_secid = selinux_secctx_to_secid,
.release_secctx = selinux_release_secctx,
@@ -5617,17 +5925,25 @@ static struct security_operations selinux_ops = {
.inet_conn_request = selinux_inet_conn_request,
.inet_csk_clone = selinux_inet_csk_clone,
.inet_conn_established = selinux_inet_conn_established,
+ .secmark_relabel_packet = selinux_secmark_relabel_packet,
+ .secmark_refcount_inc = selinux_secmark_refcount_inc,
+ .secmark_refcount_dec = selinux_secmark_refcount_dec,
.req_classify_flow = selinux_req_classify_flow,
+ .tun_dev_alloc_security = selinux_tun_dev_alloc_security,
+ .tun_dev_free_security = selinux_tun_dev_free_security,
.tun_dev_create = selinux_tun_dev_create,
- .tun_dev_post_create = selinux_tun_dev_post_create,
+ .tun_dev_attach_queue = selinux_tun_dev_attach_queue,
.tun_dev_attach = selinux_tun_dev_attach,
+ .tun_dev_open = selinux_tun_dev_open,
+ .skb_owned_by = selinux_skb_owned_by,
#ifdef CONFIG_SECURITY_NETWORK_XFRM
.xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
.xfrm_policy_clone_security = selinux_xfrm_policy_clone,
.xfrm_policy_free_security = selinux_xfrm_policy_free,
.xfrm_policy_delete_security = selinux_xfrm_policy_delete,
- .xfrm_state_alloc_security = selinux_xfrm_state_alloc,
+ .xfrm_state_alloc = selinux_xfrm_state_alloc,
+ .xfrm_state_alloc_acquire = selinux_xfrm_state_alloc_acquire,
.xfrm_state_free_security = selinux_xfrm_state_free,
.xfrm_state_delete_security = selinux_xfrm_state_delete,
.xfrm_policy_lookup = selinux_xfrm_policy_lookup,
@@ -5667,14 +5983,13 @@ static __init int selinux_init(void)
/* Set the security state for the initial task. */
cred_init_security();
+ default_noexec = !(VM_DATA_DEFAULT_FLAGS & VM_EXEC);
+
sel_inode_cache = kmem_cache_create("selinux_inode_security",
sizeof(struct inode_security_struct),
0, SLAB_PANIC, NULL);
avc_init();
- secondary_ops = security_ops;
- if (!secondary_ops)
- panic("SELinux: No initial security operations\n");
if (register_security(&selinux_ops))
panic("SELinux: Unable to register with kernel.\n");
@@ -5686,35 +6001,18 @@ static __init int selinux_init(void)
return 0;
}
+static void delayed_superblock_init(struct super_block *sb, void *unused)
+{
+ superblock_doinit(sb, NULL);
+}
+
void selinux_complete_init(void)
{
printk(KERN_DEBUG "SELinux: Completing initialization.\n");
/* Set up any superblocks initialized prior to the policy load. */
printk(KERN_DEBUG "SELinux: Setting up existing superblocks.\n");
- spin_lock(&sb_lock);
- spin_lock(&sb_security_lock);
-next_sb:
- if (!list_empty(&superblock_security_head)) {
- struct superblock_security_struct *sbsec =
- list_entry(superblock_security_head.next,
- struct superblock_security_struct,
- list);
- struct super_block *sb = sbsec->sb;
- sb->s_count++;
- spin_unlock(&sb_security_lock);
- spin_unlock(&sb_lock);
- down_read(&sb->s_umount);
- if (sb->s_root)
- superblock_doinit(sb, NULL);
- drop_super(sb);
- spin_lock(&sb_lock);
- spin_lock(&sb_security_lock);
- list_del_init(&sbsec->list);
- goto next_sb;
- }
- spin_unlock(&sb_security_lock);
- spin_unlock(&sb_lock);
+ iterate_supers(delayed_superblock_init, NULL);
}
/* SELinux requires early initialization in order to label
@@ -5727,21 +6025,21 @@ static struct nf_hook_ops selinux_ipv4_ops[] = {
{
.hook = selinux_ipv4_postroute,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_SELINUX_LAST,
},
{
.hook = selinux_ipv4_forward,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP_PRI_SELINUX_FIRST,
},
{
.hook = selinux_ipv4_output,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_SELINUX_FIRST,
}
@@ -5753,14 +6051,14 @@ static struct nf_hook_ops selinux_ipv6_ops[] = {
{
.hook = selinux_ipv6_postroute,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_SELINUX_LAST,
},
{
.hook = selinux_ipv6_forward,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_SELINUX_FIRST,
}
@@ -5818,8 +6116,6 @@ static int selinux_disabled;
int selinux_disable(void)
{
- extern void exit_sel_fs(void);
-
if (ss_initialized) {
/* Not permitted after initial policy load. */
return -EINVAL;
@@ -5835,8 +6131,7 @@ int selinux_disable(void)
selinux_disabled = 1;
selinux_enabled = 0;
- /* Reset security_ops to the secondary module, dummy or capability. */
- security_ops = secondary_ops;
+ reset_security_ops();
/* Try to destroy the avc node cache */
avc_disable();
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index e94e82f7381..ddf8eec03f2 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -15,8 +15,6 @@
#include <linux/audit.h>
#include <linux/lsm_audit.h>
#include <linux/in6.h>
-#include <linux/path.h>
-#include <asm/system.h>
#include "flask.h"
#include "av_permissions.h"
#include "security.h"
@@ -42,7 +40,6 @@ struct sk_buff;
*/
struct avc_cache_stats {
unsigned int lookups;
- unsigned int hits;
unsigned int misses;
unsigned int allocations;
unsigned int reclaims;
@@ -50,16 +47,99 @@ struct avc_cache_stats {
};
/*
+ * We only need this data after we have decided to send an audit message.
+ */
+struct selinux_audit_data {
+ u32 ssid;
+ u32 tsid;
+ u16 tclass;
+ u32 requested;
+ u32 audited;
+ u32 denied;
+ int result;
+};
+
+/*
* AVC operations
*/
void __init avc_init(void);
-void avc_audit(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd,
- int result,
- struct common_audit_data *a);
+static inline u32 avc_audit_required(u32 requested,
+ struct av_decision *avd,
+ int result,
+ u32 auditdeny,
+ u32 *deniedp)
+{
+ u32 denied, audited;
+ denied = requested & ~avd->allowed;
+ if (unlikely(denied)) {
+ audited = denied & avd->auditdeny;
+ /*
+ * auditdeny is TRICKY! Setting a bit in
+ * this field means that ANY denials should NOT be audited if
+ * the policy contains an explicit dontaudit rule for that
+ * permission. Take notice that this is unrelated to the
+ * actual permissions that were denied. As an example lets
+ * assume:
+ *
+ * denied == READ
+ * avd.auditdeny & ACCESS == 0 (not set means explicit rule)
+ * auditdeny & ACCESS == 1
+ *
+ * We will NOT audit the denial even though the denied
+ * permission was READ and the auditdeny checks were for
+ * ACCESS
+ */
+ if (auditdeny && !(auditdeny & avd->auditdeny))
+ audited = 0;
+ } else if (result)
+ audited = denied = requested;
+ else
+ audited = requested & avd->auditallow;
+ *deniedp = denied;
+ return audited;
+}
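A standalone sketch of the audited/denied computation above, reproducing the dontaudit corner case from the comment with made-up permission bits (READ = 0x1, ACCESS = 0x2); the struct and values are illustrative only:

/* Re-implementation of the decision logic for demonstration purposes. */
#include <stdint.h>
#include <stdio.h>

struct av_decision_demo {
	uint32_t allowed;
	uint32_t auditallow;
	uint32_t auditdeny;
};

static uint32_t audit_required(uint32_t requested,
			       const struct av_decision_demo *avd,
			       int result, uint32_t auditdeny,
			       uint32_t *deniedp)
{
	uint32_t denied = requested & ~avd->allowed;
	uint32_t audited;

	if (denied) {
		audited = denied & avd->auditdeny;
		if (auditdeny && !(auditdeny & avd->auditdeny))
			audited = 0;	/* explicit dontaudit rule wins */
	} else if (result) {
		audited = denied = requested;
	} else {
		audited = requested & avd->auditallow;
	}
	*deniedp = denied;
	return audited;
}

int main(void)
{
	/* READ denied, policy has an explicit dontaudit for ACCESS */
	struct av_decision_demo avd = {
		.allowed = 0x0,
		.auditallow = 0x0,
		.auditdeny = 0x1,	/* ACCESS bit (0x2) cleared by dontaudit */
	};
	uint32_t denied, audited;

	audited = audit_required(0x1 /* READ */, &avd, 0, 0x2 /* ACCESS */,
				 &denied);
	/* prints denied=0x1 audited=0: the READ denial is not audited */
	printf("denied=%#x audited=%#x\n", (unsigned)denied, (unsigned)audited);
	return 0;
}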
+
+int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+ struct common_audit_data *a,
+ unsigned flags);
+
+/**
+ * avc_audit - Audit the granting or denial of permissions.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @requested: requested permissions
+ * @avd: access vector decisions
+ * @result: result from avc_has_perm_noaudit
+ * @a: auxiliary audit data
+ * @flags: VFS walk flags
+ *
+ * Audit the granting or denial of permissions in accordance
+ * with the policy. This function is typically called by
+ * avc_has_perm() after a permission check, but can also be
+ * called directly by callers who use avc_has_perm_noaudit()
+ * in order to separate the permission check from the auditing.
+ * For example, this separation is useful when the permission check must
+ * be performed under a lock, to allow the lock to be released
+ * before calling the auditing code.
+ */
+static inline int avc_audit(u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct av_decision *avd,
+ int result,
+ struct common_audit_data *a)
+{
+ u32 audited, denied;
+ audited = avc_audit_required(requested, avd, result, 0, &denied);
+ if (likely(!audited))
+ return 0;
+ return slow_avc_audit(ssid, tsid, tclass,
+ requested, audited, denied, result,
+ a, 0);
+}
#define AVC_STRICT 1 /* Ignore permissive mode. */
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
@@ -82,11 +162,7 @@ u32 avc_policy_seqno(void);
#define AVC_CALLBACK_AUDITDENY_ENABLE 64
#define AVC_CALLBACK_AUDITDENY_DISABLE 128
-int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
- u16 tclass, u32 perms,
- u32 *out_retained),
- u32 events, u32 ssid, u32 tsid,
- u16 tclass, u32 perms);
+int avc_add_callback(int (*callback)(u32 event), u32 events);
/* Exported to selinuxfs */
int avc_get_hash_stats(char *page);
diff --git a/security/selinux/include/avc_ss.h b/security/selinux/include/avc_ss.h
index 4677aa519b0..d5c328452df 100644
--- a/security/selinux/include/avc_ss.h
+++ b/security/selinux/include/avc_ss.h
@@ -18,5 +18,11 @@ struct security_class_mapping {
extern struct security_class_mapping secclass_map[];
+/*
+ * The security server must be initialized before
+ * any labeling or access decisions can be provided.
+ */
+extern int ss_initialized;
+
#endif /* _SELINUX_AVC_SS_H_ */
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index 8b32e959bb2..be491a74c1e 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -2,7 +2,8 @@
"getattr", "setattr", "lock", "relabelfrom", "relabelto", "append"
#define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \
- "rename", "execute", "swapon", "quotaon", "mounton"
+ "rename", "execute", "swapon", "quotaon", "mounton", "audit_access", \
+ "open", "execmod"
#define COMMON_SOCK_PERMS COMMON_FILE_SOCK_PERMS, "bind", "connect", \
"listen", "accept", "getopt", "setopt", "shutdown", "recvfrom", \
@@ -11,12 +12,16 @@
#define COMMON_IPC_PERMS "create", "destroy", "getattr", "setattr", "read", \
"write", "associate", "unix_read", "unix_write"
+/*
+ * Note: The name for any socket class should be suffixed by "socket",
+ * and should not contain more than one occurrence of "socket".
+ */
struct security_class_mapping secclass_map[] = {
{ "security",
{ "compute_av", "compute_create", "compute_member",
"check_context", "load_policy", "compute_relabel",
"compute_user", "setenforce", "setbool", "setsecparam",
- "setcheckreqprot", NULL } },
+ "setcheckreqprot", "read_policy", NULL } },
{ "process",
{ "fork", "transition", "sigchld", "sigkill",
"sigstop", "signull", "signal", "ptrace", "getsched", "setsched",
@@ -43,22 +48,21 @@ struct security_class_mapping secclass_map[] = {
"quotaget", NULL } },
{ "file",
{ COMMON_FILE_PERMS,
- "execute_no_trans", "entrypoint", "execmod", "open", NULL } },
+ "execute_no_trans", "entrypoint", NULL } },
{ "dir",
{ COMMON_FILE_PERMS, "add_name", "remove_name",
- "reparent", "search", "rmdir", "open", NULL } },
+ "reparent", "search", "rmdir", NULL } },
{ "fd", { "use", NULL } },
{ "lnk_file",
{ COMMON_FILE_PERMS, NULL } },
{ "chr_file",
- { COMMON_FILE_PERMS,
- "execute_no_trans", "entrypoint", "execmod", "open", NULL } },
+ { COMMON_FILE_PERMS, NULL } },
{ "blk_file",
- { COMMON_FILE_PERMS, "open", NULL } },
+ { COMMON_FILE_PERMS, NULL } },
{ "sock_file",
- { COMMON_FILE_PERMS, "open", NULL } },
+ { COMMON_FILE_PERMS, NULL } },
{ "fifo_file",
- { COMMON_FILE_PERMS, "open", NULL } },
+ { COMMON_FILE_PERMS, NULL } },
{ "socket",
{ COMMON_SOCK_PERMS, NULL } },
{ "tcp_socket",
@@ -132,8 +136,7 @@ struct security_class_mapping secclass_map[] = {
{ "appletalk_socket",
{ COMMON_SOCK_PERMS, NULL } },
{ "packet",
- { "send", "recv", "relabelto", "flow_in", "flow_out",
- "forward_in", "forward_out", NULL } },
+ { "send", "recv", "relabelto", "forward_in", "forward_out", NULL } },
{ "key",
{ "view", "read", "write", "search", "link", "setattr", "create",
NULL } },
@@ -142,9 +145,11 @@ struct security_class_mapping secclass_map[] = {
"node_bind", "name_connect", NULL } },
{ "memprotect", { "mmap_zero", NULL } },
{ "peer", { "recv", NULL } },
- { "capability2", { "mac_override", "mac_admin", NULL } },
+ { "capability2",
+ { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
+ "audit_read", NULL } },
{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
{ "tun_socket",
- { COMMON_SOCK_PERMS, NULL } },
+ { COMMON_SOCK_PERMS, "attach_queue", NULL } },
{ NULL }
};
diff --git a/security/selinux/include/initial_sid_to_string.h b/security/selinux/include/initial_sid_to_string.h
index d4fac82793a..a59b64e3fd0 100644
--- a/security/selinux/include/initial_sid_to_string.h
+++ b/security/selinux/include/initial_sid_to_string.h
@@ -1,5 +1,5 @@
/* This file is automatically generated. Do not edit. */
-static char *initial_sid_to_string[] =
+static const char *initial_sid_to_string[] =
{
"null",
"kernel",
diff --git a/security/selinux/include/netif.h b/security/selinux/include/netif.h
index ce23edd128b..43d507242b4 100644
--- a/security/selinux/include/netif.h
+++ b/security/selinux/include/netif.h
@@ -8,7 +8,7 @@
*
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
- * Paul Moore, <paul.moore@hp.com>
+ * Paul Moore <paul@paul-moore.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
diff --git a/security/selinux/include/netlabel.h b/security/selinux/include/netlabel.h
index 8d7384280a7..8c59b8f150e 100644
--- a/security/selinux/include/netlabel.h
+++ b/security/selinux/include/netlabel.h
@@ -1,7 +1,7 @@
/*
* SELinux interface to the NetLabel subsystem
*
- * Author : Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -42,8 +42,8 @@ void selinux_netlbl_cache_invalidate(void);
void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway);
-void selinux_netlbl_sk_security_free(struct sk_security_struct *ssec);
-void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec);
+void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec);
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec);
int selinux_netlbl_skbuff_getsid(struct sk_buff *skb,
u16 family,
@@ -79,13 +79,13 @@ static inline void selinux_netlbl_err(struct sk_buff *skb,
}
static inline void selinux_netlbl_sk_security_free(
- struct sk_security_struct *ssec)
+ struct sk_security_struct *sksec)
{
return;
}
static inline void selinux_netlbl_sk_security_reset(
- struct sk_security_struct *ssec)
+ struct sk_security_struct *sksec)
{
return;
}
diff --git a/security/selinux/include/netnode.h b/security/selinux/include/netnode.h
index 1b94450d11d..df7a5ed6c69 100644
--- a/security/selinux/include/netnode.h
+++ b/security/selinux/include/netnode.h
@@ -6,7 +6,7 @@
* needed to reduce the lookup overhead since most of these queries happen on
* a per-packet basis.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/security/selinux/include/netport.h b/security/selinux/include/netport.h
index 8991752eaf9..4d965b83d73 100644
--- a/security/selinux/include/netport.h
+++ b/security/selinux/include/netport.h
@@ -5,7 +5,7 @@
* mapping is maintained as part of the normal policy but a fast cache is
* needed to reduce the lookup overhead.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index c4e062336ef..078e553f52f 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -38,7 +38,10 @@ struct task_security_struct {
struct inode_security_struct {
struct inode *inode; /* back pointer to inode object */
- struct list_head list; /* list of inode_security_struct */
+ union {
+ struct list_head list; /* list of inode_security_struct */
+ struct rcu_head rcu; /* for freeing the inode_security_struct */
+ };
u32 task_sid; /* SID of creating task */
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
@@ -55,12 +58,11 @@ struct file_security_struct {
struct superblock_security_struct {
struct super_block *sb; /* back pointer to sb object */
- struct list_head list; /* list of superblock_security_struct */
u32 sid; /* SID of file system superblock */
u32 def_sid; /* default SID for labeling */
u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
- unsigned int behavior; /* labeling behavior */
- unsigned char flags; /* which mount options were specified */
+ unsigned short behavior; /* labeling behavior */
+ unsigned short flags; /* which mount options were specified */
struct mutex lock;
struct list_head isec_head;
spinlock_t isec_lock;
@@ -111,6 +113,10 @@ struct sk_security_struct {
u16 sclass; /* sock security class */
};
+struct tun_security_struct {
+ u32 sid; /* SID for the tun device sockets */
+};
+
struct key_security_struct {
u32 sid; /* SID of key */
};
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index 2553266ad79..ce7852cf526 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -8,7 +8,9 @@
#ifndef _SELINUX_SECURITY_H_
#define _SELINUX_SECURITY_H_
+#include <linux/dcache.h>
#include <linux/magic.h>
+#include <linux/types.h>
#include "flask.h"
#define SECSID_NULL 0x00000000 /* unspecified SID */
@@ -27,26 +29,32 @@
#define POLICYDB_VERSION_POLCAP 22
#define POLICYDB_VERSION_PERMISSIVE 23
#define POLICYDB_VERSION_BOUNDARY 24
+#define POLICYDB_VERSION_FILENAME_TRANS 25
+#define POLICYDB_VERSION_ROLETRANS 26
+#define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS 27
+#define POLICYDB_VERSION_DEFAULT_TYPE 28
+#define POLICYDB_VERSION_CONSTRAINT_NAMES 29
/* Range of policy versions we understand*/
#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
#else
-#define POLICYDB_VERSION_MAX POLICYDB_VERSION_BOUNDARY
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_CONSTRAINT_NAMES
#endif
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
/* Super block security struct flags for mount options */
+/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
#define CONTEXT_MNT 0x01
#define FSCONTEXT_MNT 0x02
#define ROOTCONTEXT_MNT 0x04
#define DEFCONTEXT_MNT 0x08
+#define SBLABEL_MNT 0x10
/* Non-mount related flags */
-#define SE_SBINITIALIZED 0x10
-#define SE_SBPROC 0x20
-#define SE_SBLABELSUPP 0x40
+#define SE_SBINITIALIZED 0x0100
+#define SE_SBPROC 0x0200
#define CONTEXT_STR "context="
#define FSCONTEXT_STR "fscontext="
@@ -57,18 +65,20 @@
struct netlbl_lsm_secattr;
extern int selinux_enabled;
-extern int selinux_mls_enabled;
/* Policy capabilities */
enum {
POLICYDB_CAPABILITY_NETPEER,
POLICYDB_CAPABILITY_OPENPERM,
+ POLICYDB_CAPABILITY_REDHAT1,
+ POLICYDB_CAPABILITY_ALWAYSNETWORK,
__POLICYDB_CAPABILITY_MAX
};
#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
extern int selinux_policycap_netpeer;
extern int selinux_policycap_openperm;
+extern int selinux_policycap_alwaysnetwork;
/*
* type_datum properties
@@ -80,7 +90,11 @@ extern int selinux_policycap_openperm;
/* limitation of boundary depth */
#define POLICYDB_BOUNDS_MAXDEPTH 4
+int security_mls_enabled(void);
+
int security_load_policy(void *data, size_t len);
+int security_read_policy(void **data, size_t *len);
+size_t security_policydb_len(void);
int security_policycap_supported(unsigned int req_cap);
@@ -96,19 +110,17 @@ struct av_decision {
/* definitions of av_decision.flags */
#define AVD_FLAGS_PERMISSIVE 0x0001
-int security_compute_av(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd);
+void security_compute_av(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd);
-int security_compute_av_user(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd);
+void security_compute_av_user(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd);
-int security_transition_sid(u32 ssid, u32 tsid,
- u16 tclass, u32 *out_sid);
+int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
+ const struct qstr *qstr, u32 *out_sid);
-int security_transition_sid_user(u32 ssid, u32 tsid,
- u16 tclass, u32 *out_sid);
+int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
+ const char *objname, u32 *out_sid);
int security_member_sid(u32 ssid, u32 tsid,
u16 tclass, u32 *out_sid);
@@ -122,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext,
int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
int security_context_to_sid(const char *scontext, u32 scontext_len,
- u32 *out_sid);
+ u32 *out_sid, gfp_t gfp);
int security_context_to_sid_default(const char *scontext, u32 scontext_len,
u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
@@ -162,9 +174,10 @@ int security_get_allow_unknown(void);
#define SECURITY_FS_USE_GENFS 4 /* use the genfs support */
#define SECURITY_FS_USE_NONE 5 /* no labeling support */
#define SECURITY_FS_USE_MNTPOINT 6 /* use mountpoint labeling */
+#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
+#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
-int security_fs_use(const char *fstype, unsigned int *behavior,
- u32 *sid);
+int security_fs_use(struct super_block *sb);
int security_genfs_sid(const char *fstype, char *name, u16 sclass,
u32 *sid);
@@ -192,5 +205,33 @@ static inline int security_netlbl_sid_to_secattr(u32 sid,
const char *security_get_initial_sid_context(u32 sid);
+/*
+ * status notifier using mmap interface
+ */
+extern struct page *selinux_kernel_status_page(void);
+
+#define SELINUX_KERNEL_STATUS_VERSION 1
+struct selinux_kernel_status {
+ u32 version; /* version number of this structure */
+ u32 sequence; /* sequence number of seqlock logic */
+ u32 enforcing; /* current setting of enforcing mode */
+ u32 policyload; /* times of policy reloaded */
+ u32 deny_unknown; /* current setting of deny_unknown */
+ /*
+ * Versions > 0 of the structure provide the members above.
+ */
+} __attribute__((packed));
+
+extern void selinux_status_update_setenforce(int enforcing);
+extern void selinux_status_update_policyload(int seqno);
+extern void selinux_complete_init(void);
+extern int selinux_disable(void);
+extern void exit_sel_fs(void);
+extern struct path selinux_null;
+extern struct vfsmount *selinuxfs_mount;
+extern void selnl_notify_setenforce(int val);
+extern void selnl_notify_policyload(u32 seqno);
+extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm);
+
#endif /* _SELINUX_SECURITY_H_ */
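A hedged userspace sketch of consuming the status page declared above: mmap() the selinuxfs "status" node and re-read the fields until the sequence counter is even and unchanged. The /sys/fs/selinux/status path is an assumed mount point, the local struct mirrors the layout above, and memory barriers are omitted for brevity.

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct selinux_kernel_status {		/* mirrors the kernel layout above */
	uint32_t version;
	uint32_t sequence;		/* odd while the kernel is updating */
	uint32_t enforcing;
	uint32_t policyload;
	uint32_t deny_unknown;
} __attribute__((packed));

int main(void)
{
	volatile struct selinux_kernel_status *st;
	uint32_t seq, enforcing;
	int fd = open("/sys/fs/selinux/status", O_RDONLY);

	if (fd < 0)
		return 1;
	st = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ, MAP_SHARED, fd, 0);
	if (st == MAP_FAILED)
		return 1;

	do {				/* seqlock-style retry loop */
		seq = st->sequence;
		enforcing = st->enforcing;
	} while ((seq & 1) || seq != st->sequence);

	printf("enforcing=%u policyload=%u\n",
	       (unsigned)enforcing, (unsigned)st->policyload);
	return 0;
}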
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index 13128f9a3e5..1450f85b946 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -7,30 +7,25 @@
#ifndef _SELINUX_XFRM_H_
#define _SELINUX_XFRM_H_
+#include <net/flow.h>
+
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx);
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp);
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp);
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_state_alloc(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx, u32 secid);
+ struct xfrm_user_sec_ctx *uctx);
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid);
void selinux_xfrm_state_free(struct xfrm_state *x);
int selinux_xfrm_state_delete(struct xfrm_state *x);
int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, struct flowi *fl);
-
-/*
- * Extract the security blob from the sock (it's actually on the socket)
- */
-static inline struct inode_security_struct *get_sock_isec(struct sock *sk)
-{
- if (!sk->sk_socket)
- return NULL;
-
- return SOCK_INODE(sk->sk_socket)->i_security;
-}
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
extern atomic_t selinux_xfrm_refcount;
@@ -40,15 +35,23 @@ static inline int selinux_xfrm_enabled(void)
return (atomic_read(&selinux_xfrm_refcount) > 0);
}
-int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
- struct common_audit_data *ad);
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto);
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad);
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto);
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
static inline void selinux_xfrm_notify_policyload(void)
{
- atomic_inc(&flow_cache_genid);
+ struct net *net;
+
+ rtnl_lock();
+ for_each_net(net) {
+ atomic_inc(&net->xfrm.flow_cache_genid);
+ rt_genid_bump_all(net);
+ }
+ rtnl_unlock();
}
#else
static inline int selinux_xfrm_enabled(void)
@@ -56,19 +59,21 @@ static inline int selinux_xfrm_enabled(void)
return 0;
}
-static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad)
+static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
{
return 0;
}
-static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto)
+static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad,
+ u8 proto)
{
return 0;
}
-static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
+ int ckall)
{
*sid = SECSID_NULL;
return 0;
@@ -77,12 +82,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int
static inline void selinux_xfrm_notify_policyload(void)
{
}
-#endif
-static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
+static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
{
- int err = selinux_xfrm_decode_session(skb, sid, 0);
- BUG_ON(err);
+ *sid = SECSID_NULL;
+ return 0;
}
+#endif
#endif /* _SELINUX_XFRM_H_ */
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index b4e14bc0bf3..694e9e43855 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -8,7 +8,7 @@
*
* Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
* Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
- * Paul Moore <paul.moore@hp.com>
+ * Paul Moore <paul@paul-moore.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
@@ -16,6 +16,7 @@
*/
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
@@ -103,22 +104,6 @@ static int sel_netif_insert(struct sel_netif *netif)
}
/**
- * sel_netif_free - Frees an interface entry
- * @p: the entry's RCU field
- *
- * Description:
- * This function is designed to be used as a callback to the call_rcu()
- * function so that memory allocated to a hash table interface entry can be
- * released safely.
- *
- */
-static void sel_netif_free(struct rcu_head *p)
-{
- struct sel_netif *netif = container_of(p, struct sel_netif, rcu_head);
- kfree(netif);
-}
-
-/**
* sel_netif_destroy - Remove an interface record from the table
* @netif: the existing interface record
*
@@ -130,7 +115,7 @@ static void sel_netif_destroy(struct sel_netif *netif)
{
list_del_rcu(&netif->list);
sel_netif_total--;
- call_rcu(&netif->rcu_head, sel_netif_free);
+ kfree_rcu(netif, rcu_head);
}
/**
@@ -267,8 +252,7 @@ static void sel_netif_flush(void)
spin_unlock_bh(&sel_netif_lock);
}
-static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
- u16 class, u32 perms, u32 *retained)
+static int sel_netif_avc_callback(u32 event)
{
if (event == AVC_CALLBACK_RESET) {
sel_netif_flush();
@@ -280,7 +264,7 @@ static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
@@ -307,8 +291,7 @@ static __init int sel_netif_init(void)
register_netdevice_notifier(&sel_netif_netdev_notifier);
- err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
- SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+ err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET);
if (err)
panic("avc_add_callback() failed, error %d\n", err);
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index 2534400317c..0364120d1ec 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -4,7 +4,7 @@
* This file provides the necessary glue to tie NetLabel into the SELinux
* subsystem.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
*/
@@ -29,6 +29,7 @@
#include <linux/spinlock.h>
#include <linux/rcupdate.h>
+#include <linux/gfp.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/sock.h>
@@ -100,6 +101,32 @@ static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk)
}
/**
+ * selinux_netlbl_sock_getattr - Get the cached NetLabel secattr
+ * @sk: the socket
+ * @sid: the SID
+ *
+ * Query the socket's cached secattr and if the SID matches the cached value
+ * return the cache, otherwise return NULL.
+ *
+ */
+static struct netlbl_lsm_secattr *selinux_netlbl_sock_getattr(
+ const struct sock *sk,
+ u32 sid)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct netlbl_lsm_secattr *secattr = sksec->nlbl_secattr;
+
+ if (secattr == NULL)
+ return NULL;
+
+ if ((secattr->flags & NETLBL_SECATTR_SECID) &&
+ (secattr->attr.secid == sid))
+ return secattr;
+
+ return NULL;
+}
+
+/**
* selinux_netlbl_cache_invalidate - Invalidate the NetLabel cache
*
* Description:
@@ -131,31 +158,31 @@ void selinux_netlbl_err(struct sk_buff *skb, int error, int gateway)
/**
* selinux_netlbl_sk_security_free - Free the NetLabel fields
- * @sssec: the sk_security_struct
+ * @sksec: the sk_security_struct
*
* Description:
* Free all of the memory in the NetLabel fields of a sk_security_struct.
*
*/
-void selinux_netlbl_sk_security_free(struct sk_security_struct *ssec)
+void selinux_netlbl_sk_security_free(struct sk_security_struct *sksec)
{
- if (ssec->nlbl_secattr != NULL)
- netlbl_secattr_free(ssec->nlbl_secattr);
+ if (sksec->nlbl_secattr != NULL)
+ netlbl_secattr_free(sksec->nlbl_secattr);
}
/**
* selinux_netlbl_sk_security_reset - Reset the NetLabel fields
- * @ssec: the sk_security_struct
+ * @sksec: the sk_security_struct
* @family: the socket family
*
* Description:
* Called when the NetLabel state of a sk_security_struct needs to be reset.
- * The caller is responsibile for all the NetLabel sk_security_struct locking.
+ * The caller is responsible for all the NetLabel sk_security_struct locking.
*
*/
-void selinux_netlbl_sk_security_reset(struct sk_security_struct *ssec)
+void selinux_netlbl_sk_security_reset(struct sk_security_struct *sksec)
{
- ssec->nlbl_state = NLBL_UNSET;
+ sksec->nlbl_state = NLBL_UNSET;
}
/**
@@ -223,7 +250,7 @@ int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
struct sk_security_struct *sksec = sk->sk_security;
if (sksec->nlbl_state != NLBL_REQSKB)
return 0;
- secattr = sksec->nlbl_secattr;
+ secattr = selinux_netlbl_sock_getattr(sk, sid);
}
if (secattr == NULL) {
secattr = &secattr_storage;
@@ -409,6 +436,9 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock,
sksec->nlbl_state == NLBL_CONNLABELED)) {
netlbl_secattr_init(&secattr);
lock_sock(sk);
+ /* call the netlabel function directly as we want to see the
+ * on-the-wire label that is assigned via the socket's options
+ * and not the cached netlabel/lsm attributes */
rc = netlbl_sock_getattr(sk, &secattr);
release_sock(sk);
if (rc == 0)
@@ -441,8 +471,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state != NLBL_CONNLABELED)
return 0;
- local_bh_disable();
- bh_lock_sock_nested(sk);
+ lock_sock(sk);
/* connected sockets are allowed to disconnect when the address family
* is set to AF_UNSPEC, if that is what is happening we want to reset
@@ -463,7 +492,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state = NLBL_CONNLABELED;
socket_connect_return:
- bh_unlock_sock(sk);
- local_bh_enable();
+ release_sock(sk);
return rc;
}
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 1ae556446e6..828fb6a4e94 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -11,13 +11,16 @@
*/
#include <linux/init.h>
#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
-#include <linux/list.h>
+#include <linux/export.h>
#include <linux/skbuff.h>
-#include <linux/netlink.h>
#include <linux/selinux_netlink.h>
#include <net/net_namespace.h>
+#include <net/netlink.h>
+
+#include "security.h"
static struct sock *selnl;
@@ -44,7 +47,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
{
switch (msgtype) {
case SELNL_MSG_SETENFORCE: {
- struct selnl_msg_setenforce *msg = NLMSG_DATA(nlh);
+ struct selnl_msg_setenforce *msg = nlmsg_data(nlh);
memset(msg, 0, len);
msg->val = *((int *)data);
@@ -52,7 +55,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
}
case SELNL_MSG_POLICYLOAD: {
- struct selnl_msg_policyload *msg = NLMSG_DATA(nlh);
+ struct selnl_msg_policyload *msg = nlmsg_data(nlh);
memset(msg, 0, len);
msg->seqno = *((u32 *)data);
@@ -73,12 +76,14 @@ static void selnl_notify(int msgtype, void *data)
len = selnl_msglen(msgtype);
- skb = alloc_skb(NLMSG_SPACE(len), GFP_USER);
+ skb = nlmsg_new(len, GFP_USER);
if (!skb)
goto oom;
tmp = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0, msgtype, len);
+ nlh = nlmsg_put(skb, 0, 0, msgtype, len, 0);
+ if (!nlh)
+ goto out_kfree_skb;
selnl_add_payload(nlh, len, msgtype, data);
nlh->nlmsg_len = skb->tail - tmp;
NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
@@ -86,7 +91,7 @@ static void selnl_notify(int msgtype, void *data)
out:
return;
-nlmsg_failure:
+out_kfree_skb:
kfree_skb(skb);
oom:
printk(KERN_ERR "SELinux: OOM in %s\n", __func__);
@@ -105,11 +110,14 @@ void selnl_notify_policyload(u32 seqno)
static int __init selnl_init(void)
{
- selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX,
- SELNLGRP_MAX, NULL, NULL, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .groups = SELNLGRP_MAX,
+ .flags = NL_CFG_F_NONROOT_RECV,
+ };
+
+ selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg);
if (selnl == NULL)
panic("SELinux: Cannot create netlink socket.");
- netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
return 0;
}
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 7100072bb1b..03a72c32afd 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -6,7 +6,7 @@
* needed to reduce the lookup overhead since most of these queries happen on
* a per-packet basis.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
* This code is heavily based on the "netif" concept originally developed by
* James Morris <jmorris@redhat.com>
@@ -31,6 +31,7 @@
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/in6.h>
@@ -68,22 +69,6 @@ static DEFINE_SPINLOCK(sel_netnode_lock);
static struct sel_netnode_bkt sel_netnode_hash[SEL_NETNODE_HASH_SIZE];
/**
- * sel_netnode_free - Frees a node entry
- * @p: the entry's RCU field
- *
- * Description:
- * This function is designed to be used as a callback to the call_rcu()
- * function so that memory allocated to a hash table node entry can be
- * released safely.
- *
- */
-static void sel_netnode_free(struct rcu_head *p)
-{
- struct sel_netnode *node = container_of(p, struct sel_netnode, rcu);
- kfree(node);
-}
-
-/**
* sel_netnode_hashfn_ipv4 - IPv4 hashing function for the node table
* @addr: IPv4 address
*
@@ -140,6 +125,7 @@ static struct sel_netnode *sel_netnode_find(const void *addr, u16 family)
break;
default:
BUG();
+ return NULL;
}
list_for_each_entry_rcu(node, &sel_netnode_hash[idx].list, list)
@@ -180,20 +166,20 @@ static void sel_netnode_insert(struct sel_netnode *node)
break;
default:
BUG();
+ return;
}
- INIT_RCU_HEAD(&node->rcu);
-
/* we need to impose a limit on the growth of the hash table so check
* this bucket to make sure it is within the specified bounds */
list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
struct sel_netnode *tail;
tail = list_entry(
- rcu_dereference(sel_netnode_hash[idx].list.prev),
+ rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
+ lockdep_is_held(&sel_netnode_lock)),
struct sel_netnode, list);
list_del_rcu(&tail->list);
- call_rcu(&tail->rcu, sel_netnode_free);
+ kfree_rcu(tail, rcu);
} else
sel_netnode_hash[idx].size++;
}
@@ -236,10 +222,11 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
case PF_INET6:
ret = security_node_sid(PF_INET6,
addr, sizeof(struct in6_addr), sid);
- ipv6_addr_copy(&new->nsec.addr.ipv6, addr);
+ new->nsec.addr.ipv6 = *(struct in6_addr *)addr;
break;
default:
BUG();
+ ret = -EINVAL;
}
if (ret != 0)
goto out;
@@ -306,15 +293,14 @@ static void sel_netnode_flush(void)
list_for_each_entry_safe(node, node_tmp,
&sel_netnode_hash[idx].list, list) {
list_del_rcu(&node->list);
- call_rcu(&node->rcu, sel_netnode_free);
+ kfree_rcu(node, rcu);
}
sel_netnode_hash[idx].size = 0;
}
spin_unlock_bh(&sel_netnode_lock);
}
-static int sel_netnode_avc_callback(u32 event, u32 ssid, u32 tsid,
- u16 class, u32 perms, u32 *retained)
+static int sel_netnode_avc_callback(u32 event)
{
if (event == AVC_CALLBACK_RESET) {
sel_netnode_flush();
@@ -336,8 +322,7 @@ static __init int sel_netnode_init(void)
sel_netnode_hash[iter].size = 0;
}
- ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET,
- SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+ ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET);
if (ret != 0)
panic("avc_add_callback() failed, error %d\n", ret);
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index fe7fba67f19..d35379781c2 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -5,7 +5,7 @@
* mapping is maintained as part of the normal policy but a fast cache is
* needed to reduce the lookup overhead.
*
- * Author: Paul Moore <paul.moore@hp.com>
+ * Author: Paul Moore <paul@paul-moore.com>
*
* This code is heavily based on the "netif" concept originally developed by
* James Morris <jmorris@redhat.com>
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/rcupdate.h>
#include <linux/list.h>
+#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/in.h>
#include <linux/in6.h>
@@ -67,22 +68,6 @@ static DEFINE_SPINLOCK(sel_netport_lock);
static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE];
/**
- * sel_netport_free - Frees a port entry
- * @p: the entry's RCU field
- *
- * Description:
- * This function is designed to be used as a callback to the call_rcu()
- * function so that memory allocated to a hash table port entry can be
- * released safely.
- *
- */
-static void sel_netport_free(struct rcu_head *p)
-{
- struct sel_netport *port = container_of(p, struct sel_netport, rcu);
- kfree(port);
-}
-
-/**
* sel_netport_hashfn - Hashing function for the port table
* @pnum: port number
*
@@ -138,10 +123,12 @@ static void sel_netport_insert(struct sel_netport *port)
if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) {
struct sel_netport *tail;
tail = list_entry(
- rcu_dereference(sel_netport_hash[idx].list.prev),
+ rcu_dereference_protected(
+ sel_netport_hash[idx].list.prev,
+ lockdep_is_held(&sel_netport_lock)),
struct sel_netport, list);
list_del_rcu(&tail->list);
- call_rcu(&tail->rcu, sel_netport_free);
+ kfree_rcu(tail, rcu);
} else
sel_netport_hash[idx].size++;
}
@@ -240,15 +227,14 @@ static void sel_netport_flush(void)
list_for_each_entry_safe(port, port_tmp,
&sel_netport_hash[idx].list, list) {
list_del_rcu(&port->list);
- call_rcu(&port->rcu, sel_netport_free);
+ kfree_rcu(port, rcu);
}
sel_netport_hash[idx].size = 0;
}
spin_unlock_bh(&sel_netport_lock);
}
-static int sel_netport_avc_callback(u32 event, u32 ssid, u32 tsid,
- u16 class, u32 perms, u32 *retained)
+static int sel_netport_avc_callback(u32 event)
{
if (event == AVC_CALLBACK_RESET) {
sel_netport_flush();
@@ -270,8 +256,7 @@ static __init int sel_netport_init(void)
sel_netport_hash[iter].size = 0;
}
- ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET,
- SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+ ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET);
if (ret != 0)
panic("avc_add_callback() failed, error %d\n", ret);
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index dd7cc6de77f..2df7b900e25 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -11,17 +11,17 @@
*/
#include <linux/types.h>
#include <linux/kernel.h>
-#include <linux/skbuff.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if.h>
-#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/inet_diag.h>
#include <linux/xfrm.h>
#include <linux/audit.h>
+#include <linux/sock_diag.h>
#include "flask.h"
#include "av_permissions.h"
+#include "security.h"
struct nlmsg_perm {
u16 nlmsg_type;
@@ -66,18 +66,20 @@ static struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_NEWADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_DELADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
{ RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
-};
-
-static struct nlmsg_perm nlmsg_firewall_perms[] =
-{
- { IPQM_MODE, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
- { IPQM_VERDICT, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
+ { RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_NEWNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETMDB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
{
{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_xfrm_perms[] =
@@ -116,6 +118,8 @@ static struct nlmsg_perm nlmsg_audit_perms[] =
{ AUDIT_MAKE_EQUIV, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
{ AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ },
{ AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT },
+ { AUDIT_GET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_READ },
+ { AUDIT_SET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
};
@@ -143,12 +147,6 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
sizeof(nlmsg_route_perms));
break;
- case SECCLASS_NETLINK_FIREWALL_SOCKET:
- case SECCLASS_NETLINK_IP6FW_SOCKET:
- err = nlmsg_perm(nlmsg_type, perm, nlmsg_firewall_perms,
- sizeof(nlmsg_firewall_perms));
- break;
-
case SECCLASS_NETLINK_TCPDIAG_SOCKET:
err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms,
sizeof(nlmsg_tcpdiag_perms));
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index fab36fdf276..c71737f6d1c 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -2,7 +2,7 @@
*
* Added conditional policy language extensions
*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support for the policy capability bitmap
*
@@ -28,6 +28,8 @@
#include <linux/percpu.h>
#include <linux/audit.h>
#include <linux/uaccess.h>
+#include <linux/kobject.h>
+#include <linux/ctype.h>
/* selinuxfs pseudo filesystem for exporting the security policy API.
Based on the proc code and the fs/nfsd/nfsctl.c code. */
@@ -42,7 +44,9 @@
/* Policy capability filenames */
static char *policycap_names[] = {
"network_peer_controls",
- "open_perms"
+ "open_perms",
+ "redhat1",
+ "always_check_network"
};
unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
@@ -50,7 +54,7 @@ unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
static int __init checkreqprot_setup(char *str)
{
unsigned long checkreqprot;
- if (!strict_strtoul(str, 0, &checkreqprot))
+ if (!kstrtoul(str, 0, &checkreqprot))
selinux_checkreqprot = checkreqprot ? 1 : 0;
return 1;
}
@@ -68,11 +72,11 @@ static int *bool_pending_values;
static struct dentry *class_dir;
static unsigned long last_class_ino;
+static char policy_opened;
+
/* global data for policy capabilities */
static struct dentry *policycap_dir;
-extern void selnl_notify_setenforce(int val);
-
/* Check whether a task is allowed to use a security operation. */
static int task_has_security(struct task_struct *tsk,
u32 perms)
@@ -110,6 +114,8 @@ enum sel_inos {
SEL_COMPAT_NET, /* whether to use old compat network packet controls */
SEL_REJECT_UNKNOWN, /* export unknown reject handling to userspace */
SEL_DENY_UNKNOWN, /* export unknown deny handling to userspace */
+ SEL_STATUS, /* export current status using mmap() */
+ SEL_POLICY, /* allow userspace to read the in kernel policy */
SEL_INO_NEXT, /* The next inode number to use */
};
@@ -137,19 +143,24 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char *page;
+ char *page = NULL;
ssize_t length;
int new_value;
+ length = -ENOMEM;
if (count >= PAGE_SIZE)
- return -ENOMEM;
- if (*ppos != 0) {
- /* No partial writes. */
- return -EINVAL;
- }
+ goto out;
+
+ /* No partial writes. */
+ length = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ length = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
if (!page)
- return -ENOMEM;
+ goto out;
+
length = -EFAULT;
if (copy_from_user(page, buf, count))
goto out;
@@ -165,12 +176,13 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
"enforcing=%d old_enforcing=%d auid=%u ses=%u",
new_value, selinux_enforcing,
- audit_get_loginuid(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
selinux_enforcing = new_value;
if (selinux_enforcing)
avc_ss_reset(0);
selnl_notify_setenforce(selinux_enforcing);
+ selinux_status_update_setenforce(selinux_enforcing);
}
length = count;
out:
@@ -184,6 +196,7 @@ out:
static const struct file_operations sel_enforce_ops = {
.read = sel_read_enforce,
.write = sel_write_enforce,
+ .llseek = generic_file_llseek,
};
static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
@@ -191,7 +204,7 @@ static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
{
char tmpbuf[TMPBUFLEN];
ssize_t length;
- ino_t ino = filp->f_path.dentry->d_inode->i_ino;
+ ino_t ino = file_inode(filp)->i_ino;
int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
security_get_reject_unknown() : !security_get_allow_unknown();
@@ -201,6 +214,60 @@ static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
static const struct file_operations sel_handle_unknown_ops = {
.read = sel_read_handle_unknown,
+ .llseek = generic_file_llseek,
+};
+
+static int sel_open_handle_status(struct inode *inode, struct file *filp)
+{
+ struct page *status = selinux_kernel_status_page();
+
+ if (!status)
+ return -ENOMEM;
+
+ filp->private_data = status;
+
+ return 0;
+}
+
+static ssize_t sel_read_handle_status(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct page *status = filp->private_data;
+
+ BUG_ON(!status);
+
+ return simple_read_from_buffer(buf, count, ppos,
+ page_address(status),
+ sizeof(struct selinux_kernel_status));
+}
+
+static int sel_mmap_handle_status(struct file *filp,
+ struct vm_area_struct *vma)
+{
+ struct page *status = filp->private_data;
+ unsigned long size = vma->vm_end - vma->vm_start;
+
+ BUG_ON(!status);
+
+ /* only allows one page from the head */
+ if (vma->vm_pgoff > 0 || size != PAGE_SIZE)
+ return -EIO;
+ /* disallow writable mapping */
+ if (vma->vm_flags & VM_WRITE)
+ return -EPERM;
+ /* disallow mprotect() from making the mapping writable */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ return remap_pfn_range(vma, vma->vm_start,
+ page_to_pfn(status),
+ size, vma->vm_page_prot);
+}
+
+static const struct file_operations sel_handle_status_ops = {
+ .open = sel_open_handle_status,
+ .read = sel_read_handle_status,
+ .mmap = sel_mmap_handle_status,
+ .llseek = generic_file_llseek,
};
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
@@ -208,20 +275,24 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char *page;
+ char *page = NULL;
ssize_t length;
int new_value;
- extern int selinux_disable(void);
+ length = -ENOMEM;
if (count >= PAGE_SIZE)
- return -ENOMEM;
- if (*ppos != 0) {
- /* No partial writes. */
- return -EINVAL;
- }
+ goto out;
+
+ /* No partial writes. */
+ length = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ length = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
if (!page)
- return -ENOMEM;
+ goto out;
+
length = -EFAULT;
if (copy_from_user(page, buf, count))
goto out;
@@ -232,11 +303,11 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
if (new_value) {
length = selinux_disable();
- if (length < 0)
+ if (length)
goto out;
audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
"selinux=0 auid=%u ses=%u",
- audit_get_loginuid(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
}
@@ -251,6 +322,7 @@ out:
static const struct file_operations sel_disable_ops = {
.write = sel_write_disable,
+ .llseek = generic_file_llseek,
};
static ssize_t sel_read_policyvers(struct file *filp, char __user *buf,
@@ -265,6 +337,7 @@ static ssize_t sel_read_policyvers(struct file *filp, char __user *buf,
static const struct file_operations sel_policyvers_ops = {
.read = sel_read_policyvers,
+ .llseek = generic_file_llseek,
};
/* declaration for sel_write_load */
@@ -273,7 +346,7 @@ static int sel_make_classes(void);
static int sel_make_policycap(void);
/* declaration for sel_make_class_dirs */
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
unsigned long *ino);
static ssize_t sel_read_mls(struct file *filp, char __user *buf,
@@ -282,19 +355,156 @@ static ssize_t sel_read_mls(struct file *filp, char __user *buf,
char tmpbuf[TMPBUFLEN];
ssize_t length;
- length = scnprintf(tmpbuf, TMPBUFLEN, "%d", selinux_mls_enabled);
+ length = scnprintf(tmpbuf, TMPBUFLEN, "%d",
+ security_mls_enabled());
return simple_read_from_buffer(buf, count, ppos, tmpbuf, length);
}
static const struct file_operations sel_mls_ops = {
.read = sel_read_mls,
+ .llseek = generic_file_llseek,
+};
+
+struct policy_load_memory {
+ size_t len;
+ void *data;
+};
+
+static int sel_open_policy(struct inode *inode, struct file *filp)
+{
+ struct policy_load_memory *plm = NULL;
+ int rc;
+
+ BUG_ON(filp->private_data);
+
+ mutex_lock(&sel_mutex);
+
+ rc = task_has_security(current, SECURITY__READ_POLICY);
+ if (rc)
+ goto err;
+
+ rc = -EBUSY;
+ if (policy_opened)
+ goto err;
+
+ rc = -ENOMEM;
+ plm = kzalloc(sizeof(*plm), GFP_KERNEL);
+ if (!plm)
+ goto err;
+
+ if (i_size_read(inode) != security_policydb_len()) {
+ mutex_lock(&inode->i_mutex);
+ i_size_write(inode, security_policydb_len());
+ mutex_unlock(&inode->i_mutex);
+ }
+
+ rc = security_read_policy(&plm->data, &plm->len);
+ if (rc)
+ goto err;
+
+ policy_opened = 1;
+
+ filp->private_data = plm;
+
+ mutex_unlock(&sel_mutex);
+
+ return 0;
+err:
+ mutex_unlock(&sel_mutex);
+
+ if (plm)
+ vfree(plm->data);
+ kfree(plm);
+ return rc;
+}
+
+static int sel_release_policy(struct inode *inode, struct file *filp)
+{
+ struct policy_load_memory *plm = filp->private_data;
+
+ BUG_ON(!plm);
+
+ policy_opened = 0;
+
+ vfree(plm->data);
+ kfree(plm);
+
+ return 0;
+}
+
+static ssize_t sel_read_policy(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct policy_load_memory *plm = filp->private_data;
+ int ret;
+
+ mutex_lock(&sel_mutex);
+
+ ret = task_has_security(current, SECURITY__READ_POLICY);
+ if (ret)
+ goto out;
+
+ ret = simple_read_from_buffer(buf, count, ppos, plm->data, plm->len);
+out:
+ mutex_unlock(&sel_mutex);
+ return ret;
+}
+
+static int sel_mmap_policy_fault(struct vm_area_struct *vma,
+ struct vm_fault *vmf)
+{
+ struct policy_load_memory *plm = vma->vm_file->private_data;
+ unsigned long offset;
+ struct page *page;
+
+ if (vmf->flags & (FAULT_FLAG_MKWRITE | FAULT_FLAG_WRITE))
+ return VM_FAULT_SIGBUS;
+
+ offset = vmf->pgoff << PAGE_SHIFT;
+ if (offset >= roundup(plm->len, PAGE_SIZE))
+ return VM_FAULT_SIGBUS;
+
+ page = vmalloc_to_page(plm->data + offset);
+ get_page(page);
+
+ vmf->page = page;
+
+ return 0;
+}
+
+static struct vm_operations_struct sel_mmap_policy_ops = {
+ .fault = sel_mmap_policy_fault,
+ .page_mkwrite = sel_mmap_policy_fault,
+};
+
+static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
+{
+ if (vma->vm_flags & VM_SHARED) {
+ /* do not allow mprotect to make mapping writable */
+ vma->vm_flags &= ~VM_MAYWRITE;
+
+ if (vma->vm_flags & VM_WRITE)
+ return -EACCES;
+ }
+
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_ops = &sel_mmap_policy_ops;
+
+ return 0;
+}
+
+static const struct file_operations sel_policy_ops = {
+ .open = sel_open_policy,
+ .read = sel_read_policy,
+ .mmap = sel_mmap_policy,
+ .release = sel_release_policy,
+ .llseek = generic_file_llseek,
};
static ssize_t sel_write_load(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- int ret;
ssize_t length;
void *data = NULL;
@@ -304,17 +514,19 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
if (length)
goto out;
- if (*ppos != 0) {
- /* No partial writes. */
- length = -EINVAL;
+ /* No partial writes. */
+ length = -EINVAL;
+ if (*ppos != 0)
goto out;
- }
- if ((count > 64 * 1024 * 1024)
- || (data = vmalloc(count)) == NULL) {
- length = -ENOMEM;
+ length = -EFBIG;
+ if (count > 64 * 1024 * 1024)
+ goto out;
+
+ length = -ENOMEM;
+ data = vmalloc(count);
+ if (!data)
goto out;
- }
length = -EFAULT;
if (copy_from_user(data, buf, count) != 0)
@@ -324,28 +536,24 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
if (length)
goto out;
- ret = sel_make_bools();
- if (ret) {
- length = ret;
+ length = sel_make_bools();
+ if (length)
goto out1;
- }
- ret = sel_make_classes();
- if (ret) {
- length = ret;
+ length = sel_make_classes();
+ if (length)
goto out1;
- }
- ret = sel_make_policycap();
- if (ret)
- length = ret;
- else
- length = count;
+ length = sel_make_policycap();
+ if (length)
+ goto out1;
+
+ length = count;
out1:
audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
"policy loaded auid=%u ses=%u",
- audit_get_loginuid(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
out:
mutex_unlock(&sel_mutex);
@@ -355,30 +563,31 @@ out:
static const struct file_operations sel_load_ops = {
.write = sel_write_load,
+ .llseek = generic_file_llseek,
};
static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
{
- char *canon;
+ char *canon = NULL;
u32 sid, len;
ssize_t length;
length = task_has_security(current, SECURITY__CHECK_CONTEXT);
if (length)
- return length;
+ goto out;
- length = security_context_to_sid(buf, size, &sid);
- if (length < 0)
- return length;
+ length = security_context_to_sid(buf, size, &sid, GFP_KERNEL);
+ if (length)
+ goto out;
length = security_sid_to_context(sid, &canon, &len);
- if (length < 0)
- return length;
+ if (length)
+ goto out;
+ length = -ERANGE;
if (len > SIMPLE_TRANSACTION_LIMIT) {
printk(KERN_ERR "SELinux: %s: context size (%u) exceeds "
"payload max\n", __func__, len);
- length = -ERANGE;
goto out;
}
@@ -402,23 +611,28 @@ static ssize_t sel_read_checkreqprot(struct file *filp, char __user *buf,
static ssize_t sel_write_checkreqprot(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char *page;
+ char *page = NULL;
ssize_t length;
unsigned int new_value;
length = task_has_security(current, SECURITY__SETCHECKREQPROT);
if (length)
- return length;
+ goto out;
+ length = -ENOMEM;
if (count >= PAGE_SIZE)
- return -ENOMEM;
- if (*ppos != 0) {
- /* No partial writes. */
- return -EINVAL;
- }
+ goto out;
+
+ /* No partial writes. */
+ length = -EINVAL;
+ if (*ppos != 0)
+ goto out;
+
+ length = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
if (!page)
- return -ENOMEM;
+ goto out;
+
length = -EFAULT;
if (copy_from_user(page, buf, count))
goto out;
@@ -436,6 +650,7 @@ out:
static const struct file_operations sel_checkreqprot_ops = {
.read = sel_read_checkreqprot,
.write = sel_write_checkreqprot,
+ .llseek = generic_file_llseek,
};
/*
@@ -458,7 +673,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
static ssize_t selinux_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
{
- ino_t ino = file->f_path.dentry->d_inode->i_ino;
+ ino_t ino = file_inode(file)->i_ino;
char *data;
ssize_t rv;
@@ -481,6 +696,7 @@ static const struct file_operations transaction_ops = {
.write = selinux_transaction_write,
.read = simple_transaction_read,
.release = simple_transaction_release,
+ .llseek = generic_file_llseek,
};
/*
@@ -491,173 +707,216 @@ static const struct file_operations transaction_ops = {
static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
{
- char *scon, *tcon;
+ char *scon = NULL, *tcon = NULL;
u32 ssid, tsid;
u16 tclass;
- u32 req;
struct av_decision avd;
ssize_t length;
length = task_has_security(current, SECURITY__COMPUTE_AV);
if (length)
- return length;
+ goto out;
length = -ENOMEM;
- scon = kzalloc(size+1, GFP_KERNEL);
+ scon = kzalloc(size + 1, GFP_KERNEL);
if (!scon)
- return length;
+ goto out;
- tcon = kzalloc(size+1, GFP_KERNEL);
+ length = -ENOMEM;
+ tcon = kzalloc(size + 1, GFP_KERNEL);
if (!tcon)
goto out;
length = -EINVAL;
- if (sscanf(buf, "%s %s %hu %x", scon, tcon, &tclass, &req) != 4)
- goto out2;
+ if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
+ goto out;
- length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
- if (length < 0)
- goto out2;
- length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid);
- if (length < 0)
- goto out2;
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
+ if (length)
+ goto out;
- length = security_compute_av_user(ssid, tsid, tclass, req, &avd);
- if (length < 0)
- goto out2;
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
+ if (length)
+ goto out;
+
+ security_compute_av_user(ssid, tsid, tclass, &avd);
length = scnprintf(buf, SIMPLE_TRANSACTION_LIMIT,
"%x %x %x %x %u %x",
avd.allowed, 0xffffffff,
avd.auditallow, avd.auditdeny,
avd.seqno, avd.flags);
-out2:
- kfree(tcon);
out:
+ kfree(tcon);
kfree(scon);
return length;
}
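/*
 * Hedged userspace sketch, not part of the patch: exercising the "access"
 * transaction node after this change.  The fourth "requested" field is no
 * longer parsed, so only "scon tcon tclass" is written; the contexts and
 * the numeric class below are purely illustrative.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[512];
	ssize_t n;
	int fd = open("/sys/fs/selinux/access", O_RDWR);

	if (fd < 0)
		return 1;
	snprintf(buf, sizeof(buf), "%s %s %u",
		 "system_u:system_r:kernel_t:s0",
		 "system_u:object_r:security_t:s0", 1u);
	if (write(fd, buf, strlen(buf)) < 0)
		return 1;
	n = read(fd, buf, sizeof(buf) - 1);	/* "allowed ... seqno flags" */
	if (n < 0)
		return 1;
	buf[n] = '\0';
	printf("%s\n", buf);
	return 0;
}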
static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
{
- char *scon, *tcon;
+ char *scon = NULL, *tcon = NULL;
+ char *namebuf = NULL, *objname = NULL;
u32 ssid, tsid, newsid;
u16 tclass;
ssize_t length;
- char *newcon;
+ char *newcon = NULL;
u32 len;
+ int nargs;
length = task_has_security(current, SECURITY__COMPUTE_CREATE);
if (length)
- return length;
+ goto out;
length = -ENOMEM;
- scon = kzalloc(size+1, GFP_KERNEL);
+ scon = kzalloc(size + 1, GFP_KERNEL);
if (!scon)
- return length;
+ goto out;
- tcon = kzalloc(size+1, GFP_KERNEL);
+ length = -ENOMEM;
+ tcon = kzalloc(size + 1, GFP_KERNEL);
if (!tcon)
goto out;
+ length = -ENOMEM;
+ namebuf = kzalloc(size + 1, GFP_KERNEL);
+ if (!namebuf)
+ goto out;
+
length = -EINVAL;
- if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
- goto out2;
+ nargs = sscanf(buf, "%s %s %hu %s", scon, tcon, &tclass, namebuf);
+ if (nargs < 3 || nargs > 4)
+ goto out;
+ if (nargs == 4) {
+ /*
+ * If the name of the new object to be queried contains whitespace
+ * or multibyte characters, it must be encoded using the
+ * percentage-encoding rule; otherwise the sscanf() logic only picks
+ * up the left half of the supplied name, split unexpectedly at the
+ * whitespace.
+ */
+ char *r, *w;
+ int c1, c2;
+
+ r = w = namebuf;
+ do {
+ c1 = *r++;
+ if (c1 == '+')
+ c1 = ' ';
+ else if (c1 == '%') {
+ c1 = hex_to_bin(*r++);
+ if (c1 < 0)
+ goto out;
+ c2 = hex_to_bin(*r++);
+ if (c2 < 0)
+ goto out;
+ c1 = (c1 << 4) | c2;
+ }
+ *w++ = c1;
+ } while (c1 != '\0');
+
+ objname = namebuf;
+ }
- length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
- if (length < 0)
- goto out2;
- length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid);
- if (length < 0)
- goto out2;
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
+ if (length)
+ goto out;
- length = security_transition_sid_user(ssid, tsid, tclass, &newsid);
- if (length < 0)
- goto out2;
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_transition_sid_user(ssid, tsid, tclass,
+ objname, &newsid);
+ if (length)
+ goto out;
length = security_sid_to_context(newsid, &newcon, &len);
- if (length < 0)
- goto out2;
+ if (length)
+ goto out;
+ length = -ERANGE;
if (len > SIMPLE_TRANSACTION_LIMIT) {
printk(KERN_ERR "SELinux: %s: context size (%u) exceeds "
"payload max\n", __func__, len);
- length = -ERANGE;
- goto out3;
+ goto out;
}
memcpy(buf, newcon, len);
length = len;
-out3:
+out:
kfree(newcon);
-out2:
+ kfree(namebuf);
kfree(tcon);
-out:
kfree(scon);
return length;
}
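/*
 * Hedged userspace sketch, not part of the patch: encoding an object name
 * before sending the "scon tcon tclass objname" query handled by
 * sel_write_create() above, mirroring the decoder's '+' and "%XX" rules.
 * encode_name() is a hypothetical helper.
 */
#include <stdio.h>

static void encode_name(const char *in, char *out, size_t outlen)
{
	size_t o = 0;

	for (; *in && o + 4 < outlen; in++) {
		unsigned char c = *in;

		if (c == ' ')
			out[o++] = '+';		/* decoder maps '+' back to ' ' */
		else if (c == '+' || c == '%' || c > 0x7f)
			o += snprintf(out + o, outlen - o, "%%%02X", c);
		else
			out[o++] = c;
	}
	out[o] = '\0';
}

/* e.g. encode_name("my file", enc, sizeof(enc)) produces "my+file" */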
static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
{
- char *scon, *tcon;
+ char *scon = NULL, *tcon = NULL;
u32 ssid, tsid, newsid;
u16 tclass;
ssize_t length;
- char *newcon;
+ char *newcon = NULL;
u32 len;
length = task_has_security(current, SECURITY__COMPUTE_RELABEL);
if (length)
- return length;
+ goto out;
length = -ENOMEM;
- scon = kzalloc(size+1, GFP_KERNEL);
+ scon = kzalloc(size + 1, GFP_KERNEL);
if (!scon)
- return length;
+ goto out;
- tcon = kzalloc(size+1, GFP_KERNEL);
+ length = -ENOMEM;
+ tcon = kzalloc(size + 1, GFP_KERNEL);
if (!tcon)
goto out;
length = -EINVAL;
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
- goto out2;
+ goto out;
- length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
- if (length < 0)
- goto out2;
- length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid);
- if (length < 0)
- goto out2;
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
+ if (length)
+ goto out;
length = security_change_sid(ssid, tsid, tclass, &newsid);
- if (length < 0)
- goto out2;
+ if (length)
+ goto out;
length = security_sid_to_context(newsid, &newcon, &len);
- if (length < 0)
- goto out2;
+ if (length)
+ goto out;
- if (len > SIMPLE_TRANSACTION_LIMIT) {
- length = -ERANGE;
- goto out3;
- }
+ length = -ERANGE;
+ if (len > SIMPLE_TRANSACTION_LIMIT)
+ goto out;
memcpy(buf, newcon, len);
length = len;
-out3:
+out:
kfree(newcon);
-out2:
kfree(tcon);
-out:
kfree(scon);
return length;
}
static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
{
- char *con, *user, *ptr;
- u32 sid, *sids;
+ char *con = NULL, *user = NULL, *ptr;
+ u32 sid, *sids = NULL;
ssize_t length;
char *newcon;
int i, rc;
@@ -665,28 +924,29 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
length = task_has_security(current, SECURITY__COMPUTE_USER);
if (length)
- return length;
+ goto out;
length = -ENOMEM;
- con = kzalloc(size+1, GFP_KERNEL);
+ con = kzalloc(size + 1, GFP_KERNEL);
if (!con)
- return length;
+ goto out;
- user = kzalloc(size+1, GFP_KERNEL);
+ length = -ENOMEM;
+ user = kzalloc(size + 1, GFP_KERNEL);
if (!user)
goto out;
length = -EINVAL;
if (sscanf(buf, "%s %s", con, user) != 2)
- goto out2;
+ goto out;
- length = security_context_to_sid(con, strlen(con)+1, &sid);
- if (length < 0)
- goto out2;
+ length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL);
+ if (length)
+ goto out;
length = security_get_user_sids(sid, user, &sids, &nsids);
- if (length < 0)
- goto out2;
+ if (length)
+ goto out;
length = sprintf(buf, "%u", nsids) + 1;
ptr = buf + length;
@@ -694,82 +954,82 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
rc = security_sid_to_context(sids[i], &newcon, &len);
if (rc) {
length = rc;
- goto out3;
+ goto out;
}
if ((length + len) >= SIMPLE_TRANSACTION_LIMIT) {
kfree(newcon);
length = -ERANGE;
- goto out3;
+ goto out;
}
memcpy(ptr, newcon, len);
kfree(newcon);
ptr += len;
length += len;
}
-out3:
+out:
kfree(sids);
-out2:
kfree(user);
-out:
kfree(con);
return length;
}
static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
{
- char *scon, *tcon;
+ char *scon = NULL, *tcon = NULL;
u32 ssid, tsid, newsid;
u16 tclass;
ssize_t length;
- char *newcon;
+ char *newcon = NULL;
u32 len;
length = task_has_security(current, SECURITY__COMPUTE_MEMBER);
if (length)
- return length;
+ goto out;
length = -ENOMEM;
- scon = kzalloc(size+1, GFP_KERNEL);
+ scon = kzalloc(size + 1, GFP_KERNEL);
if (!scon)
- return length;
+ goto out;
- tcon = kzalloc(size+1, GFP_KERNEL);
+ length = -ENOMEM;
+ tcon = kzalloc(size + 1, GFP_KERNEL);
if (!tcon)
goto out;
length = -EINVAL;
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
- goto out2;
+ goto out;
- length = security_context_to_sid(scon, strlen(scon)+1, &ssid);
- if (length < 0)
- goto out2;
- length = security_context_to_sid(tcon, strlen(tcon)+1, &tsid);
- if (length < 0)
- goto out2;
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
+ if (length)
+ goto out;
+
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
+ if (length)
+ goto out;
length = security_member_sid(ssid, tsid, tclass, &newsid);
- if (length < 0)
- goto out2;
+ if (length)
+ goto out;
length = security_sid_to_context(newsid, &newcon, &len);
- if (length < 0)
- goto out2;
+ if (length)
+ goto out;
+ length = -ERANGE;
if (len > SIMPLE_TRANSACTION_LIMIT) {
printk(KERN_ERR "SELinux: %s: context size (%u) exceeds "
"payload max\n", __func__, len);
- length = -ERANGE;
- goto out3;
+ goto out;
}
memcpy(buf, newcon, len);
length = len;
-out3:
+out:
kfree(newcon);
-out2:
kfree(tcon);
-out:
kfree(scon);
return length;
}
@@ -792,22 +1052,19 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
ssize_t length;
ssize_t ret;
int cur_enforcing;
- struct inode *inode = filep->f_path.dentry->d_inode;
- unsigned index = inode->i_ino & SEL_INO_MASK;
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
mutex_lock(&sel_mutex);
- if (index >= bool_num || strcmp(name, bool_pending_names[index])) {
- ret = -EINVAL;
+ ret = -EINVAL;
+ if (index >= bool_num || strcmp(name, bool_pending_names[index]))
goto out;
- }
+ ret = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
+ if (!page)
goto out;
- }
cur_enforcing = security_get_bool_value(index);
if (cur_enforcing < 0) {
@@ -819,8 +1076,7 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
ret = simple_read_from_buffer(buf, count, ppos, page, length);
out:
mutex_unlock(&sel_mutex);
- if (page)
- free_page((unsigned long)page);
+ free_page((unsigned long)page);
return ret;
}
@@ -830,8 +1086,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
char *page = NULL;
ssize_t length;
int new_value;
- struct inode *inode = filep->f_path.dentry->d_inode;
- unsigned index = inode->i_ino & SEL_INO_MASK;
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
mutex_lock(&sel_mutex);
@@ -840,26 +1095,23 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
if (length)
goto out;
- if (index >= bool_num || strcmp(name, bool_pending_names[index])) {
- length = -EINVAL;
+ length = -EINVAL;
+ if (index >= bool_num || strcmp(name, bool_pending_names[index]))
goto out;
- }
- if (count >= PAGE_SIZE) {
- length = -ENOMEM;
+ length = -ENOMEM;
+ if (count >= PAGE_SIZE)
goto out;
- }
- if (*ppos != 0) {
- /* No partial writes. */
- length = -EINVAL;
+ /* No partial writes. */
+ length = -EINVAL;
+ if (*ppos != 0)
goto out;
- }
+
+ length = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
- if (!page) {
- length = -ENOMEM;
+ if (!page)
goto out;
- }
length = -EFAULT;
if (copy_from_user(page, buf, count))
@@ -877,14 +1129,14 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
out:
mutex_unlock(&sel_mutex);
- if (page)
- free_page((unsigned long) page);
+ free_page((unsigned long) page);
return length;
}
static const struct file_operations sel_bool_ops = {
.read = sel_read_bool,
.write = sel_write_bool,
+ .llseek = generic_file_llseek,
};
static ssize_t sel_commit_bools_write(struct file *filep,
@@ -901,19 +1153,19 @@ static ssize_t sel_commit_bools_write(struct file *filep,
if (length)
goto out;
- if (count >= PAGE_SIZE) {
- length = -ENOMEM;
+ length = -ENOMEM;
+ if (count >= PAGE_SIZE)
goto out;
- }
- if (*ppos != 0) {
- /* No partial writes. */
+
+ /* No partial writes. */
+ length = -EINVAL;
+ if (*ppos != 0)
goto out;
- }
+
+ length = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
- if (!page) {
- length = -ENOMEM;
+ if (!page)
goto out;
- }
length = -EFAULT;
if (copy_from_user(page, buf, count))
@@ -923,51 +1175,57 @@ static ssize_t sel_commit_bools_write(struct file *filep,
if (sscanf(page, "%d", &new_value) != 1)
goto out;
+ length = 0;
if (new_value && bool_pending_values)
- security_set_bools(bool_num, bool_pending_values);
+ length = security_set_bools(bool_num, bool_pending_values);
- length = count;
+ if (!length)
+ length = count;
out:
mutex_unlock(&sel_mutex);
- if (page)
- free_page((unsigned long) page);
+ free_page((unsigned long) page);
return length;
}
static const struct file_operations sel_commit_bools_ops = {
.write = sel_commit_bools_write,
+ .llseek = generic_file_llseek,
};
static void sel_remove_entries(struct dentry *de)
{
struct list_head *node;
- spin_lock(&dcache_lock);
+ spin_lock(&de->d_lock);
node = de->d_subdirs.next;
while (node != &de->d_subdirs) {
struct dentry *d = list_entry(node, struct dentry, d_u.d_child);
+
+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED);
list_del_init(node);
if (d->d_inode) {
- d = dget_locked(d);
- spin_unlock(&dcache_lock);
+ dget_dlock(d);
+ spin_unlock(&de->d_lock);
+ spin_unlock(&d->d_lock);
d_delete(d);
simple_unlink(de->d_inode, d);
dput(d);
- spin_lock(&dcache_lock);
- }
+ spin_lock(&de->d_lock);
+ } else
+ spin_unlock(&d->d_lock);
node = de->d_subdirs.next;
}
- spin_unlock(&dcache_lock);
+ spin_unlock(&de->d_lock);
}
#define BOOL_DIR_NAME "booleans"
static int sel_make_bools(void)
{
- int i, ret = 0;
+ int i, ret;
ssize_t len;
struct dentry *dentry = NULL;
struct dentry *dir = bool_dir;
@@ -979,45 +1237,46 @@ static int sel_make_bools(void)
u32 sid;
/* remove any existing files */
+ for (i = 0; i < bool_num; i++)
+ kfree(bool_pending_names[i]);
kfree(bool_pending_names);
kfree(bool_pending_values);
+ bool_num = 0;
bool_pending_names = NULL;
bool_pending_values = NULL;
sel_remove_entries(dir);
+ ret = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
if (!page)
- return -ENOMEM;
+ goto out;
ret = security_get_bools(&num, &names, &values);
- if (ret != 0)
+ if (ret)
goto out;
for (i = 0; i < num; i++) {
+ ret = -ENOMEM;
dentry = d_alloc_name(dir, names[i]);
- if (!dentry) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!dentry)
+ goto out;
+
+ ret = -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG | S_IRUGO | S_IWUSR);
- if (!inode) {
- ret = -ENOMEM;
- goto err;
- }
+ if (!inode)
+ goto out;
+ ret = -ENAMETOOLONG;
len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
- if (len < 0) {
- ret = -EINVAL;
- goto err;
- } else if (len >= PAGE_SIZE) {
- ret = -ENAMETOOLONG;
- goto err;
- }
+ if (len >= PAGE_SIZE)
+ goto out;
+
isec = (struct inode_security_struct *)inode->i_security;
ret = security_genfs_sid("selinuxfs", page, SECCLASS_FILE, &sid);
if (ret)
- goto err;
+ goto out;
+
isec->sid = sid;
isec->initialized = 1;
inode->i_fop = &sel_bool_ops;
@@ -1027,10 +1286,12 @@ static int sel_make_bools(void)
bool_num = num;
bool_pending_names = names;
bool_pending_values = values;
+
+ free_page((unsigned long)page);
+ return 0;
out:
free_page((unsigned long)page);
- return ret;
-err:
+
if (names) {
for (i = 0; i < num; i++)
kfree(names[i]);
@@ -1038,13 +1299,13 @@ err:
}
kfree(values);
sel_remove_entries(dir);
- ret = -ENOMEM;
- goto out;
+
+ return ret;
}
#define NULL_FILE_NAME "null"
-struct dentry *selinux_null;
+struct path selinux_null;
static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
@@ -1061,47 +1322,41 @@ static ssize_t sel_write_avc_cache_threshold(struct file *file,
size_t count, loff_t *ppos)
{
- char *page;
+ char *page = NULL;
ssize_t ret;
int new_value;
- if (count >= PAGE_SIZE) {
- ret = -ENOMEM;
+ ret = task_has_security(current, SECURITY__SETSECPARAM);
+ if (ret)
goto out;
- }
- if (*ppos != 0) {
- /* No partial writes. */
- ret = -EINVAL;
+ ret = -ENOMEM;
+ if (count >= PAGE_SIZE)
+ goto out;
+
+ /* No partial writes. */
+ ret = -EINVAL;
+ if (*ppos != 0)
goto out;
- }
+ ret = -ENOMEM;
page = (char *)get_zeroed_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
+ if (!page)
goto out;
- }
- if (copy_from_user(page, buf, count)) {
- ret = -EFAULT;
- goto out_free;
- }
+ ret = -EFAULT;
+ if (copy_from_user(page, buf, count))
+ goto out;
- if (sscanf(page, "%u", &new_value) != 1) {
- ret = -EINVAL;
+ ret = -EINVAL;
+ if (sscanf(page, "%u", &new_value) != 1)
goto out;
- }
- if (new_value != avc_cache_threshold) {
- ret = task_has_security(current, SECURITY__SETSECPARAM);
- if (ret)
- goto out_free;
- avc_cache_threshold = new_value;
- }
+ avc_cache_threshold = new_value;
+
ret = count;
-out_free:
- free_page((unsigned long)page);
out:
+ free_page((unsigned long)page);
return ret;
}
@@ -1109,28 +1364,29 @@ static ssize_t sel_read_avc_hash_stats(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
{
char *page;
- ssize_t ret = 0;
+ ssize_t length;
page = (char *)__get_free_page(GFP_KERNEL);
- if (!page) {
- ret = -ENOMEM;
- goto out;
- }
- ret = avc_get_hash_stats(page);
- if (ret >= 0)
- ret = simple_read_from_buffer(buf, count, ppos, page, ret);
+ if (!page)
+ return -ENOMEM;
+
+ length = avc_get_hash_stats(page);
+ if (length >= 0)
+ length = simple_read_from_buffer(buf, count, ppos, page, length);
free_page((unsigned long)page);
-out:
- return ret;
+
+ return length;
}
static const struct file_operations sel_avc_cache_threshold_ops = {
.read = sel_read_avc_cache_threshold,
.write = sel_write_avc_cache_threshold,
+ .llseek = generic_file_llseek,
};
static const struct file_operations sel_avc_hash_stats_ops = {
.read = sel_read_avc_hash_stats,
+ .llseek = generic_file_llseek,
};
#ifdef CONFIG_SECURITY_SELINUX_AVC_STATS
@@ -1169,10 +1425,14 @@ static int sel_avc_stats_seq_show(struct seq_file *seq, void *v)
if (v == SEQ_START_TOKEN)
seq_printf(seq, "lookups hits misses allocations reclaims "
"frees\n");
- else
- seq_printf(seq, "%u %u %u %u %u %u\n", st->lookups,
- st->hits, st->misses, st->allocations,
+ else {
+ unsigned int lookups = st->lookups;
+ unsigned int misses = st->misses;
+ unsigned int hits = lookups - misses;
+ seq_printf(seq, "%u %u %u %u %u %u\n", lookups,
+ hits, misses, st->allocations,
st->reclaims, st->frees);
+ }
return 0;
}
@@ -1201,7 +1461,7 @@ static const struct file_operations sel_avc_cache_stats_ops = {
static int sel_make_avc_files(struct dentry *dir)
{
- int i, ret = 0;
+ int i;
static struct tree_descr files[] = {
{ "cache_threshold",
&sel_avc_cache_threshold_ops, S_IRUGO|S_IWUSR },
@@ -1216,36 +1476,31 @@ static int sel_make_avc_files(struct dentry *dir)
struct dentry *dentry;
dentry = d_alloc_name(dir, files[i].name);
- if (!dentry) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!dentry)
+ return -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG|files[i].mode);
- if (!inode) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!inode)
+ return -ENOMEM;
+
inode->i_fop = files[i].ops;
inode->i_ino = ++sel_last_ino;
d_add(dentry, inode);
}
-out:
- return ret;
+
+ return 0;
}
static ssize_t sel_read_initcon(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct inode *inode;
char *con;
u32 sid, len;
ssize_t ret;
- inode = file->f_path.dentry->d_inode;
- sid = inode->i_ino&SEL_INO_MASK;
+ sid = file_inode(file)->i_ino&SEL_INO_MASK;
ret = security_sid_to_context(sid, &con, &len);
- if (ret < 0)
+ if (ret)
return ret;
ret = simple_read_from_buffer(buf, count, ppos, con, len);
@@ -1255,37 +1510,30 @@ static ssize_t sel_read_initcon(struct file *file, char __user *buf,
static const struct file_operations sel_initcon_ops = {
.read = sel_read_initcon,
+ .llseek = generic_file_llseek,
};
static int sel_make_initcon_files(struct dentry *dir)
{
- int i, ret = 0;
+ int i;
for (i = 1; i <= SECINITSID_NUM; i++) {
struct inode *inode;
struct dentry *dentry;
dentry = d_alloc_name(dir, security_get_initial_sid_context(i));
- if (!dentry) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!dentry)
+ return -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
- if (!inode) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!inode)
+ return -ENOMEM;
+
inode->i_fop = &sel_initcon_ops;
inode->i_ino = i|SEL_INITCON_INO_OFFSET;
d_add(dentry, inode);
}
-out:
- return ret;
-}
-static inline unsigned int sel_div(unsigned long a, unsigned long b)
-{
- return a / b - (a % b < 0);
+ return 0;
}
static inline unsigned long sel_class_to_ino(u16 class)
@@ -1295,7 +1543,7 @@ static inline unsigned long sel_class_to_ino(u16 class)
static inline u16 sel_ino_to_class(unsigned long ino)
{
- return sel_div(ino & SEL_INO_MASK, SEL_VEC_MAX + 1);
+ return (ino & SEL_INO_MASK) / (SEL_VEC_MAX + 1);
}
static inline unsigned long sel_perm_to_ino(u16 class, u32 perm)
@@ -1311,49 +1559,29 @@ static inline u32 sel_ino_to_perm(unsigned long ino)
static ssize_t sel_read_class(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- ssize_t rc, len;
- char *page;
- unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
- page = (char *)__get_free_page(GFP_KERNEL);
- if (!page) {
- rc = -ENOMEM;
- goto out;
- }
-
- len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino));
- rc = simple_read_from_buffer(buf, count, ppos, page, len);
- free_page((unsigned long)page);
-out:
- return rc;
+ unsigned long ino = file_inode(file)->i_ino;
+ char res[TMPBUFLEN];
+ ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+ return simple_read_from_buffer(buf, count, ppos, res, len);
}
static const struct file_operations sel_class_ops = {
.read = sel_read_class,
+ .llseek = generic_file_llseek,
};
static ssize_t sel_read_perm(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- ssize_t rc, len;
- char *page;
- unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
- page = (char *)__get_free_page(GFP_KERNEL);
- if (!page) {
- rc = -ENOMEM;
- goto out;
- }
-
- len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino));
- rc = simple_read_from_buffer(buf, count, ppos, page, len);
- free_page((unsigned long)page);
-out:
- return rc;
+ unsigned long ino = file_inode(file)->i_ino;
+ char res[TMPBUFLEN];
+ ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+ return simple_read_from_buffer(buf, count, ppos, res, len);
}
static const struct file_operations sel_perm_ops = {
.read = sel_read_perm,
+ .llseek = generic_file_llseek,
};
static ssize_t sel_read_policycap(struct file *file, char __user *buf,
@@ -1362,7 +1590,7 @@ static ssize_t sel_read_policycap(struct file *file, char __user *buf,
int value;
char tmpbuf[TMPBUFLEN];
ssize_t length;
- unsigned long i_ino = file->f_path.dentry->d_inode->i_ino;
+ unsigned long i_ino = file_inode(file)->i_ino;
value = security_policycap_supported(i_ino & SEL_INO_MASK);
length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value);
@@ -1372,44 +1600,43 @@ static ssize_t sel_read_policycap(struct file *file, char __user *buf,
static const struct file_operations sel_policycap_ops = {
.read = sel_read_policycap,
+ .llseek = generic_file_llseek,
};
static int sel_make_perm_files(char *objclass, int classvalue,
struct dentry *dir)
{
- int i, rc = 0, nperms;
+ int i, rc, nperms;
char **perms;
rc = security_get_permissions(objclass, &perms, &nperms);
if (rc)
- goto out;
+ return rc;
for (i = 0; i < nperms; i++) {
struct inode *inode;
struct dentry *dentry;
+ rc = -ENOMEM;
dentry = d_alloc_name(dir, perms[i]);
- if (!dentry) {
- rc = -ENOMEM;
- goto out1;
- }
+ if (!dentry)
+ goto out;
+ rc = -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
- if (!inode) {
- rc = -ENOMEM;
- goto out1;
- }
+ if (!inode)
+ goto out;
+
inode->i_fop = &sel_perm_ops;
/* i+1 since perm values are 1-indexed */
- inode->i_ino = sel_perm_to_ino(classvalue, i+1);
+ inode->i_ino = sel_perm_to_ino(classvalue, i + 1);
d_add(dentry, inode);
}
-
-out1:
+ rc = 0;
+out:
for (i = 0; i < nperms; i++)
kfree(perms[i]);
kfree(perms);
-out:
return rc;
}
@@ -1421,34 +1648,23 @@ static int sel_make_class_dir_entries(char *classname, int index,
int rc;
dentry = d_alloc_name(dir, "index");
- if (!dentry) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!dentry)
+ return -ENOMEM;
inode = sel_make_inode(dir->d_sb, S_IFREG|S_IRUGO);
- if (!inode) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!inode)
+ return -ENOMEM;
inode->i_fop = &sel_class_ops;
inode->i_ino = sel_class_to_ino(index);
d_add(dentry, inode);
- dentry = d_alloc_name(dir, "perms");
- if (!dentry) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = sel_make_dir(dir->d_inode, dentry, &last_class_ino);
- if (rc)
- goto out;
+ dentry = sel_make_dir(dir, "perms", &last_class_ino);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
rc = sel_make_perm_files(classname, index, dentry);
-out:
return rc;
}
@@ -1478,45 +1694,40 @@ static void sel_remove_classes(void)
static int sel_make_classes(void)
{
- int rc = 0, nclasses, i;
+ int rc, nclasses, i;
char **classes;
/* delete any existing entries */
sel_remove_classes();
rc = security_get_classes(&classes, &nclasses);
- if (rc < 0)
- goto out;
+ if (rc)
+ return rc;
/* +2 since classes are 1-indexed */
- last_class_ino = sel_class_to_ino(nclasses+2);
+ last_class_ino = sel_class_to_ino(nclasses + 2);
for (i = 0; i < nclasses; i++) {
struct dentry *class_name_dir;
- class_name_dir = d_alloc_name(class_dir, classes[i]);
- if (!class_name_dir) {
- rc = -ENOMEM;
- goto out1;
- }
-
- rc = sel_make_dir(class_dir->d_inode, class_name_dir,
+ class_name_dir = sel_make_dir(class_dir, classes[i],
&last_class_ino);
- if (rc)
- goto out1;
+ if (IS_ERR(class_name_dir)) {
+ rc = PTR_ERR(class_name_dir);
+ goto out;
+ }
/* i+1 since class values are 1-indexed */
- rc = sel_make_class_dir_entries(classes[i], i+1,
+ rc = sel_make_class_dir_entries(classes[i], i + 1,
class_name_dir);
if (rc)
- goto out1;
+ goto out;
}
-
-out1:
+ rc = 0;
+out:
for (i = 0; i < nclasses; i++)
kfree(classes[i]);
kfree(classes);
-out:
return rc;
}
@@ -1550,17 +1761,21 @@ static int sel_make_policycap(void)
return 0;
}
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
unsigned long *ino)
{
- int ret = 0;
+ struct dentry *dentry = d_alloc_name(dir, name);
struct inode *inode;
- inode = sel_make_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
+ if (!dentry)
+ return ERR_PTR(-ENOMEM);
+
+ inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO);
if (!inode) {
- ret = -ENOMEM;
- goto out;
+ dput(dentry);
+ return ERR_PTR(-ENOMEM);
}
+
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
inode->i_ino = ++(*ino);
@@ -1568,16 +1783,16 @@ static int sel_make_dir(struct inode *dir, struct dentry *dentry,
inc_nlink(inode);
d_add(dentry, inode);
/* bump link count on parent directory, too */
- inc_nlink(dir);
-out:
- return ret;
+ inc_nlink(dir->d_inode);
+
+ return dentry;
}
static int sel_fill_super(struct super_block *sb, void *data, int silent)
{
int ret;
struct dentry *dentry;
- struct inode *inode, *root_inode;
+ struct inode *inode;
struct inode_security_struct *isec;
static struct tree_descr selinux_files[] = {
@@ -1596,37 +1811,31 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
[SEL_CHECKREQPROT] = {"checkreqprot", &sel_checkreqprot_ops, S_IRUGO|S_IWUSR},
[SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
[SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
+ [SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
+ [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO},
/* last one */ {""}
};
ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
if (ret)
goto err;
- root_inode = sb->s_root->d_inode;
-
- dentry = d_alloc_name(sb->s_root, BOOL_DIR_NAME);
- if (!dentry) {
- ret = -ENOMEM;
+ bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino);
+ if (IS_ERR(bool_dir)) {
+ ret = PTR_ERR(bool_dir);
+ bool_dir = NULL;
goto err;
}
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
- goto err;
-
- bool_dir = dentry;
-
+ ret = -ENOMEM;
dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
- if (!dentry) {
- ret = -ENOMEM;
+ if (!dentry)
goto err;
- }
+ ret = -ENOMEM;
inode = sel_make_inode(sb, S_IFCHR | S_IRUGO | S_IWUGO);
- if (!inode) {
- ret = -ENOMEM;
+ if (!inode)
goto err;
- }
+
inode->i_ino = ++sel_last_ino;
isec = (struct inode_security_struct *)inode->i_security;
isec->sid = SECINITSID_DEVNULL;
@@ -1635,82 +1844,62 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3));
d_add(dentry, inode);
- selinux_null = dentry;
+ selinux_null.dentry = dentry;
- dentry = d_alloc_name(sb->s_root, "avc");
- if (!dentry) {
- ret = -ENOMEM;
+ dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto err;
}
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
- goto err;
-
ret = sel_make_avc_files(dentry);
if (ret)
goto err;
- dentry = d_alloc_name(sb->s_root, "initial_contexts");
- if (!dentry) {
- ret = -ENOMEM;
+ dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto err;
}
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
- goto err;
-
ret = sel_make_initcon_files(dentry);
if (ret)
goto err;
- dentry = d_alloc_name(sb->s_root, "class");
- if (!dentry) {
- ret = -ENOMEM;
+ class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino);
+ if (IS_ERR(class_dir)) {
+ ret = PTR_ERR(class_dir);
+ class_dir = NULL;
goto err;
}
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
- goto err;
-
- class_dir = dentry;
-
- dentry = d_alloc_name(sb->s_root, "policy_capabilities");
- if (!dentry) {
- ret = -ENOMEM;
+ policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino);
+ if (IS_ERR(policycap_dir)) {
+ ret = PTR_ERR(policycap_dir);
+ policycap_dir = NULL;
goto err;
}
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
- goto err;
-
- policycap_dir = dentry;
-
-out:
- return ret;
+ return 0;
err:
printk(KERN_ERR "SELinux: %s: failed while creating inodes\n",
__func__);
- goto out;
+ return ret;
}
-static int sel_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- struct vfsmount *mnt)
+static struct dentry *sel_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_single(fs_type, flags, data, sel_fill_super, mnt);
+ return mount_single(fs_type, flags, data, sel_fill_super);
}
static struct file_system_type sel_fs_type = {
.name = "selinuxfs",
- .get_sb = sel_get_sb,
+ .mount = sel_mount,
.kill_sb = kill_litter_super,
};
struct vfsmount *selinuxfs_mount;
+static struct kobject *selinuxfs_kobj;
static int __init init_sel_fs(void)
{
@@ -1718,15 +1907,24 @@ static int __init init_sel_fs(void)
if (!selinux_enabled)
return 0;
+
+ selinuxfs_kobj = kobject_create_and_add("selinux", fs_kobj);
+ if (!selinuxfs_kobj)
+ return -ENOMEM;
+
err = register_filesystem(&sel_fs_type);
- if (!err) {
- selinuxfs_mount = kern_mount(&sel_fs_type);
- if (IS_ERR(selinuxfs_mount)) {
- printk(KERN_ERR "selinuxfs: could not mount!\n");
- err = PTR_ERR(selinuxfs_mount);
- selinuxfs_mount = NULL;
- }
+ if (err) {
+ kobject_put(selinuxfs_kobj);
+ return err;
}
+
+ selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type);
+ if (IS_ERR(selinuxfs_mount)) {
+ printk(KERN_ERR "selinuxfs: could not mount!\n");
+ err = PTR_ERR(selinuxfs_mount);
+ selinuxfs_mount = NULL;
+ }
+
return err;
}
@@ -1735,6 +1933,8 @@ __initcall(init_sel_fs);
#ifdef CONFIG_SECURITY_SELINUX_DISABLE
void exit_sel_fs(void)
{
+ kobject_put(selinuxfs_kobj);
+ kern_unmount(selinuxfs_mount);
unregister_filesystem(&sel_fs_type);
}
#endif
diff --git a/security/selinux/ss/Makefile b/security/selinux/ss/Makefile
deleted file mode 100644
index 15d4e62917d..00000000000
--- a/security/selinux/ss/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
-#
-# Makefile for building the SELinux security server as part of the kernel tree.
-#
-
-EXTRA_CFLAGS += -Isecurity/selinux -Isecurity/selinux/include
-obj-y := ss.o
-
-ss-y := ebitmap.o hashtab.o symtab.o sidtab.o avtab.o policydb.o services.o conditional.o mls.o
-
diff --git a/security/selinux/ss/avtab.c b/security/selinux/ss/avtab.c
index 1215b8e47db..a3dd9faa19c 100644
--- a/security/selinux/ss/avtab.c
+++ b/security/selinux/ss/avtab.c
@@ -266,8 +266,8 @@ int avtab_alloc(struct avtab *h, u32 nrules)
if (shift > 2)
shift = shift - 2;
nslot = 1 << shift;
- if (nslot > MAX_AVTAB_SIZE)
- nslot = MAX_AVTAB_SIZE;
+ if (nslot > MAX_AVTAB_HASH_BUCKETS)
+ nslot = MAX_AVTAB_HASH_BUCKETS;
mask = nslot - 1;
h->htable = kcalloc(nslot, sizeof(*(h->htable)), GFP_KERNEL);
@@ -342,20 +342,20 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
if (vers < POLICYDB_VERSION_AVTAB) {
rc = next_entry(buf32, fp, sizeof(u32));
- if (rc < 0) {
+ if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
- return -1;
+ return rc;
}
items2 = le32_to_cpu(buf32[0]);
if (items2 > ARRAY_SIZE(buf32)) {
printk(KERN_ERR "SELinux: avtab: entry overflow\n");
- return -1;
+ return -EINVAL;
}
rc = next_entry(buf32, fp, sizeof(u32)*items2);
- if (rc < 0) {
+ if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
- return -1;
+ return rc;
}
items = 0;
@@ -363,19 +363,19 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
key.source_type = (u16)val;
if (key.source_type != val) {
printk(KERN_ERR "SELinux: avtab: truncated source type\n");
- return -1;
+ return -EINVAL;
}
val = le32_to_cpu(buf32[items++]);
key.target_type = (u16)val;
if (key.target_type != val) {
printk(KERN_ERR "SELinux: avtab: truncated target type\n");
- return -1;
+ return -EINVAL;
}
val = le32_to_cpu(buf32[items++]);
key.target_class = (u16)val;
if (key.target_class != val) {
printk(KERN_ERR "SELinux: avtab: truncated target class\n");
- return -1;
+ return -EINVAL;
}
val = le32_to_cpu(buf32[items++]);
@@ -383,12 +383,12 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
if (!(val & (AVTAB_AV | AVTAB_TYPE))) {
printk(KERN_ERR "SELinux: avtab: null entry\n");
- return -1;
+ return -EINVAL;
}
if ((val & AVTAB_AV) &&
(val & AVTAB_TYPE)) {
printk(KERN_ERR "SELinux: avtab: entry has both access vectors and types\n");
- return -1;
+ return -EINVAL;
}
for (i = 0; i < ARRAY_SIZE(spec_order); i++) {
@@ -403,15 +403,15 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
if (items != items2) {
printk(KERN_ERR "SELinux: avtab: entry only had %d items, expected %d\n", items2, items);
- return -1;
+ return -EINVAL;
}
return 0;
}
rc = next_entry(buf16, fp, sizeof(u16)*4);
- if (rc < 0) {
+ if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
- return -1;
+ return rc;
}
items = 0;
@@ -424,7 +424,7 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
!policydb_type_isvalid(pol, key.target_type) ||
!policydb_class_isvalid(pol, key.target_class)) {
printk(KERN_ERR "SELinux: avtab: invalid type or class\n");
- return -1;
+ return -EINVAL;
}
set = 0;
@@ -434,19 +434,19 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
}
if (!set || set > 1) {
printk(KERN_ERR "SELinux: avtab: more than one specifier\n");
- return -1;
+ return -EINVAL;
}
rc = next_entry(buf32, fp, sizeof(u32));
- if (rc < 0) {
+ if (rc) {
printk(KERN_ERR "SELinux: avtab: truncated entry\n");
- return -1;
+ return rc;
}
datum.data = le32_to_cpu(*buf32);
if ((key.specified & AVTAB_TYPE) &&
!policydb_type_isvalid(pol, datum.data)) {
printk(KERN_ERR "SELinux: avtab: invalid type\n");
- return -1;
+ return -EINVAL;
}
return insertf(a, &key, &datum, p);
}
@@ -487,8 +487,7 @@ int avtab_read(struct avtab *a, void *fp, struct policydb *pol)
printk(KERN_ERR "SELinux: avtab: out of memory\n");
else if (rc == -EEXIST)
printk(KERN_ERR "SELinux: avtab: duplicate entry\n");
- else
- rc = -EINVAL;
+
goto bad;
}
}
@@ -502,6 +501,48 @@ bad:
goto out;
}
+int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp)
+{
+ __le16 buf16[4];
+ __le32 buf32[1];
+ int rc;
+
+ buf16[0] = cpu_to_le16(cur->key.source_type);
+ buf16[1] = cpu_to_le16(cur->key.target_type);
+ buf16[2] = cpu_to_le16(cur->key.target_class);
+ buf16[3] = cpu_to_le16(cur->key.specified);
+ rc = put_entry(buf16, sizeof(u16), 4, fp);
+ if (rc)
+ return rc;
+ buf32[0] = cpu_to_le32(cur->datum.data);
+ rc = put_entry(buf32, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ return 0;
+}
+
+int avtab_write(struct policydb *p, struct avtab *a, void *fp)
+{
+ unsigned int i;
+ int rc = 0;
+ struct avtab_node *cur;
+ __le32 buf[1];
+
+ buf[0] = cpu_to_le32(a->nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < a->nslot; i++) {
+ for (cur = a->htable[i]; cur; cur = cur->next) {
+ rc = avtab_write_item(p, cur, fp);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return rc;
+}
void avtab_cache_init(void)
{
avtab_node_cachep = kmem_cache_create("avtab_node",
diff --git a/security/selinux/ss/avtab.h b/security/selinux/ss/avtab.h
index 8da6a842808..63ce2f9e441 100644
--- a/security/selinux/ss/avtab.h
+++ b/security/selinux/ss/avtab.h
@@ -14,7 +14,7 @@
*
* Copyright (C) 2003 Tresys Technology, LLC
* This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
+ * it under the terms of the GNU General Public License as published by
* the Free Software Foundation, version 2.
*
* Updated: Yuichi Nakamura <ynakam@hitachisoft.jp>
@@ -27,16 +27,16 @@ struct avtab_key {
u16 source_type; /* source type */
u16 target_type; /* target type */
u16 target_class; /* target object class */
-#define AVTAB_ALLOWED 1
-#define AVTAB_AUDITALLOW 2
-#define AVTAB_AUDITDENY 4
-#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY)
-#define AVTAB_TRANSITION 16
-#define AVTAB_MEMBER 32
-#define AVTAB_CHANGE 64
-#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
-#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for used in cond_avtab */
-#define AVTAB_ENABLED 0x8000 /* reserved for used in cond_avtab */
+#define AVTAB_ALLOWED 0x0001
+#define AVTAB_AUDITALLOW 0x0002
+#define AVTAB_AUDITDENY 0x0004
+#define AVTAB_AV (AVTAB_ALLOWED | AVTAB_AUDITALLOW | AVTAB_AUDITDENY)
+#define AVTAB_TRANSITION 0x0010
+#define AVTAB_MEMBER 0x0020
+#define AVTAB_CHANGE 0x0040
+#define AVTAB_TYPE (AVTAB_TRANSITION | AVTAB_MEMBER | AVTAB_CHANGE)
+#define AVTAB_ENABLED_OLD 0x80000000 /* reserved for used in cond_avtab */
+#define AVTAB_ENABLED 0x8000 /* reserved for used in cond_avtab */
u16 specified; /* what field is specified */
};
@@ -71,6 +71,8 @@ int avtab_read_item(struct avtab *a, void *fp, struct policydb *pol,
void *p);
int avtab_read(struct avtab *a, void *fp, struct policydb *pol);
+int avtab_write_item(struct policydb *p, struct avtab_node *cur, void *fp);
+int avtab_write(struct policydb *p, struct avtab *a, void *fp);
struct avtab_node *avtab_insert_nonunique(struct avtab *h, struct avtab_key *key,
struct avtab_datum *datum);
@@ -82,10 +84,8 @@ struct avtab_node *avtab_search_node_next(struct avtab_node *node, int specified
void avtab_cache_init(void);
void avtab_cache_destroy(void);
-#define MAX_AVTAB_HASH_BITS 13
+#define MAX_AVTAB_HASH_BITS 11
#define MAX_AVTAB_HASH_BUCKETS (1 << MAX_AVTAB_HASH_BITS)
-#define MAX_AVTAB_HASH_MASK (MAX_AVTAB_HASH_BUCKETS-1)
-#define MAX_AVTAB_SIZE MAX_AVTAB_HASH_BUCKETS
#endif /* _SS_AVTAB_H_ */
diff --git a/security/selinux/ss/conditional.c b/security/selinux/ss/conditional.c
index 4a4e35cac22..377d148e715 100644
--- a/security/selinux/ss/conditional.c
+++ b/security/selinux/ss/conditional.c
@@ -117,10 +117,14 @@ int evaluate_cond_node(struct policydb *p, struct cond_node *node)
int cond_policydb_init(struct policydb *p)
{
+ int rc;
+
p->bool_val_to_struct = NULL;
p->cond_list = NULL;
- if (avtab_init(&p->te_cond_avtab))
- return -1;
+
+ rc = avtab_init(&p->te_cond_avtab);
+ if (rc)
+ return rc;
return 0;
}
@@ -171,10 +175,10 @@ void cond_policydb_destroy(struct policydb *p)
int cond_init_bool_indexes(struct policydb *p)
{
kfree(p->bool_val_to_struct);
- p->bool_val_to_struct = (struct cond_bool_datum **)
+ p->bool_val_to_struct =
kmalloc(p->p_bools.nprim * sizeof(struct cond_bool_datum *), GFP_KERNEL);
if (!p->bool_val_to_struct)
- return -1;
+ return -ENOMEM;
return 0;
}
@@ -189,6 +193,7 @@ int cond_index_bool(void *key, void *datum, void *datap)
{
struct policydb *p;
struct cond_bool_datum *booldatum;
+ struct flex_array *fa;
booldatum = datum;
p = datap;
@@ -196,7 +201,10 @@ int cond_index_bool(void *key, void *datum, void *datap)
if (!booldatum->value || booldatum->value > p->p_bools.nprim)
return -EINVAL;
- p->p_bool_val_to_name[booldatum->value - 1] = key;
+ fa = p->sym_val_to_name[SYM_BOOLS];
+ if (flex_array_put_ptr(fa, booldatum->value - 1, key,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
p->bool_val_to_struct[booldatum->value - 1] = booldatum;
return 0;
@@ -219,34 +227,37 @@ int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp)
booldatum = kzalloc(sizeof(struct cond_bool_datum), GFP_KERNEL);
if (!booldatum)
- return -1;
+ return -ENOMEM;
rc = next_entry(buf, fp, sizeof buf);
- if (rc < 0)
+ if (rc)
goto err;
booldatum->value = le32_to_cpu(buf[0]);
booldatum->state = le32_to_cpu(buf[1]);
+ rc = -EINVAL;
if (!bool_isvalid(booldatum))
goto err;
len = le32_to_cpu(buf[2]);
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
if (!key)
goto err;
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto err;
key[len] = '\0';
- if (hashtab_insert(h, key, booldatum))
+ rc = hashtab_insert(h, key, booldatum);
+ if (rc)
goto err;
return 0;
err:
cond_destroy_bool(key, booldatum, NULL);
- return -1;
+ return rc;
}
struct cond_insertf_data {
@@ -263,7 +274,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
struct cond_av_list *other = data->other, *list, *cur;
struct avtab_node *node_ptr;
u8 found;
-
+ int rc = -EINVAL;
/*
* For type rules we have to make certain there aren't any
@@ -313,12 +324,15 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
node_ptr = avtab_insert_nonunique(&p->te_cond_avtab, k, d);
if (!node_ptr) {
printk(KERN_ERR "SELinux: could not insert rule.\n");
+ rc = -ENOMEM;
goto err;
}
list = kzalloc(sizeof(struct cond_av_list), GFP_KERNEL);
- if (!list)
+ if (!list) {
+ rc = -ENOMEM;
goto err;
+ }
list->node = node_ptr;
if (!data->head)
@@ -331,7 +345,7 @@ static int cond_insertf(struct avtab *a, struct avtab_key *k, struct avtab_datum
err:
cond_av_list_destroy(data->head);
data->head = NULL;
- return -1;
+ return rc;
}
static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list **ret_list, struct cond_av_list *other)
@@ -345,8 +359,8 @@ static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list *
len = 0;
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- return -1;
+ if (rc)
+ return rc;
len = le32_to_cpu(buf[0]);
if (len == 0)
@@ -361,7 +375,6 @@ static int cond_read_av_list(struct policydb *p, void *fp, struct cond_av_list *
&data);
if (rc)
return rc;
-
}
*ret_list = data.head;
@@ -390,24 +403,25 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
struct cond_expr *expr = NULL, *last = NULL;
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- return -1;
+ if (rc)
+ return rc;
node->cur_state = le32_to_cpu(buf[0]);
len = 0;
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- return -1;
+ if (rc)
+ return rc;
/* expr */
len = le32_to_cpu(buf[0]);
for (i = 0; i < len; i++) {
rc = next_entry(buf, fp, sizeof(u32) * 2);
- if (rc < 0)
+ if (rc)
goto err;
+ rc = -ENOMEM;
expr = kzalloc(sizeof(struct cond_expr), GFP_KERNEL);
if (!expr)
goto err;
@@ -416,6 +430,7 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
expr->bool = le32_to_cpu(buf[1]);
if (!expr_isvalid(p, expr)) {
+ rc = -EINVAL;
kfree(expr);
goto err;
}
@@ -427,14 +442,16 @@ static int cond_read_node(struct policydb *p, struct cond_node *node, void *fp)
last = expr;
}
- if (cond_read_av_list(p, fp, &node->true_list, NULL) != 0)
+ rc = cond_read_av_list(p, fp, &node->true_list, NULL);
+ if (rc)
goto err;
- if (cond_read_av_list(p, fp, &node->false_list, node->true_list) != 0)
+ rc = cond_read_av_list(p, fp, &node->false_list, node->true_list);
+ if (rc)
goto err;
return 0;
err:
cond_node_destroy(node);
- return -1;
+ return rc;
}
int cond_read_list(struct policydb *p, void *fp)
@@ -445,8 +462,8 @@ int cond_read_list(struct policydb *p, void *fp)
int rc;
rc = next_entry(buf, fp, sizeof buf);
- if (rc < 0)
- return -1;
+ if (rc)
+ return rc;
len = le32_to_cpu(buf[0]);
@@ -455,11 +472,13 @@ int cond_read_list(struct policydb *p, void *fp)
goto err;
for (i = 0; i < len; i++) {
+ rc = -ENOMEM;
node = kzalloc(sizeof(struct cond_node), GFP_KERNEL);
if (!node)
goto err;
- if (cond_read_node(p, node, fp) != 0)
+ rc = cond_read_node(p, node, fp);
+ if (rc)
goto err;
if (i == 0)
@@ -472,9 +491,132 @@ int cond_read_list(struct policydb *p, void *fp)
err:
cond_list_destroy(p->cond_list);
p->cond_list = NULL;
- return -1;
+ return rc;
+}
+
+int cond_write_bool(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct cond_bool_datum *booldatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ u32 len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(booldatum->value);
+ buf[1] = cpu_to_le32(booldatum->state);
+ buf[2] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+ return 0;
+}
+
+/*
+ * cond_write_av_list doesn't write out the av_list nodes.
+ * Instead it writes out the key/value pairs from the avtab. This
+ * is necessary because there is no way to uniquely identify rules
+ * in the avtab, so it is not possible to associate individual rules
+ * in the avtab with a conditional without saving them as part of
+ * the conditional. This means that the avtab with the conditional
+ * rules will not be saved but will be rebuilt on policy load.
+ */
+static int cond_write_av_list(struct policydb *p,
+ struct cond_av_list *list, struct policy_file *fp)
+{
+ __le32 buf[1];
+ struct cond_av_list *cur_list;
+ u32 len;
+ int rc;
+
+ len = 0;
+ for (cur_list = list; cur_list != NULL; cur_list = cur_list->next)
+ len++;
+
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ if (len == 0)
+ return 0;
+
+ for (cur_list = list; cur_list != NULL; cur_list = cur_list->next) {
+ rc = avtab_write_item(p, cur_list->node, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
}
+static int cond_write_node(struct policydb *p, struct cond_node *node,
+ struct policy_file *fp)
+{
+ struct cond_expr *cur_expr;
+ __le32 buf[2];
+ int rc;
+ u32 len = 0;
+
+ buf[0] = cpu_to_le32(node->cur_state);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next)
+ len++;
+
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (cur_expr = node->expr; cur_expr != NULL; cur_expr = cur_expr->next) {
+ buf[0] = cpu_to_le32(cur_expr->expr_type);
+ buf[1] = cpu_to_le32(cur_expr->bool);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ }
+
+ rc = cond_write_av_list(p, node->true_list, fp);
+ if (rc)
+ return rc;
+ rc = cond_write_av_list(p, node->false_list, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+int cond_write_list(struct policydb *p, struct cond_node *list, void *fp)
+{
+ struct cond_node *cur;
+ u32 len;
+ __le32 buf[1];
+ int rc;
+
+ len = 0;
+ for (cur = list; cur != NULL; cur = cur->next)
+ len++;
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ for (cur = list; cur != NULL; cur = cur->next) {
+ rc = cond_write_node(p, cur, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
/* Determine whether additional permissions are granted by the conditional
* av table, and if so, add them to the result
*/
diff --git a/security/selinux/ss/conditional.h b/security/selinux/ss/conditional.h
index 53ddb013ae5..4d1f8746650 100644
--- a/security/selinux/ss/conditional.h
+++ b/security/selinux/ss/conditional.h
@@ -13,6 +13,7 @@
#include "avtab.h"
#include "symtab.h"
#include "policydb.h"
+#include "../include/conditional.h"
#define COND_EXPR_MAXDEPTH 10
@@ -69,6 +70,8 @@ int cond_index_bool(void *key, void *datum, void *datap);
int cond_read_bool(struct policydb *p, struct hashtab *h, void *fp);
int cond_read_list(struct policydb *p, void *fp);
+int cond_write_bool(void *key, void *datum, void *ptr);
+int cond_write_list(struct policydb *p, struct cond_node *list, void *fp);
void cond_compute_av(struct avtab *ctab, struct avtab_key *key, struct av_decision *avd);
diff --git a/security/selinux/ss/constraint.h b/security/selinux/ss/constraint.h
index 149dda731fd..96fd947c494 100644
--- a/security/selinux/ss/constraint.h
+++ b/security/selinux/ss/constraint.h
@@ -48,6 +48,7 @@ struct constraint_expr {
u32 op; /* operator */
struct ebitmap names; /* names */
+ struct type_set *type_names;
struct constraint_expr *next; /* next expression */
};
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index d9dd7a2f6a8..212e3479a0d 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -41,9 +41,6 @@ static inline int mls_context_cpy(struct context *dst, struct context *src)
{
int rc;
- if (!selinux_mls_enabled)
- return 0;
-
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
@@ -64,9 +61,6 @@ static inline int mls_context_cpy_low(struct context *dst, struct context *src)
{
int rc;
- if (!selinux_mls_enabled)
- return 0;
-
dst->range.level[0].sens = src->range.level[0].sens;
rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[0].cat);
if (rc)
@@ -80,11 +74,28 @@ out:
return rc;
}
-static inline int mls_context_cmp(struct context *c1, struct context *c2)
+/*
+ * Sets both levels in the MLS range of 'dst' to the high level of 'src'.
+ */
+static inline int mls_context_cpy_high(struct context *dst, struct context *src)
{
- if (!selinux_mls_enabled)
- return 1;
+ int rc;
+
+ dst->range.level[0].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat);
+ if (rc)
+ goto out;
+ dst->range.level[1].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
+ if (rc)
+ ebitmap_destroy(&dst->range.level[0].cat);
+out:
+ return rc;
+}
+
+static inline int mls_context_cmp(struct context *c1, struct context *c2)
+{
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
ebitmap_cmp(&c1->range.level[0].cat, &c2->range.level[0].cat) &&
(c1->range.level[1].sens == c2->range.level[1].sens) &&
@@ -93,9 +104,6 @@ static inline int mls_context_cmp(struct context *c1, struct context *c2)
static inline void mls_context_destroy(struct context *c)
{
- if (!selinux_mls_enabled)
- return;
-
ebitmap_destroy(&c->range.level[0].cat);
ebitmap_destroy(&c->range.level[1].cat);
mls_context_init(c);
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 68c7348d1ac..820313a04d4 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -4,7 +4,7 @@
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
/*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the NetLabel category bitmap
*
@@ -22,6 +22,8 @@
#include "ebitmap.h"
#include "policydb.h"
+#define BITS_PER_U64 (sizeof(u64) * 8)
+
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2)
{
struct ebitmap_node *n1, *n2;
@@ -128,7 +130,7 @@ int ebitmap_netlbl_export(struct ebitmap *ebmap,
cmap_idx = delta / NETLBL_CATMAP_MAPSIZE;
cmap_sft = delta % NETLBL_CATMAP_MAPSIZE;
c_iter->bitmap[cmap_idx]
- |= e_iter->maps[cmap_idx] << cmap_sft;
+ |= e_iter->maps[i] << cmap_sft;
}
e_iter = e_iter->next;
}
@@ -211,7 +213,12 @@ netlbl_import_failure:
}
#endif /* CONFIG_NETLABEL */
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
+/*
+ * Check to see if all the bits set in e2 are also set in e1. Optionally,
+ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
+ * last_e2bit.
+ */
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
{
struct ebitmap_node *n1, *n2;
int i;
@@ -221,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
n1 = e1->node;
n2 = e2->node;
+
while (n1 && n2 && (n1->startbit <= n2->startbit)) {
if (n1->startbit < n2->startbit) {
n1 = n1->next;
continue;
}
- for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
+ for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
+ i--; /* Skip trailing NULL map entries */
+ if (last_e2bit && (i >= 0)) {
+ u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
+ __fls(n2->maps[i]);
+ if (lastsetbit > last_e2bit)
+ return 0;
+ }
+
+ while (i >= 0) {
if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
return 0;
+ i--;
}
n1 = n1->next;
@@ -363,10 +381,10 @@ int ebitmap_read(struct ebitmap *e, void *fp)
e->highbit = le32_to_cpu(buf[1]);
count = le32_to_cpu(buf[2]);
- if (mapunit != sizeof(u64) * 8) {
+ if (mapunit != BITS_PER_U64) {
printk(KERN_ERR "SELinux: ebitmap: map size %u does not "
"match my size %Zd (high bit was %d)\n",
- mapunit, sizeof(u64) * 8, e->highbit);
+ mapunit, BITS_PER_U64, e->highbit);
goto bad;
}
@@ -446,3 +464,78 @@ bad:
ebitmap_destroy(e);
goto out;
}
+
+int ebitmap_write(struct ebitmap *e, void *fp)
+{
+ struct ebitmap_node *n;
+ u32 count;
+ __le32 buf[3];
+ u64 map;
+ int bit, last_bit, last_startbit, rc;
+
+ buf[0] = cpu_to_le32(BITS_PER_U64);
+
+ count = 0;
+ last_bit = 0;
+ last_startbit = -1;
+ ebitmap_for_each_positive_bit(e, n, bit) {
+ if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+ count++;
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ }
+ last_bit = roundup(bit + 1, BITS_PER_U64);
+ }
+ buf[1] = cpu_to_le32(last_bit);
+ buf[2] = cpu_to_le32(count);
+
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ map = 0;
+ last_startbit = INT_MIN;
+ ebitmap_for_each_positive_bit(e, n, bit) {
+ if (rounddown(bit, (int)BITS_PER_U64) > last_startbit) {
+ __le64 buf64[1];
+
+ /* this is the very first bit */
+ if (!map) {
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ map = (u64)1 << (bit - last_startbit);
+ continue;
+ }
+
+ /* write the last node */
+ buf[0] = cpu_to_le32(last_startbit);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ buf64[0] = cpu_to_le64(map);
+ rc = put_entry(buf64, sizeof(u64), 1, fp);
+ if (rc)
+ return rc;
+
+ /* set up for the next node */
+ map = 0;
+ last_startbit = rounddown(bit, BITS_PER_U64);
+ }
+ map |= (u64)1 << (bit - last_startbit);
+ }
+ /* write the last node */
+ if (map) {
+ __le64 buf64[1];
+
+ /* write the last node */
+ buf[0] = cpu_to_le32(last_startbit);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ buf64[0] = cpu_to_le64(map);
+ rc = put_entry(buf64, sizeof(u64), 1, fp);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index f283b4367f5..712c8a7b8e8 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -16,7 +16,13 @@
#include <net/netlabel.h>
-#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \
+#ifdef CONFIG_64BIT
+#define EBITMAP_NODE_SIZE 64
+#else
+#define EBITMAP_NODE_SIZE 32
+#endif
+
+#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
/ sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE BITS_PER_LONG
#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
@@ -36,7 +42,6 @@ struct ebitmap {
};
#define ebitmap_length(e) ((e)->highbit)
-#define ebitmap_startbit(e) ((e)->node ? (e)->node->startbit : 0)
static inline unsigned int ebitmap_start_positive(struct ebitmap *e,
struct ebitmap_node **n)
@@ -118,11 +123,12 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
void ebitmap_destroy(struct ebitmap *e);
int ebitmap_read(struct ebitmap *e, void *fp);
+int ebitmap_write(struct ebitmap *e, void *fp);
#ifdef CONFIG_NETLABEL
int ebitmap_netlbl_export(struct ebitmap *ebmap,
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 933e735bb18..2cc49614984 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/sched.h>
#include "hashtab.h"
struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
@@ -40,6 +41,8 @@ int hashtab_insert(struct hashtab *h, void *key, void *datum)
u32 hvalue;
struct hashtab_node *prev, *cur, *newnode;
+ cond_resched();
+
if (!h || h->nel == HASHTAB_MAX_NODES)
return -EINVAL;
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index 3f2b2706b5b..d307b37ddc2 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -11,7 +11,7 @@
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
*/
/*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the MLS label from NetLabel
*
@@ -39,13 +39,13 @@ int mls_compute_context_len(struct context *context)
struct ebitmap *e;
struct ebitmap_node *node;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
len = 1; /* for the beginning ":" */
for (l = 0; l < 2; l++) {
int index_sens = context->range.level[l].sens;
- len += strlen(policydb.p_sens_val_to_name[index_sens - 1]);
+ len += strlen(sym_name(&policydb, SYM_LEVELS, index_sens - 1));
/* categories */
head = -2;
@@ -55,17 +55,17 @@ int mls_compute_context_len(struct context *context)
if (i - prev > 1) {
/* one or more negative bits are skipped */
if (head != prev) {
- nm = policydb.p_cat_val_to_name[prev];
+ nm = sym_name(&policydb, SYM_CATS, prev);
len += strlen(nm) + 1;
}
- nm = policydb.p_cat_val_to_name[i];
+ nm = sym_name(&policydb, SYM_CATS, i);
len += strlen(nm) + 1;
head = i;
}
prev = i;
}
if (prev != head) {
- nm = policydb.p_cat_val_to_name[prev];
+ nm = sym_name(&policydb, SYM_CATS, prev);
len += strlen(nm) + 1;
}
if (l == 0) {
@@ -93,7 +93,7 @@ void mls_sid_to_context(struct context *context,
struct ebitmap *e;
struct ebitmap_node *node;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
scontextp = *scontext;
@@ -102,8 +102,8 @@ void mls_sid_to_context(struct context *context,
scontextp++;
for (l = 0; l < 2; l++) {
- strcpy(scontextp,
- policydb.p_sens_val_to_name[context->range.level[l].sens - 1]);
+ strcpy(scontextp, sym_name(&policydb, SYM_LEVELS,
+ context->range.level[l].sens - 1));
scontextp += strlen(scontextp);
/* categories */
@@ -118,7 +118,7 @@ void mls_sid_to_context(struct context *context,
*scontextp++ = '.';
else
*scontextp++ = ',';
- nm = policydb.p_cat_val_to_name[prev];
+ nm = sym_name(&policydb, SYM_CATS, prev);
strcpy(scontextp, nm);
scontextp += strlen(nm);
}
@@ -126,7 +126,7 @@ void mls_sid_to_context(struct context *context,
*scontextp++ = ':';
else
*scontextp++ = ',';
- nm = policydb.p_cat_val_to_name[i];
+ nm = sym_name(&policydb, SYM_CATS, i);
strcpy(scontextp, nm);
scontextp += strlen(nm);
head = i;
@@ -139,7 +139,7 @@ void mls_sid_to_context(struct context *context,
*scontextp++ = '.';
else
*scontextp++ = ',';
- nm = policydb.p_cat_val_to_name[prev];
+ nm = sym_name(&policydb, SYM_CATS, prev);
strcpy(scontextp, nm);
scontextp += strlen(nm);
}
@@ -160,29 +160,21 @@ void mls_sid_to_context(struct context *context,
int mls_level_isvalid(struct policydb *p, struct mls_level *l)
{
struct level_datum *levdatum;
- struct ebitmap_node *node;
- int i;
if (!l->sens || l->sens > p->p_levels.nprim)
return 0;
levdatum = hashtab_search(p->p_levels.table,
- p->p_sens_val_to_name[l->sens - 1]);
+ sym_name(p, SYM_LEVELS, l->sens - 1));
if (!levdatum)
return 0;
- ebitmap_for_each_positive_bit(&l->cat, node, i) {
- if (i > p->p_cats.nprim)
- return 0;
- if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
- /*
- * Category may not be associated with
- * sensitivity.
- */
- return 0;
- }
- }
-
- return 1;
+ /*
+ * Return 1 iff all the bits set in l->cat are also set in
+ * levdatum->level->cat and no bit in l->cat is larger than
+ * p->p_cats.nprim.
+ */
+ return ebitmap_contains(&levdatum->level->cat, &l->cat,
+ p->p_cats.nprim);
}
int mls_range_isvalid(struct policydb *p, struct mls_range *r)
@@ -200,7 +192,7 @@ int mls_context_isvalid(struct policydb *p, struct context *c)
{
struct user_datum *usrdatum;
- if (!selinux_mls_enabled)
+ if (!p->mls_enabled)
return 1;
if (!mls_range_isvalid(p, &c->range))
@@ -253,9 +245,9 @@ int mls_context_to_sid(struct policydb *pol,
struct cat_datum *catdatum, *rngdatum;
int l, rc = -EINVAL;
- if (!selinux_mls_enabled) {
+ if (!pol->mls_enabled) {
if (def_sid != SECSID_NULL && oldc)
- *scontext += strlen(*scontext)+1;
+ *scontext += strlen(*scontext) + 1;
return 0;
}
@@ -387,7 +379,7 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
char *tmpstr, *freestr;
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return -EINVAL;
/* we need freestr because mls_context_to_sid will change
@@ -407,7 +399,7 @@ int mls_from_string(char *str, struct context *context, gfp_t gfp_mask)
/*
* Copies the MLS range `range' into `context'.
*/
-static inline int mls_range_set(struct context *context,
+int mls_range_set(struct context *context,
struct mls_range *range)
{
int l, rc = 0;
@@ -427,7 +419,7 @@ static inline int mls_range_set(struct context *context,
int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
struct context *usercon)
{
- if (selinux_mls_enabled) {
+ if (policydb.mls_enabled) {
struct mls_level *fromcon_sen = &(fromcon->range.level[0]);
struct mls_level *fromcon_clr = &(fromcon->range.level[1]);
struct mls_level *user_low = &(user->range.level[0]);
@@ -477,12 +469,13 @@ int mls_convert_context(struct policydb *oldp,
struct ebitmap_node *node;
int l, i;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
for (l = 0; l < 2; l++) {
levdatum = hashtab_search(newp->p_levels.table,
- oldp->p_sens_val_to_name[c->range.level[l].sens - 1]);
+ sym_name(oldp, SYM_LEVELS,
+ c->range.level[l].sens - 1));
if (!levdatum)
return -EINVAL;
@@ -493,12 +486,14 @@ int mls_convert_context(struct policydb *oldp,
int rc;
catdatum = hashtab_search(newp->p_cats.table,
- oldp->p_cat_val_to_name[i]);
+ sym_name(oldp, SYM_CATS, i));
if (!catdatum)
return -EINVAL;
rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
if (rc)
return rc;
+
+ cond_resched();
}
ebitmap_destroy(&c->range.level[l].cat);
c->range.level[l].cat = bitmap;
@@ -511,28 +506,51 @@ int mls_compute_sid(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 specified,
- struct context *newcontext)
+ struct context *newcontext,
+ bool sock)
{
- struct range_trans *rtr;
+ struct range_trans rtr;
+ struct mls_range *r;
+ struct class_datum *cladatum;
+ int default_range = 0;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
switch (specified) {
case AVTAB_TRANSITION:
/* Look for a range transition rule. */
- for (rtr = policydb.range_tr; rtr; rtr = rtr->next) {
- if (rtr->source_type == scontext->type &&
- rtr->target_type == tcontext->type &&
- rtr->target_class == tclass) {
- /* Set the range from the rule */
- return mls_range_set(newcontext,
- &rtr->target_range);
- }
+ rtr.source_type = scontext->type;
+ rtr.target_type = tcontext->type;
+ rtr.target_class = tclass;
+ r = hashtab_search(policydb.range_tr, &rtr);
+ if (r)
+ return mls_range_set(newcontext, r);
+
+ if (tclass && tclass <= policydb.p_classes.nprim) {
+ cladatum = policydb.class_val_to_struct[tclass - 1];
+ if (cladatum)
+ default_range = cladatum->default_range;
+ }
+
+ switch (default_range) {
+ case DEFAULT_SOURCE_LOW:
+ return mls_context_cpy_low(newcontext, scontext);
+ case DEFAULT_SOURCE_HIGH:
+ return mls_context_cpy_high(newcontext, scontext);
+ case DEFAULT_SOURCE_LOW_HIGH:
+ return mls_context_cpy(newcontext, scontext);
+ case DEFAULT_TARGET_LOW:
+ return mls_context_cpy_low(newcontext, tcontext);
+ case DEFAULT_TARGET_HIGH:
+ return mls_context_cpy_high(newcontext, tcontext);
+ case DEFAULT_TARGET_LOW_HIGH:
+ return mls_context_cpy(newcontext, tcontext);
}
+
/* Fallthrough */
case AVTAB_CHANGE:
- if (tclass == policydb.process_class)
+ if ((tclass == policydb.process_class) || (sock == true))
/* Use the process MLS attributes. */
return mls_context_cpy(newcontext, scontext);
else
@@ -541,8 +559,8 @@ int mls_compute_sid(struct context *scontext,
case AVTAB_MEMBER:
/* Use the process effective MLS attributes. */
return mls_context_cpy_low(newcontext, scontext);
- default:
- return -EINVAL;
+
+ /* fall through */
}
return -EINVAL;
}
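
/*
 * Illustrative sketch, not from the patch: the per-class default_range
 * handling above decides which side's MLS levels seed the new context.
 * Levels are reduced to plain integers here and the constants' numeric
 * values are only assumed; the kernel copies full mls_level structures via
 * mls_context_cpy(), mls_context_cpy_low() and mls_context_cpy_high().
 */
#include <stdio.h>

enum {
        DEFAULT_SOURCE_LOW = 1, DEFAULT_SOURCE_HIGH, DEFAULT_SOURCE_LOW_HIGH,
        DEFAULT_TARGET_LOW, DEFAULT_TARGET_HIGH, DEFAULT_TARGET_LOW_HIGH,
};

struct range { int low, high; };

static struct range pick_range(int def, struct range src, struct range tgt)
{
        switch (def) {
        case DEFAULT_SOURCE_LOW:      return (struct range){ src.low, src.low };
        case DEFAULT_SOURCE_HIGH:     return (struct range){ src.high, src.high };
        case DEFAULT_SOURCE_LOW_HIGH: return src;
        case DEFAULT_TARGET_LOW:      return (struct range){ tgt.low, tgt.low };
        case DEFAULT_TARGET_HIGH:     return (struct range){ tgt.high, tgt.high };
        case DEFAULT_TARGET_LOW_HIGH: return tgt;
        default:
                /* stand-in: the real code falls through to AVTAB_CHANGE */
                return (struct range){ tgt.low, tgt.low };
        }
}

int main(void)
{
        struct range src = { 1, 3 }, tgt = { 0, 2 };
        struct range r = pick_range(DEFAULT_TARGET_LOW_HIGH, src, tgt);

        printf("s%d-s%d\n", r.low, r.high);     /* s0-s2 */
        return 0;
}
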
@@ -561,7 +579,7 @@ int mls_compute_sid(struct context *scontext,
void mls_export_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
secattr->attr.mls.lvl = context->range.level[0].sens - 1;
@@ -581,7 +599,7 @@ void mls_export_netlbl_lvl(struct context *context,
void mls_import_netlbl_lvl(struct context *context,
struct netlbl_lsm_secattr *secattr)
{
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return;
context->range.level[0].sens = secattr->attr.mls.lvl + 1;
@@ -603,7 +621,7 @@ int mls_export_netlbl_cat(struct context *context,
{
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_export(&context->range.level[0].cat,
@@ -631,7 +649,7 @@ int mls_import_netlbl_cat(struct context *context,
{
int rc;
- if (!selinux_mls_enabled)
+ if (!policydb.mls_enabled)
return 0;
rc = ebitmap_netlbl_import(&context->range.level[0].cat,
diff --git a/security/selinux/ss/mls.h b/security/selinux/ss/mls.h
index 1276715aaa8..e4369e3e636 100644
--- a/security/selinux/ss/mls.h
+++ b/security/selinux/ss/mls.h
@@ -11,7 +11,7 @@
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
*/
/*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support to import/export the MLS label from NetLabel
*
@@ -39,6 +39,8 @@ int mls_context_to_sid(struct policydb *p,
int mls_from_string(char *str, struct context *context, gfp_t gfp_mask);
+int mls_range_set(struct context *context, struct mls_range *range);
+
int mls_convert_context(struct policydb *oldp,
struct policydb *newp,
struct context *context);
@@ -47,7 +49,8 @@ int mls_compute_sid(struct context *scontext,
struct context *tcontext,
u16 tclass,
u32 specified,
- struct context *newcontext);
+ struct context *newcontext,
+ bool sock);
int mls_setup_user_range(struct context *fromcon, struct user_datum *user,
struct context *usercon);
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index b6e943a2106..e9364877413 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -15,6 +15,7 @@
#define _SS_MLS_TYPES_H_
#include "security.h"
+#include "ebitmap.h"
struct mls_level {
u32 sens; /* sensitivity */
@@ -27,20 +28,14 @@ struct mls_range {
static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
{
- if (!selinux_mls_enabled)
- return 1;
-
return ((l1->sens == l2->sens) &&
ebitmap_cmp(&l1->cat, &l2->cat));
}
static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
{
- if (!selinux_mls_enabled)
- return 1;
-
return ((l1->sens >= l2->sens) &&
- ebitmap_contains(&l1->cat, &l2->cat));
+ ebitmap_contains(&l1->cat, &l2->cat, 0));
}
#define mls_level_incomp(l1, l2) \
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index f03667213ea..9c5cdc2caae 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -13,7 +13,7 @@
*
* Added conditional policy language extensions
*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support for the policy capability bitmap
*
@@ -31,16 +31,18 @@
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/audit.h>
+#include <linux/flex_array.h>
#include "security.h"
#include "policydb.h"
#include "conditional.h"
#include "mls.h"
+#include "services.h"
#define _DEBUG_HASHES
#ifdef DEBUG_HASHES
-static char *symtab_name[SYM_NUM] = {
+static const char *symtab_name[SYM_NUM] = {
"common prefixes",
"classes",
"roles",
@@ -52,8 +54,6 @@ static char *symtab_name[SYM_NUM] = {
};
#endif
-int selinux_mls_enabled;
-
static unsigned int symtab_sizes[SYM_NUM] = {
2,
32,
@@ -123,6 +123,31 @@ static struct policydb_compat_info policydb_compat[] = {
.sym_num = SYM_NUM,
.ocon_num = OCON_NUM,
},
+ {
+ .version = POLICYDB_VERSION_FILENAME_TRANS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_ROLETRANS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_NEW_OBJECT_DEFAULTS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_DEFAULT_TYPE,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_CONSTRAINT_NAMES,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
};
static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -148,33 +173,92 @@ static int roles_init(struct policydb *p)
int rc;
struct role_datum *role;
+ rc = -ENOMEM;
role = kzalloc(sizeof(*role), GFP_KERNEL);
- if (!role) {
- rc = -ENOMEM;
+ if (!role)
goto out;
- }
+
+ rc = -EINVAL;
role->value = ++p->p_roles.nprim;
- if (role->value != OBJECT_R_VAL) {
- rc = -EINVAL;
- goto out_free_role;
- }
- key = kmalloc(strlen(OBJECT_R)+1, GFP_KERNEL);
- if (!key) {
- rc = -ENOMEM;
- goto out_free_role;
- }
- strcpy(key, OBJECT_R);
+ if (role->value != OBJECT_R_VAL)
+ goto out;
+
+ rc = -ENOMEM;
+ key = kstrdup(OBJECT_R, GFP_KERNEL);
+ if (!key)
+ goto out;
+
rc = hashtab_insert(p->p_roles.table, key, role);
if (rc)
- goto out_free_key;
-out:
- return rc;
+ goto out;
-out_free_key:
+ return 0;
+out:
kfree(key);
-out_free_role:
kfree(role);
- goto out;
+ return rc;
+}
+
+static u32 filenametr_hash(struct hashtab *h, const void *k)
+{
+ const struct filename_trans *ft = k;
+ unsigned long hash;
+ unsigned int byte_num;
+ unsigned char focus;
+
+ hash = ft->stype ^ ft->ttype ^ ft->tclass;
+
+ byte_num = 0;
+ while ((focus = ft->name[byte_num++]))
+ hash = partial_name_hash(focus, hash);
+ return hash & (h->size - 1);
+}
+
+static int filenametr_cmp(struct hashtab *h, const void *k1, const void *k2)
+{
+ const struct filename_trans *ft1 = k1;
+ const struct filename_trans *ft2 = k2;
+ int v;
+
+ v = ft1->stype - ft2->stype;
+ if (v)
+ return v;
+
+ v = ft1->ttype - ft2->ttype;
+ if (v)
+ return v;
+
+ v = ft1->tclass - ft2->tclass;
+ if (v)
+ return v;
+
+ return strcmp(ft1->name, ft2->name);
+
+}
+
+static u32 rangetr_hash(struct hashtab *h, const void *k)
+{
+ const struct range_trans *key = k;
+ return (key->source_type + (key->target_type << 3) +
+ (key->target_class << 5)) & (h->size - 1);
+}
+
+static int rangetr_cmp(struct hashtab *h, const void *k1, const void *k2)
+{
+ const struct range_trans *key1 = k1, *key2 = k2;
+ int v;
+
+ v = key1->source_type - key2->source_type;
+ if (v)
+ return v;
+
+ v = key1->target_type - key2->target_type;
+ if (v)
+ return v;
+
+ v = key1->target_class - key2->target_class;
+
+ return v;
}
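
/*
 * Illustrative sketch, not from the patch: rangetr_hash() and rangetr_cmp()
 * let a hash table be keyed on a whole struct -- one callback mixes the key
 * fields into a bucket index, the other orders keys field by field until the
 * first difference.  A userspace analogue of the same pair (struct and
 * helper names invented for the example):
 */
#include <stdint.h>
#include <stdio.h>

struct rt_key { uint32_t source_type, target_type, target_class; };

static uint32_t rt_hash(const struct rt_key *k, uint32_t table_size)
{
        /* table_size must be a power of two, as with hashtab_create(..., 256) */
        return (k->source_type + (k->target_type << 3) +
                (k->target_class << 5)) & (table_size - 1);
}

static int rt_cmp(const struct rt_key *a, const struct rt_key *b)
{
        if (a->source_type != b->source_type)
                return a->source_type < b->source_type ? -1 : 1;
        if (a->target_type != b->target_type)
                return a->target_type < b->target_type ? -1 : 1;
        if (a->target_class != b->target_class)
                return a->target_class < b->target_class ? -1 : 1;
        return 0;
}

int main(void)
{
        struct rt_key a = { 12, 7, 2 }, b = { 12, 7, 2 };

        printf("bucket=%u equal=%d\n", rt_hash(&a, 256), rt_cmp(&a, &b) == 0);
        return 0;
}
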
/*
@@ -189,31 +273,40 @@ static int policydb_init(struct policydb *p)
for (i = 0; i < SYM_NUM; i++) {
rc = symtab_init(&p->symtab[i], symtab_sizes[i]);
if (rc)
- goto out_free_symtab;
+ goto out;
}
rc = avtab_init(&p->te_avtab);
if (rc)
- goto out_free_symtab;
+ goto out;
rc = roles_init(p);
if (rc)
- goto out_free_symtab;
+ goto out;
rc = cond_policydb_init(p);
if (rc)
- goto out_free_symtab;
+ goto out;
+
+	rc = -ENOMEM;
+	p->filename_trans = hashtab_create(filenametr_hash, filenametr_cmp, (1 << 10));
+ if (!p->filename_trans)
+ goto out;
+ p->range_tr = hashtab_create(rangetr_hash, rangetr_cmp, 256);
+ if (!p->range_tr)
+ goto out;
+
+ ebitmap_init(&p->filename_trans_ttypes);
ebitmap_init(&p->policycaps);
ebitmap_init(&p->permissive_map);
+ return 0;
out:
- return rc;
-
-out_free_symtab:
+ hashtab_destroy(p->filename_trans);
+ hashtab_destroy(p->range_tr);
for (i = 0; i < SYM_NUM; i++)
hashtab_destroy(p->symtab[i].table);
- goto out;
+ return rc;
}
/*
@@ -230,12 +323,17 @@ static int common_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct common_datum *comdatum;
+ struct flex_array *fa;
comdatum = datum;
p = datap;
if (!comdatum->value || comdatum->value > p->p_commons.nprim)
return -EINVAL;
- p->p_common_val_to_name[comdatum->value - 1] = key;
+
+ fa = p->sym_val_to_name[SYM_COMMONS];
+ if (flex_array_put_ptr(fa, comdatum->value - 1, key,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
return 0;
}
@@ -243,12 +341,16 @@ static int class_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct class_datum *cladatum;
+ struct flex_array *fa;
cladatum = datum;
p = datap;
if (!cladatum->value || cladatum->value > p->p_classes.nprim)
return -EINVAL;
- p->p_class_val_to_name[cladatum->value - 1] = key;
+ fa = p->sym_val_to_name[SYM_CLASSES];
+ if (flex_array_put_ptr(fa, cladatum->value - 1, key,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
p->class_val_to_struct[cladatum->value - 1] = cladatum;
return 0;
}
@@ -257,6 +359,7 @@ static int role_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct role_datum *role;
+ struct flex_array *fa;
role = datum;
p = datap;
@@ -264,7 +367,11 @@ static int role_index(void *key, void *datum, void *datap)
|| role->value > p->p_roles.nprim
|| role->bounds > p->p_roles.nprim)
return -EINVAL;
- p->p_role_val_to_name[role->value - 1] = key;
+
+ fa = p->sym_val_to_name[SYM_ROLES];
+ if (flex_array_put_ptr(fa, role->value - 1, key,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
p->role_val_to_struct[role->value - 1] = role;
return 0;
}
@@ -273,6 +380,7 @@ static int type_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct type_datum *typdatum;
+ struct flex_array *fa;
typdatum = datum;
p = datap;
@@ -282,8 +390,15 @@ static int type_index(void *key, void *datum, void *datap)
|| typdatum->value > p->p_types.nprim
|| typdatum->bounds > p->p_types.nprim)
return -EINVAL;
- p->p_type_val_to_name[typdatum->value - 1] = key;
- p->type_val_to_struct[typdatum->value - 1] = typdatum;
+ fa = p->sym_val_to_name[SYM_TYPES];
+ if (flex_array_put_ptr(fa, typdatum->value - 1, key,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
+
+ fa = p->type_val_to_struct_array;
+ if (flex_array_put_ptr(fa, typdatum->value - 1, typdatum,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
}
return 0;
@@ -293,6 +408,7 @@ static int user_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct user_datum *usrdatum;
+ struct flex_array *fa;
usrdatum = datum;
p = datap;
@@ -300,7 +416,11 @@ static int user_index(void *key, void *datum, void *datap)
|| usrdatum->value > p->p_users.nprim
|| usrdatum->bounds > p->p_users.nprim)
return -EINVAL;
- p->p_user_val_to_name[usrdatum->value - 1] = key;
+
+ fa = p->sym_val_to_name[SYM_USERS];
+ if (flex_array_put_ptr(fa, usrdatum->value - 1, key,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
p->user_val_to_struct[usrdatum->value - 1] = usrdatum;
return 0;
}
@@ -309,6 +429,7 @@ static int sens_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct level_datum *levdatum;
+ struct flex_array *fa;
levdatum = datum;
p = datap;
@@ -317,7 +438,10 @@ static int sens_index(void *key, void *datum, void *datap)
if (!levdatum->level->sens ||
levdatum->level->sens > p->p_levels.nprim)
return -EINVAL;
- p->p_sens_val_to_name[levdatum->level->sens - 1] = key;
+ fa = p->sym_val_to_name[SYM_LEVELS];
+ if (flex_array_put_ptr(fa, levdatum->level->sens - 1, key,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
}
return 0;
@@ -327,6 +451,7 @@ static int cat_index(void *key, void *datum, void *datap)
{
struct policydb *p;
struct cat_datum *catdatum;
+ struct flex_array *fa;
catdatum = datum;
p = datap;
@@ -334,7 +459,10 @@ static int cat_index(void *key, void *datum, void *datap)
if (!catdatum->isalias) {
if (!catdatum->value || catdatum->value > p->p_cats.nprim)
return -EINVAL;
- p->p_cat_val_to_name[catdatum->value - 1] = key;
+ fa = p->sym_val_to_name[SYM_CATS];
+ if (flex_array_put_ptr(fa, catdatum->value - 1, key,
+ GFP_KERNEL | __GFP_ZERO))
+ BUG();
}
return 0;
@@ -352,61 +480,28 @@ static int (*index_f[SYM_NUM]) (void *key, void *datum, void *datap) =
cat_index,
};
-/*
- * Define the common val_to_name array and the class
- * val_to_name and val_to_struct arrays in a policy
- * database structure.
- *
- * Caller must clean up upon failure.
- */
-static int policydb_index_classes(struct policydb *p)
+#ifdef DEBUG_HASHES
+static void hash_eval(struct hashtab *h, const char *hash_name)
{
- int rc;
+ struct hashtab_info info;
- p->p_common_val_to_name =
- kmalloc(p->p_commons.nprim * sizeof(char *), GFP_KERNEL);
- if (!p->p_common_val_to_name) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = hashtab_map(p->p_commons.table, common_index, p);
- if (rc)
- goto out;
-
- p->class_val_to_struct =
- kmalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)), GFP_KERNEL);
- if (!p->class_val_to_struct) {
- rc = -ENOMEM;
- goto out;
- }
-
- p->p_class_val_to_name =
- kmalloc(p->p_classes.nprim * sizeof(char *), GFP_KERNEL);
- if (!p->p_class_val_to_name) {
- rc = -ENOMEM;
- goto out;
- }
-
- rc = hashtab_map(p->p_classes.table, class_index, p);
-out:
- return rc;
+ hashtab_stat(h, &info);
+ printk(KERN_DEBUG "SELinux: %s: %d entries and %d/%d buckets used, "
+ "longest chain length %d\n", hash_name, h->nel,
+ info.slots_used, h->size, info.max_chain_len);
}
-#ifdef DEBUG_HASHES
static void symtab_hash_eval(struct symtab *s)
{
int i;
- for (i = 0; i < SYM_NUM; i++) {
- struct hashtab *h = s[i].table;
- struct hashtab_info info;
+ for (i = 0; i < SYM_NUM; i++)
+ hash_eval(s[i].table, symtab_name[i]);
+}
- hashtab_stat(h, &info);
- printk(KERN_DEBUG "SELinux: %s: %d entries and %d/%d buckets used, "
- "longest chain length %d\n", symtab_name[i], h->nel,
- info.slots_used, h->size, info.max_chain_len);
- }
+#else
+static inline void hash_eval(struct hashtab *h, const char *hash_name)
+{
}
#endif
@@ -416,13 +511,13 @@ static void symtab_hash_eval(struct symtab *s)
*
* Caller must clean up on failure.
*/
-static int policydb_index_others(struct policydb *p)
+static int policydb_index(struct policydb *p)
{
- int i, rc = 0;
+ int i, rc;
printk(KERN_DEBUG "SELinux: %d users, %d roles, %d types, %d bools",
p->p_users.nprim, p->p_roles.nprim, p->p_types.nprim, p->p_bools.nprim);
- if (selinux_mls_enabled)
+ if (p->mls_enabled)
printk(", %d sens, %d cats", p->p_levels.nprim,
p->p_cats.nprim);
printk("\n");
@@ -435,47 +530,63 @@ static int policydb_index_others(struct policydb *p)
symtab_hash_eval(p->symtab);
#endif
+ rc = -ENOMEM;
+ p->class_val_to_struct =
+ kmalloc(p->p_classes.nprim * sizeof(*(p->class_val_to_struct)),
+ GFP_KERNEL);
+ if (!p->class_val_to_struct)
+ goto out;
+
+ rc = -ENOMEM;
p->role_val_to_struct =
kmalloc(p->p_roles.nprim * sizeof(*(p->role_val_to_struct)),
GFP_KERNEL);
- if (!p->role_val_to_struct) {
- rc = -ENOMEM;
+ if (!p->role_val_to_struct)
goto out;
- }
+ rc = -ENOMEM;
p->user_val_to_struct =
kmalloc(p->p_users.nprim * sizeof(*(p->user_val_to_struct)),
GFP_KERNEL);
- if (!p->user_val_to_struct) {
- rc = -ENOMEM;
+ if (!p->user_val_to_struct)
goto out;
- }
- p->type_val_to_struct =
- kmalloc(p->p_types.nprim * sizeof(*(p->type_val_to_struct)),
- GFP_KERNEL);
- if (!p->type_val_to_struct) {
- rc = -ENOMEM;
+ /* Yes, I want the sizeof the pointer, not the structure */
+ rc = -ENOMEM;
+ p->type_val_to_struct_array = flex_array_alloc(sizeof(struct type_datum *),
+ p->p_types.nprim,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!p->type_val_to_struct_array)
goto out;
- }
- if (cond_init_bool_indexes(p)) {
- rc = -ENOMEM;
+ rc = flex_array_prealloc(p->type_val_to_struct_array, 0,
+ p->p_types.nprim, GFP_KERNEL | __GFP_ZERO);
+ if (rc)
goto out;
- }
- for (i = SYM_ROLES; i < SYM_NUM; i++) {
- p->sym_val_to_name[i] =
- kmalloc(p->symtab[i].nprim * sizeof(char *), GFP_KERNEL);
- if (!p->sym_val_to_name[i]) {
- rc = -ENOMEM;
+ rc = cond_init_bool_indexes(p);
+ if (rc)
+ goto out;
+
+ for (i = 0; i < SYM_NUM; i++) {
+ rc = -ENOMEM;
+ p->sym_val_to_name[i] = flex_array_alloc(sizeof(char *),
+ p->symtab[i].nprim,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!p->sym_val_to_name[i])
goto out;
- }
+
+ rc = flex_array_prealloc(p->sym_val_to_name[i],
+ 0, p->symtab[i].nprim,
+ GFP_KERNEL | __GFP_ZERO);
+ if (rc)
+ goto out;
+
rc = hashtab_map(p->symtab[i].table, index_f[i], p);
if (rc)
goto out;
}
-
+ rc = 0;
out:
return rc;
}
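
/*
 * Illustrative sketch, not from the patch: policydb_index() above now backs
 * each sym_val_to_name[] table with a flex_array, essentially a preallocated
 * array of fixed-size slots addressed by (value - 1).  A plain userspace
 * analogue of that value-to-name indexing (all names invented for the
 * example; the kernel's flex_array adds paging and put/get helpers):
 */
#include <stdio.h>
#include <stdlib.h>

struct name_index {
        const char **slot;      /* slot[value - 1] -> symbol name */
        unsigned int nprim;     /* number of primary symbols */
};

static int name_index_init(struct name_index *ni, unsigned int nprim)
{
        /* one allocation up front, like flex_array_alloc + flex_array_prealloc */
        ni->slot = calloc(nprim, sizeof(*ni->slot));
        ni->nprim = nprim;
        return ni->slot ? 0 : -1;
}

static int name_index_put(struct name_index *ni, unsigned int value,
                          const char *name)
{
        if (!value || value > ni->nprim)        /* same bounds check as *_index() */
                return -1;
        ni->slot[value - 1] = name;             /* like flex_array_put_ptr() */
        return 0;
}

int main(void)
{
        struct name_index roles;

        if (name_index_init(&roles, 8))
                return 1;
        name_index_put(&roles, 1, "object_r");
        printf("role 1 = %s\n", roles.slot[0]); /* like sym_name(p, SYM_ROLES, 0) */
        free(roles.slot);
        return 0;
}
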
@@ -498,13 +609,28 @@ static int common_destroy(void *key, void *datum, void *p)
struct common_datum *comdatum;
kfree(key);
- comdatum = datum;
- hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
- hashtab_destroy(comdatum->permissions.table);
+ if (datum) {
+ comdatum = datum;
+ hashtab_map(comdatum->permissions.table, perm_destroy, NULL);
+ hashtab_destroy(comdatum->permissions.table);
+ }
kfree(datum);
return 0;
}
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+ if (expr) {
+ ebitmap_destroy(&expr->names);
+ if (expr->type_names) {
+ ebitmap_destroy(&expr->type_names->types);
+ ebitmap_destroy(&expr->type_names->negset);
+ kfree(expr->type_names);
+ }
+ kfree(expr);
+ }
+}
+
static int cls_destroy(void *key, void *datum, void *p)
{
struct class_datum *cladatum;
@@ -512,38 +638,37 @@ static int cls_destroy(void *key, void *datum, void *p)
struct constraint_expr *e, *etmp;
kfree(key);
- cladatum = datum;
- hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
- hashtab_destroy(cladatum->permissions.table);
- constraint = cladatum->constraints;
- while (constraint) {
- e = constraint->expr;
- while (e) {
- ebitmap_destroy(&e->names);
- etmp = e;
- e = e->next;
- kfree(etmp);
+ if (datum) {
+ cladatum = datum;
+ hashtab_map(cladatum->permissions.table, perm_destroy, NULL);
+ hashtab_destroy(cladatum->permissions.table);
+ constraint = cladatum->constraints;
+ while (constraint) {
+ e = constraint->expr;
+ while (e) {
+ etmp = e;
+ e = e->next;
+ constraint_expr_destroy(etmp);
+ }
+ ctemp = constraint;
+ constraint = constraint->next;
+ kfree(ctemp);
}
- ctemp = constraint;
- constraint = constraint->next;
- kfree(ctemp);
- }
-
- constraint = cladatum->validatetrans;
- while (constraint) {
- e = constraint->expr;
- while (e) {
- ebitmap_destroy(&e->names);
- etmp = e;
- e = e->next;
- kfree(etmp);
+
+ constraint = cladatum->validatetrans;
+ while (constraint) {
+ e = constraint->expr;
+ while (e) {
+ etmp = e;
+ e = e->next;
+ constraint_expr_destroy(etmp);
+ }
+ ctemp = constraint;
+ constraint = constraint->next;
+ kfree(ctemp);
}
- ctemp = constraint;
- constraint = constraint->next;
- kfree(ctemp);
+ kfree(cladatum->comkey);
}
-
- kfree(cladatum->comkey);
kfree(datum);
return 0;
}
@@ -553,9 +678,11 @@ static int role_destroy(void *key, void *datum, void *p)
struct role_datum *role;
kfree(key);
- role = datum;
- ebitmap_destroy(&role->dominates);
- ebitmap_destroy(&role->types);
+ if (datum) {
+ role = datum;
+ ebitmap_destroy(&role->dominates);
+ ebitmap_destroy(&role->types);
+ }
kfree(datum);
return 0;
}
@@ -572,11 +699,13 @@ static int user_destroy(void *key, void *datum, void *p)
struct user_datum *usrdatum;
kfree(key);
- usrdatum = datum;
- ebitmap_destroy(&usrdatum->roles);
- ebitmap_destroy(&usrdatum->range.level[0].cat);
- ebitmap_destroy(&usrdatum->range.level[1].cat);
- ebitmap_destroy(&usrdatum->dfltlevel.cat);
+ if (datum) {
+ usrdatum = datum;
+ ebitmap_destroy(&usrdatum->roles);
+ ebitmap_destroy(&usrdatum->range.level[0].cat);
+ ebitmap_destroy(&usrdatum->range.level[1].cat);
+ ebitmap_destroy(&usrdatum->dfltlevel.cat);
+ }
kfree(datum);
return 0;
}
@@ -586,9 +715,11 @@ static int sens_destroy(void *key, void *datum, void *p)
struct level_datum *levdatum;
kfree(key);
- levdatum = datum;
- ebitmap_destroy(&levdatum->level->cat);
- kfree(levdatum->level);
+ if (datum) {
+ levdatum = datum;
+ ebitmap_destroy(&levdatum->level->cat);
+ kfree(levdatum->level);
+ }
kfree(datum);
return 0;
}
@@ -612,8 +743,32 @@ static int (*destroy_f[SYM_NUM]) (void *key, void *datum, void *datap) =
cat_destroy,
};
+static int filenametr_destroy(void *key, void *datum, void *p)
+{
+ struct filename_trans *ft = key;
+ kfree(ft->name);
+ kfree(key);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
+static int range_tr_destroy(void *key, void *datum, void *p)
+{
+ struct mls_range *rt = datum;
+ kfree(key);
+ ebitmap_destroy(&rt->level[0].cat);
+ ebitmap_destroy(&rt->level[1].cat);
+ kfree(datum);
+ cond_resched();
+ return 0;
+}
+
static void ocontext_destroy(struct ocontext *c, int i)
{
+ if (!c)
+ return;
+
context_destroy(&c->context[0]);
context_destroy(&c->context[1]);
if (i == OCON_ISID || i == OCON_FS ||
@@ -632,7 +787,6 @@ void policydb_destroy(struct policydb *p)
int i;
struct role_allow *ra, *lra = NULL;
struct role_trans *tr, *ltr = NULL;
- struct range_trans *rt, *lrt = NULL;
for (i = 0; i < SYM_NUM; i++) {
cond_resched();
@@ -640,13 +794,16 @@ void policydb_destroy(struct policydb *p)
hashtab_destroy(p->symtab[i].table);
}
- for (i = 0; i < SYM_NUM; i++)
- kfree(p->sym_val_to_name[i]);
+ for (i = 0; i < SYM_NUM; i++) {
+ if (p->sym_val_to_name[i])
+ flex_array_free(p->sym_val_to_name[i]);
+ }
kfree(p->class_val_to_struct);
kfree(p->role_val_to_struct);
kfree(p->user_val_to_struct);
- kfree(p->type_val_to_struct);
+ if (p->type_val_to_struct_array)
+ flex_array_free(p->type_val_to_struct_array);
avtab_destroy(&p->te_avtab);
@@ -693,26 +850,25 @@ void policydb_destroy(struct policydb *p)
}
kfree(lra);
- for (rt = p->range_tr; rt; rt = rt->next) {
- cond_resched();
- if (lrt) {
- ebitmap_destroy(&lrt->target_range.level[0].cat);
- ebitmap_destroy(&lrt->target_range.level[1].cat);
- kfree(lrt);
+ hashtab_map(p->filename_trans, filenametr_destroy, NULL);
+ hashtab_destroy(p->filename_trans);
+
+ hashtab_map(p->range_tr, range_tr_destroy, NULL);
+ hashtab_destroy(p->range_tr);
+
+ if (p->type_attr_map_array) {
+ for (i = 0; i < p->p_types.nprim; i++) {
+ struct ebitmap *e;
+
+ e = flex_array_get(p->type_attr_map_array, i);
+ if (!e)
+ continue;
+ ebitmap_destroy(e);
}
- lrt = rt;
- }
- if (lrt) {
- ebitmap_destroy(&lrt->target_range.level[0].cat);
- ebitmap_destroy(&lrt->target_range.level[1].cat);
- kfree(lrt);
+ flex_array_free(p->type_attr_map_array);
}
- if (p->type_attr_map) {
- for (i = 0; i < p->p_types.nprim; i++)
- ebitmap_destroy(&p->type_attr_map[i]);
- }
- kfree(p->type_attr_map);
+ ebitmap_destroy(&p->filename_trans_ttypes);
ebitmap_destroy(&p->policycaps);
ebitmap_destroy(&p->permissive_map);
@@ -736,19 +892,21 @@ int policydb_load_isids(struct policydb *p, struct sidtab *s)
head = p->ocontexts[OCON_ISID];
for (c = head; c; c = c->next) {
+ rc = -EINVAL;
if (!c->context[0].user) {
- printk(KERN_ERR "SELinux: SID %s was never "
- "defined.\n", c->u.name);
- rc = -EINVAL;
+ printk(KERN_ERR "SELinux: SID %s was never defined.\n",
+ c->u.name);
goto out;
}
- if (sidtab_insert(s, c->sid[0], &c->context[0])) {
- printk(KERN_ERR "SELinux: unable to load initial "
- "SID %s.\n", c->u.name);
- rc = -EINVAL;
+
+ rc = sidtab_insert(s, c->sid[0], &c->context[0]);
+ if (rc) {
+ printk(KERN_ERR "SELinux: unable to load initial SID %s.\n",
+ c->u.name);
goto out;
}
}
+ rc = 0;
out:
return rc;
}
@@ -797,8 +955,7 @@ int policydb_context_isvalid(struct policydb *p, struct context *c)
* Role must be authorized for the type.
*/
role = p->role_val_to_struct[c->role - 1];
- if (!ebitmap_get_bit(&role->types,
- c->type - 1))
+ if (!ebitmap_get_bit(&role->types, c->type - 1))
/* role may not be associated with type */
return 0;
@@ -809,8 +966,7 @@ int policydb_context_isvalid(struct policydb *p, struct context *c)
if (!usrdatum)
return 0;
- if (!ebitmap_get_bit(&usrdatum->roles,
- c->role - 1))
+ if (!ebitmap_get_bit(&usrdatum->roles, c->role - 1))
/* user may not be associated with role */
return 0;
}
@@ -832,20 +988,22 @@ static int mls_read_range_helper(struct mls_range *r, void *fp)
int rc;
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
+ if (rc)
goto out;
+ rc = -EINVAL;
items = le32_to_cpu(buf[0]);
if (items > ARRAY_SIZE(buf)) {
printk(KERN_ERR "SELinux: mls: range overflow\n");
- rc = -EINVAL;
goto out;
}
+
rc = next_entry(buf, fp, sizeof(u32) * items);
- if (rc < 0) {
+ if (rc) {
printk(KERN_ERR "SELinux: mls: truncated range\n");
goto out;
}
+
r->level[0].sens = le32_to_cpu(buf[0]);
if (items > 1)
r->level[1].sens = le32_to_cpu(buf[1]);
@@ -854,15 +1012,13 @@ static int mls_read_range_helper(struct mls_range *r, void *fp)
rc = ebitmap_read(&r->level[0].cat, fp);
if (rc) {
- printk(KERN_ERR "SELinux: mls: error reading low "
- "categories\n");
+ printk(KERN_ERR "SELinux: mls: error reading low categories\n");
goto out;
}
if (items > 1) {
rc = ebitmap_read(&r->level[1].cat, fp);
if (rc) {
- printk(KERN_ERR "SELinux: mls: error reading high "
- "categories\n");
+ printk(KERN_ERR "SELinux: mls: error reading high categories\n");
goto bad_high;
}
} else {
@@ -873,12 +1029,11 @@ static int mls_read_range_helper(struct mls_range *r, void *fp)
}
}
- rc = 0;
-out:
- return rc;
+ return 0;
bad_high:
ebitmap_destroy(&r->level[0].cat);
- goto out;
+out:
+ return rc;
}
/*
@@ -893,7 +1048,7 @@ static int context_read_and_validate(struct context *c,
int rc;
rc = next_entry(buf, fp, sizeof buf);
- if (rc < 0) {
+ if (rc) {
printk(KERN_ERR "SELinux: context truncated\n");
goto out;
}
@@ -901,19 +1056,20 @@ static int context_read_and_validate(struct context *c,
c->role = le32_to_cpu(buf[1]);
c->type = le32_to_cpu(buf[2]);
if (p->policyvers >= POLICYDB_VERSION_MLS) {
- if (mls_read_range_helper(&c->range, fp)) {
- printk(KERN_ERR "SELinux: error reading MLS range of "
- "context\n");
- rc = -EINVAL;
+ rc = mls_read_range_helper(&c->range, fp);
+ if (rc) {
+ printk(KERN_ERR "SELinux: error reading MLS range of context\n");
goto out;
}
}
+ rc = -EINVAL;
if (!policydb_context_isvalid(p, c)) {
printk(KERN_ERR "SELinux: invalid security context\n");
context_destroy(c);
- rc = -EINVAL;
+ goto out;
}
+ rc = 0;
out:
return rc;
}
@@ -932,37 +1088,36 @@ static int perm_read(struct policydb *p, struct hashtab *h, void *fp)
__le32 buf[2];
u32 len;
+ rc = -ENOMEM;
perdatum = kzalloc(sizeof(*perdatum), GFP_KERNEL);
- if (!perdatum) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!perdatum)
+ goto bad;
rc = next_entry(buf, fp, sizeof buf);
- if (rc < 0)
+ if (rc)
goto bad;
len = le32_to_cpu(buf[0]);
perdatum->value = le32_to_cpu(buf[1]);
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
- if (!key) {
- rc = -ENOMEM;
+ if (!key)
goto bad;
- }
+
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto bad;
key[len] = '\0';
rc = hashtab_insert(h, key, perdatum);
if (rc)
goto bad;
-out:
- return rc;
+
+ return 0;
bad:
perm_destroy(key, perdatum, NULL);
- goto out;
+ return rc;
}
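
/*
 * Illustrative sketch, not from the patch: perm_read() and the other *_read()
 * helpers below share one on-disk pattern -- a 32-bit little-endian length
 * followed by that many key bytes, which are then NUL-terminated.  A
 * userspace analogue over a FILE stream, with fread() standing in for
 * next_entry():
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static char *read_len_prefixed(FILE *fp)
{
        unsigned char lenbuf[4];
        uint32_t len;
        char *key;

        if (fread(lenbuf, 1, 4, fp) != 4)
                return NULL;
        /* little-endian u32, like le32_to_cpu(buf[0]) */
        len = lenbuf[0] | lenbuf[1] << 8 | lenbuf[2] << 16 |
              (uint32_t)lenbuf[3] << 24;

        key = malloc(len + 1);
        if (!key)
                return NULL;
        if (fread(key, 1, len, fp) != len) {
                free(key);
                return NULL;
        }
        key[len] = '\0';
        return key;
}

int main(void)
{
        FILE *fp = tmpfile();
        char *key;

        if (!fp)
                return 1;
        /* 4-byte length (7) followed by "getattr", as a record would begin */
        fwrite("\x07\x00\x00\x00getattr", 1, 11, fp);
        rewind(fp);
        key = read_len_prefixed(fp);
        printf("%s\n", key ? key : "(error)");
        free(key);
        fclose(fp);
        return 0;
}
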
static int common_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -973,14 +1128,13 @@ static int common_read(struct policydb *p, struct hashtab *h, void *fp)
u32 len, nel;
int i, rc;
+ rc = -ENOMEM;
comdatum = kzalloc(sizeof(*comdatum), GFP_KERNEL);
- if (!comdatum) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!comdatum)
+ goto bad;
rc = next_entry(buf, fp, sizeof buf);
- if (rc < 0)
+ if (rc)
goto bad;
len = le32_to_cpu(buf[0]);
@@ -992,13 +1146,13 @@ static int common_read(struct policydb *p, struct hashtab *h, void *fp)
comdatum->permissions.nprim = le32_to_cpu(buf[2]);
nel = le32_to_cpu(buf[3]);
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
- if (!key) {
- rc = -ENOMEM;
+ if (!key)
goto bad;
- }
+
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto bad;
key[len] = '\0';
@@ -1011,15 +1165,40 @@ static int common_read(struct policydb *p, struct hashtab *h, void *fp)
rc = hashtab_insert(h, key, comdatum);
if (rc)
goto bad;
-out:
- return rc;
+ return 0;
bad:
common_destroy(key, comdatum, NULL);
- goto out;
+ return rc;
+}
+
+static void type_set_init(struct type_set *t)
+{
+ ebitmap_init(&t->types);
+ ebitmap_init(&t->negset);
+}
+
+static int type_set_read(struct type_set *t, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ if (ebitmap_read(&t->types, fp))
+ return -EINVAL;
+ if (ebitmap_read(&t->negset, fp))
+ return -EINVAL;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc < 0)
+ return -EINVAL;
+ t->flags = le32_to_cpu(buf[0]);
+
+ return 0;
}
-static int read_cons_helper(struct constraint_node **nodep, int ncons,
- int allowxtarget, void *fp)
+
+static int read_cons_helper(struct policydb *p,
+ struct constraint_node **nodep,
+ int ncons, int allowxtarget, void *fp)
{
struct constraint_node *c, *lc;
struct constraint_expr *e, *le;
@@ -1039,7 +1218,7 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons,
*nodep = c;
rc = next_entry(buf, fp, (sizeof(u32) * 2));
- if (rc < 0)
+ if (rc)
return rc;
c->permissions = le32_to_cpu(buf[0]);
nexpr = le32_to_cpu(buf[1]);
@@ -1056,7 +1235,7 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons,
c->expr = e;
rc = next_entry(buf, fp, (sizeof(u32) * 3));
- if (rc < 0)
+ if (rc)
return rc;
e->expr_type = le32_to_cpu(buf[0]);
e->attr = le32_to_cpu(buf[1]);
@@ -1084,8 +1263,21 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons,
if (depth == (CEXPR_MAXDEPTH - 1))
return -EINVAL;
depth++;
- if (ebitmap_read(&e->names, fp))
- return -EINVAL;
+ rc = ebitmap_read(&e->names, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >=
+ POLICYDB_VERSION_CONSTRAINT_NAMES) {
+ e->type_names = kzalloc(sizeof
+ (*e->type_names),
+ GFP_KERNEL);
+ if (!e->type_names)
+ return -ENOMEM;
+ type_set_init(e->type_names);
+ rc = type_set_read(e->type_names, fp);
+ if (rc)
+ return rc;
+ }
break;
default:
return -EINVAL;
@@ -1108,14 +1300,13 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
u32 len, len2, ncons, nel;
int i, rc;
+ rc = -ENOMEM;
cladatum = kzalloc(sizeof(*cladatum), GFP_KERNEL);
- if (!cladatum) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!cladatum)
+ goto bad;
rc = next_entry(buf, fp, sizeof(u32)*6);
- if (rc < 0)
+ if (rc)
goto bad;
len = le32_to_cpu(buf[0]);
@@ -1130,33 +1321,30 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
ncons = le32_to_cpu(buf[5]);
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
- if (!key) {
- rc = -ENOMEM;
+ if (!key)
goto bad;
- }
+
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto bad;
key[len] = '\0';
if (len2) {
+ rc = -ENOMEM;
cladatum->comkey = kmalloc(len2 + 1, GFP_KERNEL);
- if (!cladatum->comkey) {
- rc = -ENOMEM;
+ if (!cladatum->comkey)
goto bad;
- }
rc = next_entry(cladatum->comkey, fp, len2);
- if (rc < 0)
+ if (rc)
goto bad;
cladatum->comkey[len2] = '\0';
- cladatum->comdatum = hashtab_search(p->p_commons.table,
- cladatum->comkey);
+ rc = -EINVAL;
+ cladatum->comdatum = hashtab_search(p->p_commons.table, cladatum->comkey);
if (!cladatum->comdatum) {
- printk(KERN_ERR "SELinux: unknown common %s\n",
- cladatum->comkey);
- rc = -EINVAL;
+ printk(KERN_ERR "SELinux: unknown common %s\n", cladatum->comkey);
goto bad;
}
}
@@ -1166,31 +1354,47 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
goto bad;
}
- rc = read_cons_helper(&cladatum->constraints, ncons, 0, fp);
+ rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp);
if (rc)
goto bad;
if (p->policyvers >= POLICYDB_VERSION_VALIDATETRANS) {
/* grab the validatetrans rules */
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
+ if (rc)
goto bad;
ncons = le32_to_cpu(buf[0]);
- rc = read_cons_helper(&cladatum->validatetrans, ncons, 1, fp);
+ rc = read_cons_helper(p, &cladatum->validatetrans,
+ ncons, 1, fp);
+ if (rc)
+ goto bad;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+ rc = next_entry(buf, fp, sizeof(u32) * 3);
+ if (rc)
+ goto bad;
+
+ cladatum->default_user = le32_to_cpu(buf[0]);
+ cladatum->default_role = le32_to_cpu(buf[1]);
+ cladatum->default_range = le32_to_cpu(buf[2]);
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+ rc = next_entry(buf, fp, sizeof(u32) * 1);
if (rc)
goto bad;
+ cladatum->default_type = le32_to_cpu(buf[0]);
}
rc = hashtab_insert(h, key, cladatum);
if (rc)
goto bad;
- rc = 0;
-out:
- return rc;
+ return 0;
bad:
cls_destroy(key, cladatum, NULL);
- goto out;
+ return rc;
}
static int role_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1201,17 +1405,16 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
__le32 buf[3];
u32 len;
+ rc = -ENOMEM;
role = kzalloc(sizeof(*role), GFP_KERNEL);
- if (!role) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!role)
+ goto bad;
if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
to_read = 3;
rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
- if (rc < 0)
+ if (rc)
goto bad;
len = le32_to_cpu(buf[0]);
@@ -1219,13 +1422,13 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
role->bounds = le32_to_cpu(buf[2]);
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
- if (!key) {
- rc = -ENOMEM;
+ if (!key)
goto bad;
- }
+
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto bad;
key[len] = '\0';
@@ -1238,10 +1441,10 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
goto bad;
if (strcmp(key, OBJECT_R) == 0) {
+ rc = -EINVAL;
if (role->value != OBJECT_R_VAL) {
printk(KERN_ERR "SELinux: Role %s has wrong value %d\n",
OBJECT_R, role->value);
- rc = -EINVAL;
goto bad;
}
rc = 0;
@@ -1251,11 +1454,10 @@ static int role_read(struct policydb *p, struct hashtab *h, void *fp)
rc = hashtab_insert(h, key, role);
if (rc)
goto bad;
-out:
- return rc;
+ return 0;
bad:
role_destroy(key, role, NULL);
- goto out;
+ return rc;
}
static int type_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1266,17 +1468,16 @@ static int type_read(struct policydb *p, struct hashtab *h, void *fp)
__le32 buf[4];
u32 len;
+ rc = -ENOMEM;
typdatum = kzalloc(sizeof(*typdatum), GFP_KERNEL);
- if (!typdatum) {
- rc = -ENOMEM;
- return rc;
- }
+ if (!typdatum)
+ goto bad;
if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
to_read = 4;
rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
- if (rc < 0)
+ if (rc)
goto bad;
len = le32_to_cpu(buf[0]);
@@ -1294,24 +1495,22 @@ static int type_read(struct policydb *p, struct hashtab *h, void *fp)
typdatum->primary = le32_to_cpu(buf[2]);
}
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
- if (!key) {
- rc = -ENOMEM;
+ if (!key)
goto bad;
- }
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto bad;
key[len] = '\0';
rc = hashtab_insert(h, key, typdatum);
if (rc)
goto bad;
-out:
- return rc;
+ return 0;
bad:
type_destroy(key, typdatum, NULL);
- goto out;
+ return rc;
}
@@ -1327,22 +1526,18 @@ static int mls_read_level(struct mls_level *lp, void *fp)
memset(lp, 0, sizeof(*lp));
rc = next_entry(buf, fp, sizeof buf);
- if (rc < 0) {
+ if (rc) {
printk(KERN_ERR "SELinux: mls: truncated level\n");
- goto bad;
+ return rc;
}
lp->sens = le32_to_cpu(buf[0]);
- if (ebitmap_read(&lp->cat, fp)) {
- printk(KERN_ERR "SELinux: mls: error reading level "
- "categories\n");
- goto bad;
+ rc = ebitmap_read(&lp->cat, fp);
+ if (rc) {
+ printk(KERN_ERR "SELinux: mls: error reading level categories\n");
+ return rc;
}
-
return 0;
-
-bad:
- return -EINVAL;
}
static int user_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1353,17 +1548,16 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp)
__le32 buf[3];
u32 len;
+ rc = -ENOMEM;
usrdatum = kzalloc(sizeof(*usrdatum), GFP_KERNEL);
- if (!usrdatum) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!usrdatum)
+ goto bad;
if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
to_read = 3;
rc = next_entry(buf, fp, sizeof(buf[0]) * to_read);
- if (rc < 0)
+ if (rc)
goto bad;
len = le32_to_cpu(buf[0]);
@@ -1371,13 +1565,12 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp)
if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
usrdatum->bounds = le32_to_cpu(buf[2]);
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_KERNEL);
- if (!key) {
- rc = -ENOMEM;
+ if (!key)
goto bad;
- }
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto bad;
key[len] = '\0';
@@ -1397,11 +1590,10 @@ static int user_read(struct policydb *p, struct hashtab *h, void *fp)
rc = hashtab_insert(h, key, usrdatum);
if (rc)
goto bad;
-out:
- return rc;
+ return 0;
bad:
user_destroy(key, usrdatum, NULL);
- goto out;
+ return rc;
}
static int sens_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1412,47 +1604,43 @@ static int sens_read(struct policydb *p, struct hashtab *h, void *fp)
__le32 buf[2];
u32 len;
+ rc = -ENOMEM;
levdatum = kzalloc(sizeof(*levdatum), GFP_ATOMIC);
- if (!levdatum) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!levdatum)
+ goto bad;
rc = next_entry(buf, fp, sizeof buf);
- if (rc < 0)
+ if (rc)
goto bad;
len = le32_to_cpu(buf[0]);
levdatum->isalias = le32_to_cpu(buf[1]);
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_ATOMIC);
- if (!key) {
- rc = -ENOMEM;
+ if (!key)
goto bad;
- }
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto bad;
key[len] = '\0';
+ rc = -ENOMEM;
levdatum->level = kmalloc(sizeof(struct mls_level), GFP_ATOMIC);
- if (!levdatum->level) {
- rc = -ENOMEM;
+ if (!levdatum->level)
goto bad;
- }
- if (mls_read_level(levdatum->level, fp)) {
- rc = -EINVAL;
+
+ rc = mls_read_level(levdatum->level, fp);
+ if (rc)
goto bad;
- }
rc = hashtab_insert(h, key, levdatum);
if (rc)
goto bad;
-out:
- return rc;
+ return 0;
bad:
sens_destroy(key, levdatum, NULL);
- goto out;
+ return rc;
}
static int cat_read(struct policydb *p, struct hashtab *h, void *fp)
@@ -1463,39 +1651,35 @@ static int cat_read(struct policydb *p, struct hashtab *h, void *fp)
__le32 buf[3];
u32 len;
+ rc = -ENOMEM;
catdatum = kzalloc(sizeof(*catdatum), GFP_ATOMIC);
- if (!catdatum) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!catdatum)
+ goto bad;
rc = next_entry(buf, fp, sizeof buf);
- if (rc < 0)
+ if (rc)
goto bad;
len = le32_to_cpu(buf[0]);
catdatum->value = le32_to_cpu(buf[1]);
catdatum->isalias = le32_to_cpu(buf[2]);
+ rc = -ENOMEM;
key = kmalloc(len + 1, GFP_ATOMIC);
- if (!key) {
- rc = -ENOMEM;
+ if (!key)
goto bad;
- }
rc = next_entry(key, fp, len);
- if (rc < 0)
+ if (rc)
goto bad;
key[len] = '\0';
rc = hashtab_insert(h, key, catdatum);
if (rc)
goto bad;
-out:
- return rc;
-
+ return 0;
bad:
cat_destroy(key, catdatum, NULL);
- goto out;
+ return rc;
}
static int (*read_f[SYM_NUM]) (struct policydb *p, struct hashtab *h, void *fp) =
@@ -1536,9 +1720,9 @@ static int user_bounds_sanity_check(void *key, void *datum, void *datap)
printk(KERN_ERR
"SELinux: boundary violated policy: "
"user=%s role=%s bounds=%s\n",
- p->p_user_val_to_name[user->value - 1],
- p->p_role_val_to_name[bit],
- p->p_user_val_to_name[upper->value - 1]);
+ sym_name(p, SYM_USERS, user->value - 1),
+ sym_name(p, SYM_ROLES, bit),
+ sym_name(p, SYM_USERS, upper->value - 1));
return -EINVAL;
}
@@ -1573,9 +1757,9 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap)
printk(KERN_ERR
"SELinux: boundary violated policy: "
"role=%s type=%s bounds=%s\n",
- p->p_role_val_to_name[role->value - 1],
- p->p_type_val_to_name[bit],
- p->p_role_val_to_name[upper->value - 1]);
+ sym_name(p, SYM_ROLES, role->value - 1),
+ sym_name(p, SYM_TYPES, bit),
+ sym_name(p, SYM_ROLES, upper->value - 1));
return -EINVAL;
}
@@ -1586,11 +1770,11 @@ static int role_bounds_sanity_check(void *key, void *datum, void *datap)
static int type_bounds_sanity_check(void *key, void *datum, void *datap)
{
- struct type_datum *upper, *type;
+ struct type_datum *upper;
struct policydb *p = datap;
int depth = 0;
- upper = type = datum;
+ upper = datum;
while (upper->bounds) {
if (++depth == POLICYDB_BOUNDS_MAXDEPTH) {
printk(KERN_ERR "SELinux: type %s: "
@@ -1599,12 +1783,15 @@ static int type_bounds_sanity_check(void *key, void *datum, void *datap)
return -EINVAL;
}
- upper = p->type_val_to_struct[upper->bounds - 1];
+ upper = flex_array_get_ptr(p->type_val_to_struct_array,
+ upper->bounds - 1);
+ BUG_ON(!upper);
+
if (upper->attribute) {
printk(KERN_ERR "SELinux: type %s: "
"bounded by attribute %s",
(char *) key,
- p->p_type_val_to_name[upper->value - 1]);
+ sym_name(p, SYM_TYPES, upper->value - 1));
return -EINVAL;
}
}
@@ -1637,8 +1824,6 @@ static int policydb_bounds_sanity_check(struct policydb *p)
return 0;
}
-extern int ss_initialized;
-
u16 string_to_security_class(struct policydb *p, const char *name)
{
struct class_datum *cladatum;
@@ -1673,6 +1858,425 @@ u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name)
return 1U << (perdatum->value-1);
}
+static int range_read(struct policydb *p, void *fp)
+{
+ struct range_trans *rt = NULL;
+ struct mls_range *r = NULL;
+ int i, rc;
+ __le32 buf[2];
+ u32 nel;
+
+ if (p->policyvers < POLICYDB_VERSION_MLS)
+ return 0;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ nel = le32_to_cpu(buf[0]);
+ for (i = 0; i < nel; i++) {
+ rc = -ENOMEM;
+ rt = kzalloc(sizeof(*rt), GFP_KERNEL);
+ if (!rt)
+ goto out;
+
+ rc = next_entry(buf, fp, (sizeof(u32) * 2));
+ if (rc)
+ goto out;
+
+ rt->source_type = le32_to_cpu(buf[0]);
+ rt->target_type = le32_to_cpu(buf[1]);
+ if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ rt->target_class = le32_to_cpu(buf[0]);
+ } else
+ rt->target_class = p->process_class;
+
+ rc = -EINVAL;
+ if (!policydb_type_isvalid(p, rt->source_type) ||
+ !policydb_type_isvalid(p, rt->target_type) ||
+ !policydb_class_isvalid(p, rt->target_class))
+ goto out;
+
+ rc = -ENOMEM;
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ goto out;
+
+ rc = mls_read_range_helper(r, fp);
+ if (rc)
+ goto out;
+
+ rc = -EINVAL;
+ if (!mls_range_isvalid(p, r)) {
+ printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
+ goto out;
+ }
+
+ rc = hashtab_insert(p->range_tr, rt, r);
+ if (rc)
+ goto out;
+
+ rt = NULL;
+ r = NULL;
+ }
+ hash_eval(p->range_tr, "rangetr");
+ rc = 0;
+out:
+ kfree(rt);
+ kfree(r);
+ return rc;
+}
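
/*
 * Illustrative sketch, not from the patch: range_read() above allocates a key
 * and a value per iteration, funnels every failure to one exit label, and
 * clears the local pointers after a successful insert so that label frees
 * memory only when the loop bailed out early.  The same ownership idiom in
 * miniature (the toy table is invented for the example):
 */
#include <stdio.h>
#include <stdlib.h>

#define SLOTS 8

struct toy_table { void *key[SLOTS]; void *val[SLOTS]; int n; };

static int toy_insert(struct toy_table *t, void *key, void *val)
{
        if (t->n >= SLOTS)
                return -1;      /* failed: the caller still owns key and val */
        t->key[t->n] = key;
        t->val[t->n] = val;
        t->n++;
        return 0;
}

static int load_entries(struct toy_table *t, int n)
{
        void *key = NULL, *val = NULL;
        int i, rc;

        for (i = 0; i < n; i++) {
                rc = -1;
                key = malloc(16);
                val = malloc(16);
                if (!key || !val)
                        goto out;

                rc = toy_insert(t, key, val);   /* on success the table owns both */
                if (rc)
                        goto out;

                key = NULL;     /* so the shared exit path frees only on failure */
                val = NULL;
        }
        rc = 0;
out:
        free(key);              /* both are no-ops once ownership has moved */
        free(val);
        return rc;
}

int main(void)
{
        struct toy_table t = { .n = 0 };
        int rc = load_entries(&t, 3);

        printf("rc=%d entries=%d\n", rc, t.n);
        while (t.n--) {
                free(t.key[t.n]);
                free(t.val[t.n]);
        }
        return 0;
}
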
+
+static int filename_trans_read(struct policydb *p, void *fp)
+{
+ struct filename_trans *ft;
+ struct filename_trans_datum *otype;
+ char *name;
+ u32 nel, len;
+ __le32 buf[4];
+ int rc, i;
+
+ if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+ return 0;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ return rc;
+ nel = le32_to_cpu(buf[0]);
+
+ for (i = 0; i < nel; i++) {
+ ft = NULL;
+ otype = NULL;
+ name = NULL;
+
+ rc = -ENOMEM;
+ ft = kzalloc(sizeof(*ft), GFP_KERNEL);
+ if (!ft)
+ goto out;
+
+ rc = -ENOMEM;
+ otype = kmalloc(sizeof(*otype), GFP_KERNEL);
+ if (!otype)
+ goto out;
+
+ /* length of the path component string */
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = -ENOMEM;
+ name = kmalloc(len + 1, GFP_KERNEL);
+ if (!name)
+ goto out;
+
+ ft->name = name;
+
+ /* path component string */
+ rc = next_entry(name, fp, len);
+ if (rc)
+ goto out;
+ name[len] = 0;
+
+ rc = next_entry(buf, fp, sizeof(u32) * 4);
+ if (rc)
+ goto out;
+
+ ft->stype = le32_to_cpu(buf[0]);
+ ft->ttype = le32_to_cpu(buf[1]);
+ ft->tclass = le32_to_cpu(buf[2]);
+
+ otype->otype = le32_to_cpu(buf[3]);
+
+ rc = ebitmap_set_bit(&p->filename_trans_ttypes, ft->ttype, 1);
+ if (rc)
+ goto out;
+
+ rc = hashtab_insert(p->filename_trans, ft, otype);
+ if (rc) {
+ /*
+ * Do not return -EEXIST to the caller, or the system
+ * will not boot.
+ */
+ if (rc != -EEXIST)
+ goto out;
+ /* But free memory to avoid memory leak. */
+ kfree(ft);
+ kfree(name);
+ kfree(otype);
+ }
+ }
+ hash_eval(p->filename_trans, "filenametr");
+ return 0;
+out:
+ kfree(ft);
+ kfree(name);
+ kfree(otype);
+
+ return rc;
+}
+
+static int genfs_read(struct policydb *p, void *fp)
+{
+ int i, j, rc;
+ u32 nel, nel2, len, len2;
+ __le32 buf[1];
+ struct ocontext *l, *c;
+ struct ocontext *newc = NULL;
+ struct genfs *genfs_p, *genfs;
+ struct genfs *newgenfs = NULL;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ nel = le32_to_cpu(buf[0]);
+
+ for (i = 0; i < nel; i++) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = -ENOMEM;
+ newgenfs = kzalloc(sizeof(*newgenfs), GFP_KERNEL);
+ if (!newgenfs)
+ goto out;
+
+ rc = -ENOMEM;
+ newgenfs->fstype = kmalloc(len + 1, GFP_KERNEL);
+ if (!newgenfs->fstype)
+ goto out;
+
+ rc = next_entry(newgenfs->fstype, fp, len);
+ if (rc)
+ goto out;
+
+ newgenfs->fstype[len] = 0;
+
+ for (genfs_p = NULL, genfs = p->genfs; genfs;
+ genfs_p = genfs, genfs = genfs->next) {
+ rc = -EINVAL;
+ if (strcmp(newgenfs->fstype, genfs->fstype) == 0) {
+ printk(KERN_ERR "SELinux: dup genfs fstype %s\n",
+ newgenfs->fstype);
+ goto out;
+ }
+ if (strcmp(newgenfs->fstype, genfs->fstype) < 0)
+ break;
+ }
+ newgenfs->next = genfs;
+ if (genfs_p)
+ genfs_p->next = newgenfs;
+ else
+ p->genfs = newgenfs;
+ genfs = newgenfs;
+ newgenfs = NULL;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ nel2 = le32_to_cpu(buf[0]);
+ for (j = 0; j < nel2; j++) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = -ENOMEM;
+ newc = kzalloc(sizeof(*newc), GFP_KERNEL);
+ if (!newc)
+ goto out;
+
+ rc = -ENOMEM;
+ newc->u.name = kmalloc(len + 1, GFP_KERNEL);
+ if (!newc->u.name)
+ goto out;
+
+ rc = next_entry(newc->u.name, fp, len);
+ if (rc)
+ goto out;
+ newc->u.name[len] = 0;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ newc->v.sclass = le32_to_cpu(buf[0]);
+ rc = context_read_and_validate(&newc->context[0], p, fp);
+ if (rc)
+ goto out;
+
+ for (l = NULL, c = genfs->head; c;
+ l = c, c = c->next) {
+ rc = -EINVAL;
+ if (!strcmp(newc->u.name, c->u.name) &&
+ (!c->v.sclass || !newc->v.sclass ||
+ newc->v.sclass == c->v.sclass)) {
+ printk(KERN_ERR "SELinux: dup genfs entry (%s,%s)\n",
+ genfs->fstype, c->u.name);
+ goto out;
+ }
+ len = strlen(newc->u.name);
+ len2 = strlen(c->u.name);
+ if (len > len2)
+ break;
+ }
+
+ newc->next = c;
+ if (l)
+ l->next = newc;
+ else
+ genfs->head = newc;
+ newc = NULL;
+ }
+ }
+ rc = 0;
+out:
+ if (newgenfs)
+ kfree(newgenfs->fstype);
+ kfree(newgenfs);
+ ocontext_destroy(newc, OCON_FSUSE);
+
+ return rc;
+}
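
/*
 * Illustrative sketch, not from the patch: genfs_read() above keeps each
 * filesystem's path entries ordered so that strictly longer (more specific)
 * paths come first, which lets the lookup side prefer the most specific
 * prefix.  The same descending-length insertion on a plain linked list:
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { const char *path; struct entry *next; };

static struct entry *insert_by_len(struct entry *head, const char *path)
{
        struct entry *n = malloc(sizeof(*n));
        struct entry *prev = NULL, *cur;

        if (!n)
                return head;    /* allocation failure: keep the list unchanged */
        n->path = path;
        for (cur = head; cur; prev = cur, cur = cur->next)
                if (strlen(path) > strlen(cur->path))
                        break;  /* the new entry is more specific than cur */
        n->next = cur;
        if (!prev)
                return n;       /* new head */
        prev->next = n;
        return head;
}

int main(void)
{
        struct entry *head = NULL, *e;

        head = insert_by_len(head, "/");
        head = insert_by_len(head, "/sys/devices");
        head = insert_by_len(head, "/sys");
        for (e = head; e; e = e->next)
                printf("%s\n", e->path);        /* /sys/devices, /sys, / */
        while (head) {
                e = head->next;
                free(head);
                head = e;
        }
        return 0;
}
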
+
+static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
+ void *fp)
+{
+ int i, j, rc;
+ u32 nel, len;
+ __le32 buf[3];
+ struct ocontext *l, *c;
+ u32 nodebuf[8];
+
+ for (i = 0; i < info->ocon_num; i++) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ nel = le32_to_cpu(buf[0]);
+
+ l = NULL;
+ for (j = 0; j < nel; j++) {
+ rc = -ENOMEM;
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ goto out;
+ if (l)
+ l->next = c;
+ else
+ p->ocontexts[i] = c;
+ l = c;
+
+ switch (i) {
+ case OCON_ISID:
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+
+ c->sid[0] = le32_to_cpu(buf[0]);
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_FS:
+ case OCON_NETIF:
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto out;
+ len = le32_to_cpu(buf[0]);
+
+ rc = -ENOMEM;
+ c->u.name = kmalloc(len + 1, GFP_KERNEL);
+ if (!c->u.name)
+ goto out;
+
+ rc = next_entry(c->u.name, fp, len);
+ if (rc)
+ goto out;
+
+ c->u.name[len] = 0;
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ rc = context_read_and_validate(&c->context[1], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_PORT:
+ rc = next_entry(buf, fp, sizeof(u32)*3);
+ if (rc)
+ goto out;
+ c->u.port.protocol = le32_to_cpu(buf[0]);
+ c->u.port.low_port = le32_to_cpu(buf[1]);
+ c->u.port.high_port = le32_to_cpu(buf[2]);
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_NODE:
+ rc = next_entry(nodebuf, fp, sizeof(u32) * 2);
+ if (rc)
+ goto out;
+ c->u.node.addr = nodebuf[0]; /* network order */
+ c->u.node.mask = nodebuf[1]; /* network order */
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_FSUSE:
+ rc = next_entry(buf, fp, sizeof(u32)*2);
+ if (rc)
+ goto out;
+
+ rc = -EINVAL;
+ c->v.behavior = le32_to_cpu(buf[0]);
+ /* Determined at runtime, not in policy DB. */
+ if (c->v.behavior == SECURITY_FS_USE_MNTPOINT)
+ goto out;
+ if (c->v.behavior > SECURITY_FS_USE_MAX)
+ goto out;
+
+ rc = -ENOMEM;
+ len = le32_to_cpu(buf[1]);
+ c->u.name = kmalloc(len + 1, GFP_KERNEL);
+ if (!c->u.name)
+ goto out;
+
+ rc = next_entry(c->u.name, fp, len);
+ if (rc)
+ goto out;
+ c->u.name[len] = 0;
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ case OCON_NODE6: {
+ int k;
+
+ rc = next_entry(nodebuf, fp, sizeof(u32) * 8);
+ if (rc)
+ goto out;
+ for (k = 0; k < 4; k++)
+ c->u.node6.addr[k] = nodebuf[k];
+ for (k = 0; k < 4; k++)
+ c->u.node6.mask[k] = nodebuf[k+4];
+ rc = context_read_and_validate(&c->context[0], p, fp);
+ if (rc)
+ goto out;
+ break;
+ }
+ }
+ }
+ }
+ rc = 0;
+out:
+ return rc;
+}
+
/*
* Read the configuration data from a policy database binary
* representation file into a policy database structure.
@@ -1681,27 +2285,23 @@ int policydb_read(struct policydb *p, void *fp)
{
struct role_allow *ra, *lra;
struct role_trans *tr, *ltr;
- struct ocontext *l, *c, *newc;
- struct genfs *genfs_p, *genfs, *newgenfs;
int i, j, rc;
__le32 buf[4];
- u32 nodebuf[8];
- u32 len, len2, config, nprim, nel, nel2;
+ u32 len, nprim, nel;
+
char *policydb_str;
struct policydb_compat_info *info;
- struct range_trans *rt, *lrt;
-
- config = 0;
rc = policydb_init(p);
if (rc)
- goto out;
+ return rc;
/* Read the magic number and string length. */
rc = next_entry(buf, fp, sizeof(u32) * 2);
- if (rc < 0)
+ if (rc)
goto bad;
+ rc = -EINVAL;
if (le32_to_cpu(buf[0]) != POLICYDB_MAGIC) {
printk(KERN_ERR "SELinux: policydb magic number 0x%x does "
"not match expected magic number 0x%x\n",
@@ -1709,6 +2309,7 @@ int policydb_read(struct policydb *p, void *fp)
goto bad;
}
+ rc = -EINVAL;
len = le32_to_cpu(buf[1]);
if (len != strlen(POLICYDB_STRING)) {
printk(KERN_ERR "SELinux: policydb string length %d does not "
@@ -1716,19 +2317,23 @@ int policydb_read(struct policydb *p, void *fp)
len, strlen(POLICYDB_STRING));
goto bad;
}
+
+ rc = -ENOMEM;
policydb_str = kmalloc(len + 1, GFP_KERNEL);
if (!policydb_str) {
printk(KERN_ERR "SELinux: unable to allocate memory for policydb "
"string of length %d\n", len);
- rc = -ENOMEM;
goto bad;
}
+
rc = next_entry(policydb_str, fp, len);
- if (rc < 0) {
+ if (rc) {
printk(KERN_ERR "SELinux: truncated policydb string identifier\n");
kfree(policydb_str);
goto bad;
}
+
+ rc = -EINVAL;
policydb_str[len] = '\0';
if (strcmp(policydb_str, POLICYDB_STRING)) {
printk(KERN_ERR "SELinux: policydb string %s does not match "
@@ -1740,11 +2345,12 @@ int policydb_read(struct policydb *p, void *fp)
kfree(policydb_str);
policydb_str = NULL;
- /* Read the version, config, and table sizes. */
+ /* Read the version and table sizes. */
rc = next_entry(buf, fp, sizeof(u32)*4);
- if (rc < 0)
+ if (rc)
goto bad;
+ rc = -EINVAL;
p->policyvers = le32_to_cpu(buf[0]);
if (p->policyvers < POLICYDB_VERSION_MIN ||
p->policyvers > POLICYDB_VERSION_MAX) {
@@ -1755,38 +2361,32 @@ int policydb_read(struct policydb *p, void *fp)
}
if ((le32_to_cpu(buf[1]) & POLICYDB_CONFIG_MLS)) {
- if (ss_initialized && !selinux_mls_enabled) {
- printk(KERN_ERR "SELinux: Cannot switch between non-MLS"
- " and MLS policies\n");
- goto bad;
- }
- selinux_mls_enabled = 1;
- config |= POLICYDB_CONFIG_MLS;
+ p->mls_enabled = 1;
+ rc = -EINVAL;
if (p->policyvers < POLICYDB_VERSION_MLS) {
printk(KERN_ERR "SELinux: security policydb version %d "
"(MLS) not backwards compatible\n",
p->policyvers);
goto bad;
}
- } else {
- if (ss_initialized && selinux_mls_enabled) {
- printk(KERN_ERR "SELinux: Cannot switch between MLS and"
- " non-MLS policies\n");
- goto bad;
- }
}
p->reject_unknown = !!(le32_to_cpu(buf[1]) & REJECT_UNKNOWN);
p->allow_unknown = !!(le32_to_cpu(buf[1]) & ALLOW_UNKNOWN);
- if (p->policyvers >= POLICYDB_VERSION_POLCAP &&
- ebitmap_read(&p->policycaps, fp) != 0)
- goto bad;
+ if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+ rc = ebitmap_read(&p->policycaps, fp);
+ if (rc)
+ goto bad;
+ }
- if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE &&
- ebitmap_read(&p->permissive_map, fp) != 0)
- goto bad;
+ if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+ rc = ebitmap_read(&p->permissive_map, fp);
+ if (rc)
+ goto bad;
+ }
+ rc = -EINVAL;
info = policydb_lookup_compat(p->policyvers);
if (!info) {
printk(KERN_ERR "SELinux: unable to find policy compat info "
@@ -1794,6 +2394,7 @@ int policydb_read(struct policydb *p, void *fp)
goto bad;
}
+ rc = -EINVAL;
if (le32_to_cpu(buf[2]) != info->sym_num ||
le32_to_cpu(buf[3]) != info->ocon_num) {
printk(KERN_ERR "SELinux: policydb table sizes (%d,%d) do "
@@ -1805,7 +2406,7 @@ int policydb_read(struct policydb *p, void *fp)
for (i = 0; i < info->sym_num; i++) {
rc = next_entry(buf, fp, sizeof(u32)*2);
- if (rc < 0)
+ if (rc)
goto bad;
nprim = le32_to_cpu(buf[0]);
nel = le32_to_cpu(buf[1]);
@@ -1818,6 +2419,11 @@ int policydb_read(struct policydb *p, void *fp)
p->symtab[i].nprim = nprim;
}
+ rc = -EINVAL;
+ p->process_class = string_to_security_class(p, "process");
+ if (!p->process_class)
+ goto bad;
+
rc = avtab_read(&p->te_avtab, fp, p);
if (rc)
goto bad;
@@ -1829,366 +2435,1081 @@ int policydb_read(struct policydb *p, void *fp)
}
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
+ if (rc)
goto bad;
nel = le32_to_cpu(buf[0]);
ltr = NULL;
for (i = 0; i < nel; i++) {
+ rc = -ENOMEM;
tr = kzalloc(sizeof(*tr), GFP_KERNEL);
- if (!tr) {
- rc = -ENOMEM;
+ if (!tr)
goto bad;
- }
if (ltr)
ltr->next = tr;
else
p->role_tr = tr;
rc = next_entry(buf, fp, sizeof(u32)*3);
- if (rc < 0)
+ if (rc)
goto bad;
+
+ rc = -EINVAL;
tr->role = le32_to_cpu(buf[0]);
tr->type = le32_to_cpu(buf[1]);
tr->new_role = le32_to_cpu(buf[2]);
+ if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc)
+ goto bad;
+ tr->tclass = le32_to_cpu(buf[0]);
+ } else
+ tr->tclass = p->process_class;
+
if (!policydb_role_isvalid(p, tr->role) ||
!policydb_type_isvalid(p, tr->type) ||
- !policydb_role_isvalid(p, tr->new_role)) {
- rc = -EINVAL;
+ !policydb_class_isvalid(p, tr->tclass) ||
+ !policydb_role_isvalid(p, tr->new_role))
goto bad;
- }
ltr = tr;
}
rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
+ if (rc)
goto bad;
nel = le32_to_cpu(buf[0]);
lra = NULL;
for (i = 0; i < nel; i++) {
+ rc = -ENOMEM;
ra = kzalloc(sizeof(*ra), GFP_KERNEL);
- if (!ra) {
- rc = -ENOMEM;
+ if (!ra)
goto bad;
- }
if (lra)
lra->next = ra;
else
p->role_allow = ra;
rc = next_entry(buf, fp, sizeof(u32)*2);
- if (rc < 0)
+ if (rc)
goto bad;
+
+ rc = -EINVAL;
ra->role = le32_to_cpu(buf[0]);
ra->new_role = le32_to_cpu(buf[1]);
if (!policydb_role_isvalid(p, ra->role) ||
- !policydb_role_isvalid(p, ra->new_role)) {
- rc = -EINVAL;
+ !policydb_role_isvalid(p, ra->new_role))
goto bad;
- }
lra = ra;
}
- rc = policydb_index_classes(p);
+ rc = filename_trans_read(p, fp);
if (rc)
goto bad;
- rc = policydb_index_others(p);
+ rc = policydb_index(p);
if (rc)
goto bad;
- p->process_class = string_to_security_class(p, "process");
- if (!p->process_class)
- goto bad;
- p->process_trans_perms = string_to_av_perm(p, p->process_class,
- "transition");
- p->process_trans_perms |= string_to_av_perm(p, p->process_class,
- "dyntransition");
+ rc = -EINVAL;
+ p->process_trans_perms = string_to_av_perm(p, p->process_class, "transition");
+ p->process_trans_perms |= string_to_av_perm(p, p->process_class, "dyntransition");
if (!p->process_trans_perms)
goto bad;
- for (i = 0; i < info->ocon_num; i++) {
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- nel = le32_to_cpu(buf[0]);
- l = NULL;
- for (j = 0; j < nel; j++) {
- c = kzalloc(sizeof(*c), GFP_KERNEL);
- if (!c) {
- rc = -ENOMEM;
+ rc = ocontext_read(p, info, fp);
+ if (rc)
+ goto bad;
+
+ rc = genfs_read(p, fp);
+ if (rc)
+ goto bad;
+
+ rc = range_read(p, fp);
+ if (rc)
+ goto bad;
+
+ rc = -ENOMEM;
+ p->type_attr_map_array = flex_array_alloc(sizeof(struct ebitmap),
+ p->p_types.nprim,
+ GFP_KERNEL | __GFP_ZERO);
+ if (!p->type_attr_map_array)
+ goto bad;
+
+ /* preallocate so we don't have to worry about the put ever failing */
+ rc = flex_array_prealloc(p->type_attr_map_array, 0, p->p_types.nprim,
+ GFP_KERNEL | __GFP_ZERO);
+ if (rc)
+ goto bad;
+
+ for (i = 0; i < p->p_types.nprim; i++) {
+ struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
+
+ BUG_ON(!e);
+ ebitmap_init(e);
+ if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
+ rc = ebitmap_read(e, fp);
+ if (rc)
goto bad;
+ }
+ /* add the type itself as the degenerate case */
+ rc = ebitmap_set_bit(e, i, 1);
+ if (rc)
+ goto bad;
+ }
+
+ rc = policydb_bounds_sanity_check(p);
+ if (rc)
+ goto bad;
+
+ rc = 0;
+out:
+ return rc;
+bad:
+ policydb_destroy(p);
+ goto out;
+}
+
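For reference, a minimal sketch (not part of the patch) of the flex_array allocate/prealloc/get pattern that the read path above now uses for type_attr_map_array; the helper name and standalone framing are illustrative, and it assumes the SELinux-internal ebitmap helpers from ebitmap.h:

	#include <linux/flex_array.h>
	#include <linux/slab.h>
	#include "ebitmap.h"

	static struct flex_array *alloc_type_attr_map(u32 nprim)
	{
		struct flex_array *fa;
		u32 i;

		fa = flex_array_alloc(sizeof(struct ebitmap), nprim,
				      GFP_KERNEL | __GFP_ZERO);
		if (!fa)
			return NULL;

		/* preallocate every slot so flex_array_get() below cannot fail */
		if (flex_array_prealloc(fa, 0, nprim, GFP_KERNEL | __GFP_ZERO)) {
			flex_array_free(fa);
			return NULL;
		}

		for (i = 0; i < nprim; i++) {
			struct ebitmap *e = flex_array_get(fa, i);

			BUG_ON(!e);
			ebitmap_init(e);
		}
		return fa;
	}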
+/*
+ * Write a MLS level structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_level(struct mls_level *l, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ buf[0] = cpu_to_le32(l->sens);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&l->cat, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * Write a MLS range structure to a policydb binary
+ * representation file.
+ */
+static int mls_write_range_helper(struct mls_range *r, void *fp)
+{
+ __le32 buf[3];
+ size_t items;
+ int rc, eq;
+
+ eq = mls_level_eq(&r->level[1], &r->level[0]);
+
+ if (eq)
+ items = 2;
+ else
+ items = 3;
+ buf[0] = cpu_to_le32(items-1);
+ buf[1] = cpu_to_le32(r->level[0].sens);
+ if (!eq)
+ buf[2] = cpu_to_le32(r->level[1].sens);
+
+ BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
+
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&r->level[0].cat, fp);
+ if (rc)
+ return rc;
+ if (!eq) {
+ rc = ebitmap_write(&r->level[1].cat, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int sens_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct level_datum *levdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[2];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(levdatum->isalias);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_level(levdatum->level, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int cat_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct cat_datum *catdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(catdatum->value);
+ buf[2] = cpu_to_le32(catdatum->isalias);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int role_trans_write(struct policydb *p, void *fp)
+{
+ struct role_trans *r = p->role_tr;
+ struct role_trans *tr;
+ u32 buf[3];
+ size_t nel;
+ int rc;
+
+ nel = 0;
+ for (tr = r; tr; tr = tr->next)
+ nel++;
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (tr = r; tr; tr = tr->next) {
+ buf[0] = cpu_to_le32(tr->role);
+ buf[1] = cpu_to_le32(tr->type);
+ buf[2] = cpu_to_le32(tr->new_role);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >= POLICYDB_VERSION_ROLETRANS) {
+ buf[0] = cpu_to_le32(tr->tclass);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ }
+ }
+
+ return 0;
+}
+
+static int role_allow_write(struct role_allow *r, void *fp)
+{
+ struct role_allow *ra;
+ u32 buf[2];
+ size_t nel;
+ int rc;
+
+ nel = 0;
+ for (ra = r; ra; ra = ra->next)
+ nel++;
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (ra = r; ra; ra = ra->next) {
+ buf[0] = cpu_to_le32(ra->role);
+ buf[1] = cpu_to_le32(ra->new_role);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ }
+ return 0;
+}
+
+/*
+ * Write a security context structure
+ * to a policydb binary representation file.
+ */
+static int context_write(struct policydb *p, struct context *c,
+ void *fp)
+{
+ int rc;
+ __le32 buf[3];
+
+ buf[0] = cpu_to_le32(c->user);
+ buf[1] = cpu_to_le32(c->role);
+ buf[2] = cpu_to_le32(c->type);
+
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_range_helper(&c->range, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * The following *_write functions are used to
+ * write the symbol data to a policy database
+ * binary representation file.
+ */
+
+static int perm_write(void *vkey, void *datum, void *fp)
+{
+ char *key = vkey;
+ struct perm_datum *perdatum = datum;
+ __le32 buf[2];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(perdatum->value);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int common_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct common_datum *comdatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ __le32 buf[4];
+ size_t len;
+ int rc;
+
+ len = strlen(key);
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(comdatum->value);
+ buf[2] = cpu_to_le32(comdatum->permissions.nprim);
+ buf[3] = cpu_to_le32(comdatum->permissions.table->nel);
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = hashtab_map(comdatum->permissions.table, perm_write, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int type_set_write(struct type_set *t, void *fp)
+{
+ int rc;
+ __le32 buf[1];
+
+ if (ebitmap_write(&t->types, fp))
+ return -EINVAL;
+ if (ebitmap_write(&t->negset, fp))
+ return -EINVAL;
+
+ buf[0] = cpu_to_le32(t->flags);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int write_cons_helper(struct policydb *p, struct constraint_node *node,
+ void *fp)
+{
+ struct constraint_node *c;
+ struct constraint_expr *e;
+ __le32 buf[3];
+ u32 nel;
+ int rc;
+
+ for (c = node; c; c = c->next) {
+ nel = 0;
+ for (e = c->expr; e; e = e->next)
+ nel++;
+ buf[0] = cpu_to_le32(c->permissions);
+ buf[1] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ for (e = c->expr; e; e = e->next) {
+ buf[0] = cpu_to_le32(e->expr_type);
+ buf[1] = cpu_to_le32(e->attr);
+ buf[2] = cpu_to_le32(e->op);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
+ if (rc)
+ return rc;
+
+ switch (e->expr_type) {
+ case CEXPR_NAMES:
+ rc = ebitmap_write(&e->names, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >=
+ POLICYDB_VERSION_CONSTRAINT_NAMES) {
+ rc = type_set_write(e->type_names, fp);
+ if (rc)
+ return rc;
+ }
+ break;
+ default:
+ break;
}
- if (l)
- l->next = c;
- else
- p->ocontexts[i] = c;
- l = c;
- rc = -EINVAL;
+ }
+ }
+
+ return 0;
+}
+
+static int class_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct class_datum *cladatum = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ struct constraint_node *c;
+ __le32 buf[6];
+ u32 ncons;
+ size_t len, len2;
+ int rc;
+
+ len = strlen(key);
+ if (cladatum->comkey)
+ len2 = strlen(cladatum->comkey);
+ else
+ len2 = 0;
+
+ ncons = 0;
+ for (c = cladatum->constraints; c; c = c->next)
+ ncons++;
+
+ buf[0] = cpu_to_le32(len);
+ buf[1] = cpu_to_le32(len2);
+ buf[2] = cpu_to_le32(cladatum->value);
+ buf[3] = cpu_to_le32(cladatum->permissions.nprim);
+ if (cladatum->permissions.table)
+ buf[4] = cpu_to_le32(cladatum->permissions.table->nel);
+ else
+ buf[4] = 0;
+ buf[5] = cpu_to_le32(ncons);
+ rc = put_entry(buf, sizeof(u32), 6, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ if (cladatum->comkey) {
+ rc = put_entry(cladatum->comkey, 1, len2, fp);
+ if (rc)
+ return rc;
+ }
+
+ rc = hashtab_map(cladatum->permissions.table, perm_write, fp);
+ if (rc)
+ return rc;
+
+ rc = write_cons_helper(p, cladatum->constraints, fp);
+ if (rc)
+ return rc;
+
+ /* write out the validatetrans rule */
+ ncons = 0;
+ for (c = cladatum->validatetrans; c; c = c->next)
+ ncons++;
+
+ buf[0] = cpu_to_le32(ncons);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = write_cons_helper(p, cladatum->validatetrans, fp);
+ if (rc)
+ return rc;
+
+ if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+ buf[0] = cpu_to_le32(cladatum->default_user);
+ buf[1] = cpu_to_le32(cladatum->default_role);
+ buf[2] = cpu_to_le32(cladatum->default_range);
+
+ rc = put_entry(buf, sizeof(uint32_t), 3, fp);
+ if (rc)
+ return rc;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+ buf[0] = cpu_to_le32(cladatum->default_type);
+ rc = put_entry(buf, sizeof(uint32_t), 1, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
+}
+
+static int role_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct role_datum *role = datum;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ __le32 buf[3];
+ size_t items, len;
+ int rc;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(role->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ buf[items++] = cpu_to_le32(role->bounds);
+
+ BUG_ON(items > (sizeof(buf)/sizeof(buf[0])));
+
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&role->dominates, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&role->types, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int type_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct type_datum *typdatum = datum;
+ struct policy_data *pd = ptr;
+ struct policydb *p = pd->p;
+ void *fp = pd->fp;
+ __le32 buf[4];
+ int rc;
+ size_t items, len;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(typdatum->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY) {
+ u32 properties = 0;
+
+ if (typdatum->primary)
+ properties |= TYPEDATUM_PROPERTY_PRIMARY;
+
+ if (typdatum->attribute)
+ properties |= TYPEDATUM_PROPERTY_ATTRIBUTE;
+
+ buf[items++] = cpu_to_le32(properties);
+ buf[items++] = cpu_to_le32(typdatum->bounds);
+ } else {
+ buf[items++] = cpu_to_le32(typdatum->primary);
+ }
+ BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int user_write(void *vkey, void *datum, void *ptr)
+{
+ char *key = vkey;
+ struct user_datum *usrdatum = datum;
+ struct policy_data *pd = ptr;
+ struct policydb *p = pd->p;
+ void *fp = pd->fp;
+ __le32 buf[3];
+ size_t items, len;
+ int rc;
+
+ len = strlen(key);
+ items = 0;
+ buf[items++] = cpu_to_le32(len);
+ buf[items++] = cpu_to_le32(usrdatum->value);
+ if (p->policyvers >= POLICYDB_VERSION_BOUNDARY)
+ buf[items++] = cpu_to_le32(usrdatum->bounds);
+ BUG_ON(items > (sizeof(buf) / sizeof(buf[0])));
+ rc = put_entry(buf, sizeof(u32), items, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(key, 1, len, fp);
+ if (rc)
+ return rc;
+
+ rc = ebitmap_write(&usrdatum->roles, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_range_helper(&usrdatum->range, fp);
+ if (rc)
+ return rc;
+
+ rc = mls_write_level(&usrdatum->dfltlevel, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int (*write_f[SYM_NUM]) (void *key, void *datum,
+ void *datap) =
+{
+ common_write,
+ class_write,
+ role_write,
+ type_write,
+ user_write,
+ cond_write_bool,
+ sens_write,
+ cat_write,
+};
+
+static int ocontext_write(struct policydb *p, struct policydb_compat_info *info,
+ void *fp)
+{
+ unsigned int i, j, rc;
+ size_t nel, len;
+ __le32 buf[3];
+ u32 nodebuf[8];
+ struct ocontext *c;
+ for (i = 0; i < info->ocon_num; i++) {
+ nel = 0;
+ for (c = p->ocontexts[i]; c; c = c->next)
+ nel++;
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (c = p->ocontexts[i]; c; c = c->next) {
switch (i) {
case OCON_ISID:
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- c->sid[0] = le32_to_cpu(buf[0]);
- rc = context_read_and_validate(&c->context[0], p, fp);
+ buf[0] = cpu_to_le32(c->sid[0]);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
if (rc)
- goto bad;
+ return rc;
break;
case OCON_FS:
case OCON_NETIF:
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- len = le32_to_cpu(buf[0]);
- c->u.name = kmalloc(len + 1, GFP_KERNEL);
- if (!c->u.name) {
- rc = -ENOMEM;
- goto bad;
- }
- rc = next_entry(c->u.name, fp, len);
- if (rc < 0)
- goto bad;
- c->u.name[len] = 0;
- rc = context_read_and_validate(&c->context[0], p, fp);
+ len = strlen(c->u.name);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
if (rc)
- goto bad;
- rc = context_read_and_validate(&c->context[1], p, fp);
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
if (rc)
- goto bad;
+ return rc;
+ rc = context_write(p, &c->context[1], fp);
+ if (rc)
+ return rc;
break;
case OCON_PORT:
- rc = next_entry(buf, fp, sizeof(u32)*3);
- if (rc < 0)
- goto bad;
- c->u.port.protocol = le32_to_cpu(buf[0]);
- c->u.port.low_port = le32_to_cpu(buf[1]);
- c->u.port.high_port = le32_to_cpu(buf[2]);
- rc = context_read_and_validate(&c->context[0], p, fp);
+ buf[0] = cpu_to_le32(c->u.port.protocol);
+ buf[1] = cpu_to_le32(c->u.port.low_port);
+ buf[2] = cpu_to_le32(c->u.port.high_port);
+ rc = put_entry(buf, sizeof(u32), 3, fp);
if (rc)
- goto bad;
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
break;
case OCON_NODE:
- rc = next_entry(nodebuf, fp, sizeof(u32) * 2);
- if (rc < 0)
- goto bad;
- c->u.node.addr = nodebuf[0]; /* network order */
- c->u.node.mask = nodebuf[1]; /* network order */
- rc = context_read_and_validate(&c->context[0], p, fp);
+ nodebuf[0] = c->u.node.addr; /* network order */
+ nodebuf[1] = c->u.node.mask; /* network order */
+ rc = put_entry(nodebuf, sizeof(u32), 2, fp);
if (rc)
- goto bad;
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
break;
case OCON_FSUSE:
- rc = next_entry(buf, fp, sizeof(u32)*2);
- if (rc < 0)
- goto bad;
- c->v.behavior = le32_to_cpu(buf[0]);
- if (c->v.behavior > SECURITY_FS_USE_NONE)
- goto bad;
- len = le32_to_cpu(buf[1]);
- c->u.name = kmalloc(len + 1, GFP_KERNEL);
- if (!c->u.name) {
- rc = -ENOMEM;
- goto bad;
- }
- rc = next_entry(c->u.name, fp, len);
- if (rc < 0)
- goto bad;
- c->u.name[len] = 0;
- rc = context_read_and_validate(&c->context[0], p, fp);
+ buf[0] = cpu_to_le32(c->v.behavior);
+ len = strlen(c->u.name);
+ buf[1] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
if (rc)
- goto bad;
+ return rc;
break;
- case OCON_NODE6: {
- int k;
-
- rc = next_entry(nodebuf, fp, sizeof(u32) * 8);
- if (rc < 0)
- goto bad;
- for (k = 0; k < 4; k++)
- c->u.node6.addr[k] = nodebuf[k];
- for (k = 0; k < 4; k++)
- c->u.node6.mask[k] = nodebuf[k+4];
- if (context_read_and_validate(&c->context[0], p, fp))
- goto bad;
+ case OCON_NODE6:
+ for (j = 0; j < 4; j++)
+ nodebuf[j] = c->u.node6.addr[j]; /* network order */
+ for (j = 0; j < 4; j++)
+ nodebuf[j + 4] = c->u.node6.mask[j]; /* network order */
+ rc = put_entry(nodebuf, sizeof(u32), 8, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
break;
}
- }
}
}
+ return 0;
+}
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- nel = le32_to_cpu(buf[0]);
- genfs_p = NULL;
- rc = -EINVAL;
- for (i = 0; i < nel; i++) {
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- len = le32_to_cpu(buf[0]);
- newgenfs = kzalloc(sizeof(*newgenfs), GFP_KERNEL);
- if (!newgenfs) {
- rc = -ENOMEM;
- goto bad;
- }
+static int genfs_write(struct policydb *p, void *fp)
+{
+ struct genfs *genfs;
+ struct ocontext *c;
+ size_t len;
+ __le32 buf[1];
+ int rc;
- newgenfs->fstype = kmalloc(len + 1, GFP_KERNEL);
- if (!newgenfs->fstype) {
- rc = -ENOMEM;
- kfree(newgenfs);
- goto bad;
- }
- rc = next_entry(newgenfs->fstype, fp, len);
- if (rc < 0) {
- kfree(newgenfs->fstype);
- kfree(newgenfs);
- goto bad;
- }
- newgenfs->fstype[len] = 0;
- for (genfs_p = NULL, genfs = p->genfs; genfs;
- genfs_p = genfs, genfs = genfs->next) {
- if (strcmp(newgenfs->fstype, genfs->fstype) == 0) {
- printk(KERN_ERR "SELinux: dup genfs "
- "fstype %s\n", newgenfs->fstype);
- kfree(newgenfs->fstype);
- kfree(newgenfs);
- goto bad;
- }
- if (strcmp(newgenfs->fstype, genfs->fstype) < 0)
- break;
+ len = 0;
+ for (genfs = p->genfs; genfs; genfs = genfs->next)
+ len++;
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (genfs = p->genfs; genfs; genfs = genfs->next) {
+ len = strlen(genfs->fstype);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(genfs->fstype, 1, len, fp);
+ if (rc)
+ return rc;
+ len = 0;
+ for (c = genfs->head; c; c = c->next)
+ len++;
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ for (c = genfs->head; c; c = c->next) {
+ len = strlen(c->u.name);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(c->u.name, 1, len, fp);
+ if (rc)
+ return rc;
+ buf[0] = cpu_to_le32(c->v.sclass);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+ rc = context_write(p, &c->context[0], fp);
+ if (rc)
+ return rc;
}
- newgenfs->next = genfs;
- if (genfs_p)
- genfs_p->next = newgenfs;
- else
- p->genfs = newgenfs;
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- nel2 = le32_to_cpu(buf[0]);
- for (j = 0; j < nel2; j++) {
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- len = le32_to_cpu(buf[0]);
+ }
+ return 0;
+}
- newc = kzalloc(sizeof(*newc), GFP_KERNEL);
- if (!newc) {
- rc = -ENOMEM;
- goto bad;
- }
+static int hashtab_cnt(void *key, void *data, void *ptr)
+{
+ int *cnt = ptr;
+ *cnt = *cnt + 1;
- newc->u.name = kmalloc(len + 1, GFP_KERNEL);
- if (!newc->u.name) {
- rc = -ENOMEM;
- goto bad_newc;
- }
- rc = next_entry(newc->u.name, fp, len);
- if (rc < 0)
- goto bad_newc;
- newc->u.name[len] = 0;
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad_newc;
- newc->v.sclass = le32_to_cpu(buf[0]);
- if (context_read_and_validate(&newc->context[0], p, fp))
- goto bad_newc;
- for (l = NULL, c = newgenfs->head; c;
- l = c, c = c->next) {
- if (!strcmp(newc->u.name, c->u.name) &&
- (!c->v.sclass || !newc->v.sclass ||
- newc->v.sclass == c->v.sclass)) {
- printk(KERN_ERR "SELinux: dup genfs "
- "entry (%s,%s)\n",
- newgenfs->fstype, c->u.name);
- goto bad_newc;
- }
- len = strlen(newc->u.name);
- len2 = strlen(c->u.name);
- if (len > len2)
- break;
- }
+ return 0;
+}
- newc->next = c;
- if (l)
- l->next = newc;
- else
- newgenfs->head = newc;
- }
+static int range_write_helper(void *key, void *data, void *ptr)
+{
+ __le32 buf[2];
+ struct range_trans *rt = key;
+ struct mls_range *r = data;
+ struct policy_data *pd = ptr;
+ void *fp = pd->fp;
+ struct policydb *p = pd->p;
+ int rc;
+
+ buf[0] = cpu_to_le32(rt->source_type);
+ buf[1] = cpu_to_le32(rt->target_type);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ if (p->policyvers >= POLICYDB_VERSION_RANGETRANS) {
+ buf[0] = cpu_to_le32(rt->target_class);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
}
+ rc = mls_write_range_helper(r, fp);
+ if (rc)
+ return rc;
- if (p->policyvers >= POLICYDB_VERSION_MLS) {
- int new_rangetr = p->policyvers >= POLICYDB_VERSION_RANGETRANS;
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- nel = le32_to_cpu(buf[0]);
- lrt = NULL;
- for (i = 0; i < nel; i++) {
- rt = kzalloc(sizeof(*rt), GFP_KERNEL);
- if (!rt) {
- rc = -ENOMEM;
- goto bad;
- }
- if (lrt)
- lrt->next = rt;
- else
- p->range_tr = rt;
- rc = next_entry(buf, fp, (sizeof(u32) * 2));
- if (rc < 0)
- goto bad;
- rt->source_type = le32_to_cpu(buf[0]);
- rt->target_type = le32_to_cpu(buf[1]);
- if (new_rangetr) {
- rc = next_entry(buf, fp, sizeof(u32));
- if (rc < 0)
- goto bad;
- rt->target_class = le32_to_cpu(buf[0]);
- } else
- rt->target_class = p->process_class;
- if (!policydb_type_isvalid(p, rt->source_type) ||
- !policydb_type_isvalid(p, rt->target_type) ||
- !policydb_class_isvalid(p, rt->target_class)) {
- rc = -EINVAL;
- goto bad;
- }
- rc = mls_read_range_helper(&rt->target_range, fp);
- if (rc)
- goto bad;
- if (!mls_range_isvalid(p, &rt->target_range)) {
- printk(KERN_WARNING "SELinux: rangetrans: invalid range\n");
- goto bad;
- }
- lrt = rt;
- }
+ return 0;
+}
+
+static int range_write(struct policydb *p, void *fp)
+{
+ __le32 buf[1];
+ int rc, nel;
+ struct policy_data pd;
+
+ pd.p = p;
+ pd.fp = fp;
+
+ /* count the number of entries in the hashtab */
+ nel = 0;
+ rc = hashtab_map(p->range_tr, hashtab_cnt, &nel);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ /* actually write all of the entries */
+ rc = hashtab_map(p->range_tr, range_write_helper, &pd);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int filename_write_helper(void *key, void *data, void *ptr)
+{
+ __le32 buf[4];
+ struct filename_trans *ft = key;
+ struct filename_trans_datum *otype = data;
+ void *fp = ptr;
+ int rc;
+ u32 len;
+
+ len = strlen(ft->name);
+ buf[0] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = put_entry(ft->name, sizeof(char), len, fp);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(ft->stype);
+ buf[1] = cpu_to_le32(ft->ttype);
+ buf[2] = cpu_to_le32(ft->tclass);
+ buf[3] = cpu_to_le32(otype->otype);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+static int filename_trans_write(struct policydb *p, void *fp)
+{
+ u32 nel;
+ __le32 buf[1];
+ int rc;
+
+ if (p->policyvers < POLICYDB_VERSION_FILENAME_TRANS)
+ return 0;
+
+ nel = 0;
+ rc = hashtab_map(p->filename_trans, hashtab_cnt, &nel);
+ if (rc)
+ return rc;
+
+ buf[0] = cpu_to_le32(nel);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return rc;
+
+ rc = hashtab_map(p->filename_trans, filename_write_helper, fp);
+ if (rc)
+ return rc;
+
+ return 0;
+}
+
+/*
+ * Write the configuration data in a policy database
+ * structure to a policy database binary representation
+ * file.
+ */
+int policydb_write(struct policydb *p, void *fp)
+{
+ unsigned int i, num_syms;
+ int rc;
+ __le32 buf[4];
+ u32 config;
+ size_t len;
+ struct policydb_compat_info *info;
+
+ /*
+ * refuse to write policy older than compressed avtab
+ * to simplify the writer. There are other tests dropped
+ * since we assume this throughout the writer code. Be
+ * careful if you ever try to remove this restriction
+ */
+ if (p->policyvers < POLICYDB_VERSION_AVTAB) {
+ printk(KERN_ERR "SELinux: refusing to write policy version %d."
+ " Because it is less than version %d\n", p->policyvers,
+ POLICYDB_VERSION_AVTAB);
+ return -EINVAL;
}
- p->type_attr_map = kmalloc(p->p_types.nprim*sizeof(struct ebitmap), GFP_KERNEL);
- if (!p->type_attr_map)
- goto bad;
+ config = 0;
+ if (p->mls_enabled)
+ config |= POLICYDB_CONFIG_MLS;
- for (i = 0; i < p->p_types.nprim; i++) {
- ebitmap_init(&p->type_attr_map[i]);
- if (p->policyvers >= POLICYDB_VERSION_AVTAB) {
- if (ebitmap_read(&p->type_attr_map[i], fp))
- goto bad;
- }
- /* add the type itself as the degenerate case */
- if (ebitmap_set_bit(&p->type_attr_map[i], i, 1))
- goto bad;
+ if (p->reject_unknown)
+ config |= REJECT_UNKNOWN;
+ if (p->allow_unknown)
+ config |= ALLOW_UNKNOWN;
+
+ /* Write the magic number and string identifiers. */
+ buf[0] = cpu_to_le32(POLICYDB_MAGIC);
+ len = strlen(POLICYDB_STRING);
+ buf[1] = cpu_to_le32(len);
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = put_entry(POLICYDB_STRING, 1, len, fp);
+ if (rc)
+ return rc;
+
+ /* Write the version, config, and table sizes. */
+ info = policydb_lookup_compat(p->policyvers);
+ if (!info) {
+ printk(KERN_ERR "SELinux: compatibility lookup failed for policy "
+ "version %d", p->policyvers);
+ return -EINVAL;
}
- rc = policydb_bounds_sanity_check(p);
+ buf[0] = cpu_to_le32(p->policyvers);
+ buf[1] = cpu_to_le32(config);
+ buf[2] = cpu_to_le32(info->sym_num);
+ buf[3] = cpu_to_le32(info->ocon_num);
+
+ rc = put_entry(buf, sizeof(u32), 4, fp);
if (rc)
- goto bad;
+ return rc;
- rc = 0;
-out:
- return rc;
-bad_newc:
- ocontext_destroy(newc, OCON_FSUSE);
-bad:
- if (!rc)
- rc = -EINVAL;
- policydb_destroy(p);
- goto out;
+ if (p->policyvers >= POLICYDB_VERSION_POLCAP) {
+ rc = ebitmap_write(&p->policycaps, fp);
+ if (rc)
+ return rc;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_PERMISSIVE) {
+ rc = ebitmap_write(&p->permissive_map, fp);
+ if (rc)
+ return rc;
+ }
+
+ num_syms = info->sym_num;
+ for (i = 0; i < num_syms; i++) {
+ struct policy_data pd;
+
+ pd.fp = fp;
+ pd.p = p;
+
+ buf[0] = cpu_to_le32(p->symtab[i].nprim);
+ buf[1] = cpu_to_le32(p->symtab[i].table->nel);
+
+ rc = put_entry(buf, sizeof(u32), 2, fp);
+ if (rc)
+ return rc;
+ rc = hashtab_map(p->symtab[i].table, write_f[i], &pd);
+ if (rc)
+ return rc;
+ }
+
+ rc = avtab_write(p, &p->te_avtab, fp);
+ if (rc)
+ return rc;
+
+ rc = cond_write_list(p, p->cond_list, fp);
+ if (rc)
+ return rc;
+
+ rc = role_trans_write(p, fp);
+ if (rc)
+ return rc;
+
+ rc = role_allow_write(p->role_allow, fp);
+ if (rc)
+ return rc;
+
+ rc = filename_trans_write(p, fp);
+ if (rc)
+ return rc;
+
+ rc = ocontext_write(p, info, fp);
+ if (rc)
+ return rc;
+
+ rc = genfs_write(p, fp);
+ if (rc)
+ return rc;
+
+ rc = range_write(p, fp);
+ if (rc)
+ return rc;
+
+ for (i = 0; i < p->p_types.nprim; i++) {
+ struct ebitmap *e = flex_array_get(p->type_attr_map_array, i);
+
+ BUG_ON(!e);
+ rc = ebitmap_write(e, fp);
+ if (rc)
+ return rc;
+ }
+
+ return 0;
}
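Taken together, the new *_write helpers mirror the existing readers around one record convention: a 4-byte little-endian length prefix followed by the raw bytes, emitted with put_entry() and consumed with next_entry(). A minimal sketch of that convention for a single string field (helper names are illustrative, not from the patch; put_entry/next_entry are the inlines from policydb.h):

	#include <linux/slab.h>
	#include <linux/string.h>
	#include "policydb.h"

	static int write_string_record(const char *key, struct policy_file *fp)
	{
		__le32 buf[1];
		size_t len = strlen(key);
		int rc;

		buf[0] = cpu_to_le32(len);		/* length prefix, little endian */
		rc = put_entry(buf, sizeof(u32), 1, fp);
		if (rc)
			return rc;
		return put_entry(key, 1, len, fp);	/* raw bytes, no NUL on disk */
	}

	static int read_string_record(char **key, struct policy_file *fp)
	{
		__le32 buf[1];
		u32 len;
		int rc;

		rc = next_entry(buf, fp, sizeof(u32));
		if (rc)
			return rc;
		len = le32_to_cpu(buf[0]);

		*key = kmalloc(len + 1, GFP_KERNEL);
		if (!*key)
			return -ENOMEM;
		rc = next_entry(*key, fp, len);
		if (rc) {
			kfree(*key);
			return rc;
		}
		(*key)[len] = '\0';			/* readers NUL-terminate in memory */
		return 0;
	}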
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index cdcc5700946..725d5945a97 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -24,9 +24,13 @@
#ifndef _SS_POLICYDB_H_
#define _SS_POLICYDB_H_
+#include <linux/flex_array.h>
+
#include "symtab.h"
#include "avtab.h"
#include "sidtab.h"
+#include "ebitmap.h"
+#include "mls_types.h"
#include "context.h"
#include "constraint.h"
@@ -56,6 +60,20 @@ struct class_datum {
struct symtab permissions; /* class-specific permission symbol table */
struct constraint_node *constraints; /* constraints on class permissions */
struct constraint_node *validatetrans; /* special transition rules */
+/* Options for how a new object's user, role, and type should be decided */
+#define DEFAULT_SOURCE 1
+#define DEFAULT_TARGET 2
+ char default_user;
+ char default_role;
+ char default_type;
+/* Options for how a new object's range should be decided */
+#define DEFAULT_SOURCE_LOW 1
+#define DEFAULT_SOURCE_HIGH 2
+#define DEFAULT_SOURCE_LOW_HIGH 3
+#define DEFAULT_TARGET_LOW 4
+#define DEFAULT_TARGET_HIGH 5
+#define DEFAULT_TARGET_LOW_HIGH 6
+ char default_range;
};
/* Role attributes */
@@ -68,11 +86,23 @@ struct role_datum {
struct role_trans {
u32 role; /* current role */
- u32 type; /* program executable type */
+ u32 type; /* program executable type, or new object type */
+ u32 tclass; /* process class, or new object class */
u32 new_role; /* new role */
struct role_trans *next;
};
+struct filename_trans {
+ u32 stype; /* current process */
+ u32 ttype; /* parent dir context */
+ u16 tclass; /* class of new object */
+ const char *name; /* last path component */
+};
+
+struct filename_trans_datum {
+	u32 otype;	/* expected type of new object */
+};
+
struct role_allow {
u32 role; /* current role */
u32 new_role; /* new role */
@@ -113,8 +143,6 @@ struct range_trans {
u32 source_type;
u32 target_type;
u32 target_class;
- struct mls_range target_range;
- struct range_trans *next;
};
/* Boolean data type */
@@ -126,6 +154,17 @@ struct cond_bool_datum {
struct cond_node;
/*
+ * type set preserves data needed to determine constraint info from
+ * policy source. This is not used by the kernel policy but allows
+ * utilities such as audit2allow to determine constraint denials.
+ */
+struct type_set {
+ struct ebitmap types;
+ struct ebitmap negset;
+ u32 flags;
+};
+
+/*
* The configuration data includes security contexts for
* initial SIDs, unlabeled file systems, TCP and UDP port numbers,
* network interfaces, and nodes. This structure stores the
@@ -187,6 +226,8 @@ struct genfs {
/* The policy database */
struct policydb {
+ int mls_enabled;
+
/* symbol tables */
struct symtab symtab[SYM_NUM];
#define p_commons symtab[SYM_COMMONS]
@@ -199,21 +240,13 @@ struct policydb {
#define p_cats symtab[SYM_CATS]
/* symbol names indexed by (value - 1) */
- char **sym_val_to_name[SYM_NUM];
-#define p_common_val_to_name sym_val_to_name[SYM_COMMONS]
-#define p_class_val_to_name sym_val_to_name[SYM_CLASSES]
-#define p_role_val_to_name sym_val_to_name[SYM_ROLES]
-#define p_type_val_to_name sym_val_to_name[SYM_TYPES]
-#define p_user_val_to_name sym_val_to_name[SYM_USERS]
-#define p_bool_val_to_name sym_val_to_name[SYM_BOOLS]
-#define p_sens_val_to_name sym_val_to_name[SYM_LEVELS]
-#define p_cat_val_to_name sym_val_to_name[SYM_CATS]
+ struct flex_array *sym_val_to_name[SYM_NUM];
/* class, role, and user attributes indexed by (value - 1) */
struct class_datum **class_val_to_struct;
struct role_datum **role_val_to_struct;
struct user_datum **user_val_to_struct;
- struct type_datum **type_val_to_struct;
+ struct flex_array *type_val_to_struct_array;
/* type enforcement access vectors and transitions */
struct avtab te_avtab;
@@ -221,6 +254,12 @@ struct policydb {
/* role transitions */
struct role_trans *role_tr;
+ /* file transitions with the last path component */
+ /* quickly exclude lookups when parent ttype has no rules */
+ struct ebitmap filename_trans_ttypes;
+ /* actual set of filename_trans rules */
+ struct hashtab *filename_trans;
+
/* bools indexed by (value - 1) */
struct cond_bool_datum **bool_val_to_struct;
/* type enforcement conditional access vectors and transitions */
@@ -240,16 +279,19 @@ struct policydb {
fixed labeling behavior. */
struct genfs *genfs;
- /* range transitions */
- struct range_trans *range_tr;
+ /* range transitions table (range_trans_key -> mls_range) */
+ struct hashtab *range_tr;
/* type -> attribute reverse mapping */
- struct ebitmap *type_attr_map;
+ struct flex_array *type_attr_map_array;
struct ebitmap policycaps;
struct ebitmap permissive_map;
+ /* length of this policy when it was loaded */
+ size_t len;
+
unsigned int policyvers;
unsigned int reject_unknown : 1;
@@ -266,6 +308,7 @@ extern int policydb_class_isvalid(struct policydb *p, unsigned int class);
extern int policydb_type_isvalid(struct policydb *p, unsigned int type);
extern int policydb_role_isvalid(struct policydb *p, unsigned int role);
extern int policydb_read(struct policydb *p, void *fp);
+extern int policydb_write(struct policydb *p, void *fp);
#define PERM_SYMTAB_SIZE 32
@@ -286,6 +329,11 @@ struct policy_file {
size_t len;
};
+struct policy_data {
+ struct policydb *p;
+ void *fp;
+};
+
static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
{
if (bytes > fp->len)
@@ -297,6 +345,24 @@ static inline int next_entry(void *buf, struct policy_file *fp, size_t bytes)
return 0;
}
+static inline int put_entry(const void *buf, size_t bytes, int num, struct policy_file *fp)
+{
+ size_t len = bytes * num;
+
+ memcpy(fp->data, buf, len);
+ fp->data += len;
+ fp->len -= len;
+
+ return 0;
+}
+
+static inline char *sym_name(struct policydb *p, unsigned int sym_num, unsigned int element_nr)
+{
+ struct flex_array *fa = p->sym_val_to_name[sym_num];
+
+ return flex_array_get_ptr(fa, element_nr);
+}
+
extern u16 string_to_security_class(struct policydb *p, const char *name);
extern u32 string_to_av_perm(struct policydb *p, u16 tclass, const char *name);
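With sym_name() in place, code that used the removed p_*_val_to_name arrays switches to the flex_array-backed lookup. A minimal before/after sketch (the wrapper is hypothetical; the accessor is the one defined above):

	static const char *class_name(struct policydb *p, u16 tclass)
	{
		/* previously: return p->p_class_val_to_name[tclass - 1]; */
		return sym_name(p, SYM_CLASSES, tclass - 1);
	}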
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index b3efae204ac..4bca49414a4 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -13,7 +13,7 @@
*
* Added conditional policy language extensions
*
- * Updated: Hewlett-Packard <paul.moore@hp.com>
+ * Updated: Hewlett-Packard <paul@paul-moore.com>
*
* Added support for NetLabel
* Added support for the policy capability bitmap
@@ -26,6 +26,10 @@
*
* Added support for bounds domain and audit messaged on masked permissions
*
+ * Updated: Guido Trentalancia <guido@trentalancia.com>
+ *
+ * Added support for runtime switching of the policy type
+ *
* Copyright (C) 2008, 2009 NEC Corporation
* Copyright (C) 2006, 2007 Hewlett-Packard Development Company, L.P.
* Copyright (C) 2004-2006 Trusted Computer Solutions, Inc.
@@ -46,6 +50,8 @@
#include <linux/audit.h>
#include <linux/mutex.h>
#include <linux/selinux.h>
+#include <linux/flex_array.h>
+#include <linux/vmalloc.h>
#include <net/netlabel.h>
#include "flask.h"
@@ -64,10 +70,9 @@
#include "ebitmap.h"
#include "audit.h"
-extern void selnl_notify_policyload(u32 seqno);
-
int selinux_policycap_netpeer;
int selinux_policycap_openperm;
+int selinux_policycap_alwaysnetwork;
static DEFINE_RWLOCK(policy_rwlock);
@@ -87,11 +92,10 @@ static u32 latest_granting;
static int context_struct_to_string(struct context *context, char **scontext,
u32 *scontext_len);
-static int context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- u32 requested,
- struct av_decision *avd);
+static void context_struct_compute_av(struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd);
struct selinux_mapping {
u16 value; /* policy value */
@@ -196,21 +200,19 @@ static u16 unmap_class(u16 tclass)
return tclass;
}
-static u32 unmap_perm(u16 tclass, u32 tperm)
+/*
+ * Get kernel value for class from its policy value
+ */
+static u16 map_class(u16 pol_value)
{
- if (tclass < current_mapping_size) {
- unsigned i;
- u32 kperm = 0;
+ u16 i;
- for (i = 0; i < current_mapping[tclass].num_perms; i++)
- if (tperm & (1<<i)) {
- kperm |= current_mapping[tclass].perms[i];
- tperm &= ~(1<<i);
- }
- return kperm;
+ for (i = 1; i < current_mapping_size; i++) {
+ if (current_mapping[i].value == pol_value)
+ return i;
}
- return tperm;
+ return SECCLASS_NULL;
}
static void map_decision(u16 tclass, struct av_decision *avd,
@@ -250,6 +252,10 @@ static void map_decision(u16 tclass, struct av_decision *avd,
}
}
+int security_mls_enabled(void)
+{
+ return policydb.mls_enabled;
+}
/*
* Return the boolean value of a constraint expression
@@ -284,15 +290,15 @@ static int constraint_expr_eval(struct context *scontext,
case CEXPR_AND:
BUG_ON(sp < 1);
sp--;
- s[sp] &= s[sp+1];
+ s[sp] &= s[sp + 1];
break;
case CEXPR_OR:
BUG_ON(sp < 1);
sp--;
- s[sp] |= s[sp+1];
+ s[sp] |= s[sp + 1];
break;
case CEXPR_ATTR:
- if (sp == (CEXPR_MAXDEPTH-1))
+ if (sp == (CEXPR_MAXDEPTH - 1))
return 0;
switch (e->attr) {
case CEXPR_USER:
@@ -465,13 +471,14 @@ static void security_dump_masked_av(struct context *scontext,
char *scontext_name = NULL;
char *tcontext_name = NULL;
char *permission_names[32];
- int index, length;
+ int index;
+ u32 length;
bool need_comma = false;
if (!permissions)
return;
- tclass_name = policydb.p_class_val_to_name[tclass - 1];
+ tclass_name = sym_name(&policydb, SYM_CLASSES, tclass - 1);
tclass_dat = policydb.class_val_to_struct[tclass - 1];
common_dat = tclass_dat->comdatum;
@@ -532,18 +539,23 @@ out:
static void type_attribute_bounds_av(struct context *scontext,
struct context *tcontext,
u16 tclass,
- u32 requested,
struct av_decision *avd)
{
struct context lo_scontext;
struct context lo_tcontext;
struct av_decision lo_avd;
- struct type_datum *source
- = policydb.type_val_to_struct[scontext->type - 1];
- struct type_datum *target
- = policydb.type_val_to_struct[tcontext->type - 1];
+ struct type_datum *source;
+ struct type_datum *target;
u32 masked = 0;
+ source = flex_array_get_ptr(policydb.type_val_to_struct_array,
+ scontext->type - 1);
+ BUG_ON(!source);
+
+ target = flex_array_get_ptr(policydb.type_val_to_struct_array,
+ tcontext->type - 1);
+ BUG_ON(!target);
+
if (source->bounds) {
memset(&lo_avd, 0, sizeof(lo_avd));
@@ -553,7 +565,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(&lo_scontext,
tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -569,7 +580,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(scontext,
&lo_tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -586,7 +596,6 @@ static void type_attribute_bounds_av(struct context *scontext,
context_struct_compute_av(&lo_scontext,
&lo_tcontext,
tclass,
- requested,
&lo_avd);
if ((lo_avd.allowed & avd->allowed) == avd->allowed)
return; /* no masked permission */
@@ -607,11 +616,10 @@ static void type_attribute_bounds_av(struct context *scontext,
* Compute access vectors based on a context structure pair for
* the permissions in a particular class.
*/
-static int context_struct_compute_av(struct context *scontext,
- struct context *tcontext,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+static void context_struct_compute_av(struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct av_decision *avd)
{
struct constraint_node *constraint;
struct role_allow *ra;
@@ -622,19 +630,14 @@ static int context_struct_compute_av(struct context *scontext,
struct ebitmap_node *snode, *tnode;
unsigned int i, j;
- /*
- * Initialize the access vectors to the default values.
- */
avd->allowed = 0;
avd->auditallow = 0;
avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- avd->flags = 0;
if (unlikely(!tclass || tclass > policydb.p_classes.nprim)) {
if (printk_ratelimit())
printk(KERN_WARNING "SELinux: Invalid class %hu\n", tclass);
- return -EINVAL;
+ return;
}
tclass_datum = policydb.class_val_to_struct[tclass - 1];
@@ -645,8 +648,10 @@ static int context_struct_compute_av(struct context *scontext,
*/
avkey.target_class = tclass;
avkey.specified = AVTAB_AV;
- sattr = &policydb.type_attr_map[scontext->type - 1];
- tattr = &policydb.type_attr_map[tcontext->type - 1];
+ sattr = flex_array_get(policydb.type_attr_map_array, scontext->type - 1);
+ BUG_ON(!sattr);
+ tattr = flex_array_get(policydb.type_attr_map_array, tcontext->type - 1);
+ BUG_ON(!tattr);
ebitmap_for_each_positive_bit(sattr, snode, i) {
ebitmap_for_each_positive_bit(tattr, tnode, j) {
avkey.source_type = i + 1;
@@ -705,9 +710,7 @@ static int context_struct_compute_av(struct context *scontext,
* permission and notice it to userspace via audit.
*/
type_attribute_bounds_av(scontext, tcontext,
- tclass, requested, avd);
-
- return 0;
+ tclass, avd);
}
static int security_validtrans_handle_fail(struct context *ocontext,
@@ -718,16 +721,16 @@ static int security_validtrans_handle_fail(struct context *ocontext,
char *o = NULL, *n = NULL, *t = NULL;
u32 olen, nlen, tlen;
- if (context_struct_to_string(ocontext, &o, &olen) < 0)
+ if (context_struct_to_string(ocontext, &o, &olen))
goto out;
- if (context_struct_to_string(ncontext, &n, &nlen) < 0)
+ if (context_struct_to_string(ncontext, &n, &nlen))
goto out;
- if (context_struct_to_string(tcontext, &t, &tlen) < 0)
+ if (context_struct_to_string(tcontext, &t, &tlen))
goto out;
audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
"security_validate_transition: denied for"
" oldcontext=%s newcontext=%s taskcontext=%s tclass=%s",
- o, n, t, policydb.p_class_val_to_name[tclass-1]);
+ o, n, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
out:
kfree(o);
kfree(n);
@@ -818,10 +821,11 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
struct context *old_context, *new_context;
struct type_datum *type;
int index;
- int rc = -EINVAL;
+ int rc;
read_lock(&policy_rwlock);
+ rc = -EINVAL;
old_context = sidtab_search(&sidtab, old_sid);
if (!old_context) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
@@ -829,6 +833,7 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
goto out;
}
+ rc = -EINVAL;
new_context = sidtab_search(&sidtab, new_sid);
if (!new_context) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %u\n",
@@ -836,35 +841,34 @@ int security_bounded_transition(u32 old_sid, u32 new_sid)
goto out;
}
+ rc = 0;
/* type/domain unchanged */
- if (old_context->type == new_context->type) {
- rc = 0;
+ if (old_context->type == new_context->type)
goto out;
- }
index = new_context->type;
while (true) {
- type = policydb.type_val_to_struct[index - 1];
+ type = flex_array_get_ptr(policydb.type_val_to_struct_array,
+ index - 1);
BUG_ON(!type);
/* not bounded anymore */
- if (!type->bounds) {
- rc = -EPERM;
+ rc = -EPERM;
+ if (!type->bounds)
break;
- }
/* @newsid is bounded by @oldsid */
- if (type->bounds == old_context->type) {
- rc = 0;
+ rc = 0;
+ if (type->bounds == old_context->type)
break;
- }
+
index = type->bounds;
}
if (rc) {
char *old_name = NULL;
char *new_name = NULL;
- int length;
+ u32 length;
if (!context_struct_to_string(old_context,
&old_name, &length) &&
@@ -886,110 +890,116 @@ out:
return rc;
}
-
-static int security_compute_av_core(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+static void avd_init(struct av_decision *avd)
{
- struct context *scontext = NULL, *tcontext = NULL;
- int rc = 0;
-
- scontext = sidtab_search(&sidtab, ssid);
- if (!scontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
- __func__, ssid);
- return -EINVAL;
- }
- tcontext = sidtab_search(&sidtab, tsid);
- if (!tcontext) {
- printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
- __func__, tsid);
- return -EINVAL;
- }
-
- rc = context_struct_compute_av(scontext, tcontext, tclass,
- requested, avd);
-
- /* permissive domain? */
- if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
- avd->flags |= AVD_FLAGS_PERMISSIVE;
-
- return rc;
+ avd->allowed = 0;
+ avd->auditallow = 0;
+ avd->auditdeny = 0xffffffff;
+ avd->seqno = latest_granting;
+ avd->flags = 0;
}
+
/**
* security_compute_av - Compute access vector decisions.
* @ssid: source security identifier
* @tsid: target security identifier
* @tclass: target security class
- * @requested: requested permissions
* @avd: access vector decisions
*
* Compute a set of access vector decisions based on the
* SID pair (@ssid, @tsid) for the permissions in @tclass.
- * Return -%EINVAL if any of the parameters are invalid or %0
- * if the access vector decisions were computed successfully.
*/
-int security_compute_av(u32 ssid,
- u32 tsid,
- u16 orig_tclass,
- u32 orig_requested,
- struct av_decision *avd)
+void security_compute_av(u32 ssid,
+ u32 tsid,
+ u16 orig_tclass,
+ struct av_decision *avd)
{
u16 tclass;
- u32 requested;
- int rc;
+ struct context *scontext = NULL, *tcontext = NULL;
read_lock(&policy_rwlock);
-
+ avd_init(avd);
if (!ss_initialized)
goto allow;
- requested = unmap_perm(orig_tclass, orig_requested);
+ scontext = sidtab_search(&sidtab, ssid);
+ if (!scontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
+ }
+
+ /* permissive domain? */
+ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+ avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+ tcontext = sidtab_search(&sidtab, tsid);
+ if (!tcontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
tclass = unmap_class(orig_tclass);
if (unlikely(orig_tclass && !tclass)) {
if (policydb.allow_unknown)
goto allow;
- rc = -EINVAL;
goto out;
}
- rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
+ context_struct_compute_av(scontext, tcontext, tclass, avd);
map_decision(orig_tclass, avd, policydb.allow_unknown);
out:
read_unlock(&policy_rwlock);
- return rc;
+ return;
allow:
avd->allowed = 0xffffffff;
- avd->auditallow = 0;
- avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- avd->flags = 0;
- rc = 0;
goto out;
}
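A hypothetical caller sketch for the new void-returning interface: the error paths now leave a deny-all av_decision rather than returning -EINVAL, so callers read the result straight from the structure (the class/permission constants are the usual generated flask values; the wrapper itself is illustrative only):

	static bool may_read_file(u32 ssid, u32 tsid)
	{
		struct av_decision avd;

		security_compute_av(ssid, tsid, SECCLASS_FILE, &avd);
		if (avd.flags & AVD_FLAGS_PERMISSIVE)
			return true;	/* permissive domain: audited but allowed */
		return (avd.allowed & FILE__READ) != 0;
	}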
-int security_compute_av_user(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 requested,
- struct av_decision *avd)
+void security_compute_av_user(u32 ssid,
+ u32 tsid,
+ u16 tclass,
+ struct av_decision *avd)
{
- int rc;
+ struct context *scontext = NULL, *tcontext = NULL;
- if (!ss_initialized) {
- avd->allowed = 0xffffffff;
- avd->auditallow = 0;
- avd->auditdeny = 0xffffffff;
- avd->seqno = latest_granting;
- return 0;
+ read_lock(&policy_rwlock);
+ avd_init(avd);
+ if (!ss_initialized)
+ goto allow;
+
+ scontext = sidtab_search(&sidtab, ssid);
+ if (!scontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, ssid);
+ goto out;
}
- read_lock(&policy_rwlock);
- rc = security_compute_av_core(ssid, tsid, tclass, requested, avd);
+ /* permissive domain? */
+ if (ebitmap_get_bit(&policydb.permissive_map, scontext->type))
+ avd->flags |= AVD_FLAGS_PERMISSIVE;
+
+ tcontext = sidtab_search(&sidtab, tsid);
+ if (!tcontext) {
+ printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
+ __func__, tsid);
+ goto out;
+ }
+
+ if (unlikely(!tclass)) {
+ if (policydb.allow_unknown)
+ goto allow;
+ goto out;
+ }
+
+ context_struct_compute_av(scontext, tcontext, tclass, avd);
+ out:
read_unlock(&policy_rwlock);
- return rc;
+ return;
+allow:
+ avd->allowed = 0xffffffff;
+ goto out;
}
/*
@@ -1003,23 +1013,29 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
{
char *scontextp;
- *scontext = NULL;
+ if (scontext)
+ *scontext = NULL;
*scontext_len = 0;
if (context->len) {
*scontext_len = context->len;
- *scontext = kstrdup(context->str, GFP_ATOMIC);
- if (!(*scontext))
- return -ENOMEM;
+ if (scontext) {
+ *scontext = kstrdup(context->str, GFP_ATOMIC);
+ if (!(*scontext))
+ return -ENOMEM;
+ }
return 0;
}
/* Compute the size of the context. */
- *scontext_len += strlen(policydb.p_user_val_to_name[context->user - 1]) + 1;
- *scontext_len += strlen(policydb.p_role_val_to_name[context->role - 1]) + 1;
- *scontext_len += strlen(policydb.p_type_val_to_name[context->type - 1]) + 1;
+ *scontext_len += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) + 1;
+ *scontext_len += strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) + 1;
+ *scontext_len += strlen(sym_name(&policydb, SYM_TYPES, context->type - 1)) + 1;
*scontext_len += mls_compute_context_len(context);
+ if (!scontext)
+ return 0;
+
/* Allocate space for the context; caller must free this space. */
scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
if (!scontextp)
@@ -1030,12 +1046,12 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
* Copy the user name, role name and type name into the context.
*/
sprintf(scontextp, "%s:%s:%s",
- policydb.p_user_val_to_name[context->user - 1],
- policydb.p_role_val_to_name[context->role - 1],
- policydb.p_type_val_to_name[context->type - 1]);
- scontextp += strlen(policydb.p_user_val_to_name[context->user - 1]) +
- 1 + strlen(policydb.p_role_val_to_name[context->role - 1]) +
- 1 + strlen(policydb.p_type_val_to_name[context->type - 1]);
+ sym_name(&policydb, SYM_USERS, context->user - 1),
+ sym_name(&policydb, SYM_ROLES, context->role - 1),
+ sym_name(&policydb, SYM_TYPES, context->type - 1));
+ scontextp += strlen(sym_name(&policydb, SYM_USERS, context->user - 1)) +
+ 1 + strlen(sym_name(&policydb, SYM_ROLES, context->role - 1)) +
+ 1 + strlen(sym_name(&policydb, SYM_TYPES, context->type - 1));
mls_sid_to_context(context, &scontextp);
@@ -1059,7 +1075,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
struct context *context;
int rc = 0;
- *scontext = NULL;
+ if (scontext)
+ *scontext = NULL;
*scontext_len = 0;
if (!ss_initialized) {
@@ -1067,6 +1084,8 @@ static int security_sid_to_context_core(u32 sid, char **scontext,
char *scontextp;
*scontext_len = strlen(initial_sid_to_string[sid]) + 1;
+ if (!scontext)
+ goto out;
scontextp = kmalloc(*scontext_len, GFP_ATOMIC);
if (!scontextp) {
rc = -ENOMEM;
@@ -1191,16 +1210,13 @@ static int string_to_context_struct(struct policydb *pol,
if (rc)
goto out;
- if ((p - scontext) < scontext_len) {
- rc = -EINVAL;
+ rc = -EINVAL;
+ if ((p - scontext) < scontext_len)
goto out;
- }
/* Check the validity of the new context. */
- if (!policydb_context_isvalid(pol, ctx)) {
- rc = -EINVAL;
+ if (!policydb_context_isvalid(pol, ctx))
goto out;
- }
rc = 0;
out:
if (rc)
@@ -1216,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
struct context context;
int rc = 0;
+ /* An empty security context is never valid. */
+ if (!scontext_len)
+ return -EINVAL;
+
if (!ss_initialized) {
int i;
@@ -1231,7 +1251,7 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
*sid = SECSID_NULL;
/* Copy the string so that we can modify the copy as we parse it. */
- scontext2 = kmalloc(scontext_len+1, gfp_flags);
+ scontext2 = kmalloc(scontext_len + 1, gfp_flags);
if (!scontext2)
return -ENOMEM;
memcpy(scontext2, scontext, scontext_len);
@@ -1239,27 +1259,26 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
if (force) {
/* Save another copy for storing in uninterpreted form */
+ rc = -ENOMEM;
str = kstrdup(scontext2, gfp_flags);
- if (!str) {
- kfree(scontext2);
- return -ENOMEM;
- }
+ if (!str)
+ goto out;
}
read_lock(&policy_rwlock);
- rc = string_to_context_struct(&policydb, &sidtab,
- scontext2, scontext_len,
- &context, def_sid);
+ rc = string_to_context_struct(&policydb, &sidtab, scontext2,
+ scontext_len, &context, def_sid);
if (rc == -EINVAL && force) {
context.str = str;
context.len = scontext_len;
str = NULL;
} else if (rc)
- goto out;
+ goto out_unlock;
rc = sidtab_context_to_sid(&sidtab, &context, sid);
context_destroy(&context);
-out:
+out_unlock:
read_unlock(&policy_rwlock);
+out:
kfree(scontext2);
kfree(str);
return rc;
@@ -1270,16 +1289,18 @@ out:
* @scontext: security context
* @scontext_len: length in bytes
* @sid: security identifier, SID
+ * @gfp: context for the allocation
*
* Obtains a SID associated with the security context that
* has the string representation specified by @scontext.
* Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
* memory is available, or 0 on success.
*/
-int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid)
+int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
+ gfp_t gfp)
{
return security_context_to_sid_core(scontext, scontext_len,
- sid, SECSID_NULL, GFP_KERNEL, 0);
+ sid, SECSID_NULL, gfp, 0);
}
/**
@@ -1323,18 +1344,18 @@ static int compute_sid_handle_invalid_context(
char *s = NULL, *t = NULL, *n = NULL;
u32 slen, tlen, nlen;
- if (context_struct_to_string(scontext, &s, &slen) < 0)
+ if (context_struct_to_string(scontext, &s, &slen))
goto out;
- if (context_struct_to_string(tcontext, &t, &tlen) < 0)
+ if (context_struct_to_string(tcontext, &t, &tlen))
goto out;
- if (context_struct_to_string(newcontext, &n, &nlen) < 0)
+ if (context_struct_to_string(newcontext, &n, &nlen))
goto out;
audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
"security_compute_sid: invalid context %s"
" for scontext=%s"
" tcontext=%s"
" tclass=%s",
- n, s, t, policydb.p_class_val_to_name[tclass-1]);
+ n, s, t, sym_name(&policydb, SYM_CLASSES, tclass-1));
out:
kfree(s);
kfree(t);
@@ -1344,13 +1365,40 @@ out:
return -EACCES;
}
+static void filename_compute_type(struct policydb *p, struct context *newcontext,
+ u32 stype, u32 ttype, u16 tclass,
+ const char *objname)
+{
+ struct filename_trans ft;
+ struct filename_trans_datum *otype;
+
+ /*
+ * Most filename trans rules are going to live in specific directories
+ * like /dev or /var/run. This bitmap will quickly skip rule searches
+ * if the ttype does not contain any rules.
+ */
+ if (!ebitmap_get_bit(&p->filename_trans_ttypes, ttype))
+ return;
+
+ ft.stype = stype;
+ ft.ttype = ttype;
+ ft.tclass = tclass;
+ ft.name = objname;
+
+ otype = hashtab_search(p->filename_trans, &ft);
+ if (otype)
+ newcontext->type = otype->otype;
+}
+
static int security_compute_sid(u32 ssid,
u32 tsid,
u16 orig_tclass,
u32 specified,
+ const char *objname,
u32 *out_sid,
bool kern)
{
+ struct class_datum *cladatum = NULL;
struct context *scontext = NULL, *tcontext = NULL, newcontext;
struct role_trans *roletr = NULL;
struct avtab_key avkey;
@@ -1358,6 +1406,7 @@ static int security_compute_sid(u32 ssid,
struct avtab_node *node;
u16 tclass;
int rc = 0;
+ bool sock;
if (!ss_initialized) {
switch (orig_tclass) {
@@ -1375,10 +1424,13 @@ static int security_compute_sid(u32 ssid,
read_lock(&policy_rwlock);
- if (kern)
+ if (kern) {
tclass = unmap_class(orig_tclass);
- else
+ sock = security_is_socket_class(orig_tclass);
+ } else {
tclass = orig_tclass;
+ sock = security_is_socket_class(map_class(tclass));
+ }
scontext = sidtab_search(&sidtab, ssid);
if (!scontext) {
@@ -1395,12 +1447,20 @@ static int security_compute_sid(u32 ssid,
goto out_unlock;
}
+ if (tclass && tclass <= policydb.p_classes.nprim)
+ cladatum = policydb.class_val_to_struct[tclass - 1];
+
/* Set the user identity. */
switch (specified) {
case AVTAB_TRANSITION:
case AVTAB_CHANGE:
- /* Use the process user identity. */
- newcontext.user = scontext->user;
+ if (cladatum && cladatum->default_user == DEFAULT_TARGET) {
+ newcontext.user = tcontext->user;
+ } else {
+ /* notice this gets both DEFAULT_SOURCE and unset */
+ /* Use the process user identity. */
+ newcontext.user = scontext->user;
+ }
break;
case AVTAB_MEMBER:
/* Use the related object owner. */
@@ -1408,16 +1468,31 @@ static int security_compute_sid(u32 ssid,
break;
}
- /* Set the role and type to default values. */
- if (tclass == policydb.process_class) {
- /* Use the current role and type of process. */
+ /* Set the role to default values. */
+ if (cladatum && cladatum->default_role == DEFAULT_SOURCE) {
newcontext.role = scontext->role;
- newcontext.type = scontext->type;
+ } else if (cladatum && cladatum->default_role == DEFAULT_TARGET) {
+ newcontext.role = tcontext->role;
} else {
- /* Use the well-defined object role. */
- newcontext.role = OBJECT_R_VAL;
- /* Use the type of the related object. */
+ if ((tclass == policydb.process_class) || (sock == true))
+ newcontext.role = scontext->role;
+ else
+ newcontext.role = OBJECT_R_VAL;
+ }
+
+ /* Set the type to default values. */
+ if (cladatum && cladatum->default_type == DEFAULT_SOURCE) {
+ newcontext.type = scontext->type;
+ } else if (cladatum && cladatum->default_type == DEFAULT_TARGET) {
newcontext.type = tcontext->type;
+ } else {
+ if ((tclass == policydb.process_class) || (sock == true)) {
+ /* Use the type of process. */
+ newcontext.type = scontext->type;
+ } else {
+ /* Use the type of the related object. */
+ newcontext.type = tcontext->type;
+ }
}
/* Look for a type transition/member/change rule. */
@@ -1443,25 +1518,29 @@ static int security_compute_sid(u32 ssid,
newcontext.type = avdatum->data;
}
+	/* if we have an objname this is a file trans check so check those rules */
+ if (objname)
+ filename_compute_type(&policydb, &newcontext, scontext->type,
+ tcontext->type, tclass, objname);
+
/* Check for class-specific changes. */
- if (tclass == policydb.process_class) {
- if (specified & AVTAB_TRANSITION) {
- /* Look for a role transition rule. */
- for (roletr = policydb.role_tr; roletr;
- roletr = roletr->next) {
- if (roletr->role == scontext->role &&
- roletr->type == tcontext->type) {
- /* Use the role transition rule. */
- newcontext.role = roletr->new_role;
- break;
- }
+ if (specified & AVTAB_TRANSITION) {
+ /* Look for a role transition rule. */
+ for (roletr = policydb.role_tr; roletr; roletr = roletr->next) {
+ if ((roletr->role == scontext->role) &&
+ (roletr->type == tcontext->type) &&
+ (roletr->tclass == tclass)) {
+ /* Use the role transition rule. */
+ newcontext.role = roletr->new_role;
+ break;
}
}
}
/* Set the MLS attributes.
This is done last because it may allocate memory. */
- rc = mls_compute_sid(scontext, tcontext, tclass, specified, &newcontext);
+ rc = mls_compute_sid(scontext, tcontext, tclass, specified,
+ &newcontext, sock);
if (rc)
goto out_unlock;
@@ -1496,22 +1575,18 @@ out:
* if insufficient memory is available, or %0 if the new SID was
* computed successfully.
*/
-int security_transition_sid(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 *out_sid)
+int security_transition_sid(u32 ssid, u32 tsid, u16 tclass,
+ const struct qstr *qstr, u32 *out_sid)
{
return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
- out_sid, true);
+ qstr ? qstr->name : NULL, out_sid, true);
}
-int security_transition_sid_user(u32 ssid,
- u32 tsid,
- u16 tclass,
- u32 *out_sid)
+int security_transition_sid_user(u32 ssid, u32 tsid, u16 tclass,
+ const char *objname, u32 *out_sid)
{
return security_compute_sid(ssid, tsid, tclass, AVTAB_TRANSITION,
- out_sid, false);
+ objname, out_sid, false);
}
/**
@@ -1532,8 +1607,8 @@ int security_member_sid(u32 ssid,
u16 tclass,
u32 *out_sid)
{
- return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, out_sid,
- false);
+ return security_compute_sid(ssid, tsid, tclass, AVTAB_MEMBER, NULL,
+ out_sid, false);
}
/**
@@ -1554,8 +1629,8 @@ int security_change_sid(u32 ssid,
u16 tclass,
u32 *out_sid)
{
- return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, out_sid,
- false);
+ return security_compute_sid(ssid, tsid, tclass, AVTAB_CHANGE, NULL,
+ out_sid, false);
}
/* Clone the SID into the new SID table. */
@@ -1565,27 +1640,25 @@ static int clone_sid(u32 sid,
{
struct sidtab *s = arg;
- return sidtab_insert(s, sid, context);
+ if (sid > SECINITSID_NUM)
+ return sidtab_insert(s, sid, context);
+ else
+ return 0;
}
static inline int convert_context_handle_invalid_context(struct context *context)
{
- int rc = 0;
+ char *s;
+ u32 len;
- if (selinux_enforcing) {
- rc = -EINVAL;
- } else {
- char *s;
- u32 len;
-
- if (!context_struct_to_string(context, &s, &len)) {
- printk(KERN_WARNING
- "SELinux: Context %s would be invalid if enforcing\n",
- s);
- kfree(s);
- }
+ if (selinux_enforcing)
+ return -EINVAL;
+
+ if (!context_struct_to_string(context, &s, &len)) {
+ printk(KERN_WARNING "SELinux: Context %s would be invalid if enforcing\n", s);
+ kfree(s);
}
- return rc;
+ return 0;
}
struct convert_context_args {
@@ -1606,28 +1679,33 @@ static int convert_context(u32 key,
{
struct convert_context_args *args;
struct context oldc;
+ struct ocontext *oc;
+ struct mls_range *range;
struct role_datum *role;
struct type_datum *typdatum;
struct user_datum *usrdatum;
char *s;
u32 len;
- int rc;
+ int rc = 0;
+
+ if (key <= SECINITSID_NUM)
+ goto out;
args = p;
if (c->str) {
struct context ctx;
+
+ rc = -ENOMEM;
s = kstrdup(c->str, GFP_KERNEL);
- if (!s) {
- rc = -ENOMEM;
+ if (!s)
goto out;
- }
+
rc = string_to_context_struct(args->newp, NULL, s,
c->len, &ctx, SECSID_NULL);
kfree(s);
if (!rc) {
- printk(KERN_INFO
- "SELinux: Context %s became valid (mapped).\n",
+ printk(KERN_INFO "SELinux: Context %s became valid (mapped).\n",
c->str);
/* Replace string with mapped representation. */
kfree(c->str);
@@ -1639,8 +1717,7 @@ static int convert_context(u32 key,
goto out;
} else {
/* Other error condition, e.g. ENOMEM. */
- printk(KERN_ERR
- "SELinux: Unable to map context %s, rc = %d.\n",
+ printk(KERN_ERR "SELinux: Unable to map context %s, rc = %d.\n",
c->str, -rc);
goto out;
}
@@ -1650,32 +1727,64 @@ static int convert_context(u32 key,
if (rc)
goto out;
- rc = -EINVAL;
-
/* Convert the user. */
+ rc = -EINVAL;
usrdatum = hashtab_search(args->newp->p_users.table,
- args->oldp->p_user_val_to_name[c->user - 1]);
+ sym_name(args->oldp, SYM_USERS, c->user - 1));
if (!usrdatum)
goto bad;
c->user = usrdatum->value;
/* Convert the role. */
+ rc = -EINVAL;
role = hashtab_search(args->newp->p_roles.table,
- args->oldp->p_role_val_to_name[c->role - 1]);
+ sym_name(args->oldp, SYM_ROLES, c->role - 1));
if (!role)
goto bad;
c->role = role->value;
/* Convert the type. */
+ rc = -EINVAL;
typdatum = hashtab_search(args->newp->p_types.table,
- args->oldp->p_type_val_to_name[c->type - 1]);
+ sym_name(args->oldp, SYM_TYPES, c->type - 1));
if (!typdatum)
goto bad;
c->type = typdatum->value;
- rc = mls_convert_context(args->oldp, args->newp, c);
- if (rc)
- goto bad;
+ /* Convert the MLS fields if dealing with MLS policies */
+ if (args->oldp->mls_enabled && args->newp->mls_enabled) {
+ rc = mls_convert_context(args->oldp, args->newp, c);
+ if (rc)
+ goto bad;
+ } else if (args->oldp->mls_enabled && !args->newp->mls_enabled) {
+ /*
+ * Switching between MLS and non-MLS policy:
+ * free any storage used by the MLS fields in the
+ * context for all existing entries in the sidtab.
+ */
+ mls_context_destroy(c);
+ } else if (!args->oldp->mls_enabled && args->newp->mls_enabled) {
+ /*
+ * Switching between non-MLS and MLS policy:
+ * ensure that the MLS fields of the context for all
+ * existing entries in the sidtab are filled in with a
+ * suitable default value, likely taken from one of the
+ * initial SIDs.
+ */
+ oc = args->newp->ocontexts[OCON_ISID];
+ while (oc && oc->sid[0] != SECINITSID_UNLABELED)
+ oc = oc->next;
+ rc = -EINVAL;
+ if (!oc) {
+ printk(KERN_ERR "SELinux: unable to look up"
+ " the initial SIDs list\n");
+ goto bad;
+ }
+ range = &oc->context[0].range;
+ rc = mls_range_set(c, range);
+ if (rc)
+ goto bad;
+ }
/* Check the validity of the new context. */
if (!policydb_context_isvalid(args->newp, c)) {
@@ -1685,19 +1794,20 @@ static int convert_context(u32 key,
}
context_destroy(&oldc);
+
rc = 0;
out:
return rc;
bad:
/* Map old representation to string and save it. */
- if (context_struct_to_string(&oldc, &s, &len))
- return -ENOMEM;
+ rc = context_struct_to_string(&oldc, &s, &len);
+ if (rc)
+ return rc;
context_destroy(&oldc);
context_destroy(c);
c->str = s;
c->len = len;
- printk(KERN_INFO
- "SELinux: Context %s became invalid (unmapped).\n",
+ printk(KERN_INFO "SELinux: Context %s became invalid (unmapped).\n",
c->str);
rc = 0;
goto out;
@@ -1709,9 +1819,10 @@ static void security_load_policycaps(void)
POLICYDB_CAPABILITY_NETPEER);
selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
POLICYDB_CAPABILITY_OPENPERM);
+ selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
+ POLICYDB_CAPABILITY_ALWAYSNETWORK);
}
-extern void selinux_complete_init(void);
static int security_preserve_bools(struct policydb *p);
/**
@@ -1726,7 +1837,7 @@ static int security_preserve_bools(struct policydb *p);
*/
int security_load_policy(void *data, size_t len)
{
- struct policydb oldpolicydb, newpolicydb;
+ struct policydb *oldpolicydb, *newpolicydb;
struct sidtab oldsidtab, newsidtab;
struct selinux_mapping *oldmap, *map = NULL;
struct convert_context_args args;
@@ -1735,52 +1846,77 @@ int security_load_policy(void *data, size_t len)
int rc = 0;
struct policy_file file = { data, len }, *fp = &file;
+ oldpolicydb = kzalloc(2 * sizeof(*oldpolicydb), GFP_KERNEL);
+ if (!oldpolicydb) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ newpolicydb = oldpolicydb + 1;
+
if (!ss_initialized) {
avtab_cache_init();
- if (policydb_read(&policydb, fp)) {
+ rc = policydb_read(&policydb, fp);
+ if (rc) {
avtab_cache_destroy();
- return -EINVAL;
+ goto out;
}
- if (selinux_set_mapping(&policydb, secclass_map,
- &current_mapping,
- &current_mapping_size)) {
+
+ policydb.len = len;
+ rc = selinux_set_mapping(&policydb, secclass_map,
+ &current_mapping,
+ &current_mapping_size);
+ if (rc) {
policydb_destroy(&policydb);
avtab_cache_destroy();
- return -EINVAL;
+ goto out;
}
- if (policydb_load_isids(&policydb, &sidtab)) {
+
+ rc = policydb_load_isids(&policydb, &sidtab);
+ if (rc) {
policydb_destroy(&policydb);
avtab_cache_destroy();
- return -EINVAL;
+ goto out;
}
+
security_load_policycaps();
ss_initialized = 1;
seqno = ++latest_granting;
selinux_complete_init();
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
+ selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
- return 0;
+ goto out;
}
#if 0
sidtab_hash_eval(&sidtab, "sids");
#endif
- if (policydb_read(&newpolicydb, fp))
- return -EINVAL;
+ rc = policydb_read(newpolicydb, fp);
+ if (rc)
+ goto out;
- if (sidtab_init(&newsidtab)) {
- policydb_destroy(&newpolicydb);
- return -ENOMEM;
+ newpolicydb->len = len;
+ /* If switching between different policy types, log MLS status */
+ if (policydb.mls_enabled && !newpolicydb->mls_enabled)
+ printk(KERN_INFO "SELinux: Disabling MLS support...\n");
+ else if (!policydb.mls_enabled && newpolicydb->mls_enabled)
+ printk(KERN_INFO "SELinux: Enabling MLS support...\n");
+
+ rc = policydb_load_isids(newpolicydb, &newsidtab);
+ if (rc) {
+ printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
+ policydb_destroy(newpolicydb);
+ goto out;
}
- if (selinux_set_mapping(&newpolicydb, secclass_map,
- &map, &map_size))
+ rc = selinux_set_mapping(newpolicydb, secclass_map, &map, &map_size);
+ if (rc)
goto err;
- rc = security_preserve_bools(&newpolicydb);
+ rc = security_preserve_bools(newpolicydb);
if (rc) {
printk(KERN_ERR "SELinux: unable to preserve booleans\n");
goto err;
@@ -1788,28 +1924,32 @@ int security_load_policy(void *data, size_t len)
/* Clone the SID table. */
sidtab_shutdown(&sidtab);
- if (sidtab_map(&sidtab, clone_sid, &newsidtab)) {
- rc = -ENOMEM;
+
+ rc = sidtab_map(&sidtab, clone_sid, &newsidtab);
+ if (rc)
goto err;
- }
/*
* Convert the internal representations of contexts
* in the new SID table.
*/
args.oldp = &policydb;
- args.newp = &newpolicydb;
+ args.newp = newpolicydb;
rc = sidtab_map(&newsidtab, convert_context, &args);
- if (rc)
+ if (rc) {
+ printk(KERN_ERR "SELinux: unable to convert the internal"
+ " representation of contexts in the new SID"
+ " table\n");
goto err;
+ }
/* Save the old policydb and SID table to free later. */
- memcpy(&oldpolicydb, &policydb, sizeof policydb);
+ memcpy(oldpolicydb, &policydb, sizeof(policydb));
sidtab_set(&oldsidtab, &sidtab);
/* Install the new policydb and SID table. */
write_lock_irq(&policy_rwlock);
- memcpy(&policydb, &newpolicydb, sizeof policydb);
+ memcpy(&policydb, newpolicydb, sizeof(policydb));
sidtab_set(&sidtab, &newsidtab);
security_load_policycaps();
oldmap = current_mapping;
@@ -1819,23 +1959,38 @@ int security_load_policy(void *data, size_t len)
write_unlock_irq(&policy_rwlock);
/* Free the old policydb and SID table. */
- policydb_destroy(&oldpolicydb);
+ policydb_destroy(oldpolicydb);
sidtab_destroy(&oldsidtab);
kfree(oldmap);
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
+ selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
- return 0;
+ rc = 0;
+ goto out;
err:
kfree(map);
sidtab_destroy(&newsidtab);
- policydb_destroy(&newpolicydb);
+ policydb_destroy(newpolicydb);
+
+out:
+ kfree(oldpolicydb);
return rc;
+}
+
+size_t security_policydb_len(void)
+{
+ size_t len;
+ read_lock(&policy_rwlock);
+ len = policydb.len;
+ read_unlock(&policy_rwlock);
+
+ return len;
}
/**
@@ -1944,7 +2099,7 @@ int security_node_sid(u16 domain,
u32 addrlen,
u32 *out_sid)
{
- int rc = 0;
+ int rc;
struct ocontext *c;
read_lock(&policy_rwlock);
@@ -1953,10 +2108,9 @@ int security_node_sid(u16 domain,
case AF_INET: {
u32 addr;
- if (addrlen != sizeof(u32)) {
- rc = -EINVAL;
+ rc = -EINVAL;
+ if (addrlen != sizeof(u32))
goto out;
- }
addr = *((u32 *)addrp);
@@ -1970,10 +2124,9 @@ int security_node_sid(u16 domain,
}
case AF_INET6:
- if (addrlen != sizeof(u64) * 2) {
- rc = -EINVAL;
+ rc = -EINVAL;
+ if (addrlen != sizeof(u64) * 2)
goto out;
- }
c = policydb.ocontexts[OCON_NODE6];
while (c) {
if (match_ipv6_addrmask(addrp, c->u.node6.addr,
@@ -1984,6 +2137,7 @@ int security_node_sid(u16 domain,
break;
default:
+ rc = 0;
*out_sid = SECINITSID_NODE;
goto out;
}
@@ -2001,6 +2155,7 @@ int security_node_sid(u16 domain,
*out_sid = SECINITSID_NODE;
}
+ rc = 0;
out:
read_unlock(&policy_rwlock);
return rc;
@@ -2045,30 +2200,28 @@ int security_get_user_sids(u32 fromsid,
context_init(&usercon);
+ rc = -EINVAL;
fromcon = sidtab_search(&sidtab, fromsid);
- if (!fromcon) {
- rc = -EINVAL;
+ if (!fromcon)
goto out_unlock;
- }
+ rc = -EINVAL;
user = hashtab_search(policydb.p_users.table, username);
- if (!user) {
- rc = -EINVAL;
+ if (!user)
goto out_unlock;
- }
+
usercon.user = user->value;
+ rc = -ENOMEM;
mysids = kcalloc(maxnel, sizeof(*mysids), GFP_ATOMIC);
- if (!mysids) {
- rc = -ENOMEM;
+ if (!mysids)
goto out_unlock;
- }
ebitmap_for_each_positive_bit(&user->roles, rnode, i) {
role = policydb.role_val_to_struct[i];
- usercon.role = i+1;
+ usercon.role = i + 1;
ebitmap_for_each_positive_bit(&role->types, tnode, j) {
- usercon.type = j+1;
+ usercon.type = j + 1;
if (mls_setup_user_range(fromcon, user, &usercon))
continue;
@@ -2079,12 +2232,11 @@ int security_get_user_sids(u32 fromsid,
if (mynel < maxnel) {
mysids[mynel++] = sid;
} else {
+ rc = -ENOMEM;
maxnel += SIDS_NEL;
mysids2 = kcalloc(maxnel, sizeof(*mysids2), GFP_ATOMIC);
- if (!mysids2) {
- rc = -ENOMEM;
+ if (!mysids2)
goto out_unlock;
- }
memcpy(mysids2, mysids, mynel * sizeof(*mysids2));
kfree(mysids);
mysids = mysids2;
@@ -2092,7 +2244,7 @@ int security_get_user_sids(u32 fromsid,
}
}
}
-
+ rc = 0;
out_unlock:
read_unlock(&policy_rwlock);
if (rc || !mynel) {
@@ -2100,17 +2252,18 @@ out_unlock:
goto out;
}
+ rc = -ENOMEM;
mysids2 = kcalloc(mynel, sizeof(*mysids2), GFP_KERNEL);
if (!mysids2) {
- rc = -ENOMEM;
kfree(mysids);
goto out;
}
for (i = 0, j = 0; i < mynel; i++) {
+ struct av_decision dummy_avd;
rc = avc_has_perm_noaudit(fromsid, mysids[i],
SECCLASS_PROCESS, /* kernel value */
PROCESS__TRANSITION, AVC_STRICT,
- NULL);
+ &dummy_avd);
if (!rc)
mysids2[j++] = mysids[i];
cond_resched();
@@ -2143,7 +2296,7 @@ int security_genfs_sid(const char *fstype,
u16 sclass;
struct genfs *genfs;
struct ocontext *c;
- int rc = 0, cmp = 0;
+ int rc, cmp = 0;
while (path[0] == '/' && path[1] == '/')
path++;
@@ -2151,6 +2304,7 @@ int security_genfs_sid(const char *fstype,
read_lock(&policy_rwlock);
sclass = unmap_class(orig_sclass);
+ *sid = SECINITSID_UNLABELED;
for (genfs = policydb.genfs; genfs; genfs = genfs->next) {
cmp = strcmp(fstype, genfs->fstype);
@@ -2158,11 +2312,9 @@ int security_genfs_sid(const char *fstype,
break;
}
- if (!genfs || cmp) {
- *sid = SECINITSID_UNLABELED;
- rc = -ENOENT;
+ rc = -ENOENT;
+ if (!genfs || cmp)
goto out;
- }
for (c = genfs->head; c; c = c->next) {
len = strlen(c->u.name);
@@ -2171,21 +2323,18 @@ int security_genfs_sid(const char *fstype,
break;
}
- if (!c) {
- *sid = SECINITSID_UNLABELED;
- rc = -ENOENT;
+ rc = -ENOENT;
+ if (!c)
goto out;
- }
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(&sidtab,
- &c->context[0],
- &c->sid[0]);
+ rc = sidtab_context_to_sid(&sidtab, &c->context[0], &c->sid[0]);
if (rc)
goto out;
}
*sid = c->sid[0];
+ rc = 0;
out:
read_unlock(&policy_rwlock);
return rc;
@@ -2193,17 +2342,14 @@ out:
/**
* security_fs_use - Determine how to handle labeling for a filesystem.
- * @fstype: filesystem type
- * @behavior: labeling behavior
- * @sid: SID for filesystem (superblock)
+ * @sb: superblock in question
*/
-int security_fs_use(
- const char *fstype,
- unsigned int *behavior,
- u32 *sid)
+int security_fs_use(struct super_block *sb)
{
int rc = 0;
struct ocontext *c;
+ struct superblock_security_struct *sbsec = sb->s_security;
+ const char *fstype = sb->s_type->name;
read_lock(&policy_rwlock);
@@ -2215,22 +2361,21 @@ int security_fs_use(
}
if (c) {
- *behavior = c->v.behavior;
+ sbsec->behavior = c->v.behavior;
if (!c->sid[0]) {
- rc = sidtab_context_to_sid(&sidtab,
- &c->context[0],
+ rc = sidtab_context_to_sid(&sidtab, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
}
- *sid = c->sid[0];
+ sbsec->sid = c->sid[0];
} else {
- rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid);
+ rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid);
if (rc) {
- *behavior = SECURITY_FS_USE_NONE;
+ sbsec->behavior = SECURITY_FS_USE_NONE;
rc = 0;
} else {
- *behavior = SECURITY_FS_USE_GENFS;
+ sbsec->behavior = SECURITY_FS_USE_GENFS;
}
}
@@ -2241,34 +2386,39 @@ out:
int security_get_bools(int *len, char ***names, int **values)
{
- int i, rc = -ENOMEM;
+ int i, rc;
read_lock(&policy_rwlock);
*names = NULL;
*values = NULL;
+ rc = 0;
*len = policydb.p_bools.nprim;
- if (!*len) {
- rc = 0;
+ if (!*len)
goto out;
- }
- *names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
+ rc = -ENOMEM;
+ *names = kcalloc(*len, sizeof(char *), GFP_ATOMIC);
if (!*names)
goto err;
- *values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
+ rc = -ENOMEM;
+ *values = kcalloc(*len, sizeof(int), GFP_ATOMIC);
if (!*values)
goto err;
for (i = 0; i < *len; i++) {
size_t name_len;
+
(*values)[i] = policydb.bool_val_to_struct[i]->state;
- name_len = strlen(policydb.p_bool_val_to_name[i]) + 1;
- (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
+ name_len = strlen(sym_name(&policydb, SYM_BOOLS, i)) + 1;
+
+ rc = -ENOMEM;
+ (*names)[i] = kmalloc(sizeof(char) * name_len, GFP_ATOMIC);
if (!(*names)[i])
goto err;
- strncpy((*names)[i], policydb.p_bool_val_to_name[i], name_len);
+
+ strncpy((*names)[i], sym_name(&policydb, SYM_BOOLS, i), name_len);
(*names)[i][name_len - 1] = 0;
}
rc = 0;
@@ -2287,27 +2437,26 @@ err:
int security_set_bools(int len, int *values)
{
- int i, rc = 0;
+ int i, rc;
int lenp, seqno = 0;
struct cond_node *cur;
write_lock_irq(&policy_rwlock);
+ rc = -EFAULT;
lenp = policydb.p_bools.nprim;
- if (len != lenp) {
- rc = -EFAULT;
+ if (len != lenp)
goto out;
- }
for (i = 0; i < len; i++) {
if (!!values[i] != policydb.bool_val_to_struct[i]->state) {
audit_log(current->audit_context, GFP_ATOMIC,
AUDIT_MAC_CONFIG_CHANGE,
"bool=%s val=%d old_val=%d auid=%u ses=%u",
- policydb.p_bool_val_to_name[i],
+ sym_name(&policydb, SYM_BOOLS, i),
!!values[i],
policydb.bool_val_to_struct[i]->state,
- audit_get_loginuid(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
}
if (values[i])
@@ -2323,12 +2472,13 @@ int security_set_bools(int len, int *values)
}
seqno = ++latest_granting;
-
+ rc = 0;
out:
write_unlock_irq(&policy_rwlock);
if (!rc) {
avc_ss_reset(seqno);
selnl_notify_policyload(seqno);
+ selinux_status_update_policyload(seqno);
selinux_xfrm_notify_policyload();
}
return rc;
@@ -2336,16 +2486,15 @@ out:
int security_get_bool_value(int bool)
{
- int rc = 0;
+ int rc;
int len;
read_lock(&policy_rwlock);
+ rc = -EFAULT;
len = policydb.p_bools.nprim;
- if (bool >= len) {
- rc = -EFAULT;
+ if (bool >= len)
goto out;
- }
rc = policydb.bool_val_to_struct[bool]->state;
out:
@@ -2395,9 +2544,10 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
struct context newcon;
char *s;
u32 len;
- int rc = 0;
+ int rc;
- if (!ss_initialized || !selinux_mls_enabled) {
+ rc = 0;
+ if (!ss_initialized || !policydb.mls_enabled) {
*new_sid = sid;
goto out;
}
@@ -2405,19 +2555,20 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
context_init(&newcon);
read_lock(&policy_rwlock);
+
+ rc = -EINVAL;
context1 = sidtab_search(&sidtab, sid);
if (!context1) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, sid);
- rc = -EINVAL;
goto out_unlock;
}
+ rc = -EINVAL;
context2 = sidtab_search(&sidtab, mls_sid);
if (!context2) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, mls_sid);
- rc = -EINVAL;
goto out_unlock;
}
@@ -2431,20 +2582,17 @@ int security_sid_mls_copy(u32 sid, u32 mls_sid, u32 *new_sid)
/* Check the validity of the new context. */
if (!policydb_context_isvalid(&policydb, &newcon)) {
rc = convert_context_handle_invalid_context(&newcon);
- if (rc)
- goto bad;
+ if (rc) {
+ if (!context_struct_to_string(&newcon, &s, &len)) {
+ audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
+ "security_sid_mls_copy: invalid context %s", s);
+ kfree(s);
+ }
+ goto out_unlock;
+ }
}
rc = sidtab_context_to_sid(&sidtab, &newcon, new_sid);
- goto out_unlock;
-
-bad:
- if (!context_struct_to_string(&newcon, &s, &len)) {
- audit_log(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "security_sid_mls_copy: invalid context %s", s);
- kfree(s);
- }
-
out_unlock:
read_unlock(&policy_rwlock);
context_destroy(&newcon);
@@ -2480,6 +2628,8 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
struct context *nlbl_ctx;
struct context *xfrm_ctx;
+ *peer_sid = SECSID_NULL;
+
/* handle the common (which also happens to be the set of easy) cases
* right away, these two if statements catch everything involving a
* single or absent peer SID/label */
@@ -2498,40 +2648,37 @@ int security_net_peersid_resolve(u32 nlbl_sid, u32 nlbl_type,
/* we don't need to check ss_initialized here since the only way both
* nlbl_sid and xfrm_sid are not equal to SECSID_NULL would be if the
* security server was initialized and ss_initialized was true */
- if (!selinux_mls_enabled) {
- *peer_sid = SECSID_NULL;
+ if (!policydb.mls_enabled)
return 0;
- }
read_lock(&policy_rwlock);
+ rc = -EINVAL;
nlbl_ctx = sidtab_search(&sidtab, nlbl_sid);
if (!nlbl_ctx) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, nlbl_sid);
- rc = -EINVAL;
- goto out_slowpath;
+ goto out;
}
+ rc = -EINVAL;
xfrm_ctx = sidtab_search(&sidtab, xfrm_sid);
if (!xfrm_ctx) {
printk(KERN_ERR "SELinux: %s: unrecognized SID %d\n",
__func__, xfrm_sid);
- rc = -EINVAL;
- goto out_slowpath;
+ goto out;
}
rc = (mls_context_cmp(nlbl_ctx, xfrm_ctx) ? 0 : -EACCES);
+ if (rc)
+ goto out;
-out_slowpath:
+ /* at present NetLabel SIDs/labels really only carry MLS
+ * information so if the MLS portion of the NetLabel SID
+ * matches the MLS portion of the labeled XFRM SID/label
+ * then pass along the XFRM SID as it is the most
+ * expressive */
+ *peer_sid = xfrm_sid;
+out:
read_unlock(&policy_rwlock);
- if (rc == 0)
- /* at present NetLabel SIDs/labels really only carry MLS
- * information so if the MLS portion of the NetLabel SID
- * matches the MLS portion of the labeled XFRM SID/label
- * then pass along the XFRM SID as it is the most
- * expressive */
- *peer_sid = xfrm_sid;
- else
- *peer_sid = SECSID_NULL;
return rc;
}
@@ -2550,18 +2697,19 @@ static int get_classes_callback(void *k, void *d, void *args)
int security_get_classes(char ***classes, int *nclasses)
{
- int rc = -ENOMEM;
+ int rc;
read_lock(&policy_rwlock);
+ rc = -ENOMEM;
*nclasses = policydb.p_classes.nprim;
- *classes = kcalloc(*nclasses, sizeof(*classes), GFP_ATOMIC);
+ *classes = kcalloc(*nclasses, sizeof(**classes), GFP_ATOMIC);
if (!*classes)
goto out;
rc = hashtab_map(policydb.p_classes.table, get_classes_callback,
*classes);
- if (rc < 0) {
+ if (rc) {
int i;
for (i = 0; i < *nclasses; i++)
kfree((*classes)[i]);
@@ -2588,34 +2736,35 @@ static int get_permissions_callback(void *k, void *d, void *args)
int security_get_permissions(char *class, char ***perms, int *nperms)
{
- int rc = -ENOMEM, i;
+ int rc, i;
struct class_datum *match;
read_lock(&policy_rwlock);
+ rc = -EINVAL;
match = hashtab_search(policydb.p_classes.table, class);
if (!match) {
printk(KERN_ERR "SELinux: %s: unrecognized class %s\n",
__func__, class);
- rc = -EINVAL;
goto out;
}
+ rc = -ENOMEM;
*nperms = match->permissions.nprim;
- *perms = kcalloc(*nperms, sizeof(*perms), GFP_ATOMIC);
+ *perms = kcalloc(*nperms, sizeof(**perms), GFP_ATOMIC);
if (!*perms)
goto out;
if (match->comdatum) {
rc = hashtab_map(match->comdatum->permissions.table,
get_permissions_callback, *perms);
- if (rc < 0)
+ if (rc)
goto err;
}
rc = hashtab_map(match->permissions.table, get_permissions_callback,
*perms);
- if (rc < 0)
+ if (rc)
goto err;
out:
@@ -2705,7 +2854,7 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
case AUDIT_SUBJ_CLR:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
- /* we do not allow a range, indicated by the presense of '-' */
+ /* we do not allow a range, indicated by the presence of '-' */
if (strchr(rulestr, '-'))
return -EINVAL;
break;
@@ -2727,36 +2876,39 @@ int selinux_audit_rule_init(u32 field, u32 op, char *rulestr, void **vrule)
switch (field) {
case AUDIT_SUBJ_USER:
case AUDIT_OBJ_USER:
+ rc = -EINVAL;
userdatum = hashtab_search(policydb.p_users.table, rulestr);
if (!userdatum)
- rc = -EINVAL;
- else
- tmprule->au_ctxt.user = userdatum->value;
+ goto out;
+ tmprule->au_ctxt.user = userdatum->value;
break;
case AUDIT_SUBJ_ROLE:
case AUDIT_OBJ_ROLE:
+ rc = -EINVAL;
roledatum = hashtab_search(policydb.p_roles.table, rulestr);
if (!roledatum)
- rc = -EINVAL;
- else
- tmprule->au_ctxt.role = roledatum->value;
+ goto out;
+ tmprule->au_ctxt.role = roledatum->value;
break;
case AUDIT_SUBJ_TYPE:
case AUDIT_OBJ_TYPE:
+ rc = -EINVAL;
typedatum = hashtab_search(policydb.p_types.table, rulestr);
if (!typedatum)
- rc = -EINVAL;
- else
- tmprule->au_ctxt.type = typedatum->value;
+ goto out;
+ tmprule->au_ctxt.type = typedatum->value;
break;
case AUDIT_SUBJ_SEN:
case AUDIT_SUBJ_CLR:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
rc = mls_from_string(rulestr, &tmprule->au_ctxt, GFP_ATOMIC);
+ if (rc)
+ goto out;
break;
}
-
+ rc = 0;
+out:
read_unlock(&policy_rwlock);
if (rc) {
@@ -2802,25 +2954,21 @@ int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
struct selinux_audit_rule *rule = vrule;
int match = 0;
- if (!rule) {
- audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "selinux_audit_rule_match: missing rule\n");
+ if (unlikely(!rule)) {
+ WARN_ONCE(1, "selinux_audit_rule_match: missing rule\n");
return -ENOENT;
}
read_lock(&policy_rwlock);
if (rule->au_seqno < latest_granting) {
- audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "selinux_audit_rule_match: stale rule\n");
match = -ESTALE;
goto out;
}
ctxt = sidtab_search(&sidtab, sid);
- if (!ctxt) {
- audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "selinux_audit_rule_match: unrecognized SID %d\n",
+ if (unlikely(!ctxt)) {
+ WARN_ONCE(1, "selinux_audit_rule_match: unrecognized SID %d\n",
sid);
match = -ENOENT;
goto out;
@@ -2908,8 +3056,7 @@ out:
static int (*aurule_callback)(void) = audit_update_lsm_rules;
-static int aurule_avc_callback(u32 event, u32 ssid, u32 tsid,
- u16 class, u32 perms, u32 *retained)
+static int aurule_avc_callback(u32 event)
{
int err = 0;
@@ -2922,8 +3069,7 @@ static int __init aurule_init(void)
{
int err;
- err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET,
- SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+ err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET);
if (err)
panic("avc_add_callback() failed, error %d\n", err);
@@ -2971,7 +3117,7 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
* Description:
* Convert the given NetLabel security attributes in @secattr into a
* SELinux SID. If the @secattr field does not contain a full SELinux
- * SID/context then use SECINITSID_NETMSG as the foundation. If possibile the
+ * SID/context then use SECINITSID_NETMSG as the foundation. If possible the
* 'cache' field of @secattr is set and the CACHE flag is set; this is to
* allow the @secattr to be used by NetLabel to cache the secattr to SID
* conversion for future lookups. Returns zero on success, negative values on
@@ -2981,7 +3127,7 @@ static void security_netlbl_cache_add(struct netlbl_lsm_secattr *secattr,
int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
u32 *sid)
{
- int rc = -EIDRM;
+ int rc;
struct context *ctx;
struct context ctx_new;
@@ -2992,16 +3138,15 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
read_lock(&policy_rwlock);
- if (secattr->flags & NETLBL_SECATTR_CACHE) {
+ if (secattr->flags & NETLBL_SECATTR_CACHE)
*sid = *(u32 *)secattr->cache->data;
- rc = 0;
- } else if (secattr->flags & NETLBL_SECATTR_SECID) {
+ else if (secattr->flags & NETLBL_SECATTR_SECID)
*sid = secattr->attr.secid;
- rc = 0;
- } else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
+ else if (secattr->flags & NETLBL_SECATTR_MLS_LVL) {
+ rc = -EIDRM;
ctx = sidtab_search(&sidtab, SECINITSID_NETMSG);
if (ctx == NULL)
- goto netlbl_secattr_to_sid_return;
+ goto out;
context_init(&ctx_new);
ctx_new.user = ctx->user;
@@ -3009,34 +3154,35 @@ int security_netlbl_secattr_to_sid(struct netlbl_lsm_secattr *secattr,
ctx_new.type = ctx->type;
mls_import_netlbl_lvl(&ctx_new, secattr);
if (secattr->flags & NETLBL_SECATTR_MLS_CAT) {
- if (ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
- secattr->attr.mls.cat) != 0)
- goto netlbl_secattr_to_sid_return;
+ rc = ebitmap_netlbl_import(&ctx_new.range.level[0].cat,
+ secattr->attr.mls.cat);
+ if (rc)
+ goto out;
memcpy(&ctx_new.range.level[1].cat,
&ctx_new.range.level[0].cat,
sizeof(ctx_new.range.level[0].cat));
}
- if (mls_context_isvalid(&policydb, &ctx_new) != 1)
- goto netlbl_secattr_to_sid_return_cleanup;
+ rc = -EIDRM;
+ if (!mls_context_isvalid(&policydb, &ctx_new))
+ goto out_free;
rc = sidtab_context_to_sid(&sidtab, &ctx_new, sid);
- if (rc != 0)
- goto netlbl_secattr_to_sid_return_cleanup;
+ if (rc)
+ goto out_free;
security_netlbl_cache_add(secattr, *sid);
ebitmap_destroy(&ctx_new.range.level[0].cat);
- } else {
+ } else
*sid = SECSID_NULL;
- rc = 0;
- }
-netlbl_secattr_to_sid_return:
read_unlock(&policy_rwlock);
- return rc;
-netlbl_secattr_to_sid_return_cleanup:
+ return 0;
+out_free:
ebitmap_destroy(&ctx_new.range.level[0].cat);
- goto netlbl_secattr_to_sid_return;
+out:
+ read_unlock(&policy_rwlock);
+ return rc;
}
/**
@@ -3058,29 +3204,59 @@ int security_netlbl_sid_to_secattr(u32 sid, struct netlbl_lsm_secattr *secattr)
return 0;
read_lock(&policy_rwlock);
+
+ rc = -ENOENT;
ctx = sidtab_search(&sidtab, sid);
- if (ctx == NULL) {
- rc = -ENOENT;
- goto netlbl_sid_to_secattr_failure;
- }
- secattr->domain = kstrdup(policydb.p_type_val_to_name[ctx->type - 1],
+ if (ctx == NULL)
+ goto out;
+
+ rc = -ENOMEM;
+ secattr->domain = kstrdup(sym_name(&policydb, SYM_TYPES, ctx->type - 1),
GFP_ATOMIC);
- if (secattr->domain == NULL) {
- rc = -ENOMEM;
- goto netlbl_sid_to_secattr_failure;
- }
+ if (secattr->domain == NULL)
+ goto out;
+
secattr->attr.secid = sid;
secattr->flags |= NETLBL_SECATTR_DOMAIN_CPY | NETLBL_SECATTR_SECID;
mls_export_netlbl_lvl(ctx, secattr);
rc = mls_export_netlbl_cat(ctx, secattr);
- if (rc != 0)
- goto netlbl_sid_to_secattr_failure;
+out:
read_unlock(&policy_rwlock);
+ return rc;
+}
+#endif /* CONFIG_NETLABEL */
- return 0;
+/**
+ * security_read_policy - read the policy.
+ * @data: binary policy data
+ * @len: length of data in bytes
+ *
+ */
+int security_read_policy(void **data, size_t *len)
+{
+ int rc;
+ struct policy_file fp;
+
+ if (!ss_initialized)
+ return -EINVAL;
-netlbl_sid_to_secattr_failure:
+ *len = security_policydb_len();
+
+ *data = vmalloc_user(*len);
+ if (!*data)
+ return -ENOMEM;
+
+ fp.data = *data;
+ fp.len = *len;
+
+ read_lock(&policy_rwlock);
+ rc = policydb_write(&policydb, &fp);
read_unlock(&policy_rwlock);
- return rc;
+
+ if (rc)
+ return rc;
+
+ *len = (unsigned long)fp.data - (unsigned long)*data;
+ return 0;
+
}
-#endif /* CONFIG_NETLABEL */
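/*
 * Illustrative sketch, not part of the patch: security_read_policy()
 * above serializes the loaded policydb into a vmalloc_user() buffer so
 * that selinuxfs can hand the raw policy image back to userspace. A
 * minimal consumer could look like the program below; the
 * /sys/fs/selinux mount point and the readable "policy" node are
 * assumptions about the selinuxfs side of this series, not something
 * defined in the hunks shown here.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[65536];
	ssize_t n;
	int fd = open("/sys/fs/selinux/policy", O_RDONLY);

	if (fd < 0) {
		perror("open /sys/fs/selinux/policy");
		return 1;
	}
	/* The bytes read back are exactly what policydb_write() produced
	 * inside security_read_policy(). */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, stdout);
	close(fd);
	return 0;
}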
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index e817989764c..5840a35155f 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -147,6 +147,17 @@ out:
return rc;
}
+static void sidtab_update_cache(struct sidtab *s, struct sidtab_node *n, int loc)
+{
+ BUG_ON(loc >= SIDTAB_CACHE_LEN);
+
+ while (loc > 0) {
+ s->cache[loc] = s->cache[loc - 1];
+ loc--;
+ }
+ s->cache[0] = n;
+}
+
static inline u32 sidtab_search_context(struct sidtab *s,
struct context *context)
{
@@ -156,14 +167,33 @@ static inline u32 sidtab_search_context(struct sidtab *s,
for (i = 0; i < SIDTAB_SIZE; i++) {
cur = s->htable[i];
while (cur) {
- if (context_cmp(&cur->context, context))
+ if (context_cmp(&cur->context, context)) {
+ sidtab_update_cache(s, cur, SIDTAB_CACHE_LEN - 1);
return cur->sid;
+ }
cur = cur->next;
}
}
return 0;
}
+static inline u32 sidtab_search_cache(struct sidtab *s, struct context *context)
+{
+ int i;
+ struct sidtab_node *node;
+
+ for (i = 0; i < SIDTAB_CACHE_LEN; i++) {
+ node = s->cache[i];
+ if (unlikely(!node))
+ return 0;
+ if (context_cmp(&node->context, context)) {
+ sidtab_update_cache(s, node, i);
+ return node->sid;
+ }
+ }
+ return 0;
+}
+
int sidtab_context_to_sid(struct sidtab *s,
struct context *context,
u32 *out_sid)
@@ -174,7 +204,9 @@ int sidtab_context_to_sid(struct sidtab *s,
*out_sid = SECSID_NULL;
- sid = sidtab_search_context(s, context);
+ sid = sidtab_search_cache(s, context);
+ if (!sid)
+ sid = sidtab_search_context(s, context);
if (!sid) {
spin_lock_irqsave(&s->lock, flags);
/* Rescan now that we hold the lock. */
@@ -259,12 +291,15 @@ void sidtab_destroy(struct sidtab *s)
void sidtab_set(struct sidtab *dst, struct sidtab *src)
{
unsigned long flags;
+ int i;
spin_lock_irqsave(&src->lock, flags);
dst->htable = src->htable;
dst->nel = src->nel;
dst->next_sid = src->next_sid;
dst->shutdown = 0;
+ for (i = 0; i < SIDTAB_CACHE_LEN; i++)
+ dst->cache[i] = NULL;
spin_unlock_irqrestore(&src->lock, flags);
}
diff --git a/security/selinux/ss/sidtab.h b/security/selinux/ss/sidtab.h
index 64ea5b1cdea..84dc154d938 100644
--- a/security/selinux/ss/sidtab.h
+++ b/security/selinux/ss/sidtab.h
@@ -26,6 +26,8 @@ struct sidtab {
unsigned int nel; /* number of elements */
unsigned int next_sid; /* next SID to allocate */
unsigned char shutdown;
+#define SIDTAB_CACHE_LEN 3
+ struct sidtab_node *cache[SIDTAB_CACHE_LEN];
spinlock_t lock;
};
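/*
 * Illustrative sketch, not part of the patch: the move-to-front
 * behaviour of the SIDTAB_CACHE_LEN-entry cache added above. A hit is
 * promoted to slot 0 and the slots in front of it shift down by one,
 * so the most recently used contexts stay cheapest to find. This is a
 * standalone model, not kernel code.
 */
#include <assert.h>
#include <string.h>

#define CACHE_LEN 3

static void cache_update(int cache[CACHE_LEN], int hit_slot)
{
	int val = cache[hit_slot];

	/* shift slots 0..hit_slot-1 down by one, then install the hit at 0 */
	memmove(&cache[1], &cache[0], hit_slot * sizeof(cache[0]));
	cache[0] = val;
}

int main(void)
{
	int cache[CACHE_LEN] = { 10, 20, 30 };

	cache_update(cache, 2);		/* value 30 becomes most recently used */
	assert(cache[0] == 30 && cache[1] == 10 && cache[2] == 20);
	return 0;
}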
diff --git a/security/selinux/ss/status.c b/security/selinux/ss/status.c
new file mode 100644
index 00000000000..d982365f9d1
--- /dev/null
+++ b/security/selinux/ss/status.c
@@ -0,0 +1,126 @@
+/*
+ * mmap based event notifications for SELinux
+ *
+ * Author: KaiGai Kohei <kaigai@ak.jp.nec.com>
+ *
+ * Copyright (C) 2010 NEC corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2,
+ * as published by the Free Software Foundation.
+ */
+#include <linux/kernel.h>
+#include <linux/gfp.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include "avc.h"
+#include "services.h"
+
+/*
+ * The selinux_status_page is exposed to userspace applications through
+ * the mmap interface on /selinux/status.
+ * It lets the kernel notify applications of the few events that require
+ * a reset of the userspace access vector cache, without a context switch.
+ *
+ * The selinux_kernel_status structure at the head of the status page is
+ * protected against concurrent access with seqlock logic, so userspace
+ * applications must read the status page following the seqlock protocol.
+ *
+ * Typically, an application checks status->sequence at the start of its
+ * access control routine. If the value is odd, the kernel is updating
+ * the status, so the application should wait briefly and retry. If the
+ * value differs from the previously observed sequence number, something
+ * has changed and the application should reset its userspace AVC if
+ * needed.
+ * In most cases the application can confirm that the kernel status is
+ * unchanged without making any system calls.
+ */
+static struct page *selinux_status_page;
+static DEFINE_MUTEX(selinux_status_lock);
+
+/*
+ * selinux_kernel_status_page
+ *
+ * It returns a reference to selinux_status_page. If the status page has
+ * not been allocated yet, it is allocated on demand.
+ */
+struct page *selinux_kernel_status_page(void)
+{
+ struct selinux_kernel_status *status;
+ struct page *result = NULL;
+
+ mutex_lock(&selinux_status_lock);
+ if (!selinux_status_page) {
+ selinux_status_page = alloc_page(GFP_KERNEL|__GFP_ZERO);
+
+ if (selinux_status_page) {
+ status = page_address(selinux_status_page);
+
+ status->version = SELINUX_KERNEL_STATUS_VERSION;
+ status->sequence = 0;
+ status->enforcing = selinux_enforcing;
+ /*
+ * NOTE: the next policyload event will set
+ * status->policyload to a positive value;
+ * it may not be 1, but it is never zero,
+ * so the application can tell that the
+ * policy has been reloaded.
+ */
+ status->policyload = 0;
+ status->deny_unknown = !security_get_allow_unknown();
+ }
+ }
+ result = selinux_status_page;
+ mutex_unlock(&selinux_status_lock);
+
+ return result;
+}
+
+/*
+ * selinux_status_update_setenforce
+ *
+ * Update the status page with the current enforcing/permissive mode.
+ */
+void selinux_status_update_setenforce(int enforcing)
+{
+ struct selinux_kernel_status *status;
+
+ mutex_lock(&selinux_status_lock);
+ if (selinux_status_page) {
+ status = page_address(selinux_status_page);
+
+ status->sequence++;
+ smp_wmb();
+
+ status->enforcing = enforcing;
+
+ smp_wmb();
+ status->sequence++;
+ }
+ mutex_unlock(&selinux_status_lock);
+}
+
+/*
+ * selinux_status_update_policyload
+ *
+ * Update the status page with the number of policy reloads and the
+ * current setting of deny_unknown.
+ */
+void selinux_status_update_policyload(int seqno)
+{
+ struct selinux_kernel_status *status;
+
+ mutex_lock(&selinux_status_lock);
+ if (selinux_status_page) {
+ status = page_address(selinux_status_page);
+
+ status->sequence++;
+ smp_wmb();
+
+ status->policyload = seqno;
+ status->deny_unknown = !security_get_allow_unknown();
+
+ smp_wmb();
+ status->sequence++;
+ }
+ mutex_unlock(&selinux_status_lock);
+}
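/*
 * Illustrative sketch, not part of the patch: how a userspace consumer
 * of the mmap'ed status page would follow the seqlock protocol described
 * in the comment at the top of status.c. The struct below only mirrors
 * the fields touched above; the authoritative definition of
 * struct selinux_kernel_status lives in the kernel headers, and the
 * field types here are an assumption.
 */
#include <stdint.h>

struct selinux_kernel_status_mirror {
	uint32_t version;
	uint32_t sequence;
	uint32_t enforcing;
	uint32_t policyload;
	uint32_t deny_unknown;
};

/* Return a consistent snapshot of the policyload counter. */
static uint32_t read_policyload(const volatile struct selinux_kernel_status_mirror *s)
{
	uint32_t seq, policyload;

	for (;;) {
		seq = s->sequence;
		if (seq & 1)			/* odd: kernel update in progress */
			continue;
		__sync_synchronize();		/* pairs with the kernel's smp_wmb() */
		policyload = s->policyload;
		__sync_synchronize();
		if (seq == s->sequence)		/* unchanged: snapshot is consistent */
			return policyload;
	}
}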
diff --git a/security/selinux/ss/symtab.c b/security/selinux/ss/symtab.c
index 837658a98a5..160326ee99e 100644
--- a/security/selinux/ss/symtab.c
+++ b/security/selinux/ss/symtab.c
@@ -4,7 +4,6 @@
* Author : Stephen Smalley, <sds@epoch.ncsc.mil>
*/
#include <linux/kernel.h>
-#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include "symtab.h"
@@ -37,7 +36,7 @@ int symtab_init(struct symtab *s, unsigned int size)
{
s->table = hashtab_create(symhash, symcmp, size);
if (!s->table)
- return -1;
+ return -ENOMEM;
s->nprim = 0;
return 0;
}
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index f3cb9ed731a..98b042630a9 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -38,6 +38,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_ipv6.h>
+#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/skbuff.h>
@@ -45,7 +46,7 @@
#include <net/xfrm.h>
#include <net/checksum.h>
#include <net/udp.h>
-#include <asm/atomic.h>
+#include <linux/atomic.h>
#include "avc.h"
#include "objsec.h"
@@ -55,7 +56,7 @@
atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0);
/*
- * Returns true if an LSM/SELinux context
+ * Returns true if the context is an LSM/SELinux context.
*/
static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
{
@@ -65,7 +66,7 @@ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
}
/*
- * Returns true if the xfrm contains a security blob for SELinux
+ * Returns true if the xfrm contains a security blob for SELinux.
*/
static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
{
@@ -73,48 +74,112 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
}
/*
- * LSM hook implementation that authorizes that a flow can use
- * a xfrm policy rule.
+ * Allocates an xfrm_sec_ctx and populates it using the supplied security
+ * xfrm_user_sec_ctx context.
*/
-int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
{
int rc;
- u32 sel_sid;
+ const struct task_security_struct *tsec = current_security();
+ struct xfrm_sec_ctx *ctx = NULL;
+ u32 str_len;
- /* Context sid is either set to label or ANY_ASSOC */
- if (ctx) {
- if (!selinux_authorizable_ctx(ctx))
- return -EINVAL;
-
- sel_sid = ctx->ctx_sid;
- } else
- /*
- * All flows should be treated as polmatch'ing an
- * otherwise applicable "non-labeled" policy. This
- * would prevent inadvertent "leaks".
- */
- return 0;
+ if (ctxp == NULL || uctx == NULL ||
+ uctx->ctx_doi != XFRM_SC_DOI_LSM ||
+ uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
+ return -EINVAL;
+
+ str_len = uctx->ctx_len;
+ if (str_len >= PAGE_SIZE)
+ return -ENOMEM;
+
+ ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, &uctx[1], str_len);
+ ctx->ctx_str[str_len] = '\0';
+ rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp);
+ if (rc)
+ goto err;
- rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__POLMATCH,
- NULL);
+ rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
+ if (rc)
+ goto err;
- if (rc == -EACCES)
- return -ESRCH;
+ *ctxp = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+ return 0;
+err:
+ kfree(ctx);
return rc;
}
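/*
 * Illustrative sketch, not part of the patch: the layout that
 * selinux_xfrm_alloc_user() expects, with the context string placed
 * immediately after the fixed xfrm_user_sec_ctx header; that is why the
 * code above copies from &uctx[1]. The struct below mirrors the UAPI
 * layout and the _example suffix marks it as a stand-in, not the real
 * kernel definition.
 */
#include <stdlib.h>
#include <string.h>

struct xfrm_user_sec_ctx_example {	/* mirrors the UAPI xfrm_user_sec_ctx */
	unsigned short len;
	unsigned short exttype;
	unsigned char ctx_alg;
	unsigned char ctx_doi;
	unsigned short ctx_len;
	/* the context string follows the header in memory */
};

static void *build_sec_ctx(const char *context)
{
	size_t ctx_len = strlen(context);
	struct xfrm_user_sec_ctx_example *uctx;

	uctx = malloc(sizeof(*uctx) + ctx_len);
	if (!uctx)
		return NULL;
	uctx->len = sizeof(*uctx) + ctx_len;
	uctx->exttype = 0;		/* XFRMA_SEC_CTX in a real netlink message */
	uctx->ctx_alg = 1;		/* XFRM_SC_ALG_SELINUX */
	uctx->ctx_doi = 1;		/* XFRM_SC_DOI_LSM */
	uctx->ctx_len = ctx_len;
	memcpy(uctx + 1, context, ctx_len);	/* same &uctx[1] convention as above */
	return uctx;
}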
/*
+ * Free the xfrm_sec_ctx structure.
+ */
+static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ atomic_dec(&selinux_xfrm_refcount);
+ kfree(ctx);
+}
+
+/*
+ * Authorize the deletion of a labeled SA or policy rule.
+ */
+static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
+{
+ const struct task_security_struct *tsec = current_security();
+
+ if (!ctx)
+ return 0;
+
+ return avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+ NULL);
+}
+
+/*
+ * LSM hook implementation that authorizes that a flow can use a xfrm policy
+ * rule.
+ */
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+{
+ int rc;
+
+ /* All flows should be treated as polmatch'ing an otherwise applicable
+ * "non-labeled" policy. This would prevent inadvertent "leaks". */
+ if (!ctx)
+ return 0;
+
+ /* Context sid is either set to label or ANY_ASSOC */
+ if (!selinux_authorizable_ctx(ctx))
+ return -EINVAL;
+
+ rc = avc_has_perm(fl_secid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
+ return (rc == -EACCES ? -ESRCH : rc);
+}
+
+/*
* LSM hook implementation that authorizes that a state matches
* the given policy, flow combo.
*/
-
-int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp,
- struct flowi *fl)
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi *fl)
{
u32 state_sid;
- int rc;
if (!xp->security)
if (x->security)
@@ -134,189 +199,115 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
state_sid = x->security->ctx_sid;
- if (fl->secid != state_sid)
+ if (fl->flowi_secid != state_sid)
return 0;
- rc = avc_has_perm(fl->secid, state_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__SENDTO,
- NULL)? 0:1;
-
- /*
- * We don't need a separate SA Vs. policy polmatch check
- * since the SA is now of the same label as the flow and
- * a flow Vs. policy polmatch check had already happened
- * in selinux_xfrm_policy_lookup() above.
- */
-
- return rc;
+ /* We don't need a separate SA Vs. policy polmatch check since the SA
+ * is now of the same label as the flow and a flow Vs. policy polmatch
+ * check had already happened in selinux_xfrm_policy_lookup() above. */
+ return (avc_has_perm(fl->flowi_secid, state_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
+ NULL) ? 0 : 1);
}
-/*
- * LSM hook implementation that checks and/or returns the xfrm sid for the
- * incoming packet.
- */
-
-int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
{
- struct sec_path *sp;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x;
- *sid = SECSID_NULL;
+ if (dst == NULL)
+ return SECSID_NULL;
+ x = dst->xfrm;
+ if (x == NULL || !selinux_authorizable_xfrm(x))
+ return SECSID_NULL;
- if (skb == NULL)
- return 0;
+ return x->security->ctx_sid;
+}
+
+static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
+ u32 *sid, int ckall)
+{
+ u32 sid_session = SECSID_NULL;
+ struct sec_path *sp = skb->sp;
- sp = skb->sp;
if (sp) {
- int i, sid_set = 0;
+ int i;
- for (i = sp->len-1; i >= 0; i--) {
+ for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
if (selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
- if (!sid_set) {
- *sid = ctx->ctx_sid;
- sid_set = 1;
-
+ if (sid_session == SECSID_NULL) {
+ sid_session = ctx->ctx_sid;
if (!ckall)
- break;
- } else if (*sid != ctx->ctx_sid)
+ goto out;
+ } else if (sid_session != ctx->ctx_sid) {
+ *sid = SECSID_NULL;
return -EINVAL;
+ }
}
}
}
+out:
+ *sid = sid_session;
return 0;
}
/*
- * Security blob allocation for xfrm_policy and xfrm_state
- * CTX does not have a meaningful value on input
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
*/
-static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx, u32 sid)
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
{
- int rc = 0;
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = NULL;
- char *ctx_str = NULL;
- u32 str_len;
-
- BUG_ON(uctx && sid);
-
- if (!uctx)
- goto not_from_user;
-
- if (uctx->ctx_doi != XFRM_SC_ALG_SELINUX)
- return -EINVAL;
-
- str_len = uctx->ctx_len;
- if (str_len >= PAGE_SIZE)
- return -ENOMEM;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len + 1,
- GFP_KERNEL);
-
- if (!ctx)
- return -ENOMEM;
-
- ctx->ctx_doi = uctx->ctx_doi;
- ctx->ctx_len = str_len;
- ctx->ctx_alg = uctx->ctx_alg;
-
- memcpy(ctx->ctx_str,
- uctx+1,
- str_len);
- ctx->ctx_str[str_len] = 0;
- rc = security_context_to_sid(ctx->ctx_str,
- str_len,
- &ctx->ctx_sid);
-
- if (rc)
- goto out;
-
- /*
- * Does the subject have permission to set security context?
- */
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc)
- goto out;
-
- return rc;
-
-not_from_user:
- rc = security_sid_to_context(sid, &ctx_str, &str_len);
- if (rc)
- goto out;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len,
- GFP_ATOMIC);
-
- if (!ctx) {
- rc = -ENOMEM;
- goto out;
+ if (skb == NULL) {
+ *sid = SECSID_NULL;
+ return 0;
}
+ return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+}
- ctx->ctx_doi = XFRM_SC_DOI_LSM;
- ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
- ctx->ctx_sid = sid;
- ctx->ctx_len = str_len;
- memcpy(ctx->ctx_str,
- ctx_str,
- str_len);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+ int rc;
- goto out2;
+ rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+ if (rc == 0 && *sid == SECSID_NULL)
+ *sid = selinux_xfrm_skb_sid_egress(skb);
-out:
- *ctxp = NULL;
- kfree(ctx);
-out2:
- kfree(ctx_str);
return rc;
}
/*
- * LSM hook implementation that allocs and transfers uctx spec to
- * xfrm_policy.
+ * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
*/
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx)
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
{
- int err;
-
- BUG_ON(!uctx);
-
- err = selinux_xfrm_sec_ctx_alloc(ctxp, uctx, 0);
- if (err == 0)
- atomic_inc(&selinux_xfrm_refcount);
-
- return err;
+ return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
}
-
/*
- * LSM hook implementation that copies security data structure from old to
- * new for policy cloning.
+ * LSM hook implementation that copies security data structure from old to new
+ * for policy cloning.
*/
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp)
{
struct xfrm_sec_ctx *new_ctx;
- if (old_ctx) {
- new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
- GFP_KERNEL);
- if (!new_ctx)
- return -ENOMEM;
+ if (!old_ctx)
+ return 0;
+
+ new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len,
+ GFP_ATOMIC);
+ if (!new_ctx)
+ return -ENOMEM;
+ atomic_inc(&selinux_xfrm_refcount);
+ *new_ctxp = new_ctx;
- memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
- memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
- *new_ctxp = new_ctx;
- }
return 0;
}
@@ -325,7 +316,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
*/
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
- kfree(ctx);
+ selinux_xfrm_free(ctx);
}
/*
@@ -333,35 +324,58 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
*/
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
- const struct task_security_struct *tsec = current_security();
- int rc = 0;
-
- if (ctx) {
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc == 0)
- atomic_dec(&selinux_xfrm_refcount);
- }
+ return selinux_xfrm_delete(ctx);
+}
- return rc;
+/*
+ * LSM hook implementation that allocates an xfrm_sec_ctx, populates it using
+ * the supplied security context, and assigns it to the xfrm_state.
+ */
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *uctx)
+{
+ return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
}
/*
- * LSM hook implementation that allocs and transfers sec_ctx spec to
- * xfrm_state.
+ * LSM hook implementation that allocates an xfrm_sec_ctx and populates it
+ * based on a secid.
*/
-int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx,
- u32 secid)
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
{
- int err;
+ int rc;
+ struct xfrm_sec_ctx *ctx;
+ char *ctx_str = NULL;
+ int str_len;
+
+ if (!polsec)
+ return 0;
- BUG_ON(!x);
+ if (secid == 0)
+ return -EINVAL;
+
+ rc = security_sid_to_context(secid, &ctx_str, &str_len);
+ if (rc)
+ return rc;
+
+ ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_sid = secid;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, ctx_str, str_len);
- err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid);
- if (err == 0)
- atomic_inc(&selinux_xfrm_refcount);
- return err;
+ x->security = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+out:
+ kfree(ctx_str);
+ return rc;
}
/*
@@ -369,28 +383,15 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
*/
void selinux_xfrm_state_free(struct xfrm_state *x)
{
- struct xfrm_sec_ctx *ctx = x->security;
- kfree(ctx);
+ selinux_xfrm_free(x->security);
}
- /*
- * LSM hook implementation that authorizes deletion of labeled SAs.
- */
+/*
+ * LSM hook implementation that authorizes deletion of labeled SAs.
+ */
int selinux_xfrm_state_delete(struct xfrm_state *x)
{
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = x->security;
- int rc = 0;
-
- if (ctx) {
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc == 0)
- atomic_dec(&selinux_xfrm_refcount);
- }
-
- return rc;
+ return selinux_xfrm_delete(x->security);
}
/*
@@ -400,14 +401,12 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
* we need to check for unlabelled access since this may not have
* gone thru the IPSec process.
*/
-int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad)
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
{
- int i, rc = 0;
- struct sec_path *sp;
- u32 sel_sid = SECINITSID_UNLABELED;
-
- sp = skb->sp;
+ int i;
+ struct sec_path *sp = skb->sp;
+ u32 peer_sid = SECINITSID_UNLABELED;
if (sp) {
for (i = 0; i < sp->len; i++) {
@@ -415,23 +414,17 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
if (x && selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
- sel_sid = ctx->ctx_sid;
+ peer_sid = ctx->ctx_sid;
break;
}
}
}
- /*
- * This check even when there's no association involved is
- * intended, according to Trent Jaeger, to make sure a
- * process can't engage in non-ipsec communication unless
- * explicitly allowed by policy.
- */
-
- rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__RECVFROM, ad);
-
- return rc;
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(sk_sid, peer_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
}
/*
@@ -441,49 +434,38 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
* If we do have an authorizable security association, then it has already been
* checked in the selinux_xfrm_state_pol_flow_match hook above.
*/
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto)
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto)
{
struct dst_entry *dst;
- int rc = 0;
-
- dst = skb_dst(skb);
-
- if (dst) {
- struct dst_entry *dst_test;
-
- for (dst_test = dst; dst_test != NULL;
- dst_test = dst_test->child) {
- struct xfrm_state *x = dst_test->xfrm;
-
- if (x && selinux_authorizable_xfrm(x))
- goto out;
- }
- }
switch (proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
case IPPROTO_COMP:
- /*
- * We should have already seen this packet once before
- * it underwent xfrm(s). No need to subject it to the
- * unlabeled check.
- */
- goto out;
+ /* We should have already seen this packet once before it
+ * underwent xfrm(s). No need to subject it to the unlabeled
+ * check. */
+ return 0;
default:
break;
}
- /*
- * This check even when there's no association involved is
- * intended, according to Trent Jaeger, to make sure a
- * process can't engage in non-ipsec communication unless
- * explicitly allowed by policy.
- */
+ dst = skb_dst(skb);
+ if (dst) {
+ struct dst_entry *iter;
+
+ for (iter = dst; iter != NULL; iter = iter->child) {
+ struct xfrm_state *x = iter->xfrm;
- rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION,
- ASSOCIATION__SENDTO, ad);
-out:
- return rc;
+ if (x && selinux_authorizable_xfrm(x))
+ return 0;
+ }
+ }
+
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
}
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
index 603b0878434..e69de9c642b 100644
--- a/security/smack/Kconfig
+++ b/security/smack/Kconfig
@@ -1,6 +1,10 @@
config SECURITY_SMACK
bool "Simplified Mandatory Access Control Kernel Support"
- depends on NETLABEL && SECURITY_NETWORK
+ depends on NET
+ depends on INET
+ depends on SECURITY
+ select NETLABEL
+ select SECURITY_NETWORK
default n
help
This selects the Simplified Mandatory Access Control Kernel.
diff --git a/security/smack/smack.h b/security/smack/smack.h
index c6e9acae72e..020307ef097 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -23,13 +23,52 @@
#include <linux/lsm_audit.h>
/*
+ * Smack labels were limited to 23 characters for a long time.
+ */
+#define SMK_LABELLEN 24
+#define SMK_LONGLABEL 256
+
+/*
+ * This is the repository for labels seen so that it is
+ * not necessary to keep allocating tiny chunks of memory
+ * and so that they can be shared.
+ *
+ * Labels are never modified in place. Anytime a label
+ * is imported (e.g. xattrset on a file) the list is checked
+ * for it and it is added if it doesn't exist. The address
+ * is passed out in either case. Entries are added, but
+ * never deleted.
+ *
+ * Since labels are hanging around anyway it doesn't
+ * hurt to maintain a secid for those awkward situations
+ * where kernel components that ought to use LSM independent
+ * interfaces don't. The secid should go away when all of
+ * these components have been repaired.
+ *
+ * The cipso value associated with the label gets stored here, too.
+ *
+ * Keep the access rules for this subject label here so that
+ * the entire set of rules does not need to be examined every
+ * time.
+ */
+struct smack_known {
+ struct list_head list;
+ struct hlist_node smk_hashed;
+ char *smk_known;
+ u32 smk_secid;
+ struct netlbl_lsm_secattr smk_netlabel; /* on wire labels */
+ struct list_head smk_rules; /* access rules */
+ struct mutex smk_rules_lock; /* lock for rules */
+};
+
+/*
+ * Maximum number of bytes for the levels in a CIPSO IP option.
* Why 23? CIPSO is constrained to 30, so a 32 byte buffer is
* bigger than can be used, and 24 is the next lower multiple
* of 8, and there are too many issues if there isn't space set
* aside for the terminating null byte.
*/
-#define SMK_MAXLEN 23
-#define SMK_LABELLEN (SMK_MAXLEN+1)
+#define SMK_CIPSOLEN 24
struct superblock_smack {
char *smk_root;
@@ -37,46 +76,47 @@ struct superblock_smack {
char *smk_hat;
char *smk_default;
int smk_initialized;
- spinlock_t smk_sblock; /* for initialization */
};
struct socket_smack {
- char *smk_out; /* outbound label */
- char *smk_in; /* inbound label */
- char smk_packet[SMK_LABELLEN]; /* TCP peer label */
+ struct smack_known *smk_out; /* outbound label */
+ struct smack_known *smk_in; /* inbound label */
+ struct smack_known *smk_packet; /* TCP peer label */
};
/*
* Inode smack data
*/
struct inode_smack {
- char *smk_inode; /* label of the fso */
- struct mutex smk_lock; /* initialization lock */
- int smk_flags; /* smack inode flags */
+ char *smk_inode; /* label of the fso */
+ struct smack_known *smk_task; /* label of the task */
+ struct smack_known *smk_mmap; /* label of the mmap domain */
+ struct mutex smk_lock; /* initialization lock */
+ int smk_flags; /* smack inode flags */
+};
+
+struct task_smack {
+ struct smack_known *smk_task; /* label for access control */
+ struct smack_known *smk_forked; /* label when forked */
+ struct list_head smk_rules; /* per task access rules */
+ struct mutex smk_rules_lock; /* lock for the rules */
};
#define SMK_INODE_INSTANT 0x01 /* inode is instantiated */
+#define SMK_INODE_TRANSMUTE 0x02 /* directory is transmuting */
+#define SMK_INODE_CHANGED 0x04 /* smack was transmuted */
/*
* A label access rule.
*/
struct smack_rule {
struct list_head list;
- char *smk_subject;
+ struct smack_known *smk_subject;
char *smk_object;
int smk_access;
};
/*
- * An entry in the table mapping smack values to
- * CIPSO level/category-set values.
- */
-struct smack_cipso {
- int smk_level;
- char smk_catset[SMK_LABELLEN];
-};
-
-/*
* An entry in the table identifying hosts.
*/
struct smk_netlbladdr {
@@ -87,32 +127,14 @@ struct smk_netlbladdr {
};
/*
- * This is the repository for labels seen so that it is
- * not necessary to keep allocating tiny chuncks of memory
- * and so that they can be shared.
- *
- * Labels are never modified in place. Anytime a label
- * is imported (e.g. xattrset on a file) the list is checked
- * for it and it is added if it doesn't exist. The address
- * is passed out in either case. Entries are added, but
- * never deleted.
- *
- * Since labels are hanging around anyway it doesn't
- * hurt to maintain a secid for those awkward situations
- * where kernel components that ought to use LSM independent
- * interfaces don't. The secid should go away when all of
- * these components have been repaired.
- *
- * If there is a cipso value associated with the label it
- * gets stored here, too. This will most likely be rare as
- * the cipso direct mapping in used internally.
+ * An entry in the table identifying ports.
*/
-struct smack_known {
+struct smk_port_label {
struct list_head list;
- char smk_known[SMK_LABELLEN];
- u32 smk_secid;
- struct smack_cipso *smk_cipso;
- spinlock_t smk_cipsolock; /* for changing cipso map */
+ struct sock *smk_sock; /* socket initialized on */
+ unsigned short smk_port; /* the port number */
+ struct smack_known *smk_in; /* inbound label */
+ struct smack_known *smk_out; /* outgoing label */
};
/*
@@ -122,16 +144,7 @@ struct smack_known {
#define SMK_FSFLOOR "smackfsfloor="
#define SMK_FSHAT "smackfshat="
#define SMK_FSROOT "smackfsroot="
-
-/*
- * xattr names
- */
-#define XATTR_SMACK_SUFFIX "SMACK64"
-#define XATTR_SMACK_IPIN "SMACK64IPIN"
-#define XATTR_SMACK_IPOUT "SMACK64IPOUT"
-#define XATTR_NAME_SMACK XATTR_SECURITY_PREFIX XATTR_SMACK_SUFFIX
-#define XATTR_NAME_SMACKIPIN XATTR_SECURITY_PREFIX XATTR_SMACK_IPIN
-#define XATTR_NAME_SMACKIPOUT XATTR_SECURITY_PREFIX XATTR_SMACK_IPOUT
+#define SMK_FSTRANS "smackfstransmute="
#define SMACK_CIPSO_OPTION "-CIPSO"
@@ -149,40 +162,56 @@ struct smack_known {
#define SMACK_CIPSO_SOCKET 1
/*
- * smackfs magic number
- * smackfs macic number
+ * CIPSO defaults.
+ */
+#define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */
+#define SMACK_CIPSO_DOI_INVALID -1 /* Not a DOI */
+#define SMACK_CIPSO_DIRECT_DEFAULT 250 /* Arbitrary */
+#define SMACK_CIPSO_MAPPED_DEFAULT 251 /* Also arbitrary */
+#define SMACK_CIPSO_MAXLEVEL 255 /* CIPSO 2.2 standard */
+/*
+ * CIPSO 2.2 standard is 239, but Smack wants to use the
+ * categories in a structured way that limits the value to
+ * the bits in 23 bytes, hence the unusual number.
*/
-#define SMACK_MAGIC 0x43415d53 /* "SMAC" */
+#define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */
/*
- * A limit on the number of entries in the lists
- * makes some of the list administration easier.
+ * Ptrace rules
*/
-#define SMACK_LIST_MAX 10000
+#define SMACK_PTRACE_DEFAULT 0
+#define SMACK_PTRACE_EXACT 1
+#define SMACK_PTRACE_DRACONIAN 2
+#define SMACK_PTRACE_MAX SMACK_PTRACE_DRACONIAN
/*
- * CIPSO defaults.
+ * Flags for untraditional access modes.
+ * It shouldn't be necessary to avoid conflicts with definitions
+ * in fs.h, but do so anyway.
*/
-#define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */
-#define SMACK_CIPSO_DOI_INVALID -1 /* Not a DOI */
-#define SMACK_CIPSO_DIRECT_DEFAULT 250 /* Arbitrary */
-#define SMACK_CIPSO_MAXCATVAL 63 /* Bigger gets harder */
-#define SMACK_CIPSO_MAXLEVEL 255 /* CIPSO 2.2 standard */
-#define SMACK_CIPSO_MAXCATNUM 239 /* CIPSO 2.2 standard */
+#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */
+#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
/*
* Just to make the common cases easier to deal with
*/
-#define MAY_ANY (MAY_READ | MAY_WRITE | MAY_APPEND | MAY_EXEC)
#define MAY_ANYREAD (MAY_READ | MAY_EXEC)
-#define MAY_ANYWRITE (MAY_WRITE | MAY_APPEND)
#define MAY_READWRITE (MAY_READ | MAY_WRITE)
#define MAY_NOT 0
/*
- * Number of access types used by Smack (rwxa)
+ * Number of access types used by Smack (rwxatl)
*/
-#define SMK_NUM_ACCESS_TYPE 4
+#define SMK_NUM_ACCESS_TYPE 6
+
+/* SMACK data */
+struct smack_audit_data {
+ const char *function;
+ char *subject;
+ char *object;
+ char *request;
+ int result;
+};
/*
* Smack audit data; is empty if CONFIG_AUDIT not set
@@ -191,6 +220,7 @@ struct smack_known {
struct smk_audit_info {
#ifdef CONFIG_AUDIT
struct common_audit_data a;
+ struct smack_audit_data sad;
#endif
};
/*
@@ -201,22 +231,29 @@ struct inode_smack *new_inode_smack(char *);
/*
* These functions are in smack_access.c
*/
-int smk_access(char *, char *, int, struct smk_audit_info *);
+int smk_access_entry(char *, char *, struct list_head *);
+int smk_access(struct smack_known *, char *, int, struct smk_audit_info *);
+int smk_tskacc(struct task_smack *, char *, u32, struct smk_audit_info *);
int smk_curacc(char *, u32, struct smk_audit_info *);
-int smack_to_cipso(const char *, struct smack_cipso *);
-void smack_from_cipso(u32, char *, char *);
-char *smack_from_secid(const u32);
+struct smack_known *smack_from_secid(const u32);
+char *smk_parse_smack(const char *string, int len);
+int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int);
char *smk_import(const char *, int);
struct smack_known *smk_import_entry(const char *, int);
+void smk_insert_entry(struct smack_known *skp);
+struct smack_known *smk_find_entry(const char *);
u32 smack_to_secid(const char *);
/*
* Shared data.
*/
extern int smack_cipso_direct;
-extern char *smack_net_ambient;
-extern char *smack_onlycap;
+extern int smack_cipso_mapped;
+extern struct smack_known *smack_net_ambient;
+extern struct smack_known *smack_onlycap;
+extern struct smack_known *smack_syslog_label;
extern const char *smack_cipso_option;
+extern int smack_ptrace_rule;
extern struct smack_known smack_known_floor;
extern struct smack_known smack_known_hat;
@@ -225,22 +262,22 @@ extern struct smack_known smack_known_invalid;
extern struct smack_known smack_known_star;
extern struct smack_known smack_known_web;
+extern struct mutex smack_known_lock;
extern struct list_head smack_known_list;
-extern struct list_head smack_rule_list;
extern struct list_head smk_netlbladdr_list;
extern struct security_operations smack_ops;
+#define SMACK_HASH_SLOTS 16
+extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+
/*
- * Stricly for CIPSO level manipulation.
- * Set the category bit number in a smack label sized buffer.
+ * Is the directory transmuting?
*/
-static inline void smack_catset_bit(int cat, char *catsetp)
+static inline int smk_inode_transmutable(const struct inode *isp)
{
- if (cat > SMK_LABELLEN * 8)
- return;
-
- catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
+ struct inode_smack *sip = isp->i_security;
+ return (sip->smk_flags & SMK_INODE_TRANSMUTE) != 0;
}
/*
@@ -253,6 +290,45 @@ static inline char *smk_of_inode(const struct inode *isp)
}
/*
+ * Present a pointer to the smack label entry in a task blob.
+ */
+static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
+{
+ return tsp->smk_task;
+}
+
+/*
+ * Present a pointer to the forked smack label entry in a task blob.
+ */
+static inline struct smack_known *smk_of_forked(const struct task_smack *tsp)
+{
+ return tsp->smk_forked;
+}
+
+/*
+ * Present a pointer to the smack label in the current task blob.
+ */
+static inline struct smack_known *smk_of_current(void)
+{
+ return smk_of_task(current_security());
+}
+
+/*
+ * Is the task privileged and allowed to be privileged
+ * by the onlycap rule.
+ */
+static inline int smack_privileged(int cap)
+{
+ struct smack_known *skp = smk_of_current();
+
+ if (!capable(cap))
+ return 0;
+ if (smack_onlycap == NULL || smack_onlycap == skp)
+ return 1;
+ return 0;
+}
+
+/*
* logging functions
*/
#define SMACK_AUDIT_DENIED 0x1
@@ -273,9 +349,18 @@ void smack_log(char *subject_label, char *object_label,
static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
char type)
{
- memset(a, 0, sizeof(*a));
+ memset(&a->sad, 0, sizeof(a->sad));
a->a.type = type;
- a->a.smack_audit_data.function = func;
+ a->a.smack_audit_data = &a->sad;
+ a->a.smack_audit_data->function = func;
+}
+
+static inline void smk_ad_init_net(struct smk_audit_info *a, const char *func,
+ char type, struct lsm_network_audit *net)
+{
+ smk_ad_init(a, func, type);
+ memset(net, 0, sizeof(*net));
+ a->a.u.net = net;
}
static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
@@ -286,27 +371,22 @@ static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
static inline void smk_ad_setfield_u_fs_path_dentry(struct smk_audit_info *a,
struct dentry *d)
{
- a->a.u.fs.path.dentry = d;
-}
-static inline void smk_ad_setfield_u_fs_path_mnt(struct smk_audit_info *a,
- struct vfsmount *m)
-{
- a->a.u.fs.path.mnt = m;
+ a->a.u.dentry = d;
}
static inline void smk_ad_setfield_u_fs_inode(struct smk_audit_info *a,
struct inode *i)
{
- a->a.u.fs.inode = i;
+ a->a.u.inode = i;
}
static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a,
struct path p)
{
- a->a.u.fs.path = p;
+ a->a.u.path = p;
}
static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
struct sock *sk)
{
- a->a.u.net.sk = sk;
+ a->a.u.net->sk = sk;
}
#else /* no AUDIT */
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index 0f9ac814690..c062e9467b6 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -11,6 +11,7 @@
*/
#include <linux/types.h>
+#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/sched.h>
#include "smack.h"
@@ -18,37 +19,31 @@
struct smack_known smack_known_huh = {
.smk_known = "?",
.smk_secid = 2,
- .smk_cipso = NULL,
};
struct smack_known smack_known_hat = {
.smk_known = "^",
.smk_secid = 3,
- .smk_cipso = NULL,
};
struct smack_known smack_known_star = {
.smk_known = "*",
.smk_secid = 4,
- .smk_cipso = NULL,
};
struct smack_known smack_known_floor = {
.smk_known = "_",
.smk_secid = 5,
- .smk_cipso = NULL,
};
struct smack_known smack_known_invalid = {
.smk_known = "",
.smk_secid = 6,
- .smk_cipso = NULL,
};
struct smack_known smack_known_web = {
.smk_known = "@",
.smk_secid = 7,
- .smk_cipso = NULL,
};
LIST_HEAD(smack_known_list);
@@ -66,9 +61,58 @@ static u32 smack_next_secid = 10;
int log_policy = SMACK_AUDIT_DENIED;
/**
- * smk_access - determine if a subject has a specific access to an object
+ * smk_access_entry - look up matching access rule
* @subject_label: a pointer to the subject's Smack label
* @object_label: a pointer to the object's Smack label
+ * @rule_list: the list of rules to search
+ *
+ * This function looks up the subject/object pair in the
+ * access rule list and returns the access mode. If no
+ * entry is found, -ENOENT is returned.
+ *
+ * NOTE:
+ *
+ * Earlier versions of this function allowed for labels that
+ * were not on the label list. This was done to allow for
+ * labels to come over the network that had never been seen
+ * before on this host. Unless the receiving socket has the
+ * star label this will always result in a failure check. The
+ * star labeled socket case is now handled in the networking
+ * hooks so there is no case where the label is not on the
+ * label list. Checking to see if the address of two labels
+ * is the same is now a reliable test.
+ *
+ * Do the object check first because that is more
+ * likely to differ.
+ *
+ * Allowing write access implies allowing locking.
+ */
+int smk_access_entry(char *subject_label, char *object_label,
+ struct list_head *rule_list)
+{
+ int may = -ENOENT;
+ struct smack_rule *srp;
+
+ list_for_each_entry_rcu(srp, rule_list, list) {
+ if (srp->smk_object == object_label &&
+ srp->smk_subject->smk_known == subject_label) {
+ may = srp->smk_access;
+ break;
+ }
+ }
+
+ /*
+ * MAY_WRITE implies MAY_LOCK.
+ */
+ if ((may & MAY_WRITE) == MAY_WRITE)
+ may |= MAY_LOCK;
+ return may;
+}
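The return convention matters to callers: a negative value (-ENOENT) means no rule exists for the pair, while a non-negative value is a MAY_* bitmask that must contain every requested bit. A hedged user-space sketch of how a caller interprets that result; the MAY_* values here are invented for the sketch, not the kernel's definitions:

/* Interpreting a rule lookup result (user-space model, invented values). */
#include <stdio.h>
#include <errno.h>

#define MAY_READ	0x1
#define MAY_WRITE	0x2
#define MAY_LOCK	0x4

static int check(int may, int request)
{
	if (may < 0)				/* -ENOENT: no rule at all */
		return -EACCES;
	if ((request & may) == request)		/* every requested bit granted */
		return 0;
	return -EACCES;
}

int main(void)
{
	int may = MAY_READ | MAY_WRITE | MAY_LOCK;	/* write implies lock */

	printf("%d\n", check(may, MAY_WRITE | MAY_LOCK));	/* 0 */
	printf("%d\n", check(-ENOENT, MAY_READ));		/* -EACCES */
	return 0;
}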
+
+/**
+ * smk_access - determine if a subject has a specific access to an object
+ * @subject_known: a pointer to the subject's Smack label entry
+ * @object_label: a pointer to the object's Smack label
* @request: the access requested, in "MAY" format
* @a : a pointer to the audit data
*
@@ -76,20 +120,12 @@ int log_policy = SMACK_AUDIT_DENIED;
* access rule list and returns 0 if the access is permitted,
* non zero otherwise.
*
- * Even though Smack labels are usually shared on smack_list
- * labels that come in off the network can't be imported
- * and added to the list for locking reasons.
- *
- * Therefore, it is necessary to check the contents of the labels,
- * not just the pointer values. Of course, in most cases the labels
- * will be on the list, so checking the pointers may be a worthwhile
- * optimization.
+ * Smack labels are shared on the smack_known_list.
*/
-int smk_access(char *subject_label, char *object_label, int request,
- struct smk_audit_info *a)
+int smk_access(struct smack_known *subject_known, char *object_label,
+ int request, struct smk_audit_info *a)
{
- u32 may = MAY_NOT;
- struct smack_rule *srp;
+ int may = MAY_NOT;
int rc = 0;
/*
@@ -97,8 +133,7 @@ int smk_access(char *subject_label, char *object_label, int request,
*
* A star subject can't access any object.
*/
- if (subject_label == smack_known_star.smk_known ||
- strcmp(subject_label, smack_known_star.smk_known) == 0) {
+ if (subject_known == &smack_known_star) {
rc = -EACCES;
goto out_audit;
}
@@ -108,107 +143,123 @@ int smk_access(char *subject_label, char *object_label, int request,
* An internet subject can access any object.
*/
if (object_label == smack_known_web.smk_known ||
- subject_label == smack_known_web.smk_known ||
- strcmp(object_label, smack_known_web.smk_known) == 0 ||
- strcmp(subject_label, smack_known_web.smk_known) == 0)
+ subject_known == &smack_known_web)
goto out_audit;
/*
* A star object can be accessed by any subject.
*/
- if (object_label == smack_known_star.smk_known ||
- strcmp(object_label, smack_known_star.smk_known) == 0)
+ if (object_label == smack_known_star.smk_known)
goto out_audit;
/*
* An object can be accessed in any way by a subject
* with the same label.
*/
- if (subject_label == object_label ||
- strcmp(subject_label, object_label) == 0)
+ if (subject_known->smk_known == object_label)
goto out_audit;
/*
* A hat subject can read any object.
* A floor object can be read by any subject.
*/
if ((request & MAY_ANYREAD) == request) {
- if (object_label == smack_known_floor.smk_known ||
- strcmp(object_label, smack_known_floor.smk_known) == 0)
+ if (object_label == smack_known_floor.smk_known)
goto out_audit;
- if (subject_label == smack_known_hat.smk_known ||
- strcmp(subject_label, smack_known_hat.smk_known) == 0)
+ if (subject_known == &smack_known_hat)
goto out_audit;
}
/*
* Beyond here an explicit relationship is required.
* If the requested access is contained in the available
* access (e.g. read is included in readwrite) it's
- * good.
+ * good. A negative response from smk_access_entry()
+ * indicates there is no entry for this pair.
*/
rcu_read_lock();
- list_for_each_entry_rcu(srp, &smack_rule_list, list) {
- if (srp->smk_subject == subject_label ||
- strcmp(srp->smk_subject, subject_label) == 0) {
- if (srp->smk_object == object_label ||
- strcmp(srp->smk_object, object_label) == 0) {
- may = srp->smk_access;
- break;
- }
- }
- }
+ may = smk_access_entry(subject_known->smk_known, object_label,
+ &subject_known->smk_rules);
rcu_read_unlock();
- /*
- * This is a bit map operation.
- */
- if ((request & may) == request)
+
+ if (may > 0 && (request & may) == request)
goto out_audit;
rc = -EACCES;
out_audit:
#ifdef CONFIG_AUDIT
if (a)
- smack_log(subject_label, object_label, request, rc, a);
+ smack_log(subject_known->smk_known, object_label, request,
+ rc, a);
#endif
return rc;
}
/**
- * smk_curacc - determine if current has a specific access to an object
+ * smk_tskacc - determine if a task has a specific access to an object
+ * @tsp: a pointer to the subject task
* @obj_label: a pointer to the object's Smack label
* @mode: the access requested, in "MAY" format
* @a : common audit data
*
- * This function checks the current subject label/object label pair
+ * This function checks the subject task's label/object label pair
* in the access rule list and returns 0 if the access is permitted,
- * non zero otherwise. It allows that current may have the capability
+ * non zero otherwise. It allows that the task may have the capability
* to override the rules.
*/
-int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
+int smk_tskacc(struct task_smack *subject, char *obj_label,
+ u32 mode, struct smk_audit_info *a)
{
+ struct smack_known *skp = smk_of_task(subject);
+ int may;
int rc;
- char *sp = current_security();
-
- rc = smk_access(sp, obj_label, mode, NULL);
- if (rc == 0)
- goto out_audit;
/*
- * Return if a specific label has been designated as the
- * only one that gets privilege and current does not
- * have that label.
+ * Check the global rule list
*/
- if (smack_onlycap != NULL && smack_onlycap != current->cred->security)
- goto out_audit;
+ rc = smk_access(skp, obj_label, mode, NULL);
+ if (rc == 0) {
+ /*
+ * If there is an entry in the task's rule list
+ * it can further restrict access.
+ */
+ may = smk_access_entry(skp->smk_known, obj_label,
+ &subject->smk_rules);
+ if (may < 0)
+ goto out_audit;
+ if ((mode & may) == mode)
+ goto out_audit;
+ rc = -EACCES;
+ }
- if (capable(CAP_MAC_OVERRIDE))
- return 0;
+ /*
+ * Allow a privileged task to override policy.
+ */
+ if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE))
+ rc = 0;
out_audit:
#ifdef CONFIG_AUDIT
if (a)
- smack_log(sp, obj_label, mode, rc, a);
+ smack_log(skp->smk_known, obj_label, mode, rc, a);
#endif
return rc;
}
+/**
+ * smk_curacc - determine if current has a specific access to an object
+ * @obj_label: a pointer to the object's Smack label
+ * @mode: the access requested, in "MAY" format
+ * @a : common audit data
+ *
+ * This function checks the current subject label/object label pair
+ * in the access rule list and returns 0 if the access is permitted,
+ * non zero otherwise. It allows that current may have the capability
+ * to override the rules.
+ */
+int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_tskacc(tsp, obj_label, mode, a);
+}
+
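Read together, the layering is: the rules hanging off the subject's smack_known entry decide first, a matching entry on the per-task rule list can only narrow that result, and CAP_MAC_OVERRIDE (subject to the onlycap restriction) may forgive a denial. A rough user-space model of that ordering with invented rule tables:

/* User-space model of the layered access decision; tables are invented. */
#include <stdio.h>

#define MAY_READ	0x1
#define MAY_WRITE	0x2

static int global_rule = MAY_READ | MAY_WRITE;	/* from the label's rule list */
static int task_rule = MAY_READ;		/* per-task entry; -1 if none */
static int privileged;				/* CAP_MAC_OVERRIDE + onlycap */

static int tskacc(int request)
{
	int rc = 0;

	if ((request & global_rule) != request)
		rc = -1;		/* denied by the global rules */
	else if (task_rule >= 0 && (request & task_rule) != request)
		rc = -1;		/* per-task entry narrows access */

	if (rc != 0 && privileged)
		rc = 0;			/* privilege forgives the denial */
	return rc;
}

int main(void)
{
	printf("read:  %d\n", tskacc(MAY_READ));	/* 0 */
	printf("write: %d\n", tskacc(MAY_WRITE));	/* -1: narrowed away */
	return 0;
}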
#ifdef CONFIG_AUDIT
/**
 * smack_str_from_perm : helper to translate an int to a
@@ -220,6 +271,7 @@ out_audit:
static inline void smack_str_from_perm(char *string, int access)
{
int i = 0;
+
if (access & MAY_READ)
string[i++] = 'r';
if (access & MAY_WRITE)
@@ -228,6 +280,10 @@ static inline void smack_str_from_perm(char *string, int access)
string[i++] = 'x';
if (access & MAY_APPEND)
string[i++] = 'a';
+ if (access & MAY_TRANSMUTE)
+ string[i++] = 't';
+ if (access & MAY_LOCK)
+ string[i++] = 'l';
string[i] = '\0';
}
/**
@@ -240,15 +296,18 @@ static inline void smack_str_from_perm(char *string, int access)
static void smack_log_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
- struct smack_audit_data *sad = &ad->smack_audit_data;
+ struct smack_audit_data *sad = ad->smack_audit_data;
audit_log_format(ab, "lsm=SMACK fn=%s action=%s",
- ad->smack_audit_data.function,
+ ad->smack_audit_data->function,
sad->result ? "denied" : "granted");
audit_log_format(ab, " subject=");
audit_log_untrustedstring(ab, sad->subject);
audit_log_format(ab, " object=");
audit_log_untrustedstring(ab, sad->object);
- audit_log_format(ab, " requested=%s", sad->request);
+ if (sad->request[0] == '\0')
+ audit_log_format(ab, " labels_differ");
+ else
+ audit_log_format(ab, " requested=%s", sad->request);
}
/**
@@ -275,19 +334,19 @@ void smack_log(char *subject_label, char *object_label, int request,
if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
return;
- if (a->smack_audit_data.function == NULL)
- a->smack_audit_data.function = "unknown";
+ sad = a->smack_audit_data;
+
+ if (sad->function == NULL)
+ sad->function = "unknown";
/* end preparing the audit data */
- sad = &a->smack_audit_data;
smack_str_from_perm(request_buffer, request);
sad->subject = subject_label;
sad->object = object_label;
sad->request = request_buffer;
sad->result = result;
- a->lsm_pre_audit = smack_log_callback;
- common_lsm_audit(a);
+ common_lsm_audit(a, smack_log_callback, NULL);
}
#else /* #ifdef CONFIG_AUDIT */
void smack_log(char *subject_label, char *object_label, int request,
@@ -296,7 +355,127 @@ void smack_log(char *subject_label, char *object_label, int request,
}
#endif
-static DEFINE_MUTEX(smack_known_lock);
+DEFINE_MUTEX(smack_known_lock);
+
+struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+
+/**
+ * smk_insert_entry - insert a smack label into the label hash and list
+ *
+ * This function must be called with smack_known_lock held.
+ */
+void smk_insert_entry(struct smack_known *skp)
+{
+ unsigned int hash;
+ struct hlist_head *head;
+
+ hash = full_name_hash(skp->smk_known, strlen(skp->smk_known));
+ head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];
+
+ hlist_add_head_rcu(&skp->smk_hashed, head);
+ list_add_rcu(&skp->list, &smack_known_list);
+}
+
+/**
+ * smk_find_entry - find a label on the list, return the list entry
+ * @string: a text string that might be a Smack label
+ *
+ * Returns a pointer to the entry in the label list that
+ * matches the passed string.
+ */
+struct smack_known *smk_find_entry(const char *string)
+{
+ unsigned int hash;
+ struct hlist_head *head;
+ struct smack_known *skp;
+
+ hash = full_name_hash(string, strlen(string));
+ head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];
+
+ hlist_for_each_entry_rcu(skp, head, smk_hashed)
+ if (strcmp(skp->smk_known, string) == 0)
+ return skp;
+
+ return NULL;
+}
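Insertion and lookup hash the whole label string and mask the hash into one of SMACK_HASH_SLOTS (a power of two) buckets; within a bucket, entries are still matched with strcmp(). The sketch below shows only the bucket selection, using a toy hash in place of the kernel's full_name_hash():

/* Toy model of bucket selection for the label hash; not full_name_hash(). */
#include <stdio.h>

#define HASH_SLOTS 16			/* mirrors SMACK_HASH_SLOTS */

/* Simple djb2-style string hash, standing in for full_name_hash(). */
static unsigned int toy_hash(const char *s)
{
	unsigned int h = 5381;

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h;
}

int main(void)
{
	const char *labels[] = { "System", "User", "_", "*" };
	unsigned int i;

	for (i = 0; i < sizeof(labels) / sizeof(labels[0]); i++)
		printf("%-8s -> bucket %u\n", labels[i],
		       toy_hash(labels[i]) & (HASH_SLOTS - 1));
	return 0;
}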
+
+/**
+ * smk_parse_smack - parse smack label from a text string
+ * @string: a text string that might contain a Smack label
+ * @len: the maximum size, or zero if the string is NUL terminated.
+ *
+ * Returns a pointer to the clean label, or NULL
+ */
+char *smk_parse_smack(const char *string, int len)
+{
+ char *smack;
+ int i;
+
+ if (len <= 0)
+ len = strlen(string) + 1;
+
+ /*
+ * Reserve a leading '-' as an indicator that
+ * this isn't a label, but an option to interfaces
+ * including /smack/cipso and /smack/cipso2
+ */
+ if (string[0] == '-')
+ return NULL;
+
+ for (i = 0; i < len; i++)
+ if (string[i] > '~' || string[i] <= ' ' || string[i] == '/' ||
+ string[i] == '"' || string[i] == '\\' || string[i] == '\'')
+ break;
+
+ if (i == 0 || i >= SMK_LONGLABEL)
+ return NULL;
+
+ smack = kzalloc(i + 1, GFP_KERNEL);
+ if (smack != NULL) {
+ strncpy(smack, string, i + 1);
+ smack[i] = '\0';
+ }
+ return smack;
+}
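In effect the parser accepts printable ASCII above the space character, stops at '/', '"', '\' or '\'', refuses a leading '-' (reserved for options such as -CIPSO), and rejects results that are empty or reach SMK_LONGLABEL. A simplified user-space approximation of that filter, returning the length of the usable prefix the way the kernel helper truncates:

/* Rough user-space approximation of the label character filter. */
#include <stdio.h>

#define LONGLABEL 256			/* mirrors SMK_LONGLABEL */

/* Return the length of the valid label prefix, or 0 if there is none. */
static unsigned int label_prefix(const char *s)
{
	unsigned int i;

	if (s[0] == '-')		/* reserved for interface options */
		return 0;
	for (i = 0; s[i] != '\0'; i++)
		if (s[i] > '~' || s[i] <= ' ' || s[i] == '/' ||
		    s[i] == '"' || s[i] == '\\' || s[i] == '\'')
			break;
	if (i >= LONGLABEL)
		return 0;
	return i;
}

int main(void)
{
	printf("%u\n", label_prefix("System"));	/* 6 */
	printf("%u\n", label_prefix("a/b"));	/* 1: truncated at '/' */
	printf("%u\n", label_prefix("-CIPSO"));	/* 0: option, not a label */
	return 0;
}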
+
+/**
+ * smk_netlbl_mls - convert a catset to netlabel mls categories
+ * @level: the MLS sensitivity level to use
+ * @catset: the Smack categories
+ * @sap: where to put the netlabel categories
+ * @len: number of bytes in @catset
+ *
+ * Allocates and fills attr.mls
+ * Returns 0 on success, error code on failure.
+ */
+int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
+ int len)
+{
+ unsigned char *cp;
+ unsigned char m;
+ int cat;
+ int rc;
+ int byte;
+
+ sap->flags |= NETLBL_SECATTR_MLS_CAT;
+ sap->attr.mls.lvl = level;
+ sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+ if (!sap->attr.mls.cat)
+ return -ENOMEM;
+ sap->attr.mls.cat->startbit = 0;
+
+ for (cat = 1, cp = catset, byte = 0; byte < len; cp++, byte++)
+ for (m = 0x80; m != 0; m >>= 1, cat++) {
+ if ((m & *cp) == 0)
+ continue;
+ rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat,
+ cat, GFP_ATOMIC);
+ if (rc < 0) {
+ netlbl_secattr_catmap_free(sap->attr.mls.cat);
+ return rc;
+ }
+ }
+
+ return 0;
+}
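The catset is walked most-significant-bit first, so bit 0x80 of byte 0 becomes category 1, bit 0x40 becomes category 2, and so on; with direct labeling the label characters themselves serve as the catset bytes. A small user-space sketch that just prints the category numbers a catset would set, with no netlabel calls:

/* Print the CIPSO category numbers a Smack catset would set (sketch). */
#include <stdio.h>

static void print_cats(const unsigned char *catset, int len)
{
	int byte;
	int cat = 1;
	unsigned char m;

	for (byte = 0; byte < len; byte++)
		for (m = 0x80; m != 0; m >>= 1, cat++)
			if (catset[byte] & m)
				printf("category %d\n", cat);
}

int main(void)
{
	/* 'A' is 0x41: bits 0x40 and 0x01, so categories 2 and 8. */
	unsigned char catset[] = { 'A' };

	print_cats(catset, sizeof(catset));
	return 0;
}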
/**
* smk_import_entry - import a label, return the list entry
@@ -309,53 +488,59 @@ static DEFINE_MUTEX(smack_known_lock);
struct smack_known *smk_import_entry(const char *string, int len)
{
struct smack_known *skp;
- char smack[SMK_LABELLEN];
- int found;
- int i;
-
- if (len <= 0 || len > SMK_MAXLEN)
- len = SMK_MAXLEN;
-
- for (i = 0, found = 0; i < SMK_LABELLEN; i++) {
- if (found)
- smack[i] = '\0';
- else if (i >= len || string[i] > '~' || string[i] <= ' ' ||
- string[i] == '/' || string[i] == '"' ||
- string[i] == '\\' || string[i] == '\'') {
- smack[i] = '\0';
- found = 1;
- } else
- smack[i] = string[i];
- }
+ char *smack;
+ int slen;
+ int rc;
- if (smack[0] == '\0')
+ smack = smk_parse_smack(string, len);
+ if (smack == NULL)
return NULL;
mutex_lock(&smack_known_lock);
- found = 0;
- list_for_each_entry_rcu(skp, &smack_known_list, list) {
- if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) {
- found = 1;
- break;
- }
- }
+ skp = smk_find_entry(smack);
+ if (skp != NULL)
+ goto freeout;
- if (found == 0) {
- skp = kzalloc(sizeof(struct smack_known), GFP_KERNEL);
- if (skp != NULL) {
- strncpy(skp->smk_known, smack, SMK_MAXLEN);
- skp->smk_secid = smack_next_secid++;
- skp->smk_cipso = NULL;
- spin_lock_init(&skp->smk_cipsolock);
- /*
- * Make sure that the entry is actually
- * filled before putting it on the list.
- */
- list_add_rcu(&skp->list, &smack_known_list);
- }
- }
+ skp = kzalloc(sizeof(*skp), GFP_KERNEL);
+ if (skp == NULL)
+ goto freeout;
+ skp->smk_known = smack;
+ skp->smk_secid = smack_next_secid++;
+ skp->smk_netlabel.domain = skp->smk_known;
+ skp->smk_netlabel.flags =
+ NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
+ /*
+ * If direct labeling works use it.
+ * Otherwise use mapped labeling.
+ */
+ slen = strlen(smack);
+ if (slen < SMK_CIPSOLEN)
+ rc = smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
+ &skp->smk_netlabel, slen);
+ else
+ rc = smk_netlbl_mls(smack_cipso_mapped, (char *)&skp->smk_secid,
+ &skp->smk_netlabel, sizeof(skp->smk_secid));
+
+ if (rc >= 0) {
+ INIT_LIST_HEAD(&skp->smk_rules);
+ mutex_init(&skp->smk_rules_lock);
+ /*
+ * Make sure that the entry is actually
+ * filled before putting it on the list.
+ */
+ smk_insert_entry(skp);
+ goto unlockout;
+ }
+ /*
+ * smk_netlbl_mls failed.
+ */
+ kfree(skp);
+ skp = NULL;
+freeout:
+ kfree(smack);
+unlockout:
mutex_unlock(&smack_known_lock);
return skp;
@@ -386,10 +571,10 @@ char *smk_import(const char *string, int len)
* smack_from_secid - find the Smack label associated with a secid
* @secid: an integer that might be associated with a Smack label
*
- * Returns a pointer to the appropraite Smack label if there is one,
+ * Returns a pointer to the appropriate Smack label entry if there is one,
* otherwise a pointer to the invalid Smack label.
*/
-char *smack_from_secid(const u32 secid)
+struct smack_known *smack_from_secid(const u32 secid)
{
struct smack_known *skp;
@@ -397,7 +582,7 @@ char *smack_from_secid(const u32 secid)
list_for_each_entry_rcu(skp, &smack_known_list, list) {
if (skp->smk_secid == secid) {
rcu_read_unlock();
- return skp->smk_known;
+ return skp;
}
}
@@ -406,7 +591,7 @@ char *smack_from_secid(const u32 secid)
* of a secid that is not on the list.
*/
rcu_read_unlock();
- return smack_known_invalid.smk_known;
+ return &smack_known_invalid;
}
/**
@@ -418,85 +603,9 @@ char *smack_from_secid(const u32 secid)
*/
u32 smack_to_secid(const char *smack)
{
- struct smack_known *skp;
-
- rcu_read_lock();
- list_for_each_entry_rcu(skp, &smack_known_list, list) {
- if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) {
- rcu_read_unlock();
- return skp->smk_secid;
- }
- }
- rcu_read_unlock();
- return 0;
-}
-
-/**
- * smack_from_cipso - find the Smack label associated with a CIPSO option
- * @level: Bell & LaPadula level from the network
- * @cp: Bell & LaPadula categories from the network
- * @result: where to put the Smack value
- *
- * This is a simple lookup in the label table.
- *
- * This is an odd duck as far as smack handling goes in that
- * it sends back a copy of the smack label rather than a pointer
- * to the master list. This is done because it is possible for
- * a foreign host to send a smack label that is new to this
- * machine and hence not on the list. That would not be an
- * issue except that adding an entry to the master list can't
- * be done at that point.
- */
-void smack_from_cipso(u32 level, char *cp, char *result)
-{
- struct smack_known *kp;
- char *final = NULL;
-
- rcu_read_lock();
- list_for_each_entry(kp, &smack_known_list, list) {
- if (kp->smk_cipso == NULL)
- continue;
-
- spin_lock_bh(&kp->smk_cipsolock);
-
- if (kp->smk_cipso->smk_level == level &&
- memcmp(kp->smk_cipso->smk_catset, cp, SMK_LABELLEN) == 0)
- final = kp->smk_known;
-
- spin_unlock_bh(&kp->smk_cipsolock);
- }
- rcu_read_unlock();
- if (final == NULL)
- final = smack_known_huh.smk_known;
- strncpy(result, final, SMK_MAXLEN);
- return;
-}
-
-/**
- * smack_to_cipso - find the CIPSO option to go with a Smack label
- * @smack: a pointer to the smack label in question
- * @cp: where to put the result
- *
- * Returns zero if a value is available, non-zero otherwise.
- */
-int smack_to_cipso(const char *smack, struct smack_cipso *cp)
-{
- struct smack_known *kp;
- int found = 0;
+ struct smack_known *skp = smk_find_entry(smack);
- rcu_read_lock();
- list_for_each_entry_rcu(kp, &smack_known_list, list) {
- if (kp->smk_known == smack ||
- strcmp(kp->smk_known, smack) == 0) {
- found = 1;
- break;
- }
- }
- rcu_read_unlock();
-
- if (found == 0 || kp->smk_cipso == NULL)
- return -ENOENT;
-
- memcpy(cp, kp->smk_cipso, sizeof(struct smack_cipso));
- return 0;
+ if (skp == NULL)
+ return 0;
+ return skp->smk_secid;
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index 529c9ca6587..f2c30801ce4 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -3,12 +3,15 @@
*
* This file contains the smack hook function implementations.
*
- * Author:
+ * Authors:
* Casey Schaufler <casey@schaufler-ca.com>
+ * Jarkko Sakkinen <jarkko.sakkinen@intel.com>
*
* Copyright (C) 2007 Casey Schaufler <casey@schaufler-ca.com>
* Copyright (C) 2009 Hewlett-Packard Development Company, L.P.
- * Paul Moore <paul.moore@hp.com>
+ * Paul Moore <paul@paul-moore.com>
+ * Copyright (C) 2010 Nokia Corporation
+ * Copyright (C) 2011 Intel Corporation.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
@@ -19,22 +22,38 @@
#include <linux/pagemap.h>
#include <linux/mount.h>
#include <linux/stat.h>
-#include <linux/ext2_fs.h>
#include <linux/kd.h>
#include <asm/ioctls.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
+#include <linux/dccp.h>
+#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/pipe_fs_i.h>
-#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
#include <linux/audit.h>
#include <linux/magic.h>
+#include <linux/dcache.h>
+#include <linux/personality.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/binfmts.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
+#define TRANS_TRUE "TRUE"
+#define TRANS_TRUE_SIZE 4
+
+#define SMK_CONNECTING 0
+#define SMK_RECEIVING 1
+#define SMK_SENDING 2
+
+LIST_HEAD(smk_ipv6_port_list);
+
/**
* smk_fetch - Fetch the smack label from a file.
* @ip: a pointer to the inode
@@ -43,19 +62,27 @@
* Returns a pointer to the master list entry for the Smack label
* or NULL if there was no label to fetch.
*/
-static char *smk_fetch(struct inode *ip, struct dentry *dp)
+static struct smack_known *smk_fetch(const char *name, struct inode *ip,
+ struct dentry *dp)
{
int rc;
- char in[SMK_LABELLEN];
+ char *buffer;
+ struct smack_known *skp = NULL;
if (ip->i_op->getxattr == NULL)
return NULL;
- rc = ip->i_op->getxattr(dp, XATTR_NAME_SMACK, in, SMK_LABELLEN);
- if (rc < 0)
+ buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
+ if (buffer == NULL)
return NULL;
- return smk_import(in, rc);
+ rc = ip->i_op->getxattr(dp, name, buffer, SMK_LONGLABEL);
+ if (rc > 0)
+ skp = smk_import_entry(buffer, rc);
+
+ kfree(buffer);
+
+ return skp;
}
/**
@@ -68,7 +95,7 @@ struct inode_smack *new_inode_smack(char *smack)
{
struct inode_smack *isp;
- isp = kzalloc(sizeof(struct inode_smack), GFP_KERNEL);
+ isp = kzalloc(sizeof(struct inode_smack), GFP_NOFS);
if (isp == NULL)
return NULL;
@@ -79,6 +106,125 @@ struct inode_smack *new_inode_smack(char *smack)
return isp;
}
+/**
+ * new_task_smack - allocate a task security blob
+ * @task: a pointer to the Smack label for the running task
+ * @forked: a pointer to the Smack label for the forked task
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns the new blob or NULL if there's no memory available
+ */
+static struct task_smack *new_task_smack(struct smack_known *task,
+ struct smack_known *forked, gfp_t gfp)
+{
+ struct task_smack *tsp;
+
+ tsp = kzalloc(sizeof(struct task_smack), gfp);
+ if (tsp == NULL)
+ return NULL;
+
+ tsp->smk_task = task;
+ tsp->smk_forked = forked;
+ INIT_LIST_HEAD(&tsp->smk_rules);
+ mutex_init(&tsp->smk_rules_lock);
+
+ return tsp;
+}
+
+/**
+ * smk_copy_rules - copy a rule set
+ * @nhead: new rules header pointer
+ * @ohead: old rules header pointer
+ * @gfp: type of the memory for the allocation
+ *
+ * Returns 0 on success, -ENOMEM on error
+ */
+static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
+ gfp_t gfp)
+{
+ struct smack_rule *nrp;
+ struct smack_rule *orp;
+ int rc = 0;
+
+ INIT_LIST_HEAD(nhead);
+
+ list_for_each_entry_rcu(orp, ohead, list) {
+ nrp = kzalloc(sizeof(struct smack_rule), gfp);
+ if (nrp == NULL) {
+ rc = -ENOMEM;
+ break;
+ }
+ *nrp = *orp;
+ list_add_rcu(&nrp->list, nhead);
+ }
+ return rc;
+}
+
+/**
+ * smk_ptrace_mode - helper function for converting PTRACE_MODE_* into MAY_*
+ * @mode: input mode in form of PTRACE_MODE_*
+ *
+ * Returns a converted MAY_* mode usable by smack rules
+ */
+static inline unsigned int smk_ptrace_mode(unsigned int mode)
+{
+ switch (mode) {
+ case PTRACE_MODE_READ:
+ return MAY_READ;
+ case PTRACE_MODE_ATTACH:
+ return MAY_READWRITE;
+ }
+
+ return 0;
+}
+
+/**
+ * smk_ptrace_rule_check - helper for ptrace access
+ * @tracer: tracer process
+ * @tracee_label: label of the process that's about to be traced,
+ * the pointer must originate from smack structures
+ * @mode: ptrace attachment mode (PTRACE_MODE_*)
+ * @func: name of the function that called us, used for audit
+ *
+ * Returns 0 on access granted, -error on error
+ */
+static int smk_ptrace_rule_check(struct task_struct *tracer, char *tracee_label,
+ unsigned int mode, const char *func)
+{
+ int rc;
+ struct smk_audit_info ad, *saip = NULL;
+ struct task_smack *tsp;
+ struct smack_known *skp;
+
+ if ((mode & PTRACE_MODE_NOAUDIT) == 0) {
+ smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK);
+ smk_ad_setfield_u_tsk(&ad, tracer);
+ saip = &ad;
+ }
+
+ tsp = task_security(tracer);
+ skp = smk_of_task(tsp);
+
+ if ((mode & PTRACE_MODE_ATTACH) &&
+ (smack_ptrace_rule == SMACK_PTRACE_EXACT ||
+ smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)) {
+ if (skp->smk_known == tracee_label)
+ rc = 0;
+ else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)
+ rc = -EACCES;
+ else if (capable(CAP_SYS_PTRACE))
+ rc = 0;
+ else
+ rc = -EACCES;
+
+ if (saip)
+ smack_log(skp->smk_known, tracee_label, 0, rc, saip);
+
+ return rc;
+ }
+
+ /* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */
+ rc = smk_tskacc(tsp, tracee_label, smk_ptrace_mode(mode), saip);
+ return rc;
+}
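Condensed, the attach decision is: under SMACK_PTRACE_EXACT and SMACK_PTRACE_DRACONIAN the tracer and tracee labels must match, EXACT alone allows CAP_SYS_PTRACE to forgive a mismatch, and SMACK_PTRACE_DEFAULT (or a plain read) falls back to the ordinary rule check. A user-space model of that decision with the capability and rule checks stubbed out:

/* User-space model of the ptrace attach decision (stubs, not kernel code). */
#include <stdio.h>

enum { PT_DEFAULT, PT_EXACT, PT_DRACONIAN };

static int labels_match, has_cap_sys_ptrace, rule_allows;

static int ptrace_attach_ok(int rule)
{
	if (rule == PT_EXACT || rule == PT_DRACONIAN) {
		if (labels_match)
			return 1;
		if (rule == PT_EXACT && has_cap_sys_ptrace)
			return 1;	/* EXACT can be overridden */
		return 0;		/* DRACONIAN cannot */
	}
	return rule_allows;		/* DEFAULT: normal rwx rule check */
}

int main(void)
{
	labels_match = 0;
	has_cap_sys_ptrace = 1;
	rule_allows = 1;

	printf("default:   %d\n", ptrace_attach_ok(PT_DEFAULT));	/* 1 */
	printf("exact:     %d\n", ptrace_attach_ok(PT_EXACT));		/* 1 */
	printf("draconian: %d\n", ptrace_attach_ok(PT_DRACONIAN));	/* 0 */
	return 0;
}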
+
/*
* LSM hooks.
* We he, that is fun!
@@ -87,33 +233,24 @@ struct inode_smack *new_inode_smack(char *smack)
/**
* smack_ptrace_access_check - Smack approval on PTRACE_ATTACH
* @ctp: child task pointer
- * @mode: ptrace attachment mode
+ * @mode: ptrace attachment mode (PTRACE_MODE_*)
*
* Returns 0 if access is OK, an error code otherwise
*
- * Do the capability checks, and require read and write.
+ * Do the capability checks.
*/
static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
{
int rc;
- struct smk_audit_info ad;
- char *sp, *tsp;
+ struct smack_known *skp;
rc = cap_ptrace_access_check(ctp, mode);
if (rc != 0)
return rc;
- sp = current_security();
- tsp = task_security(ctp);
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
- smk_ad_setfield_u_tsk(&ad, ctp);
+ skp = smk_of_task(task_security(ctp));
- /* we won't log here, because rc can be overriden */
- rc = smk_access(sp, tsp, MAY_READWRITE, NULL);
- if (rc != 0 && capable(CAP_MAC_OVERRIDE))
- rc = 0;
-
- smack_log(sp, tsp, MAY_READWRITE, rc, &ad);
+ rc = smk_ptrace_rule_check(current, skp->smk_known, mode, __func__);
return rc;
}
@@ -123,29 +260,21 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
*
* Returns 0 if access is OK, an error code otherwise
*
- * Do the capability checks, and require read and write.
+ * Do the capability checks, and require PTRACE_MODE_ATTACH.
*/
static int smack_ptrace_traceme(struct task_struct *ptp)
{
int rc;
- struct smk_audit_info ad;
- char *sp, *tsp;
+ struct smack_known *skp;
rc = cap_ptrace_traceme(ptp);
if (rc != 0)
return rc;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
- smk_ad_setfield_u_tsk(&ad, ptp);
+ skp = smk_of_task(current_security());
- sp = current_security();
- tsp = task_security(ptp);
- /* we won't log here, because rc can be overriden */
- rc = smk_access(tsp, sp, MAY_READWRITE, NULL);
- if (rc != 0 && has_capability(ptp, CAP_MAC_OVERRIDE))
- rc = 0;
-
- smack_log(tsp, sp, MAY_READWRITE, rc, &ad);
+ rc = smk_ptrace_rule_check(ptp, skp->smk_known,
+ PTRACE_MODE_ATTACH, __func__);
return rc;
}
@@ -153,23 +282,17 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
* smack_syslog - Smack approval on syslog
* @type: message type
*
- * Require that the task has the floor label
- *
* Returns 0 on success, error code otherwise.
*/
-static int smack_syslog(int type)
+static int smack_syslog(int typefrom_file)
{
- int rc;
- char *sp = current_security();
-
- rc = cap_syslog(type);
- if (rc != 0)
- return rc;
+ int rc = 0;
+ struct smack_known *skp = smk_of_current();
- if (capable(CAP_MAC_OVERRIDE))
+ if (smack_privileged(CAP_MAC_OVERRIDE))
return 0;
- if (sp != smack_known_floor.smk_known)
+ if (smack_syslog_label != NULL && smack_syslog_label != skp)
rc = -EACCES;
return rc;
@@ -199,9 +322,9 @@ static int smack_sb_alloc_security(struct super_block *sb)
sbsp->smk_default = smack_known_floor.smk_known;
sbsp->smk_floor = smack_known_floor.smk_known;
sbsp->smk_hat = smack_known_hat.smk_known;
- sbsp->smk_initialized = 0;
- spin_lock_init(&sbsp->smk_sblock);
-
+ /*
+ * smk_initialized will be zero from kzalloc.
+ */
sb->s_security = sbsp;
return 0;
@@ -245,6 +368,8 @@ static int smack_sb_copy_data(char *orig, char *smackopts)
dp = smackopts;
else if (strstr(cp, SMK_FSROOT) == cp)
dp = smackopts;
+ else if (strstr(cp, SMK_FSTRANS) == cp)
+ dp = smackopts;
else
dp = otheropts;
@@ -277,17 +402,17 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
struct inode *inode = root->d_inode;
struct superblock_smack *sp = sb->s_security;
struct inode_smack *isp;
+ struct smack_known *skp;
char *op;
char *commap;
char *nsp;
+ int transmute = 0;
+ int specified = 0;
- spin_lock(&sp->smk_sblock);
- if (sp->smk_initialized != 0) {
- spin_unlock(&sp->smk_sblock);
+ if (sp->smk_initialized)
return 0;
- }
+
sp->smk_initialized = 1;
- spin_unlock(&sp->smk_sblock);
for (op = data; op != NULL; op = commap) {
commap = strchr(op, ',');
@@ -297,36 +422,71 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) {
op += strlen(SMK_FSHAT);
nsp = smk_import(op, 0);
- if (nsp != NULL)
+ if (nsp != NULL) {
sp->smk_hat = nsp;
+ specified = 1;
+ }
} else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) {
op += strlen(SMK_FSFLOOR);
nsp = smk_import(op, 0);
- if (nsp != NULL)
+ if (nsp != NULL) {
sp->smk_floor = nsp;
+ specified = 1;
+ }
} else if (strncmp(op, SMK_FSDEFAULT,
strlen(SMK_FSDEFAULT)) == 0) {
op += strlen(SMK_FSDEFAULT);
nsp = smk_import(op, 0);
- if (nsp != NULL)
+ if (nsp != NULL) {
sp->smk_default = nsp;
+ specified = 1;
+ }
} else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) {
op += strlen(SMK_FSROOT);
nsp = smk_import(op, 0);
- if (nsp != NULL)
+ if (nsp != NULL) {
+ sp->smk_root = nsp;
+ specified = 1;
+ }
+ } else if (strncmp(op, SMK_FSTRANS, strlen(SMK_FSTRANS)) == 0) {
+ op += strlen(SMK_FSTRANS);
+ nsp = smk_import(op, 0);
+ if (nsp != NULL) {
sp->smk_root = nsp;
+ transmute = 1;
+ specified = 1;
+ }
}
}
+ if (!smack_privileged(CAP_MAC_ADMIN)) {
+ /*
+ * Unprivileged mounts don't get to specify Smack values.
+ */
+ if (specified)
+ return -EPERM;
+ /*
+ * Unprivileged mounts get root and default from the caller.
+ */
+ skp = smk_of_current();
+ sp->smk_root = skp->smk_known;
+ sp->smk_default = skp->smk_known;
+ }
/*
* Initialize the root inode.
*/
isp = inode->i_security;
- if (isp == NULL)
- inode->i_security = new_inode_smack(sp->smk_root);
- else
+ if (isp == NULL) {
+ isp = new_inode_smack(sp->smk_root);
+ if (isp == NULL)
+ return -ENOMEM;
+ inode->i_security = isp;
+ } else
isp->smk_inode = sp->smk_root;
+ if (transmute)
+ isp->smk_flags |= SMK_INODE_TRANSMUTE;
+
return 0;
}
@@ -343,55 +503,94 @@ static int smack_sb_statfs(struct dentry *dentry)
int rc;
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
rc = smk_curacc(sbp->smk_floor, MAY_READ, &ad);
return rc;
}
+/*
+ * BPRM hooks
+ */
+
/**
- * smack_sb_mount - Smack check for mounting
- * @dev_name: unused
- * @path: mount point
- * @type: unused
- * @flags: unused
- * @data: unused
+ * smack_bprm_set_creds - set creds for exec
+ * @bprm: the exec information
*
- * Returns 0 if current can write the floor of the filesystem
- * being mounted on, an error code otherwise.
+ * Returns 0 if it gets a blob, -EPERM if exec forbidden and -ENOMEM otherwise
*/
-static int smack_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data)
+static int smack_bprm_set_creds(struct linux_binprm *bprm)
{
- struct superblock_smack *sbp = path->mnt->mnt_sb->s_security;
- struct smk_audit_info ad;
+ struct inode *inode = file_inode(bprm->file);
+ struct task_smack *bsp = bprm->cred->security;
+ struct inode_smack *isp;
+ int rc;
+
+ rc = cap_bprm_set_creds(bprm);
+ if (rc != 0)
+ return rc;
+
+ if (bprm->cred_prepared)
+ return 0;
+
+ isp = inode->i_security;
+ if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
+ return 0;
+
+ if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
+ struct task_struct *tracer;
+ rc = 0;
+
+ rcu_read_lock();
+ tracer = ptrace_parent(current);
+ if (likely(tracer != NULL))
+ rc = smk_ptrace_rule_check(tracer,
+ isp->smk_task->smk_known,
+ PTRACE_MODE_ATTACH,
+ __func__);
+ rcu_read_unlock();
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
- smk_ad_setfield_u_fs_path(&ad, *path);
+ if (rc != 0)
+ return rc;
+ } else if (bprm->unsafe)
+ return -EPERM;
+
+ bsp->smk_task = isp->smk_task;
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
- return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
+ return 0;
}
/**
- * smack_sb_umount - Smack check for unmounting
- * @mnt: file system to unmount
- * @flags: unused
+ * smack_bprm_committing_creds - Prepare to install the new credentials
+ * from bprm.
*
- * Returns 0 if current can write the floor of the filesystem
- * being unmounted, an error code otherwise.
+ * @bprm: binprm for exec
*/
-static int smack_sb_umount(struct vfsmount *mnt, int flags)
+static void smack_bprm_committing_creds(struct linux_binprm *bprm)
{
- struct superblock_smack *sbp;
- struct smk_audit_info ad;
+ struct task_smack *bsp = bprm->cred->security;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
- smk_ad_setfield_u_fs_path_dentry(&ad, mnt->mnt_mountpoint);
- smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
+ if (bsp->smk_task != bsp->smk_forked)
+ current->pdeath_signal = 0;
+}
- sbp = mnt->mnt_sb->s_security;
- return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
+/**
+ * smack_bprm_secureexec - Return the decision to use secureexec.
+ * @bprm: binprm for exec
+ *
+ * Returns 0 on success.
+ */
+static int smack_bprm_secureexec(struct linux_binprm *bprm)
+{
+ struct task_smack *tsp = current_security();
+ int ret = cap_bprm_secureexec(bprm);
+
+ if (!ret && (tsp->smk_task != tsp->smk_forked))
+ ret = 1;
+
+ return ret;
}
/*
@@ -406,7 +605,9 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
*/
static int smack_inode_alloc_security(struct inode *inode)
{
- inode->i_security = new_inode_smack(current_security());
+ struct smack_known *skp = smk_of_current();
+
+ inode->i_security = new_inode_smack(skp->smk_known);
if (inode->i_security == NULL)
return -ENOMEM;
return 0;
@@ -428,6 +629,7 @@ static void smack_inode_free_security(struct inode *inode)
* smack_inode_init_security - copy out the smack from an inode
* @inode: the inode
* @dir: unused
+ * @qstr: unused
* @name: where to put the attribute name
* @value: where to put the attribute value
* @len: where to put the length of the attribute
@@ -435,18 +637,36 @@ static void smack_inode_free_security(struct inode *inode)
* Returns 0 if it all works out, -ENOMEM if there's no memory
*/
static int smack_inode_init_security(struct inode *inode, struct inode *dir,
- char **name, void **value, size_t *len)
+ const struct qstr *qstr, const char **name,
+ void **value, size_t *len)
{
+ struct inode_smack *issp = inode->i_security;
+ struct smack_known *skp = smk_of_current();
char *isp = smk_of_inode(inode);
+ char *dsp = smk_of_inode(dir);
+ int may;
- if (name) {
- *name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL);
- if (*name == NULL)
- return -ENOMEM;
- }
+ if (name)
+ *name = XATTR_SMACK_SUFFIX;
if (value) {
- *value = kstrdup(isp, GFP_KERNEL);
+ rcu_read_lock();
+ may = smk_access_entry(skp->smk_known, dsp, &skp->smk_rules);
+ rcu_read_unlock();
+
+ /*
+ * If the access rule allows transmutation and
+ * the directory requests transmutation then
+ * by all means transmute.
+ * Mark the inode as changed.
+ */
+ if (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
+ smk_inode_transmutable(dir)) {
+ isp = dsp;
+ issp->smk_flags |= SMK_INODE_CHANGED;
+ }
+
+ *value = kstrdup(isp, GFP_NOFS);
if (*value == NULL)
return -ENOMEM;
}
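Transmutation means an object created in a transmuting directory takes the directory's label rather than the creator's, but only when the creator's rule toward the directory carries the transmute ('t') bit. A compressed user-space model of that decision; apart from MAY_TRANSMUTE, the flag values are only illustrative:

/* Model of the transmute decision at inode creation (illustrative flags). */
#include <stdio.h>

#define MAY_TRANSMUTE	0x00001000	/* same value smack.h defines */
#define MAY_WRITE	0x2		/* illustrative */
#define DIR_TRANSMUTE	0x02		/* directory has SMACK64TRANSMUTE set */

int main(void)
{
	int rule = MAY_WRITE | MAY_TRANSMUTE;	/* creator -> directory rule */
	int dir_flags = DIR_TRANSMUTE;
	const char *task_label = "User";
	const char *dir_label = "Shared";

	const char *new_label =
		((rule & MAY_TRANSMUTE) && (dir_flags & DIR_TRANSMUTE)) ?
		dir_label : task_label;

	printf("new object gets label \"%s\"\n", new_label);
	return 0;
}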
@@ -472,7 +692,7 @@ static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
struct smk_audit_info ad;
int rc;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
isp = smk_of_inode(old_dentry->d_inode);
@@ -501,7 +721,7 @@ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
struct smk_audit_info ad;
int rc;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
/*
@@ -512,7 +732,7 @@ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
/*
* You also need write access to the containing directory
*/
- smk_ad_setfield_u_fs_path_dentry(&ad, NULL);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
smk_ad_setfield_u_fs_inode(&ad, dir);
rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
}
@@ -532,7 +752,7 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
struct smk_audit_info ad;
int rc;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
/*
@@ -543,7 +763,7 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
/*
* You also need write access to the containing directory
*/
- smk_ad_setfield_u_fs_path_dentry(&ad, NULL);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
smk_ad_setfield_u_fs_inode(&ad, dir);
rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
}
@@ -572,7 +792,7 @@ static int smack_inode_rename(struct inode *old_inode,
char *isp;
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, old_dentry);
isp = smk_of_inode(old_dentry->d_inode);
@@ -598,12 +818,19 @@ static int smack_inode_rename(struct inode *old_inode,
static int smack_inode_permission(struct inode *inode, int mask)
{
struct smk_audit_info ad;
+ int no_block = mask & MAY_NOT_BLOCK;
+
+ mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
/*
* No permission to check. Existence test. Yup, it's there.
*/
if (mask == 0)
return 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+
+ /* May be droppable after audit */
+ if (no_block)
+ return -ECHILD;
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
smk_ad_setfield_u_fs_inode(&ad, inode);
return smk_curacc(smk_of_inode(inode), mask, &ad);
}
@@ -623,7 +850,7 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
*/
if (iattr->ia_valid & ATTR_FORCE)
return 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
return smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
@@ -639,10 +866,13 @@ static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
static int smack_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
{
struct smk_audit_info ad;
+ struct path path;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
- smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- smk_ad_setfield_u_fs_path_mnt(&ad, mnt);
+ path.dentry = dentry;
+ path.mnt = mnt;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, path);
return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
}
@@ -662,24 +892,44 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct smk_audit_info ad;
+ struct smack_known *skp;
+ int check_priv = 0;
+ int check_import = 0;
+ int check_star = 0;
int rc = 0;
+ /*
+ * Check label validity here so import won't fail in post_setxattr
+ */
if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
- if (!capable(CAP_MAC_ADMIN))
- rc = -EPERM;
- /*
- * check label validity here so import wont fail on
- * post_setxattr
- */
- if (size == 0 || size >= SMK_LABELLEN ||
- smk_import(value, size) == NULL)
+ check_priv = 1;
+ check_import = 1;
+ } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ check_priv = 1;
+ check_import = 1;
+ check_star = 1;
+ } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ check_priv = 1;
+ if (size != TRANS_TRUE_SIZE ||
+ strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
rc = -EINVAL;
} else
rc = cap_inode_setxattr(dentry, name, value, size, flags);
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ if (check_priv && !smack_privileged(CAP_MAC_ADMIN))
+ rc = -EPERM;
+
+ if (rc == 0 && check_import) {
+ skp = smk_import_entry(value, size);
+ if (skp == NULL || (check_star &&
+ (skp == &smack_known_star || skp == &smack_known_web)))
+ rc = -EINVAL;
+ }
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
if (rc == 0)
@@ -702,31 +952,38 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- struct inode_smack *isp;
- char *nsp;
+ struct smack_known *skp;
+ struct inode_smack *isp = dentry->d_inode->i_security;
- /*
- * Not SMACK
- */
- if (strcmp(name, XATTR_NAME_SMACK))
+ if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ isp->smk_flags |= SMK_INODE_TRANSMUTE;
return;
+ }
- isp = dentry->d_inode->i_security;
-
- /*
- * No locking is done here. This is a pointer
- * assignment.
- */
- nsp = smk_import(value, size);
- if (nsp != NULL)
- isp->smk_inode = nsp;
- else
- isp->smk_inode = smack_known_invalid.smk_known;
+ if (strcmp(name, XATTR_NAME_SMACK) == 0) {
+ skp = smk_import_entry(value, size);
+ if (skp != NULL)
+ isp->smk_inode = skp->smk_known;
+ else
+ isp->smk_inode = smack_known_invalid.smk_known;
+ } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
+ skp = smk_import_entry(value, size);
+ if (skp != NULL)
+ isp->smk_task = skp;
+ else
+ isp->smk_task = &smack_known_invalid;
+ } else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ skp = smk_import_entry(value, size);
+ if (skp != NULL)
+ isp->smk_mmap = skp;
+ else
+ isp->smk_mmap = &smack_known_invalid;
+ }
return;
}
-/*
+/**
* smack_inode_getxattr - Smack check on getxattr
* @dentry: the object
* @name: unused
@@ -737,13 +994,13 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name)
{
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ, &ad);
}
-/*
+/**
* smack_inode_removexattr - Smack check on removexattr
* @dentry: the object
* @name: name of the attribute
@@ -754,23 +1011,46 @@ static int smack_inode_getxattr(struct dentry *dentry, const char *name)
*/
static int smack_inode_removexattr(struct dentry *dentry, const char *name)
{
+ struct inode_smack *isp;
struct smk_audit_info ad;
int rc = 0;
if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
- strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
- if (!capable(CAP_MAC_ADMIN))
+ strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ if (!smack_privileged(CAP_MAC_ADMIN))
rc = -EPERM;
} else
rc = cap_inode_removexattr(dentry, name);
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ if (rc != 0)
+ return rc;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- if (rc == 0)
- rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
- return rc;
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
+ if (rc != 0)
+ return rc;
+
+ isp = dentry->d_inode->i_security;
+ /*
+ * Don't do anything special for these.
+ * XATTR_NAME_SMACKIPIN
+ * XATTR_NAME_SMACKIPOUT
+ * XATTR_NAME_SMACKEXEC
+ */
+ if (strcmp(name, XATTR_NAME_SMACK) == 0)
+ isp->smk_task = NULL;
+ else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0)
+ isp->smk_mmap = NULL;
+ else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
+ isp->smk_flags &= ~SMK_INODE_TRANSMUTE;
+
+ return 0;
}
/**
@@ -815,9 +1095,9 @@ static int smack_inode_getsecurity(const struct inode *inode,
ssp = sock->sk->sk_security;
if (strcmp(name, XATTR_SMACK_IPIN) == 0)
- isp = ssp->smk_in;
+ isp = ssp->smk_in->smk_known;
else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
- isp = ssp->smk_out;
+ isp = ssp->smk_out->smk_known;
else
return -EOPNOTSUPP;
@@ -897,7 +1177,9 @@ static int smack_file_permission(struct file *file, int mask)
*/
static int smack_file_alloc_security(struct file *file)
{
- file->f_security = current_security();
+ struct smack_known *skp = smk_of_current();
+
+ file->f_security = skp->smk_known;
return 0;
}
@@ -929,7 +1211,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
int rc = 0;
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
if (_IOC_DIR(cmd) & _IOC_WRITE)
@@ -946,15 +1228,15 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
* @file: the object
* @cmd: unused
*
- * Returns 0 if current has write access, error code otherwise
+ * Returns 0 if current has lock access, error code otherwise
*/
static int smack_file_lock(struct file *file, unsigned int cmd)
{
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
- smk_ad_setfield_u_fs_path_dentry(&ad, file->f_path.dentry);
- return smk_curacc(file->f_security, MAY_WRITE, &ad);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ return smk_curacc(file->f_security, MAY_LOCK, &ad);
}
/**
@@ -963,42 +1245,145 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
* @cmd: what action to check
* @arg: unused
*
+ * Generally these operations are harmless.
+ * File locking operations present an obvious mechanism
+ * for passing information, so they require write access.
+ *
* Returns 0 if current has access, error code otherwise
*/
static int smack_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
struct smk_audit_info ad;
- int rc;
+ int rc = 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_FS);
- smk_ad_setfield_u_fs_path(&ad, file->f_path);
switch (cmd) {
- case F_DUPFD:
- case F_GETFD:
- case F_GETFL:
case F_GETLK:
- case F_GETOWN:
- case F_GETSIG:
- rc = smk_curacc(file->f_security, MAY_READ, &ad);
break;
- case F_SETFD:
- case F_SETFL:
case F_SETLK:
case F_SETLKW:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
+ break;
case F_SETOWN:
case F_SETSIG:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
rc = smk_curacc(file->f_security, MAY_WRITE, &ad);
break;
default:
- rc = smk_curacc(file->f_security, MAY_READWRITE, &ad);
+ break;
}
return rc;
}
/**
+ * smack_mmap_file - Check permissions for a mmap operation
+ * The @file may be NULL, e.g.
+ * if mapping anonymous memory.
+ * @file contains the file structure for file to map (may be NULL).
+ * @reqprot contains the protection requested by the application.
+ * @prot contains the protection that will be applied by the kernel.
+ * @flags contains the operational flags.
+ * Return 0 if permission is granted.
+ */
+static int smack_mmap_file(struct file *file,
+ unsigned long reqprot, unsigned long prot,
+ unsigned long flags)
+{
+ struct smack_known *skp;
+ struct smack_known *mkp;
+ struct smack_rule *srp;
+ struct task_smack *tsp;
+ char *osmack;
+ struct inode_smack *isp;
+ int may;
+ int mmay;
+ int tmay;
+ int rc;
+
+ if (file == NULL)
+ return 0;
+
+ isp = file_inode(file)->i_security;
+ if (isp->smk_mmap == NULL)
+ return 0;
+ mkp = isp->smk_mmap;
+
+ tsp = current_security();
+ skp = smk_of_current();
+ rc = 0;
+
+ rcu_read_lock();
+ /*
+ * For each Smack rule associated with the subject
+ * label verify that the SMACK64MMAP also has access
+ * to that rule's object label.
+ */
+ list_for_each_entry_rcu(srp, &skp->smk_rules, list) {
+ osmack = srp->smk_object;
+ /*
+ * Matching labels always allows access.
+ */
+ if (mkp->smk_known == osmack)
+ continue;
+ /*
+ * If there is a matching local rule take
+ * that into account as well.
+ */
+ may = smk_access_entry(srp->smk_subject->smk_known, osmack,
+ &tsp->smk_rules);
+ if (may == -ENOENT)
+ may = srp->smk_access;
+ else
+ may &= srp->smk_access;
+ /*
+ * If may is zero the SMACK64MMAP subject can't
+ * possibly have less access.
+ */
+ if (may == 0)
+ continue;
+
+ /*
+ * Fetch the global list entry.
+ * If there isn't one a SMACK64MMAP subject
+ * can't have as much access as current.
+ */
+ mmay = smk_access_entry(mkp->smk_known, osmack,
+ &mkp->smk_rules);
+ if (mmay == -ENOENT) {
+ rc = -EACCES;
+ break;
+ }
+ /*
+ * If there is a local entry it modifies the
+ * potential access, too.
+ */
+ tmay = smk_access_entry(mkp->smk_known, osmack,
+ &tsp->smk_rules);
+ if (tmay != -ENOENT)
+ mmay &= tmay;
+
+ /*
+ * If there is any access available to current that is
+ * not available to a SMACK64MMAP subject
+ * deny access.
+ */
+ if ((may | mmay) != mmay) {
+ rc = -EACCES;
+ break;
+ }
+ }
+
+ rcu_read_unlock();
+
+ return rc;
+}
+
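/*
 * Editor's illustrative sketch, not part of the patch: the heart of the
 * loop above is a bitmask subset test.  Access is denied as soon as
 * current's access bits (may) include any bit that the SMACK64MMAP
 * label's bits (mmay) lack, i.e. when may is not a subset of mmay.
 * The R/W/X constants below are placeholders standing in for MAY_READ etc.
 */
#include <stdio.h>

#define R 0x1
#define W 0x2
#define X 0x4

/* Returns 1 when every bit set in may is also set in mmay. */
static int is_subset(int may, int mmay)
{
	return (may | mmay) == mmay;
}

int main(void)
{
	printf("%d\n", is_subset(R | W, R | W | X));	/* 1: allowed */
	printf("%d\n", is_subset(R | W, R));		/* 0: denied  */
	return 0;
}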
+/**
* smack_file_set_fowner - set the file security blob value
* @file: object in question
*
@@ -1007,7 +1392,9 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
*/
static int smack_file_set_fowner(struct file *file)
{
- file->f_security = current_security();
+ struct smack_known *skp = smk_of_current();
+
+ file->f_security = skp->smk_known;
return 0;
}
@@ -1025,23 +1412,26 @@ static int smack_file_set_fowner(struct file *file)
static int smack_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int signum)
{
+ struct smack_known *skp;
+ struct smack_known *tkp = smk_of_task(tsk->cred->security);
struct file *file;
int rc;
- char *tsp = tsk->cred->security;
struct smk_audit_info ad;
/*
* struct fown_struct is never outside the context of a struct file
*/
file = container_of(fown, struct file, f_owner);
+
/* we don't log here as rc can be overridden */
- rc = smk_access(file->f_security, tsp, MAY_WRITE, NULL);
+ skp = smk_find_entry(file->f_security);
+ rc = smk_access(skp, tkp->smk_known, MAY_WRITE, NULL);
if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
rc = 0;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, tsk);
- smack_log(file->f_security, tsp, MAY_WRITE, rc, &ad);
+ smack_log(file->f_security, tkp->smk_known, MAY_WRITE, rc, &ad);
return rc;
}
@@ -1056,7 +1446,7 @@ static int smack_file_receive(struct file *file)
int may = 0;
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
/*
* This code relies on bitmasks.
@@ -1069,6 +1459,37 @@ static int smack_file_receive(struct file *file)
return smk_curacc(file->f_security, may, &ad);
}
+/**
+ * smack_file_open - Smack dentry open processing
+ * @file: the object
+ * @cred: task credential
+ *
+ * Set the security blob in the file structure.
+ * Allow the open only if the task has read access. There are
+ * many read operations (e.g. fstat) that you can do with an
+ * fd even if you have the file open write-only.
+ *
+ * Returns 0
+ */
+static int smack_file_open(struct file *file, const struct cred *cred)
+{
+ struct task_smack *tsp = cred->security;
+ struct inode_smack *isp = file_inode(file)->i_security;
+ struct smk_audit_info ad;
+ int rc;
+
+ if (smack_privileged(CAP_MAC_OVERRIDE))
+ return 0;
+
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_access(tsp->smk_task, isp->smk_inode, MAY_READ, &ad);
+ if (rc == 0)
+ file->f_security = isp->smk_inode;
+
+ return rc;
+}
+
/*
* Task hooks
*/
@@ -1084,7 +1505,14 @@ static int smack_file_receive(struct file *file)
*/
static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
- cred->security = NULL;
+ struct task_smack *tsp;
+
+ tsp = new_task_smack(NULL, NULL, gfp);
+ if (tsp == NULL)
+ return -ENOMEM;
+
+ cred->security = tsp;
+
return 0;
}
@@ -1093,13 +1521,24 @@ static int smack_cred_alloc_blank(struct cred *cred, gfp_t gfp)
* smack_cred_free - "free" task-level security credentials
* @cred: the credentials in question
*
- * Smack isn't using copies of blobs. Everyone
- * points to an immutable list. The blobs never go away.
- * There is no leak here.
*/
static void smack_cred_free(struct cred *cred)
{
+ struct task_smack *tsp = cred->security;
+ struct smack_rule *rp;
+ struct list_head *l;
+ struct list_head *n;
+
+ if (tsp == NULL)
+ return;
cred->security = NULL;
+
+ list_for_each_safe(l, n, &tsp->smk_rules) {
+ rp = list_entry(l, struct smack_rule, list);
+ list_del(&rp->list);
+ kfree(rp);
+ }
+ kfree(tsp);
}
/**
@@ -1113,17 +1552,20 @@ static void smack_cred_free(struct cred *cred)
static int smack_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- new->security = old->security;
- return 0;
-}
+ struct task_smack *old_tsp = old->security;
+ struct task_smack *new_tsp;
+ int rc;
-/**
- * smack_cred_commit - commit new credentials
- * @new: the new credentials
- * @old: the original credentials
- */
-static void smack_cred_commit(struct cred *new, const struct cred *old)
-{
+ new_tsp = new_task_smack(old_tsp->smk_task, old_tsp->smk_task, gfp);
+ if (new_tsp == NULL)
+ return -ENOMEM;
+
+ rc = smk_copy_rules(&new_tsp->smk_rules, &old_tsp->smk_rules, gfp);
+ if (rc != 0)
+ return rc;
+
+ new->security = new_tsp;
+ return 0;
}
/**
@@ -1135,7 +1577,16 @@ static void smack_cred_commit(struct cred *new, const struct cred *old)
*/
static void smack_cred_transfer(struct cred *new, const struct cred *old)
{
- new->security = old->security;
+ struct task_smack *old_tsp = old->security;
+ struct task_smack *new_tsp = new->security;
+
+ new_tsp->smk_task = old_tsp->smk_task;
+ new_tsp->smk_forked = old_tsp->smk_task;
+ mutex_init(&new_tsp->smk_rules_lock);
+ INIT_LIST_HEAD(&new_tsp->smk_rules);
+
+
+ /* cbs copy rule list */
}
/**
@@ -1147,12 +1598,13 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
*/
static int smack_kernel_act_as(struct cred *new, u32 secid)
{
- char *smack = smack_from_secid(secid);
+ struct task_smack *new_tsp = new->security;
+ struct smack_known *skp = smack_from_secid(secid);
- if (smack == NULL)
+ if (skp == NULL)
return -EINVAL;
- new->security = smack;
+ new_tsp->smk_task = skp;
return 0;
}
@@ -1168,25 +1620,30 @@ static int smack_kernel_create_files_as(struct cred *new,
struct inode *inode)
{
struct inode_smack *isp = inode->i_security;
+ struct task_smack *tsp = new->security;
- new->security = isp->smk_inode;
+ tsp->smk_forked = smk_find_entry(isp->smk_inode);
+ tsp->smk_task = tsp->smk_forked;
return 0;
}
/**
* smk_curacc_on_task - helper to log task related access
* @p: the task object
- * @access : the access requested
+ * @access: the access requested
+ * @caller: name of the calling function for audit
*
* Return 0 if access is permitted
*/
-static int smk_curacc_on_task(struct task_struct *p, int access)
+static int smk_curacc_on_task(struct task_struct *p, int access,
+ const char *caller)
{
struct smk_audit_info ad;
+ struct smack_known *skp = smk_of_task(task_security(p));
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+ smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
- return smk_curacc(task_security(p), access, &ad);
+ return smk_curacc(skp->smk_known, access, &ad);
}
/**
@@ -1198,7 +1655,7 @@ static int smk_curacc_on_task(struct task_struct *p, int access)
*/
static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
{
- return smk_curacc_on_task(p, MAY_WRITE);
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
@@ -1209,7 +1666,7 @@ static int smack_task_setpgid(struct task_struct *p, pid_t pgid)
*/
static int smack_task_getpgid(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1220,7 +1677,7 @@ static int smack_task_getpgid(struct task_struct *p)
*/
static int smack_task_getsid(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1232,7 +1689,9 @@ static int smack_task_getsid(struct task_struct *p)
*/
static void smack_task_getsecid(struct task_struct *p, u32 *secid)
{
- *secid = smack_to_secid(task_security(p));
+ struct smack_known *skp = smk_of_task(task_security(p));
+
+ *secid = skp->smk_secid;
}
/**
@@ -1248,7 +1707,7 @@ static int smack_task_setnice(struct task_struct *p, int nice)
rc = cap_task_setnice(p, nice);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1265,7 +1724,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio)
rc = cap_task_setioprio(p, ioprio);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1277,7 +1736,7 @@ static int smack_task_setioprio(struct task_struct *p, int ioprio)
*/
static int smack_task_getioprio(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1288,14 +1747,13 @@ static int smack_task_getioprio(struct task_struct *p)
*
* Return 0 if read access is permitted
*/
-static int smack_task_setscheduler(struct task_struct *p, int policy,
- struct sched_param *lp)
+static int smack_task_setscheduler(struct task_struct *p)
{
int rc;
- rc = cap_task_setscheduler(p, policy, lp);
+ rc = cap_task_setscheduler(p);
if (rc == 0)
- rc = smk_curacc_on_task(p, MAY_WRITE);
+ rc = smk_curacc_on_task(p, MAY_WRITE, __func__);
return rc;
}
@@ -1307,7 +1765,7 @@ static int smack_task_setscheduler(struct task_struct *p, int policy,
*/
static int smack_task_getscheduler(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_READ);
+ return smk_curacc_on_task(p, MAY_READ, __func__);
}
/**
@@ -1318,7 +1776,7 @@ static int smack_task_getscheduler(struct task_struct *p)
*/
static int smack_task_movememory(struct task_struct *p)
{
- return smk_curacc_on_task(p, MAY_WRITE);
+ return smk_curacc_on_task(p, MAY_WRITE, __func__);
}
/**
@@ -1337,6 +1795,8 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid)
{
struct smk_audit_info ad;
+ struct smack_known *skp;
+ struct smack_known *tkp = smk_of_task(task_security(p));
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
@@ -1345,53 +1805,33 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
* can write the receiver.
*/
if (secid == 0)
- return smk_curacc(task_security(p), MAY_WRITE, &ad);
+ return smk_curacc(tkp->smk_known, MAY_WRITE, &ad);
/*
* If the secid isn't 0 we're dealing with some USB IO
* specific behavior. This is not clean. For one thing
* we can't take privilege into account.
*/
- return smk_access(smack_from_secid(secid), task_security(p),
- MAY_WRITE, &ad);
+ skp = smack_from_secid(secid);
+ return smk_access(skp, tkp->smk_known, MAY_WRITE, &ad);
}
/**
* smack_task_wait - Smack access check for waiting
* @p: task to wait for
*
- * Returns 0 if current can wait for p, error code otherwise
+ * Returns 0
*/
static int smack_task_wait(struct task_struct *p)
{
- struct smk_audit_info ad;
- char *sp = current_security();
- char *tsp = task_security(p);
- int rc;
-
- /* we don't log here, we can be overriden */
- rc = smk_access(sp, tsp, MAY_WRITE, NULL);
- if (rc == 0)
- goto out_log;
-
/*
- * Allow the operation to succeed if either task
- * has privilege to perform operations that might
- * account for the smack labels having gotten to
- * be different in the first place.
- *
- * This breaks the strict subject/object access
- * control ideal, taking the object's privilege
- * state into account in the decision as well as
- * the smack value.
+ * Allow the operation to succeed.
+ * Zombies are bad.
+ * In userless environments (e.g. phones) programs
+ * get marked with SMACK64EXEC and even if the parent
+ * and child shouldn't be talking the parent still
+ * may expect to know when the child exits.
*/
- if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE))
- rc = 0;
- /* we log only if we didn't get overriden */
- out_log:
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
- smk_ad_setfield_u_tsk(&ad, p);
- smack_log(sp, tsp, MAY_WRITE, rc, &ad);
- return rc;
+ return 0;
}
/**
@@ -1404,7 +1844,9 @@ static int smack_task_wait(struct task_struct *p)
static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
{
struct inode_smack *isp = inode->i_security;
- isp->smk_inode = task_security(p);
+ struct smack_known *skp = smk_of_task(task_security(p));
+
+ isp->smk_inode = skp->smk_known;
}
/*
@@ -1423,16 +1865,16 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
*/
static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
{
- char *csp = current_security();
+ struct smack_known *skp = smk_of_current();
struct socket_smack *ssp;
ssp = kzalloc(sizeof(struct socket_smack), gfp_flags);
if (ssp == NULL)
return -ENOMEM;
- ssp->smk_in = csp;
- ssp->smk_out = csp;
- ssp->smk_packet[0] = '\0';
+ ssp->smk_in = skp;
+ ssp->smk_out = skp;
+ ssp->smk_packet = NULL;
sk->sk_security = ssp;
@@ -1488,65 +1930,6 @@ static char *smack_host_label(struct sockaddr_in *sip)
}
/**
- * smack_set_catset - convert a capset to netlabel mls categories
- * @catset: the Smack categories
- * @sap: where to put the netlabel categories
- *
- * Allocates and fills attr.mls.cat
- */
-static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap)
-{
- unsigned char *cp;
- unsigned char m;
- int cat;
- int rc;
- int byte;
-
- if (!catset)
- return;
-
- sap->flags |= NETLBL_SECATTR_MLS_CAT;
- sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
- sap->attr.mls.cat->startbit = 0;
-
- for (cat = 1, cp = catset, byte = 0; byte < SMK_LABELLEN; cp++, byte++)
- for (m = 0x80; m != 0; m >>= 1, cat++) {
- if ((m & *cp) == 0)
- continue;
- rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat,
- cat, GFP_ATOMIC);
- }
-}
-
-/**
- * smack_to_secattr - fill a secattr from a smack value
- * @smack: the smack value
- * @nlsp: where the result goes
- *
- * Casey says that CIPSO is good enough for now.
- * It can be used to effect.
- * It can also be abused to effect when necessary.
- * Appologies to the TSIG group in general and GW in particular.
- */
-static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp)
-{
- struct smack_cipso cipso;
- int rc;
-
- nlsp->domain = smack;
- nlsp->flags = NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
-
- rc = smack_to_cipso(smack, &cipso);
- if (rc == 0) {
- nlsp->attr.mls.lvl = cipso.smk_level;
- smack_set_catset(cipso.smk_catset, nlsp);
- } else {
- nlsp->attr.mls.lvl = smack_cipso_direct;
- smack_set_catset(smack, nlsp);
- }
-}
-
-/**
* smack_netlabel - Set the secattr on a socket
* @sk: the socket
* @labeled: socket label scheme
@@ -1558,8 +1941,8 @@ static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp)
*/
static int smack_netlabel(struct sock *sk, int labeled)
{
+ struct smack_known *skp;
struct socket_smack *ssp = sk->sk_security;
- struct netlbl_lsm_secattr secattr;
int rc = 0;
/*
@@ -1577,10 +1960,8 @@ static int smack_netlabel(struct sock *sk, int labeled)
labeled == SMACK_UNLABELED_SOCKET)
netlbl_sock_delattr(sk);
else {
- netlbl_secattr_init(&secattr);
- smack_to_secattr(ssp->smk_out, &secattr);
- rc = netlbl_sock_setattr(sk, sk->sk_family, &secattr);
- netlbl_secattr_destroy(&secattr);
+ skp = ssp->smk_out;
+ rc = netlbl_sock_setattr(sk, sk->sk_family, &skp->smk_netlabel);
}
bh_unlock_sock(sk);
@@ -1602,6 +1983,7 @@ static int smack_netlabel(struct sock *sk, int labeled)
*/
static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
{
+ struct smack_known *skp;
int rc;
int sk_lbl;
char *hostsp;
@@ -1611,14 +1993,17 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
rcu_read_lock();
hostsp = smack_host_label(sap);
if (hostsp != NULL) {
- sk_lbl = SMACK_UNLABELED_SOCKET;
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = sap->sin_family;
- ad.a.u.net.dport = sap->sin_port;
- ad.a.u.net.v4info.daddr = sap->sin_addr.s_addr;
+ struct lsm_network_audit net;
+
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sap->sin_family;
+ ad.a.u.net->dport = sap->sin_port;
+ ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
#endif
- rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad);
+ sk_lbl = SMACK_UNLABELED_SOCKET;
+ skp = ssp->smk_out;
+ rc = smk_access(skp, hostsp, MAY_WRITE, &ad);
} else {
sk_lbl = SMACK_CIPSO_SOCKET;
rc = 0;
@@ -1631,6 +2016,153 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
}
/**
+ * smk_ipv6_port_label - Smack port access table management
+ * @sock: socket
+ * @address: address
+ *
+ * Create or update the port list entry
+ */
+static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_in6 *addr6;
+ struct socket_smack *ssp = sock->sk->sk_security;
+ struct smk_port_label *spp;
+ unsigned short port = 0;
+
+ if (address == NULL) {
+ /*
+ * This operation is changing the Smack information
+ * on the bound socket. Take the changes to the port
+ * as well.
+ */
+ list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+ if (sk != spp->smk_sock)
+ continue;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+ return;
+ }
+ /*
+ * A NULL address is only used for updating existing
+ * bound entries. If there isn't one, it's OK.
+ */
+ return;
+ }
+
+ addr6 = (struct sockaddr_in6 *)address;
+ port = ntohs(addr6->sin6_port);
+ /*
+ * This is a special case that is safely ignored.
+ */
+ if (port == 0)
+ return;
+
+ /*
+ * Look for an existing port list entry.
+ * This is an indication that a port is getting reused.
+ */
+ list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+ if (spp->smk_port != port)
+ continue;
+ spp->smk_port = port;
+ spp->smk_sock = sk;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+ return;
+ }
+
+ /*
+ * A new port entry is required.
+ */
+ spp = kzalloc(sizeof(*spp), GFP_KERNEL);
+ if (spp == NULL)
+ return;
+
+ spp->smk_port = port;
+ spp->smk_sock = sk;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+
+ list_add(&spp->list, &smk_ipv6_port_list);
+ return;
+}
+
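/*
 * Editor's illustrative sketch, not part of the patch: smk_ipv6_port_label()
 * above keeps a create-or-update list keyed by port number (the assumed
 * shape of struct smk_port_label follows from the field uses in this hunk).
 * A self-contained user-space analogue of that bookkeeping:
 */
#include <stdio.h>
#include <stdlib.h>

struct port_label {
	struct port_label *next;
	unsigned short port;
	const char *in;
	const char *out;
};

static struct port_label *ports;

/* Reuse an existing entry when the port is already present, else prepend. */
static void port_label_set(unsigned short port, const char *in, const char *out)
{
	struct port_label *p;

	for (p = ports; p != NULL; p = p->next)
		if (p->port == port)
			break;
	if (p == NULL) {
		p = calloc(1, sizeof(*p));
		if (p == NULL)
			return;
		p->port = port;
		p->next = ports;
		ports = p;
	}
	p->in = in;
	p->out = out;
}

int main(void)
{
	port_label_set(8080, "System", "System");
	port_label_set(8080, "User", "User");	/* updates the existing entry */
	printf("%u: in=%s out=%s\n", ports->port, ports->in, ports->out);
	return 0;
}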
+/**
+ * smk_ipv6_port_check - check Smack port access
+ * @sk: socket
+ * @address: address
+ * @act: the operation being checked (SMK_RECEIVING, SMK_SENDING or
+ *	 SMK_CONNECTING)
+ *
+ * Returns 0 if access to the port is permitted, an error code otherwise
+ */
+static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
+ int act)
+{
+ __be16 *bep;
+ __be32 *be32p;
+ struct smk_port_label *spp;
+ struct socket_smack *ssp = sk->sk_security;
+ struct smack_known *skp;
+ unsigned short port = 0;
+ char *object;
+ struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
+
+ if (act == SMK_RECEIVING) {
+ skp = smack_net_ambient;
+ object = ssp->smk_in->smk_known;
+ } else {
+ skp = ssp->smk_out;
+ object = smack_net_ambient->smk_known;
+ }
+
+ /*
+ * Get the IP address and port from the address.
+ */
+ port = ntohs(address->sin6_port);
+ bep = (__be16 *)(&address->sin6_addr);
+ be32p = (__be32 *)(&address->sin6_addr);
+
+ /*
+ * It's remote, so port lookup does no good.
+ */
+ if (be32p[0] || be32p[1] || be32p[2] || bep[6] || ntohs(bep[7]) != 1)
+ goto auditout;
+
+ /*
+ * It's local so the send check has to have passed.
+ */
+ if (act == SMK_RECEIVING) {
+ skp = &smack_known_web;
+ goto auditout;
+ }
+
+ list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+ if (spp->smk_port != port)
+ continue;
+ object = spp->smk_in->smk_known;
+ if (act == SMK_CONNECTING)
+ ssp->smk_packet = spp->smk_out;
+ break;
+ }
+
+auditout:
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sk->sk_family;
+ ad.a.u.net->dport = port;
+ if (act == SMK_RECEIVING)
+ ad.a.u.net->v6info.saddr = address->sin6_addr;
+ else
+ ad.a.u.net->v6info.daddr = address->sin6_addr;
+#endif
+ return smk_access(skp, object, MAY_WRITE, &ad);
+}
+
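/*
 * Editor's illustrative sketch, not part of the patch: the address test in
 * the check above ("it's remote, so port lookup does no good") is an inline
 * IPv6 loopback (::1) match: the first three 32-bit words and the seventh
 * 16-bit word must be zero, and the last 16-bit word must equal 1.  A
 * self-contained version of the same predicate:
 */
#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

static int is_loopback6(const struct in6_addr *a)
{
	const uint32_t *be32p = (const uint32_t *)a;
	const uint16_t *bep = (const uint16_t *)a;

	return !(be32p[0] || be32p[1] || be32p[2] ||
		 bep[6] || ntohs(bep[7]) != 1);
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::1", &a);
	printf("::1 -> %d\n", is_loopback6(&a));		/* 1 */
	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("2001:db8::1 -> %d\n", is_loopback6(&a));	/* 0 */
	return 0;
}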
+/**
* smack_inode_setsecurity - set smack xattrs
* @inode: the object
* @name: attribute name
@@ -1645,21 +2177,21 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
static int smack_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
- char *sp;
+ struct smack_known *skp;
struct inode_smack *nsp = inode->i_security;
struct socket_smack *ssp;
struct socket *sock;
int rc = 0;
- if (value == NULL || size > SMK_LABELLEN || size == 0)
- return -EACCES;
+ if (value == NULL || size > SMK_LONGLABEL || size == 0)
+ return -EINVAL;
- sp = smk_import(value, size);
- if (sp == NULL)
+ skp = smk_import_entry(value, size);
+ if (skp == NULL)
return -EINVAL;
if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
- nsp->smk_inode = sp;
+ nsp->smk_inode = skp->smk_known;
nsp->smk_flags |= SMK_INODE_INSTANT;
return 0;
}
@@ -1676,16 +2208,22 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
ssp = sock->sk->sk_security;
if (strcmp(name, XATTR_SMACK_IPIN) == 0)
- ssp->smk_in = sp;
+ ssp->smk_in = skp;
else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) {
- ssp->smk_out = sp;
- rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
- if (rc != 0)
- printk(KERN_WARNING "Smack: \"%s\" netlbl error %d.\n",
- __func__, -rc);
+ ssp->smk_out = skp;
+ if (sock->sk->sk_family == PF_INET) {
+ rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
+ if (rc != 0)
+ printk(KERN_WARNING
+ "Smack: \"%s\" netlbl error %d.\n",
+ __func__, -rc);
+ }
} else
return -EOPNOTSUPP;
+ if (sock->sk->sk_family == PF_INET6)
+ smk_ipv6_port_label(sock, NULL);
+
return 0;
}
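/*
 * Editor's illustrative sketch, not part of the patch: for sockets this hook
 * is normally reached by a Smack-privileged process calling fsetxattr(2) on
 * the socket descriptor with security.SMACK64IPIN/IPOUT.  A minimal sketch;
 * the label value is hypothetical.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/xattr.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	const char *label = "Snap";	/* hypothetical Smack label */

	if (fd < 0)
		return 1;
	/* Reaches smack_inode_setsecurity() with XATTR_SMACK_IPOUT and,
	 * for PF_INET sockets, re-labels the netlabel attributes. */
	if (fsetxattr(fd, "security.SMACK64IPOUT", label, strlen(label), 0))
		perror("fsetxattr");
	return 0;
}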
@@ -1713,6 +2251,25 @@ static int smack_socket_post_create(struct socket *sock, int family,
}
/**
+ * smack_socket_bind - record port binding information.
+ * @sock: the socket
+ * @address: the port address
+ * @addrlen: size of the address
+ *
+ * Records the label bound to a port.
+ *
+ * Returns 0
+ */
+static int smack_socket_bind(struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ if (sock->sk != NULL && sock->sk->sk_family == PF_INET6)
+ smk_ipv6_port_label(sock, address);
+
+ return 0;
+}
+
+/**
* smack_socket_connect - connect access check
* @sock: the socket
* @sap: the other end
@@ -1725,12 +2282,25 @@ static int smack_socket_post_create(struct socket *sock, int family,
static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
int addrlen)
{
- if (sock->sk == NULL || sock->sk->sk_family != PF_INET)
+ int rc = 0;
+
+ if (sock->sk == NULL)
return 0;
- if (addrlen < sizeof(struct sockaddr_in))
- return -EINVAL;
- return smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
+ switch (sock->sk->sk_family) {
+ case PF_INET:
+ if (addrlen < sizeof(struct sockaddr_in))
+ return -EINVAL;
+ rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
+ break;
+ case PF_INET6:
+ if (addrlen < sizeof(struct sockaddr_in6))
+ return -EINVAL;
+ rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
+ SMK_CONNECTING);
+ break;
+ }
+ return rc;
}
/**
@@ -1761,7 +2331,9 @@ static int smack_flags_to_may(int flags)
*/
static int smack_msg_msg_alloc_security(struct msg_msg *msg)
{
- msg->security = current_security();
+ struct smack_known *skp = smk_of_current();
+
+ msg->security = skp->smk_known;
return 0;
}
@@ -1796,8 +2368,9 @@ static char *smack_of_shm(struct shmid_kernel *shp)
static int smack_shm_alloc_security(struct shmid_kernel *shp)
{
struct kern_ipc_perm *isp = &shp->shm_perm;
+ struct smack_known *skp = smk_of_current();
- isp->security = current_security();
+ isp->security = skp->smk_known;
return 0;
}
@@ -1919,8 +2492,9 @@ static char *smack_of_sem(struct sem_array *sma)
static int smack_sem_alloc_security(struct sem_array *sma)
{
struct kern_ipc_perm *isp = &sma->sem_perm;
+ struct smack_known *skp = smk_of_current();
- isp->security = current_security();
+ isp->security = skp->smk_known;
return 0;
}
@@ -2037,8 +2611,9 @@ static int smack_sem_semop(struct sem_array *sma, struct sembuf *sops,
static int smack_msg_queue_alloc_security(struct msg_queue *msq)
{
struct kern_ipc_perm *kisp = &msq->q_perm;
+ struct smack_known *skp = smk_of_current();
- kisp->security = current_security();
+ kisp->security = skp->smk_known;
return 0;
}
@@ -2200,7 +2775,7 @@ static void smack_ipc_getsecid(struct kern_ipc_perm *ipp, u32 *secid)
/**
* smack_d_instantiate - Make sure the blob is correct on an inode
- * @opt_dentry: unused
+ * @opt_dentry: dentry where inode will be attached
* @inode: the object
*
* Set the inode's security blob if it hasn't been done already.
@@ -2210,9 +2785,12 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
struct super_block *sbp;
struct superblock_smack *sbsp;
struct inode_smack *isp;
- char *csp = current_security();
- char *fetched;
+ struct smack_known *skp;
+ struct smack_known *ckp = smk_of_current();
char *final;
+ char trattr[TRANS_TRUE_SIZE];
+ int transflag = 0;
+ int rc;
struct dentry *dp;
if (inode == NULL)
@@ -2243,6 +2821,15 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* of the superblock.
*/
if (opt_dentry->d_parent == opt_dentry) {
+ if (sbp->s_magic == CGROUP_SUPER_MAGIC) {
+ /*
+ * The cgroup filesystem is never mounted,
+ * so there's no opportunity to set the mount
+ * options.
+ */
+ sbsp->smk_root = smack_known_star.smk_known;
+ sbsp->smk_default = smack_known_star.smk_known;
+ }
isp->smk_inode = sbsp->smk_root;
isp->smk_flags |= SMK_INODE_INSTANT;
goto unlockandout;
@@ -2256,16 +2843,20 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
switch (sbp->s_magic) {
case SMACK_MAGIC:
+ case PIPEFS_MAGIC:
+ case SOCKFS_MAGIC:
+ case CGROUP_SUPER_MAGIC:
/*
- * Casey says that it's a little embarassing
+ * Casey says that it's a little embarrassing
* that the smack file system doesn't do
* extended attributes.
- */
- final = smack_known_star.smk_known;
- break;
- case PIPEFS_MAGIC:
- /*
+ *
* Casey says pipes are easy (?)
+ *
+ * Socket access is controlled by the socket
+ * structures associated with the task involved.
+ *
+ * Cgroupfs is special
*/
final = smack_known_star.smk_known;
break;
@@ -2275,13 +2866,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* Programs that change smack have to treat the
* pty with respect.
*/
- final = csp;
- break;
- case SOCKFS_MAGIC:
- /*
- * Casey says sockets get the smack of the task.
- */
- final = csp;
+ final = ckp->smk_known;
break;
case PROC_SUPER_MAGIC:
/*
@@ -2308,7 +2893,16 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
/*
* This isn't an understood special case.
* Get the value from the xattr.
- *
+ */
+
+ /*
+ * UNIX domain sockets use lower level socket data.
+ */
+ if (S_ISSOCK(inode->i_mode)) {
+ final = smack_known_star.smk_known;
+ break;
+ }
+ /*
* No xattr support means, alas, no SMACK label.
* Use the aforeapplied default.
* It would be curious if the label of the task
@@ -2319,30 +2913,63 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
/*
* Get the dentry for xattr.
*/
- if (opt_dentry == NULL) {
- dp = d_find_alias(inode);
- if (dp == NULL)
- break;
- } else {
- dp = dget(opt_dentry);
- if (dp == NULL)
- break;
- }
+ dp = dget(opt_dentry);
+ skp = smk_fetch(XATTR_NAME_SMACK, inode, dp);
+ if (skp != NULL)
+ final = skp->smk_known;
- fetched = smk_fetch(inode, dp);
- if (fetched != NULL)
- final = fetched;
+ /*
+ * Transmuting directory
+ */
+ if (S_ISDIR(inode->i_mode)) {
+ /*
+ * If this is a new directory and the label was
+ * transmuted when the inode was initialized
+ * set the transmute attribute on the directory
+ * and mark the inode.
+ *
+ * If there is a transmute attribute on the
+ * directory mark the inode.
+ */
+ if (isp->smk_flags & SMK_INODE_CHANGED) {
+ isp->smk_flags &= ~SMK_INODE_CHANGED;
+ rc = inode->i_op->setxattr(dp,
+ XATTR_NAME_SMACKTRANSMUTE,
+ TRANS_TRUE, TRANS_TRUE_SIZE,
+ 0);
+ } else {
+ rc = inode->i_op->getxattr(dp,
+ XATTR_NAME_SMACKTRANSMUTE, trattr,
+ TRANS_TRUE_SIZE);
+ if (rc >= 0 && strncmp(trattr, TRANS_TRUE,
+ TRANS_TRUE_SIZE) != 0)
+ rc = -EINVAL;
+ }
+ if (rc >= 0)
+ transflag = SMK_INODE_TRANSMUTE;
+ }
+ /*
+ * Don't let the exec or mmap label be "*" or "@".
+ */
+ skp = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp);
+ if (skp == &smack_known_star || skp == &smack_known_web)
+ skp = NULL;
+ isp->smk_task = skp;
+ skp = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp);
+ if (skp == &smack_known_star || skp == &smack_known_web)
+ skp = NULL;
+ isp->smk_mmap = skp;
dput(dp);
break;
}
if (final == NULL)
- isp->smk_inode = csp;
+ isp->smk_inode = ckp->smk_known;
else
isp->smk_inode = final;
- isp->smk_flags |= SMK_INODE_INSTANT;
+ isp->smk_flags |= (SMK_INODE_INSTANT | transflag);
unlockandout:
mutex_unlock(&isp->smk_lock);
@@ -2361,13 +2988,14 @@ unlockandout:
*/
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
+ struct smack_known *skp = smk_of_task(task_security(p));
char *cp;
int slen;
if (strcmp(name, "current") != 0)
return -EINVAL;
- cp = kstrdup(task_security(p), GFP_KERNEL);
+ cp = kstrdup(skp->smk_known, GFP_KERNEL);
if (cp == NULL)
return -ENOMEM;
@@ -2391,8 +3019,9 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value)
static int smack_setprocattr(struct task_struct *p, char *name,
void *value, size_t size)
{
+ struct task_smack *tsp;
struct cred *new;
- char *newsmack;
+ struct smack_known *skp;
/*
* Changing another process' Smack value is too dangerous
@@ -2401,53 +3030,80 @@ static int smack_setprocattr(struct task_struct *p, char *name,
if (p != current)
return -EPERM;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
- if (value == NULL || size == 0 || size >= SMK_LABELLEN)
+ if (value == NULL || size == 0 || size >= SMK_LONGLABEL)
return -EINVAL;
if (strcmp(name, "current") != 0)
return -EINVAL;
- newsmack = smk_import(value, size);
- if (newsmack == NULL)
+ skp = smk_import_entry(value, size);
+ if (skp == NULL)
return -EINVAL;
/*
* No process is ever allowed the web ("@") label.
*/
- if (newsmack == smack_known_web.smk_known)
+ if (skp == &smack_known_web)
return -EPERM;
new = prepare_creds();
if (new == NULL)
return -ENOMEM;
- new->security = newsmack;
+
+ tsp = new->security;
+ tsp->smk_task = skp;
+
commit_creds(new);
return size;
}
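/*
 * Editor's illustrative sketch, not part of the patch: the setprocattr path
 * above is driven from user space by writing the new label to
 * /proc/self/attr/current.  A minimal sketch, assuming the writer is
 * Smack-privileged (CAP_MAC_ADMIN plus any onlycap restriction); the label
 * is hypothetical and must not be "@".
 */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	const char *label = "Rubble";	/* hypothetical label */
	int fd = open("/proc/self/attr/current", O_WRONLY);

	if (fd < 0)
		return 1;
	/* On success smack_setprocattr() swaps tsp->smk_task via new creds. */
	if (write(fd, label, strlen(label)) < 0)
		perror("write");
	close(fd);
	return 0;
}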
/**
* smack_unix_stream_connect - Smack access on UDS
- * @sock: one socket
- * @other: the other socket
+ * @sock: one sock
+ * @other: the other sock
* @newsk: unused
*
* Return 0 if a subject with the smack of sock could access
* an object with the smack of other, otherwise an error code
*/
-static int smack_unix_stream_connect(struct socket *sock,
- struct socket *other, struct sock *newsk)
+static int smack_unix_stream_connect(struct sock *sock,
+ struct sock *other, struct sock *newsk)
{
- struct inode *sp = SOCK_INODE(sock);
- struct inode *op = SOCK_INODE(other);
+ struct smack_known *skp;
+ struct smack_known *okp;
+ struct socket_smack *ssp = sock->sk_security;
+ struct socket_smack *osp = other->sk_security;
+ struct socket_smack *nsp = newsk->sk_security;
struct smk_audit_info ad;
+ int rc = 0;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- smk_ad_setfield_u_net_sk(&ad, other->sk);
- return smk_access(smk_of_inode(sp), smk_of_inode(op),
- MAY_READWRITE, &ad);
+ if (!smack_privileged(CAP_MAC_OVERRIDE)) {
+ skp = ssp->smk_out;
+ okp = osp->smk_out;
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ smk_ad_setfield_u_net_sk(&ad, other);
+#endif
+ rc = smk_access(skp, okp->smk_known, MAY_WRITE, &ad);
+ if (rc == 0)
+ rc = smk_access(okp, okp->smk_known, MAY_WRITE, NULL);
+ }
+
+ /*
+ * Cross reference the peer labels for SO_PEERSEC.
+ */
+ if (rc == 0) {
+ nsp->smk_packet = ssp->smk_out;
+ ssp->smk_packet = osp->smk_out;
+ }
+
+ return rc;
}
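/*
 * Editor's illustrative sketch, not part of the patch: the smk_packet
 * cross-referencing above is what lets each end of a connected AF_UNIX
 * stream read the peer's label with SO_PEERSEC (served later by
 * smack_socket_getpeersec_stream()).  Sketch of the client side; the
 * server path is hypothetical and must already be listening.
 */
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_un sun = { .sun_family = AF_UNIX };
	char label[256];
	socklen_t len = sizeof(label);
	int fd = socket(AF_UNIX, SOCK_STREAM, 0);

	strncpy(sun.sun_path, "/run/demo.sock", sizeof(sun.sun_path) - 1);
	if (fd < 0 || connect(fd, (struct sockaddr *)&sun, sizeof(sun)) != 0)
		return 1;
	/* Returns the label stashed in ssp->smk_packet by the hook above. */
	if (getsockopt(fd, SOL_SOCKET, SO_PEERSEC, label, &len) == 0)
		printf("peer label: %.*s\n", (int)len, label);
	close(fd);
	return 0;
}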
/**
@@ -2460,13 +3116,23 @@ static int smack_unix_stream_connect(struct socket *sock,
*/
static int smack_unix_may_send(struct socket *sock, struct socket *other)
{
- struct inode *sp = SOCK_INODE(sock);
- struct inode *op = SOCK_INODE(other);
+ struct socket_smack *ssp = sock->sk->sk_security;
+ struct socket_smack *osp = other->sk->sk_security;
+ struct smack_known *skp;
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
smk_ad_setfield_u_net_sk(&ad, other->sk);
- return smk_access(smk_of_inode(sp), smk_of_inode(op), MAY_WRITE, &ad);
+#endif
+
+ if (smack_privileged(CAP_MAC_OVERRIDE))
+ return 0;
+
+ skp = ssp->smk_out;
+ return smk_access(skp, osp->smk_in->smk_known, MAY_WRITE, &ad);
}
/**
@@ -2475,37 +3141,48 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
* @msg: the message
* @size: the size of the message
*
- * Return 0 if the current subject can write to the destination
- * host. This is only a question if the destination is a single
- * label host.
+ * Return 0 if the current subject can write to the destination host.
+ * For IPv4 this is only a question if the destination is a single label host.
+ * For IPv6 this is a check against the label of the port.
*/
static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
int size)
{
struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
+ struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
+ int rc = 0;
/*
* Perfectly reasonable for this to be NULL
*/
- if (sip == NULL || sip->sin_family != AF_INET)
+ if (sip == NULL)
return 0;
- return smack_netlabel_send(sock->sk, sip);
+ switch (sip->sin_family) {
+ case AF_INET:
+ rc = smack_netlabel_send(sock->sk, sip);
+ break;
+ case AF_INET6:
+ rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING);
+ break;
+ }
+ return rc;
}
-
/**
* smack_from_secattr - Convert a netlabel attr.mls.lvl/attr.mls.cat pair to smack
* @sap: netlabel secattr
- * @sip: where to put the result
+ * @ssp: socket security information
*
- * Copies a smack label into sip
+ * Returns a pointer to a Smack label entry found on the label list.
*/
-static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
+static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
+ struct socket_smack *ssp)
{
- char smack[SMK_LABELLEN];
- char *sp;
- int pcat;
+ struct smack_known *skp;
+ int found = 0;
+ int acat;
+ int kcat;
if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) {
/*
@@ -2513,40 +3190,52 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
* If there are flags but no level netlabel isn't
* behaving the way we expect it to.
*
- * Get the categories, if any
+ * Look it up in the label table
* Without guidance regarding the smack value
* for the packet fall back on the network
* ambient value.
*/
- memset(smack, '\0', SMK_LABELLEN);
- if ((sap->flags & NETLBL_SECATTR_MLS_CAT) != 0)
- for (pcat = -1;;) {
- pcat = netlbl_secattr_catmap_walk(
- sap->attr.mls.cat, pcat + 1);
- if (pcat < 0)
+ rcu_read_lock();
+ list_for_each_entry(skp, &smack_known_list, list) {
+ if (sap->attr.mls.lvl != skp->smk_netlabel.attr.mls.lvl)
+ continue;
+ /*
+ * Compare the catsets. Use the netlbl APIs.
+ */
+ if ((sap->flags & NETLBL_SECATTR_MLS_CAT) == 0) {
+ if ((skp->smk_netlabel.flags &
+ NETLBL_SECATTR_MLS_CAT) == 0)
+ found = 1;
+ break;
+ }
+ for (acat = -1, kcat = -1; acat == kcat; ) {
+ acat = netlbl_secattr_catmap_walk(
+ sap->attr.mls.cat, acat + 1);
+ kcat = netlbl_secattr_catmap_walk(
+ skp->smk_netlabel.attr.mls.cat,
+ kcat + 1);
+ if (acat < 0 || kcat < 0)
break;
- smack_catset_bit(pcat, smack);
}
- /*
- * If it is CIPSO using smack direct mapping
- * we are already done. WeeHee.
- */
- if (sap->attr.mls.lvl == smack_cipso_direct) {
- memcpy(sip, smack, SMK_MAXLEN);
- return;
+ if (acat == kcat) {
+ found = 1;
+ break;
+ }
}
- /*
- * Look it up in the supplied table if it is not
- * a direct mapping.
- */
- smack_from_cipso(sap->attr.mls.lvl, smack, sip);
- return;
+ rcu_read_unlock();
+
+ if (found)
+ return skp;
+
+ if (ssp != NULL && ssp->smk_in == &smack_known_star)
+ return &smack_known_web;
+ return &smack_known_star;
}
if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
/*
* Looks like a fallback, which gives us a secid.
*/
- sp = smack_from_secid(sap->attr.secid);
+ skp = smack_from_secid(sap->attr.secid);
/*
* This has got to be a bug because it is
* impossible to specify a fallback without
@@ -2554,17 +3243,62 @@ static void smack_from_secattr(struct netlbl_lsm_secattr *sap, char *sip)
* it has a secid, and the only way to get a
* secid is from a fallback.
*/
- BUG_ON(sp == NULL);
- strncpy(sip, sp, SMK_MAXLEN);
- return;
+ BUG_ON(skp == NULL);
+ return skp;
}
/*
* Without guidance regarding the smack value
* for the packet fall back on the network
* ambient value.
*/
- strncpy(sip, smack_net_ambient, SMK_MAXLEN);
- return;
+ return smack_net_ambient;
+}
+
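/*
 * Editor's illustrative sketch, not part of the patch: the category
 * comparison above walks the packet's catmap and each known label's catmap
 * in lockstep with netlbl_secattr_catmap_walk(), declaring a match only
 * when both walks run out at the same point.  A user-space analogue over
 * sorted category arrays terminated by -1 (mirroring the walk going
 * negative when exhausted):
 */
#include <stdio.h>

static int catsets_equal(const int *a, const int *b)
{
	int i = 0;

	while (a[i] == b[i]) {
		if (a[i] < 0)
			return 1;	/* both exhausted together */
		i++;
	}
	return 0;			/* mismatch or different lengths */
}

int main(void)
{
	int pkt[]  = { 2, 5, -1 };
	int lbl1[] = { 2, 5, -1 };
	int lbl2[] = { 2, 5, 9, -1 };

	printf("%d %d\n", catsets_equal(pkt, lbl1), catsets_equal(pkt, lbl2));
	return 0;	/* prints "1 0" */
}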
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
+{
+ u8 nexthdr;
+ int offset;
+ int proto = -EINVAL;
+ struct ipv6hdr _ipv6h;
+ struct ipv6hdr *ip6;
+ __be16 frag_off;
+ struct tcphdr _tcph, *th;
+ struct udphdr _udph, *uh;
+ struct dccp_hdr _dccph, *dh;
+
+ sip->sin6_port = 0;
+
+ offset = skb_network_offset(skb);
+ ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+ if (ip6 == NULL)
+ return -EINVAL;
+ sip->sin6_addr = ip6->saddr;
+
+ nexthdr = ip6->nexthdr;
+ offset += sizeof(_ipv6h);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+ if (offset < 0)
+ return -EINVAL;
+
+ proto = nexthdr;
+ switch (proto) {
+ case IPPROTO_TCP:
+ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (th != NULL)
+ sip->sin6_port = th->source;
+ break;
+ case IPPROTO_UDP:
+ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (uh != NULL)
+ sip->sin6_port = uh->source;
+ break;
+ case IPPROTO_DCCP:
+ dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+ if (dh != NULL)
+ sip->sin6_port = dh->dccph_sport;
+ break;
+ }
+ return proto;
}
/**
@@ -2578,42 +3312,52 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
- char smack[SMK_LABELLEN];
- char *csp;
- int rc;
+ struct smack_known *skp;
+ struct sockaddr_in6 sadd;
+ int rc = 0;
struct smk_audit_info ad;
- if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
- return 0;
-
- /*
- * Translate what netlabel gave us.
- */
- netlbl_secattr_init(&secattr);
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
+ switch (sk->sk_family) {
+ case PF_INET:
+ /*
+ * Translate what netlabel gave us.
+ */
+ netlbl_secattr_init(&secattr);
- rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
- if (rc == 0) {
- smack_from_secattr(&secattr, smack);
- csp = smack;
- } else
- csp = smack_net_ambient;
+ rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
+ if (rc == 0)
+ skp = smack_from_secattr(&secattr, ssp);
+ else
+ skp = smack_net_ambient;
- netlbl_secattr_destroy(&secattr);
+ netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = sk->sk_family;
- ad.a.u.net.netif = skb->skb_iif;
- ipv4_skb_to_auditdata(skb, &ad.a, NULL);
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sk->sk_family;
+ ad.a.u.net->netif = skb->skb_iif;
+ ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
- /*
- * Receiving a packet requires that the other end
- * be able to write here. Read access is not required.
- * This is the simplist possible security model
- * for networking.
- */
- rc = smk_access(csp, ssp->smk_in, MAY_WRITE, &ad);
- if (rc != 0)
- netlbl_skbuff_err(skb, rc, 0);
+ /*
+ * Receiving a packet requires that the other end
+ * be able to write here. Read access is not required.
+ * This is the simplest possible security model
+ * for networking.
+ */
+ rc = smk_access(skp, ssp->smk_in->smk_known, MAY_WRITE, &ad);
+ if (rc != 0)
+ netlbl_skbuff_err(skb, rc, 0);
+ break;
+ case PF_INET6:
+ rc = smk_skb_to_addr_ipv6(skb, &sadd);
+ if (rc == IPPROTO_UDP || rc == IPPROTO_TCP)
+ rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
+ else
+ rc = 0;
+ break;
+ }
return rc;
}
@@ -2631,15 +3375,19 @@ static int smack_socket_getpeersec_stream(struct socket *sock,
int __user *optlen, unsigned len)
{
struct socket_smack *ssp;
- int slen;
+ char *rcp = "";
+ int slen = 1;
int rc = 0;
ssp = sock->sk->sk_security;
- slen = strlen(ssp->smk_packet) + 1;
+ if (ssp->smk_packet != NULL) {
+ rcp = ssp->smk_packet->smk_known;
+ slen = strlen(rcp) + 1;
+ }
if (slen > len)
rc = -ERANGE;
- else if (copy_to_user(optval, ssp->smk_packet, slen) != 0)
+ else if (copy_to_user(optval, rcp, slen) != 0)
rc = -EFAULT;
if (put_user(slen, optlen) != 0)
@@ -2651,7 +3399,7 @@ static int smack_socket_getpeersec_stream(struct socket *sock,
/**
* smack_socket_getpeersec_dgram - pull in packet label
- * @sock: the socket
+ * @sock: the peer socket
* @skb: packet data
* @secid: pointer to where to put the secid of the packet
*
@@ -2662,41 +3410,41 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
{
struct netlbl_lsm_secattr secattr;
- struct sock *sk;
- char smack[SMK_LABELLEN];
- int family = PF_INET;
- u32 s;
+ struct socket_smack *ssp = NULL;
+ struct smack_known *skp;
+ int family = PF_UNSPEC;
+ u32 s = 0; /* 0 is the invalid secid */
int rc;
- /*
- * Only works for families with packets.
- */
- if (sock != NULL) {
- sk = sock->sk;
- if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
- return 0;
- family = sk->sk_family;
+ if (skb != NULL) {
+ if (skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ family = PF_INET6;
}
- /*
- * Translate what netlabel gave us.
- */
- netlbl_secattr_init(&secattr);
- rc = netlbl_skbuff_getattr(skb, family, &secattr);
- if (rc == 0)
- smack_from_secattr(&secattr, smack);
- netlbl_secattr_destroy(&secattr);
-
- /*
- * Give up if we couldn't get anything
- */
- if (rc != 0)
- return rc;
+ if (family == PF_UNSPEC && sock != NULL)
+ family = sock->sk->sk_family;
- s = smack_to_secid(smack);
+ if (family == PF_UNIX) {
+ ssp = sock->sk->sk_security;
+ s = ssp->smk_out->smk_secid;
+ } else if (family == PF_INET || family == PF_INET6) {
+ /*
+ * Translate what netlabel gave us.
+ */
+ if (sock != NULL && sock->sk != NULL)
+ ssp = sock->sk->sk_security;
+ netlbl_secattr_init(&secattr);
+ rc = netlbl_skbuff_getattr(skb, family, &secattr);
+ if (rc == 0) {
+ skp = smack_from_secattr(&secattr, ssp);
+ s = skp->smk_secid;
+ }
+ netlbl_secattr_destroy(&secattr);
+ }
+ *secid = s;
if (s == 0)
return -EINVAL;
-
- *secid = s;
return 0;
}
@@ -2711,13 +3459,15 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
static void smack_sock_graft(struct sock *sk, struct socket *parent)
{
struct socket_smack *ssp;
+ struct smack_known *skp = smk_of_current();
if (sk == NULL ||
(sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
return;
ssp = sk->sk_security;
- ssp->smk_in = ssp->smk_out = current_security();
+ ssp->smk_in = skp;
+ ssp->smk_out = skp;
/* cssp->smk_packet is already set in smack_inet_csk_clone() */
}
@@ -2734,37 +3484,49 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
u16 family = sk->sk_family;
+ struct smack_known *skp;
struct socket_smack *ssp = sk->sk_security;
struct netlbl_lsm_secattr secattr;
struct sockaddr_in addr;
struct iphdr *hdr;
- char smack[SMK_LABELLEN];
+ char *hsp;
int rc;
struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
- /* handle mapped IPv4 packets arriving via IPv6 sockets */
- if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
- family = PF_INET;
+ if (family == PF_INET6) {
+ /*
+ * Handle mapped IPv4 packets arriving
+ * via IPv6 sockets. Don't set up netlabel
+ * processing on IPv6.
+ */
+ if (skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+ else
+ return 0;
+ }
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0)
- smack_from_secattr(&secattr, smack);
+ skp = smack_from_secattr(&secattr, ssp);
else
- strncpy(smack, smack_known_huh.smk_known, SMK_MAXLEN);
+ skp = &smack_known_huh;
netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = family;
- ad.a.u.net.netif = skb->skb_iif;
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = family;
+ ad.a.u.net->netif = skb->skb_iif;
ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
/*
* Receiving a packet requires that the other end be able to write
* here. Read access is not required.
*/
- rc = smk_access(smack, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_access(skp, ssp->smk_in->smk_known, MAY_WRITE, &ad);
if (rc != 0)
return rc;
@@ -2772,26 +3534,23 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Save the peer's label in the request_sock so we can later setup
* smk_packet in the child socket so that SO_PEERCRED can report it.
*/
- req->peer_secid = smack_to_secid(smack);
+ req->peer_secid = skp->smk_secid;
/*
* We need to decide if we want to label the incoming connection here
* if we do we only need to label the request_sock and the stack will
- * propogate the wire-label to the sock when it is created.
+ * propagate the wire-label to the sock when it is created.
*/
hdr = ip_hdr(skb);
addr.sin_addr.s_addr = hdr->saddr;
rcu_read_lock();
- if (smack_host_label(&addr) == NULL) {
- rcu_read_unlock();
- netlbl_secattr_init(&secattr);
- smack_to_secattr(smack, &secattr);
- rc = netlbl_req_setattr(req, &secattr);
- netlbl_secattr_destroy(&secattr);
- } else {
- rcu_read_unlock();
+ hsp = smack_host_label(&addr);
+ rcu_read_unlock();
+
+ if (hsp == NULL)
+ rc = netlbl_req_setattr(req, &skp->smk_netlabel);
+ else
netlbl_req_delattr(req);
- }
return rc;
}
@@ -2807,13 +3566,13 @@ static void smack_inet_csk_clone(struct sock *sk,
const struct request_sock *req)
{
struct socket_smack *ssp = sk->sk_security;
- char *smack;
+ struct smack_known *skp;
if (req->peer_secid != 0) {
- smack = smack_from_secid(req->peer_secid);
- strncpy(ssp->smk_packet, smack, SMK_MAXLEN);
+ skp = smack_from_secid(req->peer_secid);
+ ssp->smk_packet = skp;
} else
- ssp->smk_packet[0] = '\0';
+ ssp->smk_packet = NULL;
}
/*
@@ -2838,7 +3597,9 @@ static void smack_inet_csk_clone(struct sock *sk,
static int smack_key_alloc(struct key *key, const struct cred *cred,
unsigned long flags)
{
- key->security = cred->security;
+ struct smack_known *skp = smk_of_task(cred->security);
+
+ key->security = skp->smk_known;
return 0;
}
@@ -2863,10 +3624,12 @@ static void smack_key_free(struct key *key)
* an error code otherwise
*/
static int smack_key_permission(key_ref_t key_ref,
- const struct cred *cred, key_perm_t perm)
+ const struct cred *cred, unsigned perm)
{
struct key *keyp;
struct smk_audit_info ad;
+ struct smack_known *tkp = smk_of_task(cred->security);
+ int request = 0;
keyp = key_ref_to_ptr(key_ref);
if (keyp == NULL)
@@ -2880,15 +3643,18 @@ static int smack_key_permission(key_ref_t key_ref,
/*
* This should not occur
*/
- if (cred->security == NULL)
+ if (tkp == NULL)
return -EACCES;
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
ad.a.u.key_struct.key = keyp->serial;
ad.a.u.key_struct.key_desc = keyp->description;
#endif
- return smk_access(cred->security, keyp->security,
- MAY_READWRITE, &ad);
+ if (perm & KEY_NEED_READ)
+ request = MAY_READ;
+ if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
+ request = MAY_WRITE;
+ return smk_access(tkp, keyp->security, request, &ad);
}
#endif /* CONFIG_KEYS */
@@ -2970,19 +3736,18 @@ static int smack_audit_rule_known(struct audit_krule *krule)
static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule,
struct audit_context *actx)
{
- char *smack;
+ struct smack_known *skp;
char *rule = vrule;
- if (!rule) {
- audit_log(actx, GFP_KERNEL, AUDIT_SELINUX_ERR,
- "Smack: missing rule\n");
+ if (unlikely(!rule)) {
+ WARN_ONCE(1, "Smack: missing rule\n");
return -ENOENT;
}
if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
return 0;
- smack = smack_from_secid(secid);
+ skp = smack_from_secid(secid);
/*
* No need to do string comparisons. If a match occurs,
@@ -2990,9 +3755,9 @@ static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule,
* label.
*/
if (op == Audit_equal)
- return (rule == smack);
+ return (rule == skp->smk_known);
if (op == Audit_not_equal)
- return (rule != smack);
+ return (rule != skp->smk_known);
return 0;
}
@@ -3011,6 +3776,16 @@ static void smack_audit_rule_free(void *vrule)
#endif /* CONFIG_AUDIT */
/**
+ * smack_ismaclabel - check if xattr @name references a smack MAC label
+ * @name: Full xattr name to check.
+ */
+static int smack_ismaclabel(const char *name)
+{
+ return (strcmp(name, XATTR_SMACK_SUFFIX) == 0);
+}
+
+
+/**
* smack_secid_to_secctx - return the smack label for a secid
* @secid: incoming integer
* @secdata: destination
@@ -3020,10 +3795,11 @@ static void smack_audit_rule_free(void *vrule)
*/
static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
- char *sp = smack_from_secid(secid);
+ struct smack_known *skp = smack_from_secid(secid);
- *secdata = sp;
- *seclen = strlen(sp);
+ if (secdata)
+ *secdata = skp->smk_known;
+ *seclen = strlen(skp->smk_known);
return 0;
}
@@ -3085,8 +3861,10 @@ struct security_operations smack_ops = {
.sb_copy_data = smack_sb_copy_data,
.sb_kern_mount = smack_sb_kern_mount,
.sb_statfs = smack_sb_statfs,
- .sb_mount = smack_sb_mount,
- .sb_umount = smack_sb_umount,
+
+ .bprm_set_creds = smack_bprm_set_creds,
+ .bprm_committing_creds = smack_bprm_committing_creds,
+ .bprm_secureexec = smack_bprm_secureexec,
.inode_alloc_security = smack_inode_alloc_security,
.inode_free_security = smack_inode_free_security,
@@ -3113,14 +3891,17 @@ struct security_operations smack_ops = {
.file_ioctl = smack_file_ioctl,
.file_lock = smack_file_lock,
.file_fcntl = smack_file_fcntl,
+ .mmap_file = smack_mmap_file,
+ .mmap_addr = cap_mmap_addr,
.file_set_fowner = smack_file_set_fowner,
.file_send_sigiotask = smack_file_send_sigiotask,
.file_receive = smack_file_receive,
+ .file_open = smack_file_open,
+
.cred_alloc_blank = smack_cred_alloc_blank,
.cred_free = smack_cred_free,
.cred_prepare = smack_cred_prepare,
- .cred_commit = smack_cred_commit,
.cred_transfer = smack_cred_transfer,
.kernel_act_as = smack_kernel_act_as,
.kernel_create_files_as = smack_kernel_create_files_as,
@@ -3172,6 +3953,7 @@ struct security_operations smack_ops = {
.unix_may_send = smack_unix_may_send,
.socket_post_create = smack_socket_post_create,
+ .socket_bind = smack_socket_bind,
.socket_connect = smack_socket_connect,
.socket_sendmsg = smack_socket_sendmsg,
.socket_sock_rcv_skb = smack_socket_sock_rcv_skb,
@@ -3198,6 +3980,7 @@ struct security_operations smack_ops = {
.audit_rule_free = smack_audit_rule_free,
#endif /* CONFIG_AUDIT */
+ .ismaclabel = smack_ismaclabel,
.secid_to_secctx = smack_secid_to_secctx,
.secctx_to_secid = smack_secctx_to_secid,
.release_secctx = smack_release_secctx,
@@ -3207,14 +3990,35 @@ struct security_operations smack_ops = {
};
-static __init void init_smack_know_list(void)
+static __init void init_smack_known_list(void)
{
- list_add(&smack_known_huh.list, &smack_known_list);
- list_add(&smack_known_hat.list, &smack_known_list);
- list_add(&smack_known_star.list, &smack_known_list);
- list_add(&smack_known_floor.list, &smack_known_list);
- list_add(&smack_known_invalid.list, &smack_known_list);
- list_add(&smack_known_web.list, &smack_known_list);
+ /*
+ * Initialize rule list locks
+ */
+ mutex_init(&smack_known_huh.smk_rules_lock);
+ mutex_init(&smack_known_hat.smk_rules_lock);
+ mutex_init(&smack_known_floor.smk_rules_lock);
+ mutex_init(&smack_known_star.smk_rules_lock);
+ mutex_init(&smack_known_invalid.smk_rules_lock);
+ mutex_init(&smack_known_web.smk_rules_lock);
+ /*
+ * Initialize rule lists
+ */
+ INIT_LIST_HEAD(&smack_known_huh.smk_rules);
+ INIT_LIST_HEAD(&smack_known_hat.smk_rules);
+ INIT_LIST_HEAD(&smack_known_star.smk_rules);
+ INIT_LIST_HEAD(&smack_known_floor.smk_rules);
+ INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
+ INIT_LIST_HEAD(&smack_known_web.smk_rules);
+ /*
+ * Create the known labels list
+ */
+ smk_insert_entry(&smack_known_huh);
+ smk_insert_entry(&smack_known_hat);
+ smk_insert_entry(&smack_known_star);
+ smk_insert_entry(&smack_known_floor);
+ smk_insert_entry(&smack_known_invalid);
+ smk_insert_entry(&smack_known_web);
}
/**
@@ -3225,28 +4029,26 @@ static __init void init_smack_know_list(void)
static __init int smack_init(void)
{
struct cred *cred;
+ struct task_smack *tsp;
if (!security_module_enable(&smack_ops))
return 0;
+ tsp = new_task_smack(&smack_known_floor, &smack_known_floor,
+ GFP_KERNEL);
+ if (tsp == NULL)
+ return -ENOMEM;
+
printk(KERN_INFO "Smack: Initializing.\n");
/*
* Set the security state for the initial task.
*/
cred = (struct cred *) current->cred;
- cred->security = &smack_known_floor.smk_known;
+ cred->security = tsp;
- /* initilize the smack_know_list */
- init_smack_know_list();
- /*
- * Initialize locks
- */
- spin_lock_init(&smack_known_huh.smk_cipsolock);
- spin_lock_init(&smack_known_hat.smk_cipsolock);
- spin_lock_init(&smack_known_star.smk_cipsolock);
- spin_lock_init(&smack_known_floor.smk_cipsolock);
- spin_lock_init(&smack_known_invalid.smk_cipsolock);
+ /* initialize the smack_known_list */
+ init_smack_known_list();
/*
* Register with LSM
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index aeead758509..32b24882084 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -20,12 +20,13 @@
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/mutex.h>
+#include <linux/slab.h>
#include <net/net_namespace.h>
-#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/audit.h>
+#include <linux/magic.h>
#include "smack.h"
/*
@@ -42,14 +43,25 @@ enum smk_inos {
SMK_NETLBLADDR = 8, /* single label hosts */
SMK_ONLYCAP = 9, /* the only "capable" label */
SMK_LOGGING = 10, /* logging */
+ SMK_LOAD_SELF = 11, /* task specific rules */
+ SMK_ACCESSES = 12, /* access policy */
+ SMK_MAPPED = 13, /* CIPSO level indicating mapped label */
+ SMK_LOAD2 = 14, /* load policy with long labels */
+ SMK_LOAD_SELF2 = 15, /* load task specific rules with long labels */
+ SMK_ACCESS2 = 16, /* make an access check with long labels */
+ SMK_CIPSO2 = 17, /* load long label -> CIPSO mapping */
+ SMK_REVOKE_SUBJ = 18, /* set rules with subject label to '-' */
+ SMK_CHANGE_RULE = 19, /* change or add rules (long labels) */
+ SMK_SYSLOG = 20, /* change syslog label */
+ SMK_PTRACE = 21, /* set ptrace rule */
};
/*
* List locks
*/
-static DEFINE_MUTEX(smack_list_lock);
static DEFINE_MUTEX(smack_cipso_lock);
static DEFINE_MUTEX(smack_ambient_lock);
+static DEFINE_MUTEX(smack_syslog_lock);
static DEFINE_MUTEX(smk_netlbladdr_lock);
/*
@@ -57,7 +69,7 @@ static DEFINE_MUTEX(smk_netlbladdr_lock);
* If it isn't somehow marked, use this.
* It can be reset via smackfs/ambient
*/
-char *smack_net_ambient = smack_known_floor.smk_known;
+struct smack_known *smack_net_ambient;
/*
* This is the level in a CIPSO header that indicates a
@@ -67,6 +79,13 @@ char *smack_net_ambient = smack_known_floor.smk_known;
int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
/*
+ * This is the level in a CIPSO header that indicates a
+ * secid is contained directly in the category set.
+ * It can be reset via smackfs/mapped
+ */
+int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT;
+
+/*
* Unless a process is running with this label even
* having CAP_MAC_OVERRIDE isn't enough to grant
* privilege to violate MAC policy. If no label is
@@ -74,7 +93,22 @@ int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
* everyone. It is expected that the hat (^) label
* will be used if any label is used.
*/
-char *smack_onlycap;
+struct smack_known *smack_onlycap;
+
+/*
+ * If this value is set restrict syslog use to the label specified.
+ * It can be reset via smackfs/syslog
+ */
+struct smack_known *smack_syslog_label;
+
+/*
+ * Ptrace current rule
+ * SMACK_PTRACE_DEFAULT regular smack ptrace rules (/proc based)
+ * SMACK_PTRACE_EXACT labels must match, but can be overridden with
+ * CAP_SYS_PTRACE
+ * SMACK_PTRACE_DRACONIAN labels must match, CAP_SYS_PTRACE has no effect
+ */
+int smack_ptrace_rule = SMACK_PTRACE_DEFAULT;
/*
* Certain IP addresses may be designated as single label hosts.
@@ -83,15 +117,29 @@ char *smack_onlycap;
*/
LIST_HEAD(smk_netlbladdr_list);
+
+/*
+ * Rule lists are maintained for each label.
+ * This master list is just for reading /smack/load and /smack/load2.
+ */
+struct smack_master_list {
+ struct list_head list;
+ struct smack_rule *smk_rule;
+};
+
LIST_HEAD(smack_rule_list);
+struct smack_parsed_rule {
+ struct smack_known *smk_subject;
+ char *smk_object;
+ int smk_access1;
+ int smk_access2;
+};
+
static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
const char *smack_cipso_option = SMACK_CIPSO_OPTION;
-
-#define SEQ_READ_FINISHED 1
-
/*
* Values for parsing cipso rules
* SMK_DIGITLEN: Length of a digit field in a rule.
@@ -108,9 +156,24 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION;
* SMK_ACCESSLEN: Maximum length for a rule access field
* SMK_LOADLEN: Smack rule length
*/
-#define SMK_ACCESS "rwxa"
-#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
-#define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
+#define SMK_OACCESS "rwxa"
+#define SMK_ACCESS "rwxatl"
+#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1)
+#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
+#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
+#define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
+
+/*
+ * Strictly for CIPSO level manipulation.
+ * Set the category bit number in a smack label sized buffer.
+ */
+static inline void smack_catset_bit(unsigned int cat, char *catsetp)
+{
+ if (cat == 0 || cat > (SMK_CIPSOLEN * 8))
+ return;
+
+ catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
+}
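Editorial aside, not part of the patch: the byte/bit placement done by smack_catset_bit() can be checked with a small stand-alone re-implementation. The SMK_CIPSOLEN value of 24 is an assumption taken from the Smack headers, which are not shown in this hunk.

	#include <stdio.h>
	#include <string.h>

	#define SMK_CIPSOLEN 24			/* assumed value from smack.h */

	/* Same logic as smack_catset_bit(), outside the kernel. */
	static void catset_bit(unsigned int cat, unsigned char *catsetp)
	{
		if (cat == 0 || cat > (SMK_CIPSOLEN * 8))
			return;
		catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
	}

	int main(void)
	{
		unsigned char set[SMK_CIPSOLEN];

		memset(set, 0, sizeof(set));
		catset_bit(1, set);		/* most significant bit of byte 0 */
		catset_bit(10, set);		/* second bit of byte 1 */
		printf("%02x %02x\n", set[0], set[1]);	/* prints "80 40" */
		return 0;
	}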
/**
* smk_netlabel_audit_set - fill a netlbl_audit struct
@@ -118,51 +181,426 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION;
*/
static void smk_netlabel_audit_set(struct netlbl_audit *nap)
{
+ struct smack_known *skp = smk_of_current();
+
nap->loginuid = audit_get_loginuid(current);
nap->sessionid = audit_get_sessionid(current);
- nap->secid = smack_to_secid(current_security());
+ nap->secid = skp->smk_secid;
}
/*
- * Values for parsing single label host rules
+ * Value for parsing single label host rules
* "1.2.3.4 X"
- * "192.168.138.129/32 abcdefghijklmnopqrstuvw"
*/
#define SMK_NETLBLADDRMIN 9
-#define SMK_NETLBLADDRMAX 42
+
+/**
+ * smk_set_access - add a rule to the rule list or replace an old rule
+ * @srp: the rule to add or replace
+ * @rule_list: the list of rules
+ * @rule_lock: the rule list lock
+ * @global: if non-zero, indicates a global rule
+ *
+ * Looks through the current subject/object/access list for
+ * the subject/object pair and replaces the access that was
+ * there. If the pair isn't found add it with the specified
+ * access.
+ *
+ * Returns 0 if nothing goes wrong or -ENOMEM if it fails
+ * during the allocation of the new pair to add.
+ */
+static int smk_set_access(struct smack_parsed_rule *srp,
+ struct list_head *rule_list,
+ struct mutex *rule_lock, int global)
+{
+ struct smack_rule *sp;
+ struct smack_master_list *smlp;
+ int found = 0;
+ int rc = 0;
+
+ mutex_lock(rule_lock);
+
+ /*
+ * Because the object label is less likely to match
+ * than the subject label check it first
+ */
+ list_for_each_entry_rcu(sp, rule_list, list) {
+ if (sp->smk_object == srp->smk_object &&
+ sp->smk_subject == srp->smk_subject) {
+ found = 1;
+ sp->smk_access |= srp->smk_access1;
+ sp->smk_access &= ~srp->smk_access2;
+ break;
+ }
+ }
+
+ if (found == 0) {
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (sp == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sp->smk_subject = srp->smk_subject;
+ sp->smk_object = srp->smk_object;
+ sp->smk_access = srp->smk_access1 & ~srp->smk_access2;
+
+ list_add_rcu(&sp->list, rule_list);
+ /*
+ * If this is a global as opposed to self and a new rule
+ * it needs to get added for reporting.
+ */
+ if (global) {
+ smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
+ if (smlp != NULL) {
+ smlp->smk_rule = sp;
+ list_add_rcu(&smlp->list, &smack_rule_list);
+ } else
+ rc = -ENOMEM;
+ }
+ }
+
+out:
+ mutex_unlock(rule_lock);
+ return rc;
+}
+
+/**
+ * smk_perm_from_str - parse smack accesses from a text string
+ * @string: a text string that contains a Smack accesses code
+ *
+ * Returns an integer with respective bits set for specified accesses.
+ */
+static int smk_perm_from_str(const char *string)
+{
+ int perm = 0;
+ const char *cp;
+
+ for (cp = string; ; cp++)
+ switch (*cp) {
+ case '-':
+ break;
+ case 'r':
+ case 'R':
+ perm |= MAY_READ;
+ break;
+ case 'w':
+ case 'W':
+ perm |= MAY_WRITE;
+ break;
+ case 'x':
+ case 'X':
+ perm |= MAY_EXEC;
+ break;
+ case 'a':
+ case 'A':
+ perm |= MAY_APPEND;
+ break;
+ case 't':
+ case 'T':
+ perm |= MAY_TRANSMUTE;
+ break;
+ case 'l':
+ case 'L':
+ perm |= MAY_LOCK;
+ break;
+ default:
+ return perm;
+ }
+}
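A quick illustrative check of the access-string parsing above (not part of the patch). The XMAY_* constants are stand-ins, since the real MAY_* flag values come from kernel headers not shown here; only the parsing shape is the same.

	#include <stdio.h>

	/* Illustrative stand-ins for the kernel MAY_* flags. */
	enum { XMAY_READ = 1, XMAY_WRITE = 2, XMAY_EXEC = 4,
	       XMAY_APPEND = 8, XMAY_TRANSMUTE = 16, XMAY_LOCK = 32 };

	/* Same shape as smk_perm_from_str(): stop at the first unknown char. */
	static int perm_from_str(const char *cp)
	{
		int perm = 0;

		for (;; cp++)
			switch (*cp) {
			case '-': break;
			case 'r': case 'R': perm |= XMAY_READ; break;
			case 'w': case 'W': perm |= XMAY_WRITE; break;
			case 'x': case 'X': perm |= XMAY_EXEC; break;
			case 'a': case 'A': perm |= XMAY_APPEND; break;
			case 't': case 'T': perm |= XMAY_TRANSMUTE; break;
			case 'l': case 'L': perm |= XMAY_LOCK; break;
			default: return perm;
			}
	}

	int main(void)
	{
		printf("rwx   -> 0x%02x\n", perm_from_str("rwx"));	/* 0x07 */
		printf("rwatl -> 0x%02x\n", perm_from_str("rwatl"));	/* 0x3b */
		return 0;
	}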
+
+/**
+ * smk_fill_rule - Fill Smack rule from strings
+ * @subject: subject label string
+ * @object: object label string
+ * @access1: access string
+ * @access2: string with permissions to be removed
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ * @len: label length limit
+ *
+ * Returns 0 on success, -EINVAL on failure and -ENOENT when either subject
+ * or object is missing.
+ */
+static int smk_fill_rule(const char *subject, const char *object,
+ const char *access1, const char *access2,
+ struct smack_parsed_rule *rule, int import,
+ int len)
+{
+ const char *cp;
+ struct smack_known *skp;
+
+ if (import) {
+ rule->smk_subject = smk_import_entry(subject, len);
+ if (rule->smk_subject == NULL)
+ return -EINVAL;
+
+ rule->smk_object = smk_import(object, len);
+ if (rule->smk_object == NULL)
+ return -EINVAL;
+ } else {
+ cp = smk_parse_smack(subject, len);
+ if (cp == NULL)
+ return -EINVAL;
+ skp = smk_find_entry(cp);
+ kfree(cp);
+ if (skp == NULL)
+ return -ENOENT;
+ rule->smk_subject = skp;
+
+ cp = smk_parse_smack(object, len);
+ if (cp == NULL)
+ return -EINVAL;
+ skp = smk_find_entry(cp);
+ kfree(cp);
+ if (skp == NULL)
+ return -ENOENT;
+ rule->smk_object = skp->smk_known;
+ }
+
+ rule->smk_access1 = smk_perm_from_str(access1);
+ if (access2)
+ rule->smk_access2 = smk_perm_from_str(access2);
+ else
+ rule->smk_access2 = ~rule->smk_access1;
+
+ return 0;
+}
+
+/**
+ * smk_parse_rule - parse Smack rule from load string
+ * @data: string to be parsed whose size is SMK_LOADLEN
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+static int smk_parse_rule(const char *data, struct smack_parsed_rule *rule,
+ int import)
+{
+ int rc;
+
+ rc = smk_fill_rule(data, data + SMK_LABELLEN,
+ data + SMK_LABELLEN + SMK_LABELLEN, NULL, rule,
+ import, SMK_LABELLEN);
+ return rc;
+}
+
+/**
+ * smk_parse_long_rule - parse Smack rule from rule string
+ * @data: string to be parsed, null terminated
+ * @rule: Will be filled with Smack parsed rule
+ * @import: if non-zero, import labels
+ * @tokens: number of substrings expected in data
+ *
+ * Returns number of processed bytes on success, -1 on failure.
+ */
+static ssize_t smk_parse_long_rule(char *data, struct smack_parsed_rule *rule,
+ int import, int tokens)
+{
+ ssize_t cnt = 0;
+ char *tok[4];
+ int rc;
+ int i;
+
+ /*
+ * Parse the rule in place, replacing all whitespace with '\0'
+ */
+ for (i = 0; i < tokens; ++i) {
+ while (isspace(data[cnt]))
+ data[cnt++] = '\0';
+
+ if (data[cnt] == '\0')
+ /* Unexpected end of data */
+ return -1;
+
+ tok[i] = data + cnt;
+
+ while (data[cnt] && !isspace(data[cnt]))
+ ++cnt;
+ }
+ while (isspace(data[cnt]))
+ data[cnt++] = '\0';
+
+ while (i < 4)
+ tok[i++] = NULL;
+
+ rc = smk_fill_rule(tok[0], tok[1], tok[2], tok[3], rule, import, 0);
+ return rc == 0 ? cnt : rc;
+}
+
+#define SMK_FIXED24_FMT 0 /* Fixed 24byte label format */
+#define SMK_LONG_FMT 1 /* Variable long label format */
+#define SMK_CHANGE_FMT 2 /* Rule modification format */
+/**
+ * smk_write_rules_list - write() for any /smack rule file
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ * @rule_list: the list of rules to write to
+ * @rule_lock: lock for the rule list
+ * @format: /smack/load or /smack/load2 or /smack/change-rule format.
+ *
+ * Get one smack access rule from above.
+ * The format for SMK_LONG_FMT is:
+ * "subject<whitespace>object<whitespace>access[<whitespace>...]"
+ * The format for SMK_FIXED24_FMT is exactly:
+ * "subject object rwxat"
+ * The format for SMK_CHANGE_FMT is:
+ * "subject<whitespace>object<whitespace>
+ * acc_enable<whitespace>acc_disable[<whitespace>...]"
+ */
+static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos,
+ struct list_head *rule_list,
+ struct mutex *rule_lock, int format)
+{
+ struct smack_parsed_rule rule;
+ char *data;
+ int rc;
+ int trunc = 0;
+ int tokens;
+ ssize_t cnt = 0;
+
+ /*
+ * No partial writes.
+ * Enough data must be present.
+ */
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (format == SMK_FIXED24_FMT) {
+ /*
+ * Minor hack for backward compatibility
+ */
+ if (count < SMK_OLOADLEN || count > SMK_LOADLEN)
+ return -EINVAL;
+ } else {
+ if (count >= PAGE_SIZE) {
+ count = PAGE_SIZE - 1;
+ trunc = 1;
+ }
+ }
+
+ data = kmalloc(count + 1, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * If only part of the user buffer was copied,
+ * avoid leaving a partial rule at the end of the data buffer
+ */
+ if (trunc) {
+ while (count > 0 && (data[count - 1] != '\n'))
+ --count;
+ if (count == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
+ data[count] = '\0';
+ tokens = (format == SMK_CHANGE_FMT ? 4 : 3);
+ while (cnt < count) {
+ if (format == SMK_FIXED24_FMT) {
+ rc = smk_parse_rule(data, &rule, 1);
+ if (rc != 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ cnt = count;
+ } else {
+ rc = smk_parse_long_rule(data + cnt, &rule, 1, tokens);
+ if (rc <= 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ cnt += rc;
+ }
+
+ if (rule_list == NULL)
+ rc = smk_set_access(&rule, &rule.smk_subject->smk_rules,
+ &rule.smk_subject->smk_rules_lock, 1);
+ else
+ rc = smk_set_access(&rule, rule_list, rule_lock, 0);
+
+ if (rc)
+ goto out;
+ }
+
+ rc = cnt;
+out:
+ kfree(data);
+ return rc;
+}
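For illustration only, not part of the patch: from user space, the long-format rules handled above are plain text writes to the smackfs interface files. The /smack mount point and the label names are assumptions, and CAP_MAC_ADMIN is required for both files.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	/* Write one rule string to a smackfs interface file. */
	static int write_rule(const char *path, const char *rule)
	{
		int fd = open(path, O_WRONLY);

		if (fd < 0)
			return -1;
		if (write(fd, rule, strlen(rule)) < 0) {
			close(fd);
			return -1;
		}
		return close(fd);
	}

	int main(void)
	{
		/* SMK_LONG_FMT: "subject object access" */
		write_rule("/smack/load2", "WebApp Database rwx\n");
		/* SMK_CHANGE_FMT: "subject object acc_enable acc_disable" */
		write_rule("/smack/change-rule", "WebApp Database a x\n");
		return 0;
	}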
/*
- * Seq_file read operations for /smack/load
+ * Core logic for smackfs seq list operations.
*/
-static void *load_seq_start(struct seq_file *s, loff_t *pos)
+static void *smk_seq_start(struct seq_file *s, loff_t *pos,
+ struct list_head *head)
{
- if (*pos == SEQ_READ_FINISHED)
+ struct list_head *list;
+
+ /*
+ * This is 0 the first time through.
+ */
+ if (s->index == 0)
+ s->private = head;
+
+ if (s->private == NULL)
return NULL;
- if (list_empty(&smack_rule_list))
+
+ list = s->private;
+ if (list_empty(list))
return NULL;
- return smack_rule_list.next;
+
+ if (s->index == 0)
+ return list->next;
+ return list;
}
-static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
+static void *smk_seq_next(struct seq_file *s, void *v, loff_t *pos,
+ struct list_head *head)
{
struct list_head *list = v;
- if (list_is_last(list, &smack_rule_list)) {
- *pos = SEQ_READ_FINISHED;
+ if (list_is_last(list, head)) {
+ s->private = NULL;
return NULL;
}
+ s->private = list->next;
return list->next;
}
-static int load_seq_show(struct seq_file *s, void *v)
+static void smk_seq_stop(struct seq_file *s, void *v)
{
- struct list_head *list = v;
- struct smack_rule *srp =
- list_entry(list, struct smack_rule, list);
+ /* No-op */
+}
- seq_printf(s, "%s %s", (char *)srp->smk_subject,
- (char *)srp->smk_object);
+static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
+{
+ /*
+ * Don't show any rules with label names too long for
+ * interface file (/smack/load or /smack/load2)
+ * because you should expect to be able to write
+ * anything you read back.
+ */
+ if (strlen(srp->smk_subject->smk_known) >= max ||
+ strlen(srp->smk_object) >= max)
+ return;
+
+ if (srp->smk_access == 0)
+ return;
+
+ seq_printf(s, "%s %s", srp->smk_subject->smk_known, srp->smk_object);
seq_putc(s, ' ');
@@ -174,24 +612,44 @@ static int load_seq_show(struct seq_file *s, void *v)
seq_putc(s, 'x');
if (srp->smk_access & MAY_APPEND)
seq_putc(s, 'a');
- if (srp->smk_access == 0)
- seq_putc(s, '-');
+ if (srp->smk_access & MAY_TRANSMUTE)
+ seq_putc(s, 't');
+ if (srp->smk_access & MAY_LOCK)
+ seq_putc(s, 'l');
seq_putc(s, '\n');
+}
- return 0;
+/*
+ * Seq_file read operations for /smack/load
+ */
+
+static void *load2_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return smk_seq_start(s, pos, &smack_rule_list);
}
-static void load_seq_stop(struct seq_file *s, void *v)
+static void *load2_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- /* No-op */
+ return smk_seq_next(s, v, pos, &smack_rule_list);
+}
+
+static int load_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_master_list *smlp =
+ list_entry(list, struct smack_master_list, list);
+
+ smk_rule_show(s, smlp->smk_rule, SMK_LABELLEN);
+
+ return 0;
}
static const struct seq_operations load_seq_ops = {
- .start = load_seq_start,
- .next = load_seq_next,
+ .start = load2_seq_start,
+ .next = load2_seq_next,
.show = load_seq_show,
- .stop = load_seq_stop,
+ .stop = smk_seq_stop,
};
/**
@@ -207,154 +665,26 @@ static int smk_open_load(struct inode *inode, struct file *file)
}
/**
- * smk_set_access - add a rule to the rule list
- * @srp: the new rule to add
- *
- * Looks through the current subject/object/access list for
- * the subject/object pair and replaces the access that was
- * there. If the pair isn't found add it with the specified
- * access.
- *
- * Returns 0 if nothing goes wrong or -ENOMEM if it fails
- * during the allocation of the new pair to add.
- */
-static int smk_set_access(struct smack_rule *srp)
-{
- struct smack_rule *sp;
- int ret = 0;
- int found;
- mutex_lock(&smack_list_lock);
-
- found = 0;
- list_for_each_entry_rcu(sp, &smack_rule_list, list) {
- if (sp->smk_subject == srp->smk_subject &&
- sp->smk_object == srp->smk_object) {
- found = 1;
- sp->smk_access = srp->smk_access;
- break;
- }
- }
- if (found == 0)
- list_add_rcu(&srp->list, &smack_rule_list);
-
- mutex_unlock(&smack_list_lock);
-
- return ret;
-}
-
-/**
* smk_write_load - write() for /smack/load
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start - must be 0
*
- * Get one smack access rule from above.
- * The format is exactly:
- * char subject[SMK_LABELLEN]
- * char object[SMK_LABELLEN]
- * char access[SMK_ACCESSLEN]
- *
- * writes must be SMK_LABELLEN+SMK_LABELLEN+SMK_ACCESSLEN bytes.
*/
static ssize_t smk_write_load(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- struct smack_rule *rule;
- char *data;
- int rc = -EINVAL;
-
/*
* Must have privilege.
* No partial writes.
* Enough data must be present.
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
- if (*ppos != 0 || count != SMK_LOADLEN)
- return -EINVAL;
-
- data = kzalloc(count, GFP_KERNEL);
- if (data == NULL)
- return -ENOMEM;
-
- if (copy_from_user(data, buf, count) != 0) {
- rc = -EFAULT;
- goto out;
- }
-
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (rule == NULL) {
- rc = -ENOMEM;
- goto out;
- }
-
- rule->smk_subject = smk_import(data, 0);
- if (rule->smk_subject == NULL)
- goto out_free_rule;
-
- rule->smk_object = smk_import(data + SMK_LABELLEN, 0);
- if (rule->smk_object == NULL)
- goto out_free_rule;
-
- rule->smk_access = 0;
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
- case '-':
- break;
- case 'r':
- case 'R':
- rule->smk_access |= MAY_READ;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
- case '-':
- break;
- case 'w':
- case 'W':
- rule->smk_access |= MAY_WRITE;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
- case '-':
- break;
- case 'x':
- case 'X':
- rule->smk_access |= MAY_EXEC;
- break;
- default:
- goto out_free_rule;
- }
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
- case '-':
- break;
- case 'a':
- case 'A':
- rule->smk_access |= MAY_APPEND;
- break;
- default:
- goto out_free_rule;
- }
-
- rc = smk_set_access(rule);
-
- if (!rc)
- rc = count;
- goto out;
-
-out_free_rule:
- kfree(rule);
-out:
- kfree(data);
- return rc;
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_FIXED24_FMT);
}
static const struct file_operations smk_load_ops = {
@@ -424,8 +754,10 @@ static void smk_unlbl_ambient(char *oldambient)
printk(KERN_WARNING "%s:%d remove rc = %d\n",
__func__, __LINE__, rc);
}
+ if (smack_net_ambient == NULL)
+ smack_net_ambient = &smack_known_floor;
- rc = netlbl_cfg_unlbl_map_add(smack_net_ambient, PF_INET,
+ rc = netlbl_cfg_unlbl_map_add(smack_net_ambient->smk_known, PF_INET,
NULL, NULL, &nai);
if (rc != 0)
printk(KERN_WARNING "%s:%d add rc = %d\n",
@@ -438,28 +770,12 @@ static void smk_unlbl_ambient(char *oldambient)
static void *cipso_seq_start(struct seq_file *s, loff_t *pos)
{
- if (*pos == SEQ_READ_FINISHED)
- return NULL;
- if (list_empty(&smack_known_list))
- return NULL;
-
- return smack_known_list.next;
+ return smk_seq_start(s, pos, &smack_known_list);
}
static void *cipso_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct list_head *list = v;
-
- /*
- * labels with no associated cipso value wont be printed
- * in cipso_seq_show
- */
- if (list_is_last(list, &smack_known_list)) {
- *pos = SEQ_READ_FINISHED;
- return NULL;
- }
-
- return list->next;
+ return smk_seq_next(s, v, pos, &smack_known_list);
}
/*
@@ -471,43 +787,39 @@ static int cipso_seq_show(struct seq_file *s, void *v)
struct list_head *list = v;
struct smack_known *skp =
list_entry(list, struct smack_known, list);
- struct smack_cipso *scp = skp->smk_cipso;
- char *cbp;
+ struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
char sep = '/';
- int cat = 1;
int i;
- unsigned char m;
- if (scp == NULL)
+ /*
+ * Don't show a label that could not have been set using
+ * /smack/cipso. This is in support of the notion that
+ * anything read from /smack/cipso ought to be writeable
+ * to /smack/cipso.
+ *
+ * /smack/cipso2 should be used instead.
+ */
+ if (strlen(skp->smk_known) >= SMK_LABELLEN)
return 0;
- seq_printf(s, "%s %3d", (char *)&skp->smk_known, scp->smk_level);
+ seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);
- cbp = scp->smk_catset;
- for (i = 0; i < SMK_LABELLEN; i++)
- for (m = 0x80; m != 0; m >>= 1) {
- if (m & cbp[i]) {
- seq_printf(s, "%c%d", sep, cat);
- sep = ',';
- }
- cat++;
- }
+ for (i = netlbl_secattr_catmap_walk(cmp, 0); i >= 0;
+ i = netlbl_secattr_catmap_walk(cmp, i + 1)) {
+ seq_printf(s, "%c%d", sep, i);
+ sep = ',';
+ }
seq_putc(s, '\n');
return 0;
}
-static void cipso_seq_stop(struct seq_file *s, void *v)
-{
- /* No-op */
-}
-
static const struct seq_operations cipso_seq_ops = {
.start = cipso_seq_start,
- .stop = cipso_seq_stop,
.next = cipso_seq_next,
.show = cipso_seq_show,
+ .stop = smk_seq_stop,
};
/**
@@ -524,23 +836,24 @@ static int smk_open_cipso(struct inode *inode, struct file *file)
}
/**
- * smk_write_cipso - write() for /smack/cipso
+ * smk_set_cipso - do the work for write() for cipso and cipso2
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start
+ * @format: /smack/cipso or /smack/cipso2
*
* Accepts only one cipso rule per write call.
* Returns number of bytes written or error code, as appropriate
*/
-static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, int format)
{
struct smack_known *skp;
- struct smack_cipso *scp = NULL;
- char mapcatset[SMK_LABELLEN];
+ struct netlbl_lsm_secattr ncats;
+ char mapcatset[SMK_CIPSOLEN];
int maplevel;
- int cat;
+ unsigned int cat;
int catlen;
ssize_t rc = -EINVAL;
char *data = NULL;
@@ -553,11 +866,12 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
* No partial writes.
* Enough data must be present.
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
- if (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX)
+ if (format == SMK_FIXED24_FMT &&
+ (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX))
return -EINVAL;
data = kzalloc(count + 1, GFP_KERNEL);
@@ -569,11 +883,6 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
goto unlockedout;
}
- /* labels cannot begin with a '-' */
- if (data[0] == '-') {
- rc = -EINVAL;
- goto unlockedout;
- }
data[count] = '\0';
rule = data;
/*
@@ -586,7 +895,11 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
if (skp == NULL)
goto out;
- rule += SMK_LABELLEN;
+ if (format == SMK_FIXED24_FMT)
+ rule += SMK_LABELLEN;
+ else
+ rule += strlen(skp->smk_known) + 1;
+
ret = sscanf(rule, "%d", &maplevel);
if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
goto out;
@@ -596,41 +909,29 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
goto out;
- if (count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN))
+ if (format == SMK_FIXED24_FMT &&
+ count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN))
goto out;
memset(mapcatset, 0, sizeof(mapcatset));
for (i = 0; i < catlen; i++) {
rule += SMK_DIGITLEN;
- ret = sscanf(rule, "%d", &cat);
- if (ret != 1 || cat > SMACK_CIPSO_MAXCATVAL)
+ ret = sscanf(rule, "%u", &cat);
+ if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
goto out;
smack_catset_bit(cat, mapcatset);
}
- if (skp->smk_cipso == NULL) {
- scp = kzalloc(sizeof(struct smack_cipso), GFP_KERNEL);
- if (scp == NULL) {
- rc = -ENOMEM;
- goto out;
- }
+ rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
+ if (rc >= 0) {
+ netlbl_secattr_catmap_free(skp->smk_netlabel.attr.mls.cat);
+ skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat;
+ skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
+ rc = count;
}
- spin_lock_bh(&skp->smk_cipsolock);
-
- if (scp == NULL)
- scp = skp->smk_cipso;
- else
- skp->smk_cipso = scp;
-
- scp->smk_level = maplevel;
- memcpy(scp->smk_catset, mapcatset, sizeof(mapcatset));
-
- spin_unlock_bh(&skp->smk_cipsolock);
-
- rc = count;
out:
mutex_unlock(&smack_cipso_lock);
unlockedout:
@@ -638,6 +939,22 @@ unlockedout:
return rc;
}
+/**
+ * smk_write_cipso - write() for /smack/cipso
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_set_cipso(file, buf, count, ppos, SMK_FIXED24_FMT);
+}
+
static const struct file_operations smk_cipso_ops = {
.open = smk_open_cipso,
.read = seq_read,
@@ -647,28 +964,91 @@ static const struct file_operations smk_cipso_ops = {
};
/*
+ * Seq_file read operations for /smack/cipso2
+ */
+
+/*
+ * Print cipso labels in format:
+ * label level[/cat[,cat]]
+ */
+static int cipso2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_known *skp =
+ list_entry(list, struct smack_known, list);
+ struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
+ char sep = '/';
+ int i;
+
+ seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);
+
+ for (i = netlbl_secattr_catmap_walk(cmp, 0); i >= 0;
+ i = netlbl_secattr_catmap_walk(cmp, i + 1)) {
+ seq_printf(s, "%c%d", sep, i);
+ sep = ',';
+ }
+
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations cipso2_seq_ops = {
+ .start = cipso_seq_start,
+ .next = cipso_seq_next,
+ .show = cipso2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_cipso2 - open() for /smack/cipso2
+ * @inode: inode structure representing file
+ * @file: "cipso2" file pointer
+ *
+ * Connect our cipso_seq_* operations with /smack/cipso2
+ * file_operations
+ */
+static int smk_open_cipso2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cipso2_seq_ops);
+}
+
+/**
+ * smk_write_cipso2 - write() for /smack/cipso2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_set_cipso(file, buf, count, ppos, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_cipso2_ops = {
+ .open = smk_open_cipso2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_cipso2,
+ .release = seq_release,
+};
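Illustrative only: /smack/cipso2 is an ordinary seq_file, so its contents (one "label level[/cat[,cat]]" line per label, as printed by cipso2_seq_show() above) can be listed with a plain read loop. The /smack mount point is an assumption.

	#include <stdio.h>

	int main(void)
	{
		char line[512];
		FILE *f = fopen("/smack/cipso2", "r");	/* assumed mount point */

		if (f == NULL) {
			perror("fopen");
			return 1;
		}
		while (fgets(line, sizeof(line), f))	/* e.g. "MyLabel   2/11,22" */
			fputs(line, stdout);
		fclose(f);
		return 0;
	}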
+
+/*
* Seq_file read operations for /smack/netlabel
*/
static void *netlbladdr_seq_start(struct seq_file *s, loff_t *pos)
{
- if (*pos == SEQ_READ_FINISHED)
- return NULL;
- if (list_empty(&smk_netlbladdr_list))
- return NULL;
- return smk_netlbladdr_list.next;
+ return smk_seq_start(s, pos, &smk_netlbladdr_list);
}
static void *netlbladdr_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
- struct list_head *list = v;
-
- if (list_is_last(list, &smk_netlbladdr_list)) {
- *pos = SEQ_READ_FINISHED;
- return NULL;
- }
-
- return list->next;
+ return smk_seq_next(s, v, pos, &smk_netlbladdr_list);
}
#define BEBITS (sizeof(__be32) * 8)
@@ -692,16 +1072,11 @@ static int netlbladdr_seq_show(struct seq_file *s, void *v)
return 0;
}
-static void netlbladdr_seq_stop(struct seq_file *s, void *v)
-{
- /* No-op */
-}
-
static const struct seq_operations netlbladdr_seq_ops = {
.start = netlbladdr_seq_start,
- .stop = netlbladdr_seq_stop,
.next = netlbladdr_seq_next,
.show = netlbladdr_seq_show,
+ .stop = smk_seq_stop,
};
/**
@@ -774,9 +1149,9 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
{
struct smk_netlbladdr *skp;
struct sockaddr_in newname;
- char smack[SMK_LABELLEN];
+ char *smack;
char *sp;
- char data[SMK_NETLBLADDRMAX + 1];
+ char *data;
char *host = (char *)&newname.sin_addr.s_addr;
int rc;
struct netlbl_audit audit_info;
@@ -794,40 +1169,63 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
* "<addr/mask, as a.b.c.d/e><space><label>"
* "<addr, as a.b.c.d><space><label>"
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
- if (count < SMK_NETLBLADDRMIN || count > SMK_NETLBLADDRMAX)
+ if (count < SMK_NETLBLADDRMIN)
return -EINVAL;
- if (copy_from_user(data, buf, count) != 0)
- return -EFAULT;
+
+ data = kzalloc(count + 1, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto free_data_out;
+ }
+
+ smack = kzalloc(count + 1, GFP_KERNEL);
+ if (smack == NULL) {
+ rc = -ENOMEM;
+ goto free_data_out;
+ }
data[count] = '\0';
- rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%d %s",
+ rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%u %s",
&host[0], &host[1], &host[2], &host[3], &m, smack);
if (rc != 6) {
rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd %s",
&host[0], &host[1], &host[2], &host[3], smack);
- if (rc != 5)
- return -EINVAL;
+ if (rc != 5) {
+ rc = -EINVAL;
+ goto free_out;
+ }
m = BEBITS;
}
- if (m > BEBITS)
- return -EINVAL;
+ if (m > BEBITS) {
+ rc = -EINVAL;
+ goto free_out;
+ }
- /* if smack begins with '-', its an option, don't import it */
+ /*
+ * If smack begins with '-', it is an option, don't import it
+ */
if (smack[0] != '-') {
sp = smk_import(smack, 0);
- if (sp == NULL)
- return -EINVAL;
+ if (sp == NULL) {
+ rc = -EINVAL;
+ goto free_out;
+ }
} else {
/* check known options */
if (strcmp(smack, smack_cipso_option) == 0)
sp = (char *)smack_cipso_option;
- else
- return -EINVAL;
+ else {
+ rc = -EINVAL;
+ goto free_out;
+ }
}
for (temp_mask = 0; m > 0; m--) {
@@ -868,7 +1266,7 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
}
} else {
/* we delete the unlabeled entry, only if the previous label
- * wasnt the special CIPSO option */
+ * wasn't the special CIPSO option */
if (skp->smk_label != smack_cipso_option)
rc = netlbl_cfg_unlbl_static_del(&init_net, NULL,
&skp->smk_host.sin_addr, &skp->smk_mask,
@@ -893,6 +1291,11 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
mutex_unlock(&smk_netlbladdr_lock);
+free_out:
+ kfree(smack);
+free_data_out:
+ kfree(data);
+
return rc;
}
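Not part of the patch, a minimal sketch of feeding the single-label host format documented above ("a.b.c.d/e label") into /smack/netlabel now that the write path allocates its buffers with kzalloc() instead of fixed-size arrays. The mount point, address, and label are assumptions; CAP_MAC_ADMIN is required.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* "<addr/mask> <label>", as accepted by smk_write_netlbladdr() */
		const char entry[] = "192.168.10.0/24 WebServer\n";
		int fd = open("/smack/netlabel", O_WRONLY);	/* assumed mount point */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, entry, strlen(entry)) < 0)
			perror("write");
		close(fd);
		return 0;
	}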
@@ -943,7 +1346,7 @@ static ssize_t smk_write_doi(struct file *file, const char __user *buf,
char temp[80];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -967,6 +1370,7 @@ static ssize_t smk_write_doi(struct file *file, const char __user *buf,
static const struct file_operations smk_doi_ops = {
.read = smk_read_doi,
.write = smk_write_doi,
+ .llseek = default_llseek,
};
/**
@@ -1005,10 +1409,11 @@ static ssize_t smk_read_direct(struct file *filp, char __user *buf,
static ssize_t smk_write_direct(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct smack_known *skp;
char temp[80];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1022,7 +1427,20 @@ static ssize_t smk_write_direct(struct file *file, const char __user *buf,
if (sscanf(temp, "%d", &i) != 1)
return -EINVAL;
- smack_cipso_direct = i;
+ /*
+ * Don't do anything if the value hasn't actually changed.
+ * If it is changing reset the level on entries that were
+ * set up to be direct when they were created.
+ */
+ if (smack_cipso_direct != i) {
+ mutex_lock(&smack_known_lock);
+ list_for_each_entry_rcu(skp, &smack_known_list, list)
+ if (skp->smk_netlabel.attr.mls.lvl ==
+ smack_cipso_direct)
+ skp->smk_netlabel.attr.mls.lvl = i;
+ smack_cipso_direct = i;
+ mutex_unlock(&smack_known_lock);
+ }
return count;
}
@@ -1030,6 +1448,85 @@ static ssize_t smk_write_direct(struct file *file, const char __user *buf,
static const struct file_operations smk_direct_ops = {
.read = smk_read_direct,
.write = smk_write_direct,
+ .llseek = default_llseek,
+};
+
+/**
+ * smk_read_mapped - read() for /smack/mapped
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_mapped(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", smack_cipso_mapped);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * smk_write_mapped - write() for /smack/mapped
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_mapped(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_known *skp;
+ char temp[80];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+
+ /*
+ * Don't do anything if the value hasn't actually changed.
+ * If it is changing reset the level on entries that were
+ * set up to be mapped when they were created.
+ */
+ if (smack_cipso_mapped != i) {
+ mutex_lock(&smack_known_lock);
+ list_for_each_entry_rcu(skp, &smack_known_list, list)
+ if (skp->smk_netlabel.attr.mls.lvl ==
+ smack_cipso_mapped)
+ skp->smk_netlabel.attr.mls.lvl = i;
+ smack_cipso_mapped = i;
+ mutex_unlock(&smack_known_lock);
+ }
+
+ return count;
+}
+
+static const struct file_operations smk_mapped_ops = {
+ .read = smk_read_mapped,
+ .write = smk_write_mapped,
+ .llseek = default_llseek,
};
/**
@@ -1055,11 +1552,12 @@ static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
*/
mutex_lock(&smack_ambient_lock);
- asize = strlen(smack_net_ambient) + 1;
+ asize = strlen(smack_net_ambient->smk_known) + 1;
if (cn >= asize)
rc = simple_read_from_buffer(buf, cn, ppos,
- smack_net_ambient, asize);
+ smack_net_ambient->smk_known,
+ asize);
else
rc = -EINVAL;
@@ -1080,41 +1578,50 @@ static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char in[SMK_LABELLEN];
+ struct smack_known *skp;
char *oldambient;
- char *smack;
+ char *data;
+ int rc = count;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
- if (count >= SMK_LABELLEN)
- return -EINVAL;
+ data = kzalloc(count + 1, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
- if (copy_from_user(in, buf, count) != 0)
- return -EFAULT;
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
- smack = smk_import(in, count);
- if (smack == NULL)
- return -EINVAL;
+ skp = smk_import_entry(data, count);
+ if (skp == NULL) {
+ rc = -EINVAL;
+ goto out;
+ }
mutex_lock(&smack_ambient_lock);
- oldambient = smack_net_ambient;
- smack_net_ambient = smack;
+ oldambient = smack_net_ambient->smk_known;
+ smack_net_ambient = skp;
smk_unlbl_ambient(oldambient);
mutex_unlock(&smack_ambient_lock);
- return count;
+out:
+ kfree(data);
+ return rc;
}
static const struct file_operations smk_ambient_ops = {
.read = smk_read_ambient,
.write = smk_write_ambient,
+ .llseek = default_llseek,
};
/**
- * smk_read_onlycap - read() for /smack/onlycap
+ * smk_read_onlycap - read() for smackfs/onlycap
* @filp: file pointer, not actually used
* @buf: where to put the result
* @cn: maximum to send along
@@ -1133,7 +1640,7 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
return 0;
if (smack_onlycap != NULL)
- smack = smack_onlycap;
+ smack = smack_onlycap->smk_known;
asize = strlen(smack) + 1;
@@ -1144,7 +1651,7 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
}
/**
- * smk_write_onlycap - write() for /smack/onlycap
+ * smk_write_onlycap - write() for smackfs/onlycap
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
@@ -1155,10 +1662,11 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char in[SMK_LABELLEN];
- char *sp = current->cred->security;
+ char *data;
+ struct smack_known *skp = smk_of_task(current->cred->security);
+ int rc = count;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
/*
@@ -1166,14 +1674,12 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
* explicitly for clarity. The smk_access() implementation
* would use smk_access(smack_onlycap, MAY_WRITE)
*/
- if (smack_onlycap != NULL && smack_onlycap != sp)
+ if (smack_onlycap != NULL && smack_onlycap != skp)
return -EPERM;
- if (count >= SMK_LABELLEN)
- return -EINVAL;
-
- if (copy_from_user(in, buf, count) != 0)
- return -EFAULT;
+ data = kzalloc(count, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
/*
* Should the null string be passed in unset the onlycap value.
@@ -1181,15 +1687,23 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
* smk_import only expects to return NULL for errors. It
* is usually the case that a nullstring or "\n" would be
* bad to pass to smk_import but in fact this is useful here.
+ *
+ * smk_import will also reject a label beginning with '-',
+ * so "-usecapabilities" will also work.
*/
- smack_onlycap = smk_import(in, count);
+ if (copy_from_user(data, buf, count) != 0)
+ rc = -EFAULT;
+ else
+ smack_onlycap = smk_import_entry(data, count);
- return count;
+ kfree(data);
+ return rc;
}
static const struct file_operations smk_onlycap_ops = {
.read = smk_read_onlycap,
.write = smk_write_onlycap,
+ .llseek = default_llseek,
};
/**
@@ -1230,7 +1744,7 @@ static ssize_t smk_write_logging(struct file *file, const char __user *buf,
char temp[32];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1254,14 +1768,560 @@ static ssize_t smk_write_logging(struct file *file, const char __user *buf,
static const struct file_operations smk_logging_ops = {
.read = smk_read_logging,
.write = smk_write_logging,
+ .llseek = default_llseek,
+};
+
+/*
+ * Seq_file read operations for /smack/load-self
+ */
+
+static void *load_self_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_seq_start(s, pos, &tsp->smk_rules);
+}
+
+static void *load_self_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_seq_next(s, v, pos, &tsp->smk_rules);
+}
+
+static int load_self_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp =
+ list_entry(list, struct smack_rule, list);
+
+ smk_rule_show(s, srp, SMK_LABELLEN);
+
+ return 0;
+}
+
+static const struct seq_operations load_self_seq_ops = {
+ .start = load_self_seq_start,
+ .next = load_self_seq_next,
+ .show = load_self_seq_show,
+ .stop = smk_seq_stop,
+};
+
+
+/**
+ * smk_open_load_self - open() for /smack/load-self
+ * @inode: inode structure representing file
+ * @file: "load-self" file pointer
+ *
+ * For reading, use the load_self_seq_* seq_file reading operations.
+ */
+static int smk_open_load_self(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load_self_seq_ops);
+}
+
+/**
+ * smk_write_load_self - write() for /smack/load-self
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
+ &tsp->smk_rules_lock, SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_load_self_ops = {
+ .open = smk_open_load_self,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load_self,
+ .release = seq_release,
+};
+
+/**
+ * smk_user_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_user_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, int format)
+{
+ struct smack_parsed_rule rule;
+ char *data;
+ int res;
+
+ data = simple_transaction_get(file, buf, count);
+ if (IS_ERR(data))
+ return PTR_ERR(data);
+
+ if (format == SMK_FIXED24_FMT) {
+ if (count < SMK_LOADLEN)
+ return -EINVAL;
+ res = smk_parse_rule(data, &rule, 0);
+ } else {
+ /*
+ * simple_transaction_get() returns null-terminated data
+ */
+ res = smk_parse_long_rule(data, &rule, 0, 3);
+ }
+
+ if (res >= 0)
+ res = smk_access(rule.smk_subject, rule.smk_object,
+ rule.smk_access1, NULL);
+ else if (res != -ENOENT)
+ return -EINVAL;
+
+ data[0] = res == 0 ? '1' : '0';
+ data[1] = '\0';
+
+ simple_transaction_set(file, 2);
+
+ if (format == SMK_FIXED24_FMT)
+ return SMK_LOADLEN;
+ return count;
+}
+
+/**
+ * smk_write_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_user_access(file, buf, count, ppos, SMK_FIXED24_FMT);
+}
+
+static const struct file_operations smk_access_ops = {
+ .write = smk_write_access,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
+
+/*
+ * Seq_file read operations for /smack/load2
+ */
+
+static int load2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_master_list *smlp =
+ list_entry(list, struct smack_master_list, list);
+
+ smk_rule_show(s, smlp->smk_rule, SMK_LONGLABEL);
+
+ return 0;
+}
+
+static const struct seq_operations load2_seq_ops = {
+ .start = load2_seq_start,
+ .next = load2_seq_next,
+ .show = load2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_load2 - open() for /smack/load2
+ * @inode: inode structure representing file
+ * @file: "load2" file pointer
+ *
+ * For reading, use load2_seq_* seq_file reading operations.
+ */
+static int smk_open_load2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load2_seq_ops);
+}
+
+/**
+ * smk_write_load2 - write() for /smack/load2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /*
+ * Must have privilege.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_load2_ops = {
+ .open = smk_open_load2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load2,
+ .release = seq_release,
+};
+
+/*
+ * Seq_file read operations for /smack/load-self2
+ */
+
+static void *load_self2_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_seq_start(s, pos, &tsp->smk_rules);
+}
+
+static void *load_self2_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_seq_next(s, v, pos, &tsp->smk_rules);
+}
+
+static int load_self2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp =
+ list_entry(list, struct smack_rule, list);
+
+ smk_rule_show(s, srp, SMK_LONGLABEL);
+
+ return 0;
+}
+
+static const struct seq_operations load_self2_seq_ops = {
+ .start = load_self2_seq_start,
+ .next = load_self2_seq_next,
+ .show = load_self2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_load_self2 - open() for /smack/load-self2
+ * @inode: inode structure representing file
+ * @file: "load-self2" file pointer
+ *
+ * For reading, use the load_self2_seq_* seq_file reading operations.
+ */
+static int smk_open_load_self2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load_self2_seq_ops);
+}
+
+/**
+ * smk_write_load_self2 - write() for /smack/load-self2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load_self2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
+ &tsp->smk_rules_lock, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_load_self2_ops = {
+ .open = smk_open_load_self2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load_self2,
+ .release = seq_release,
+};
+
+/**
+ * smk_write_access2 - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_user_access(file, buf, count, ppos, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_access2_ops = {
+ .write = smk_write_access2,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
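A hedged user-space sketch, not part of the patch, of the simple_transaction protocol used by /smack/access and /smack/access2: write a query, then read the '1' or '0' answer back on the same descriptor. The /smack mount point and the label names are assumptions; labels unknown to the kernel simply report a denial.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		/* Long-format query: "subject object access" */
		const char query[] = "WebApp Database rw";
		char answer[2] = "";
		int fd = open("/smack/access2", O_RDWR);	/* assumed mount point */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		if (write(fd, query, strlen(query)) < 0 ||
		    read(fd, answer, 1) < 0) {
			perror("transaction");
			close(fd);
			return 1;
		}
		printf("access %s\n", answer[0] == '1' ? "granted" : "denied");
		close(fd);
		return 0;
	}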
+
+/**
+ * smk_write_revoke_subj - write() for /smack/revoke-subject
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_revoke_subj(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data = NULL;
+ const char *cp = NULL;
+ struct smack_known *skp;
+ struct smack_rule *sp;
+ struct list_head *rule_list;
+ struct mutex *rule_lock;
+ int rc = count;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count == 0 || count > SMK_LONGLABEL)
+ return -EINVAL;
+
+ data = kzalloc(count, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto free_out;
+ }
+
+ cp = smk_parse_smack(data, count);
+ if (cp == NULL) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+
+ skp = smk_find_entry(cp);
+ if (skp == NULL)
+ goto free_out;
+
+ rule_list = &skp->smk_rules;
+ rule_lock = &skp->smk_rules_lock;
+
+ mutex_lock(rule_lock);
+
+ list_for_each_entry_rcu(sp, rule_list, list)
+ sp->smk_access = 0;
+
+ mutex_unlock(rule_lock);
+
+free_out:
+ kfree(data);
+ kfree(cp);
+ return rc;
+}
+
+static const struct file_operations smk_revoke_subj_ops = {
+ .write = smk_write_revoke_subj,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
};
+
+static struct kset *smackfs_kset;
+/**
+ * smk_init_sysfs - initialize /sys/fs/smackfs
+ *
+ */
+static int smk_init_sysfs(void)
+{
+ smackfs_kset = kset_create_and_add("smackfs", NULL, fs_kobj);
+ if (!smackfs_kset)
+ return -ENOMEM;
+ return 0;
+}
+
/**
- * smk_fill_super - fill the /smackfs superblock
+ * smk_write_change_rule - write() for /smack/change-rule
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_change_rule(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /*
+ * Must have privilege.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_CHANGE_FMT);
+}
+
+static const struct file_operations smk_change_rule_ops = {
+ .write = smk_write_change_rule,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
+/**
+ * smk_read_syslog - read() for smackfs/syslog
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_syslog(struct file *filp, char __user *buf,
+ size_t cn, loff_t *ppos)
+{
+ struct smack_known *skp;
+ ssize_t rc = -EINVAL;
+ int asize;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (smack_syslog_label == NULL)
+ skp = &smack_known_star;
+ else
+ skp = smack_syslog_label;
+
+ asize = strlen(skp->smk_known) + 1;
+
+ if (cn >= asize)
+ rc = simple_read_from_buffer(buf, cn, ppos, skp->smk_known,
+ asize);
+
+ return rc;
+}
+
+/**
+ * smk_write_syslog - write() for smackfs/syslog
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ struct smack_known *skp;
+ int rc = count;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ data = kzalloc(count, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0)
+ rc = -EFAULT;
+ else {
+ skp = smk_import_entry(data, count);
+ if (skp == NULL)
+ rc = -EINVAL;
+ else
+ smack_syslog_label = smk_import_entry(data, count);
+ }
+
+ kfree(data);
+ return rc;
+}
+
+static const struct file_operations smk_syslog_ops = {
+ .read = smk_read_syslog,
+ .write = smk_write_syslog,
+ .llseek = default_llseek,
+};
+
+
+/**
+ * smk_read_ptrace - read() for /smack/ptrace
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_ptrace(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[32];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d\n", smack_ptrace_rule);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+ return rc;
+}
+
+/**
+ * smk_write_ptrace - write() for /smack/ptrace
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_ptrace(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[32];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (*ppos != 0 || count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+ if (i < SMACK_PTRACE_DEFAULT || i > SMACK_PTRACE_MAX)
+ return -EINVAL;
+ smack_ptrace_rule = i;
+
+ return count;
+}
+
+static const struct file_operations smk_ptrace_ops = {
+ .write = smk_write_ptrace,
+ .read = smk_read_ptrace,
+ .llseek = default_llseek,
+};
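Illustrative usage, not part of the patch: the new /smack/ptrace knob takes a single decimal value, assumed per the comment above to be 0 (default), 1 (exact) or 2 (draconian), with SMACK_PTRACE_MAX bounding the range. The mount point is an assumption and CAP_MAC_ADMIN is required.

	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/smack/ptrace", O_WRONLY);	/* assumed mount point */

		if (fd < 0) {
			perror("open");
			return 1;
		}
		/* 2 is assumed to select SMACK_PTRACE_DRACONIAN: no override */
		if (write(fd, "2", 1) < 0)
			perror("write");
		close(fd);
		return 0;
	}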
+
+/**
+ * smk_fill_super - fill the smackfs superblock
* @sb: the empty superblock
* @data: unused
* @silent: unused
*
- * Fill in the well known entries for /smack
+ * Fill in the well known entries for the smack filesystem
*
* Returns 0 on success, an error code on failure
*/
@@ -1271,23 +2331,47 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
struct inode *root_inode;
static struct tree_descr smack_files[] = {
- [SMK_LOAD] =
- {"load", &smk_load_ops, S_IRUGO|S_IWUSR},
- [SMK_CIPSO] =
- {"cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR},
- [SMK_DOI] =
- {"doi", &smk_doi_ops, S_IRUGO|S_IWUSR},
- [SMK_DIRECT] =
- {"direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
- [SMK_AMBIENT] =
- {"ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
- [SMK_NETLBLADDR] =
- {"netlabel", &smk_netlbladdr_ops, S_IRUGO|S_IWUSR},
- [SMK_ONLYCAP] =
- {"onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
- [SMK_LOGGING] =
- {"logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
- /* last one */ {""}
+ [SMK_LOAD] = {
+ "load", &smk_load_ops, S_IRUGO|S_IWUSR},
+ [SMK_CIPSO] = {
+ "cipso", &smk_cipso_ops, S_IRUGO|S_IWUSR},
+ [SMK_DOI] = {
+ "doi", &smk_doi_ops, S_IRUGO|S_IWUSR},
+ [SMK_DIRECT] = {
+ "direct", &smk_direct_ops, S_IRUGO|S_IWUSR},
+ [SMK_AMBIENT] = {
+ "ambient", &smk_ambient_ops, S_IRUGO|S_IWUSR},
+ [SMK_NETLBLADDR] = {
+ "netlabel", &smk_netlbladdr_ops, S_IRUGO|S_IWUSR},
+ [SMK_ONLYCAP] = {
+ "onlycap", &smk_onlycap_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOGGING] = {
+ "logging", &smk_logging_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD_SELF] = {
+ "load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
+ [SMK_ACCESSES] = {
+ "access", &smk_access_ops, S_IRUGO|S_IWUGO},
+ [SMK_MAPPED] = {
+ "mapped", &smk_mapped_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD2] = {
+ "load2", &smk_load2_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD_SELF2] = {
+ "load-self2", &smk_load_self2_ops, S_IRUGO|S_IWUGO},
+ [SMK_ACCESS2] = {
+ "access2", &smk_access2_ops, S_IRUGO|S_IWUGO},
+ [SMK_CIPSO2] = {
+ "cipso2", &smk_cipso2_ops, S_IRUGO|S_IWUSR},
+ [SMK_REVOKE_SUBJ] = {
+ "revoke-subject", &smk_revoke_subj_ops,
+ S_IRUGO|S_IWUSR},
+ [SMK_CHANGE_RULE] = {
+ "change-rule", &smk_change_rule_ops, S_IRUGO|S_IWUSR},
+ [SMK_SYSLOG] = {
+ "syslog", &smk_syslog_ops, S_IRUGO|S_IWUSR},
+ [SMK_PTRACE] = {
+ "ptrace", &smk_ptrace_ops, S_IRUGO|S_IWUSR},
+ /* last one */
+ {""}
};
rc = simple_fill_super(sb, SMACK_MAGIC, smack_files);
@@ -1298,38 +2382,44 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
}
root_inode = sb->s_root->d_inode;
- root_inode->i_security = new_inode_smack(smack_known_floor.smk_known);
return 0;
}
/**
- * smk_get_sb - get the smackfs superblock
+ * smk_mount - get the smackfs superblock
* @fs_type: passed along without comment
* @flags: passed along without comment
* @dev_name: passed along without comment
* @data: passed along without comment
- * @mnt: passed along without comment
*
* Just passes everything along.
*
* Returns what the lower level code does.
*/
-static int smk_get_sb(struct file_system_type *fs_type,
- int flags, const char *dev_name, void *data,
- struct vfsmount *mnt)
+static struct dentry *smk_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
{
- return get_sb_single(fs_type, flags, data, smk_fill_super, mnt);
+ return mount_single(fs_type, flags, data, smk_fill_super);
}
static struct file_system_type smk_fs_type = {
.name = "smackfs",
- .get_sb = smk_get_sb,
+ .mount = smk_mount,
.kill_sb = kill_litter_super,
};
static struct vfsmount *smackfs_mount;
+static int __init smk_preset_netlabel(struct smack_known *skp)
+{
+ skp->smk_netlabel.domain = skp->smk_known;
+ skp->smk_netlabel.flags =
+ NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
+ return smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
+ &skp->smk_netlabel, strlen(skp->smk_known));
+}
+
/**
* init_smk_fs - get the smackfs superblock
*
@@ -1346,10 +2436,15 @@ static struct vfsmount *smackfs_mount;
static int __init init_smk_fs(void)
{
int err;
+ int rc;
if (!security_module_enable(&smack_ops))
return 0;
+ err = smk_init_sysfs();
+ if (err)
+ printk(KERN_ERR "smackfs: sysfs mountpoint problem.\n");
+
err = register_filesystem(&smk_fs_type);
if (!err) {
smackfs_mount = kern_mount(&smk_fs_type);
@@ -1363,6 +2458,25 @@ static int __init init_smk_fs(void)
smk_cipso_doi();
smk_unlbl_ambient(NULL);
+ rc = smk_preset_netlabel(&smack_known_floor);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_hat);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_huh);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_invalid);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_star);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_web);
+ if (err == 0 && rc < 0)
+ err = rc;
+
return err;
}
diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore
new file mode 100644
index 00000000000..5caf1a6f590
--- /dev/null
+++ b/security/tomoyo/.gitignore
@@ -0,0 +1,2 @@
+builtin-policy.h
+policy/
diff --git a/security/tomoyo/Kconfig b/security/tomoyo/Kconfig
index c8f38579323..8eb779b9d77 100644
--- a/security/tomoyo/Kconfig
+++ b/security/tomoyo/Kconfig
@@ -1,11 +1,74 @@
config SECURITY_TOMOYO
bool "TOMOYO Linux Support"
depends on SECURITY
+ depends on NET
select SECURITYFS
select SECURITY_PATH
+ select SECURITY_NETWORK
default n
help
This selects TOMOYO Linux, pathname-based access control.
Required userspace tools and further information may be
found at <http://tomoyo.sourceforge.jp/>.
If you are unsure how to answer this question, answer N.
+
+config SECURITY_TOMOYO_MAX_ACCEPT_ENTRY
+ int "Default maximal count for learning mode"
+ default 2048
+ range 0 2147483647
+ depends on SECURITY_TOMOYO
+ help
+ This is the default value for maximal ACL entries
+ that are automatically appended into policy at "learning mode".
+ Some programs access thousands of objects, so running
+ such programs in "learning mode" dulls the system response
+ and consumes much memory.
+ This is the safeguard for such programs.
+
+config SECURITY_TOMOYO_MAX_AUDIT_LOG
+ int "Default maximal count for audit log"
+ default 1024
+ range 0 2147483647
+ depends on SECURITY_TOMOYO
+ help
+ This is the default value for maximal entries for
+ audit logs that the kernel can hold on memory.
+ You can read the log via /sys/kernel/security/tomoyo/audit.
+ If you don't need audit logs, you may set this value to 0.
+
+config SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ bool "Activate without calling userspace policy loader."
+ default n
+ depends on SECURITY_TOMOYO
+ ---help---
+ Say Y here if you want to activate access control as soon as built-in
+ policy was loaded. This option will be useful for systems where
+ operations which can lead to the hijacking of the boot sequence are
+ needed before loading the policy. For example, you can activate
+ immediately after loading the fixed part of policy which will allow
+ only operations needed for mounting a partition which contains the
+ variant part of policy and verifying (e.g. running GPG check) and
+ loading the variant part of policy. Since you can start using
+ enforcing mode from the beginning, you can reduce the possibility of
+ hijacking the boot sequence.
+
+config SECURITY_TOMOYO_POLICY_LOADER
+ string "Location of userspace policy loader"
+ default "/sbin/tomoyo-init"
+ depends on SECURITY_TOMOYO
+ depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ ---help---
+ This is the default pathname of policy loader which is called before
+ activation. You can override this setting via TOMOYO_loader= kernel
+ command line option.
+
+config SECURITY_TOMOYO_ACTIVATION_TRIGGER
+ string "Trigger for calling userspace policy loader"
+ default "/sbin/init"
+ depends on SECURITY_TOMOYO
+ depends on !SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ ---help---
+ This is the default pathname of activation trigger.
+ You can override this setting via TOMOYO_trigger= kernel command line
+ option. For example, if you pass init=/bin/systemd option, you may
+ want to also pass TOMOYO_trigger=/bin/systemd option.
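
Taken together, the loader and trigger options describe a boot-time hand-off: the first execution of the trigger pathname makes the kernel run the userspace loader, which writes policy into /sys/kernel/security/tomoyo/ before the trigger itself continues. The sketch below only illustrates that check; the helper name and the simplifications are ours, not the patch's load_policy.c verbatim:

    /* Illustrative only: invoke the configured loader the first time the
     * configured trigger (by default /sbin/init) is executed. */
    static void tomoyo_maybe_load_policy(const char *filename)
    {
        char *argv[] = { CONFIG_SECURITY_TOMOYO_POLICY_LOADER, NULL };
        char *envp[] = { "HOME=/", "PATH=/sbin:/bin:/usr/sbin:/usr/bin", NULL };

        if (tomoyo_policy_loaded ||
            strcmp(filename, CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER))
            return;
        call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
    }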
diff --git a/security/tomoyo/Makefile b/security/tomoyo/Makefile
index 10ccd686b29..56a0c7be409 100644
--- a/security/tomoyo/Makefile
+++ b/security/tomoyo/Makefile
@@ -1 +1,48 @@
-obj-y = common.o realpath.o tomoyo.o domain.o file.o
+obj-y = audit.o common.o condition.o domain.o environ.o file.o gc.o group.o load_policy.o memory.o mount.o network.o realpath.o securityfs_if.o tomoyo.o util.o
+
+$(obj)/policy/profile.conf:
+ @mkdir -p $(obj)/policy/
+ @echo Creating an empty policy/profile.conf
+ @touch $@
+
+$(obj)/policy/exception_policy.conf:
+ @mkdir -p $(obj)/policy/
+ @echo Creating a default policy/exception_policy.conf
+ @echo initialize_domain /sbin/modprobe from any >> $@
+ @echo initialize_domain /sbin/hotplug from any >> $@
+
+$(obj)/policy/domain_policy.conf:
+ @mkdir -p $(obj)/policy/
+ @echo Creating an empty policy/domain_policy.conf
+ @touch $@
+
+$(obj)/policy/manager.conf:
+ @mkdir -p $(obj)/policy/
+ @echo Creating an empty policy/manager.conf
+ @touch $@
+
+$(obj)/policy/stat.conf:
+ @mkdir -p $(obj)/policy/
+ @echo Creating an empty policy/stat.conf
+ @touch $@
+
+$(obj)/builtin-policy.h: $(obj)/policy/profile.conf $(obj)/policy/exception_policy.conf $(obj)/policy/domain_policy.conf $(obj)/policy/manager.conf $(obj)/policy/stat.conf
+ @echo Generating built-in policy for TOMOYO 2.5.x.
+ @echo "static char tomoyo_builtin_profile[] __initdata =" > $@.tmp
+ @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/profile.conf >> $@.tmp
+ @echo "\"\";" >> $@.tmp
+ @echo "static char tomoyo_builtin_exception_policy[] __initdata =" >> $@.tmp
+ @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/exception_policy.conf >> $@.tmp
+ @echo "\"\";" >> $@.tmp
+ @echo "static char tomoyo_builtin_domain_policy[] __initdata =" >> $@.tmp
+ @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/domain_policy.conf >> $@.tmp
+ @echo "\"\";" >> $@.tmp
+ @echo "static char tomoyo_builtin_manager[] __initdata =" >> $@.tmp
+ @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/manager.conf >> $@.tmp
+ @echo "\"\";" >> $@.tmp
+ @echo "static char tomoyo_builtin_stat[] __initdata =" >> $@.tmp
+ @sed -e 's/\\/\\\\/g' -e 's/\"/\\"/g' -e 's/\(.*\)/"\1\\n"/' < $(obj)/policy/stat.conf >> $@.tmp
+ @echo "\"\";" >> $@.tmp
+ @mv $@.tmp $@
+
+$(obj)/common.o: $(obj)/builtin-policy.h
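
Each sed pipeline above escapes backslashes and double quotes, wraps every line of the corresponding policy/*.conf file in quotes with a trailing \n, and terminates the initializer with an empty string. For the default exception_policy.conf created by the rule above, the generated builtin-policy.h would therefore contain roughly:

    static char tomoyo_builtin_exception_policy[] __initdata =
    "initialize_domain /sbin/modprobe from any\n"
    "initialize_domain /sbin/hotplug from any\n"
    "";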
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
new file mode 100644
index 00000000000..c1b00375c9a
--- /dev/null
+++ b/security/tomoyo/audit.c
@@ -0,0 +1,470 @@
+/*
+ * security/tomoyo/audit.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/**
+ * tomoyo_print_bprm - Print "struct linux_binprm" for auditing.
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ * @dump: Pointer to "struct tomoyo_page_dump".
+ *
+ * Returns the contents of @bprm on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+static char *tomoyo_print_bprm(struct linux_binprm *bprm,
+ struct tomoyo_page_dump *dump)
+{
+ static const int tomoyo_buffer_len = 4096 * 2;
+ char *buffer = kzalloc(tomoyo_buffer_len, GFP_NOFS);
+ char *cp;
+ char *last_start;
+ int len;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ bool truncated = false;
+ if (!buffer)
+ return NULL;
+ len = snprintf(buffer, tomoyo_buffer_len - 1, "argv[]={ ");
+ cp = buffer + len;
+ if (!argv_count) {
+ memmove(cp, "} envp[]={ ", 11);
+ cp += 11;
+ }
+ last_start = cp;
+ while (argv_count || envp_count) {
+ if (!tomoyo_dump_page(bprm, pos, dump))
+ goto out;
+ pos += PAGE_SIZE - offset;
+ /* Read. */
+ while (offset < PAGE_SIZE) {
+ const char *kaddr = dump->data;
+ const unsigned char c = kaddr[offset++];
+ if (cp == last_start)
+ *cp++ = '"';
+ if (cp >= buffer + tomoyo_buffer_len - 32) {
+ /* Reserve some room for "..." string. */
+ truncated = true;
+ } else if (c == '\\') {
+ *cp++ = '\\';
+ *cp++ = '\\';
+ } else if (c > ' ' && c < 127) {
+ *cp++ = c;
+ } else if (!c) {
+ *cp++ = '"';
+ *cp++ = ' ';
+ last_start = cp;
+ } else {
+ *cp++ = '\\';
+ *cp++ = (c >> 6) + '0';
+ *cp++ = ((c >> 3) & 7) + '0';
+ *cp++ = (c & 7) + '0';
+ }
+ if (c)
+ continue;
+ if (argv_count) {
+ if (--argv_count == 0) {
+ if (truncated) {
+ cp = last_start;
+ memmove(cp, "... ", 4);
+ cp += 4;
+ }
+ memmove(cp, "} envp[]={ ", 11);
+ cp += 11;
+ last_start = cp;
+ truncated = false;
+ }
+ } else if (envp_count) {
+ if (--envp_count == 0) {
+ if (truncated) {
+ cp = last_start;
+ memmove(cp, "... ", 4);
+ cp += 4;
+ }
+ }
+ }
+ if (!argv_count && !envp_count)
+ break;
+ }
+ offset = 0;
+ }
+ *cp++ = '}';
+ *cp = '\0';
+ return buffer;
+out:
+ snprintf(buffer, tomoyo_buffer_len - 1,
+ "argv[]={ ... } envp[]= { ... }");
+ return buffer;
+}
+
+/**
+ * tomoyo_filetype - Get string representation of file type.
+ *
+ * @mode: Mode value for stat().
+ *
+ * Returns file type string.
+ */
+static inline const char *tomoyo_filetype(const umode_t mode)
+{
+ switch (mode & S_IFMT) {
+ case S_IFREG:
+ case 0:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FILE];
+ case S_IFDIR:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_DIRECTORY];
+ case S_IFLNK:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SYMLINK];
+ case S_IFIFO:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_FIFO];
+ case S_IFSOCK:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_SOCKET];
+ case S_IFBLK:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_BLOCK_DEV];
+ case S_IFCHR:
+ return tomoyo_condition_keyword[TOMOYO_TYPE_IS_CHAR_DEV];
+ }
+ return "unknown"; /* This should not happen. */
+}
+
+/**
+ * tomoyo_print_header - Get header line of audit log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns string representation.
+ *
+ * This function uses kmalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+static char *tomoyo_print_header(struct tomoyo_request_info *r)
+{
+ struct tomoyo_time stamp;
+ const pid_t gpid = task_pid_nr(current);
+ struct tomoyo_obj_info *obj = r->obj;
+ static const int tomoyo_buffer_len = 4096;
+ char *buffer = kmalloc(tomoyo_buffer_len, GFP_NOFS);
+ int pos;
+ u8 i;
+ if (!buffer)
+ return NULL;
+ {
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ tomoyo_convert_time(tv.tv_sec, &stamp);
+ }
+ pos = snprintf(buffer, tomoyo_buffer_len - 1,
+ "#%04u/%02u/%02u %02u:%02u:%02u# profile=%u mode=%s "
+ "granted=%s (global-pid=%u) task={ pid=%u ppid=%u "
+ "uid=%u gid=%u euid=%u egid=%u suid=%u sgid=%u "
+ "fsuid=%u fsgid=%u }", stamp.year, stamp.month,
+ stamp.day, stamp.hour, stamp.min, stamp.sec, r->profile,
+ tomoyo_mode[r->mode], tomoyo_yesno(r->granted), gpid,
+ tomoyo_sys_getpid(), tomoyo_sys_getppid(),
+ from_kuid(&init_user_ns, current_uid()),
+ from_kgid(&init_user_ns, current_gid()),
+ from_kuid(&init_user_ns, current_euid()),
+ from_kgid(&init_user_ns, current_egid()),
+ from_kuid(&init_user_ns, current_suid()),
+ from_kgid(&init_user_ns, current_sgid()),
+ from_kuid(&init_user_ns, current_fsuid()),
+ from_kgid(&init_user_ns, current_fsgid()));
+ if (!obj)
+ goto no_obj_info;
+ if (!obj->validate_done) {
+ tomoyo_get_attributes(obj);
+ obj->validate_done = true;
+ }
+ for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
+ struct tomoyo_mini_stat *stat;
+ unsigned int dev;
+ umode_t mode;
+ if (!obj->stat_valid[i])
+ continue;
+ stat = &obj->stat[i];
+ dev = stat->dev;
+ mode = stat->mode;
+ if (i & 1) {
+ pos += snprintf(buffer + pos,
+ tomoyo_buffer_len - 1 - pos,
+ " path%u.parent={ uid=%u gid=%u "
+ "ino=%lu perm=0%o }", (i >> 1) + 1,
+ from_kuid(&init_user_ns, stat->uid),
+ from_kgid(&init_user_ns, stat->gid),
+ (unsigned long)stat->ino,
+ stat->mode & S_IALLUGO);
+ continue;
+ }
+ pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
+ " path%u={ uid=%u gid=%u ino=%lu major=%u"
+ " minor=%u perm=0%o type=%s", (i >> 1) + 1,
+ from_kuid(&init_user_ns, stat->uid),
+ from_kgid(&init_user_ns, stat->gid),
+ (unsigned long)stat->ino,
+ MAJOR(dev), MINOR(dev),
+ mode & S_IALLUGO, tomoyo_filetype(mode));
+ if (S_ISCHR(mode) || S_ISBLK(mode)) {
+ dev = stat->rdev;
+ pos += snprintf(buffer + pos,
+ tomoyo_buffer_len - 1 - pos,
+ " dev_major=%u dev_minor=%u",
+ MAJOR(dev), MINOR(dev));
+ }
+ pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
+ " }");
+ }
+no_obj_info:
+ if (pos < tomoyo_buffer_len - 1)
+ return buffer;
+ kfree(buffer);
+ return NULL;
+}
+
+/**
+ * tomoyo_init_log - Allocate buffer for audit logs.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @len: Buffer size needed for @fmt and @args.
+ * @fmt: The printf()'s format string.
+ * @args: va_list structure for @fmt.
+ *
+ * Returns pointer to allocated memory.
+ *
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
+ */
+char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
+ va_list args)
+{
+ char *buf = NULL;
+ char *bprm_info = NULL;
+ const char *header = NULL;
+ char *realpath = NULL;
+ const char *symlink = NULL;
+ int pos;
+ const char *domainname = r->domain->domainname->name;
+ header = tomoyo_print_header(r);
+ if (!header)
+ return NULL;
+ /* +10 is for '\n' etc. and '\0'. */
+ len += strlen(domainname) + strlen(header) + 10;
+ if (r->ee) {
+ struct file *file = r->ee->bprm->file;
+ realpath = tomoyo_realpath_from_path(&file->f_path);
+ bprm_info = tomoyo_print_bprm(r->ee->bprm, &r->ee->dump);
+ if (!realpath || !bprm_info)
+ goto out;
+ /* +80 is for " exec={ realpath=\"%s\" argc=%d envc=%d %s }" */
+ len += strlen(realpath) + 80 + strlen(bprm_info);
+ } else if (r->obj && r->obj->symlink_target) {
+ symlink = r->obj->symlink_target->name;
+ /* +18 is for " symlink.target=\"%s\"" */
+ len += 18 + strlen(symlink);
+ }
+ len = tomoyo_round2(len);
+ buf = kzalloc(len, GFP_NOFS);
+ if (!buf)
+ goto out;
+ len--;
+ pos = snprintf(buf, len, "%s", header);
+ if (realpath) {
+ struct linux_binprm *bprm = r->ee->bprm;
+ pos += snprintf(buf + pos, len - pos,
+ " exec={ realpath=\"%s\" argc=%d envc=%d %s }",
+ realpath, bprm->argc, bprm->envc, bprm_info);
+ } else if (symlink)
+ pos += snprintf(buf + pos, len - pos, " symlink.target=\"%s\"",
+ symlink);
+ pos += snprintf(buf + pos, len - pos, "\n%s\n", domainname);
+ vsnprintf(buf + pos, len - pos, fmt, args);
+out:
+ kfree(realpath);
+ kfree(bprm_info);
+ kfree(header);
+ return buf;
+}
+
+/* Wait queue for /sys/kernel/security/tomoyo/audit. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_log_wait);
+
+/* Structure for audit log. */
+struct tomoyo_log {
+ struct list_head list;
+ char *log;
+ int size;
+};
+
+/* The list for "struct tomoyo_log". */
+static LIST_HEAD(tomoyo_log);
+
+/* Lock for "struct list_head tomoyo_log". */
+static DEFINE_SPINLOCK(tomoyo_log_lock);
+
+/* Length of "stuct list_head tomoyo_log". */
+static unsigned int tomoyo_log_count;
+
+/**
+ * tomoyo_get_audit - Get audit mode.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number.
+ * @index: Index number of functionality.
+ * @is_granted: True if granted log, false otherwise.
+ *
+ * Returns true if this request should be audited, false otherwise.
+ */
+static bool tomoyo_get_audit(const struct tomoyo_policy_namespace *ns,
+ const u8 profile, const u8 index,
+ const struct tomoyo_acl_info *matched_acl,
+ const bool is_granted)
+{
+ u8 mode;
+ const u8 category = tomoyo_index2category[index] +
+ TOMOYO_MAX_MAC_INDEX;
+ struct tomoyo_profile *p;
+ if (!tomoyo_policy_loaded)
+ return false;
+ p = tomoyo_profile(ns, profile);
+ if (tomoyo_log_count >= p->pref[TOMOYO_PREF_MAX_AUDIT_LOG])
+ return false;
+ if (is_granted && matched_acl && matched_acl->cond &&
+ matched_acl->cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ return matched_acl->cond->grant_log == TOMOYO_GRANTLOG_YES;
+ mode = p->config[index];
+ if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+ mode = p->config[category];
+ if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+ mode = p->default_config;
+ if (is_granted)
+ return mode & TOMOYO_CONFIG_WANT_GRANT_LOG;
+ return mode & TOMOYO_CONFIG_WANT_REJECT_LOG;
+}
+
+/**
+ * tomoyo_write_log2 - Write an audit log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @len: Buffer size needed for @fmt and @args.
+ * @fmt: The printf()'s format string.
+ * @args: va_list structure for @fmt.
+ *
+ * Returns nothing.
+ */
+void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
+ va_list args)
+{
+ char *buf;
+ struct tomoyo_log *entry;
+ bool quota_exceeded = false;
+ if (!tomoyo_get_audit(r->domain->ns, r->profile, r->type,
+ r->matched_acl, r->granted))
+ goto out;
+ buf = tomoyo_init_log(r, len, fmt, args);
+ if (!buf)
+ goto out;
+ entry = kzalloc(sizeof(*entry), GFP_NOFS);
+ if (!entry) {
+ kfree(buf);
+ goto out;
+ }
+ entry->log = buf;
+ len = tomoyo_round2(strlen(buf) + 1);
+ /*
+ * The entry->size is used for memory quota checks.
+ * Don't go beyond strlen(entry->log).
+ */
+ entry->size = len + tomoyo_round2(sizeof(*entry));
+ spin_lock(&tomoyo_log_lock);
+ if (tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT] &&
+ tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] + entry->size >=
+ tomoyo_memory_quota[TOMOYO_MEMORY_AUDIT]) {
+ quota_exceeded = true;
+ } else {
+ tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] += entry->size;
+ list_add_tail(&entry->list, &tomoyo_log);
+ tomoyo_log_count++;
+ }
+ spin_unlock(&tomoyo_log_lock);
+ if (quota_exceeded) {
+ kfree(buf);
+ kfree(entry);
+ goto out;
+ }
+ wake_up(&tomoyo_log_wait);
+out:
+ return;
+}
+
+/**
+ * tomoyo_write_log - Write an audit log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @fmt: The printf()'s format string, followed by parameters.
+ *
+ * Returns nothing.
+ */
+void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
+{
+ va_list args;
+ int len;
+ va_start(args, fmt);
+ len = vsnprintf((char *) &len, 1, fmt, args) + 1;
+ va_end(args);
+ va_start(args, fmt);
+ tomoyo_write_log2(r, len, fmt, args);
+ va_end(args);
+}
+
+/**
+ * tomoyo_read_log - Read an audit log.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+void tomoyo_read_log(struct tomoyo_io_buffer *head)
+{
+ struct tomoyo_log *ptr = NULL;
+ if (head->r.w_pos)
+ return;
+ kfree(head->read_buf);
+ head->read_buf = NULL;
+ spin_lock(&tomoyo_log_lock);
+ if (!list_empty(&tomoyo_log)) {
+ ptr = list_entry(tomoyo_log.next, typeof(*ptr), list);
+ list_del(&ptr->list);
+ tomoyo_log_count--;
+ tomoyo_memory_used[TOMOYO_MEMORY_AUDIT] -= ptr->size;
+ }
+ spin_unlock(&tomoyo_log_lock);
+ if (ptr) {
+ head->read_buf = ptr->log;
+ head->r.w[head->r.w_pos++] = head->read_buf;
+ kfree(ptr);
+ }
+}
+
+/**
+ * tomoyo_poll_log - Wait for an audit log.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". Maybe NULL.
+ *
+ * Returns POLLIN | POLLRDNORM when ready to read an audit log.
+ */
+unsigned int tomoyo_poll_log(struct file *file, poll_table *wait)
+{
+ if (tomoyo_log_count)
+ return POLLIN | POLLRDNORM;
+ poll_wait(file, &tomoyo_log_wait, wait);
+ if (tomoyo_log_count)
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
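
tomoyo_poll_log() and tomoyo_read_log() together give /sys/kernel/security/tomoyo/audit the usual poll-then-read semantics: poll() blocks until at least one record is queued, and a read() typically returns one queued record, '\0'-terminated because tomoyo_flush() appends the terminator when head->poll is set. A minimal userspace consumer, shown only as an illustration and not part of the patch, could look like:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        char buf[8192];
        struct pollfd pfd = { .events = POLLIN };
        ssize_t len;

        pfd.fd = open("/sys/kernel/security/tomoyo/audit", O_RDONLY);
        if (pfd.fd < 0)
            return 1;
        while (poll(&pfd, 1, -1) > 0) {
            len = read(pfd.fd, buf, sizeof(buf) - 1);
            if (len <= 0)
                continue;
            buf[len] = '\0';
            fputs(buf, stdout);
            fputc('\n', stdout);
        }
        return 0;
    }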
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index e0d0354008b..283862aebdc 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -1,942 +1,657 @@
/*
* security/tomoyo/common.c
*
- * Common functions for TOMOYO.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include <linux/uaccess.h>
+#include <linux/slab.h>
#include <linux/security.h>
-#include <linux/hardirq.h>
-#include "realpath.h"
#include "common.h"
-#include "tomoyo.h"
-/* Has loading policy done? */
-bool tomoyo_policy_loaded;
+/* String table for operation mode. */
+const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE] = {
+ [TOMOYO_CONFIG_DISABLED] = "disabled",
+ [TOMOYO_CONFIG_LEARNING] = "learning",
+ [TOMOYO_CONFIG_PERMISSIVE] = "permissive",
+ [TOMOYO_CONFIG_ENFORCING] = "enforcing"
+};
-/* String table for functionality that takes 4 modes. */
-static const char *tomoyo_mode_4[4] = {
- "disabled", "learning", "permissive", "enforcing"
+/* String table for /sys/kernel/security/tomoyo/profile */
+const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+ /* CONFIG::file group */
+ [TOMOYO_MAC_FILE_EXECUTE] = "execute",
+ [TOMOYO_MAC_FILE_OPEN] = "open",
+ [TOMOYO_MAC_FILE_CREATE] = "create",
+ [TOMOYO_MAC_FILE_UNLINK] = "unlink",
+ [TOMOYO_MAC_FILE_GETATTR] = "getattr",
+ [TOMOYO_MAC_FILE_MKDIR] = "mkdir",
+ [TOMOYO_MAC_FILE_RMDIR] = "rmdir",
+ [TOMOYO_MAC_FILE_MKFIFO] = "mkfifo",
+ [TOMOYO_MAC_FILE_MKSOCK] = "mksock",
+ [TOMOYO_MAC_FILE_TRUNCATE] = "truncate",
+ [TOMOYO_MAC_FILE_SYMLINK] = "symlink",
+ [TOMOYO_MAC_FILE_MKBLOCK] = "mkblock",
+ [TOMOYO_MAC_FILE_MKCHAR] = "mkchar",
+ [TOMOYO_MAC_FILE_LINK] = "link",
+ [TOMOYO_MAC_FILE_RENAME] = "rename",
+ [TOMOYO_MAC_FILE_CHMOD] = "chmod",
+ [TOMOYO_MAC_FILE_CHOWN] = "chown",
+ [TOMOYO_MAC_FILE_CHGRP] = "chgrp",
+ [TOMOYO_MAC_FILE_IOCTL] = "ioctl",
+ [TOMOYO_MAC_FILE_CHROOT] = "chroot",
+ [TOMOYO_MAC_FILE_MOUNT] = "mount",
+ [TOMOYO_MAC_FILE_UMOUNT] = "unmount",
+ [TOMOYO_MAC_FILE_PIVOT_ROOT] = "pivot_root",
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] = "inet_stream_bind",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] = "inet_stream_listen",
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] = "inet_stream_connect",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] = "inet_dgram_bind",
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] = "inet_dgram_send",
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] = "inet_raw_bind",
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] = "inet_raw_send",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] = "unix_stream_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] = "unix_stream_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] = "unix_stream_connect",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] = "unix_dgram_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] = "unix_dgram_send",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] = "unix_seqpacket_bind",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] = "unix_seqpacket_listen",
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] = "unix_seqpacket_connect",
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = "env",
+ /* CONFIG group */
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAX_MAC_INDEX + TOMOYO_MAC_CATEGORY_MISC] = "misc",
};
-/* String table for functionality that takes 2 modes. */
-static const char *tomoyo_mode_2[4] = {
- "disabled", "enabled", "enabled", "enabled"
+
+/* String table for conditions. */
+const char * const tomoyo_condition_keyword[TOMOYO_MAX_CONDITION_KEYWORD] = {
+ [TOMOYO_TASK_UID] = "task.uid",
+ [TOMOYO_TASK_EUID] = "task.euid",
+ [TOMOYO_TASK_SUID] = "task.suid",
+ [TOMOYO_TASK_FSUID] = "task.fsuid",
+ [TOMOYO_TASK_GID] = "task.gid",
+ [TOMOYO_TASK_EGID] = "task.egid",
+ [TOMOYO_TASK_SGID] = "task.sgid",
+ [TOMOYO_TASK_FSGID] = "task.fsgid",
+ [TOMOYO_TASK_PID] = "task.pid",
+ [TOMOYO_TASK_PPID] = "task.ppid",
+ [TOMOYO_EXEC_ARGC] = "exec.argc",
+ [TOMOYO_EXEC_ENVC] = "exec.envc",
+ [TOMOYO_TYPE_IS_SOCKET] = "socket",
+ [TOMOYO_TYPE_IS_SYMLINK] = "symlink",
+ [TOMOYO_TYPE_IS_FILE] = "file",
+ [TOMOYO_TYPE_IS_BLOCK_DEV] = "block",
+ [TOMOYO_TYPE_IS_DIRECTORY] = "directory",
+ [TOMOYO_TYPE_IS_CHAR_DEV] = "char",
+ [TOMOYO_TYPE_IS_FIFO] = "fifo",
+ [TOMOYO_MODE_SETUID] = "setuid",
+ [TOMOYO_MODE_SETGID] = "setgid",
+ [TOMOYO_MODE_STICKY] = "sticky",
+ [TOMOYO_MODE_OWNER_READ] = "owner_read",
+ [TOMOYO_MODE_OWNER_WRITE] = "owner_write",
+ [TOMOYO_MODE_OWNER_EXECUTE] = "owner_execute",
+ [TOMOYO_MODE_GROUP_READ] = "group_read",
+ [TOMOYO_MODE_GROUP_WRITE] = "group_write",
+ [TOMOYO_MODE_GROUP_EXECUTE] = "group_execute",
+ [TOMOYO_MODE_OTHERS_READ] = "others_read",
+ [TOMOYO_MODE_OTHERS_WRITE] = "others_write",
+ [TOMOYO_MODE_OTHERS_EXECUTE] = "others_execute",
+ [TOMOYO_EXEC_REALPATH] = "exec.realpath",
+ [TOMOYO_SYMLINK_TARGET] = "symlink.target",
+ [TOMOYO_PATH1_UID] = "path1.uid",
+ [TOMOYO_PATH1_GID] = "path1.gid",
+ [TOMOYO_PATH1_INO] = "path1.ino",
+ [TOMOYO_PATH1_MAJOR] = "path1.major",
+ [TOMOYO_PATH1_MINOR] = "path1.minor",
+ [TOMOYO_PATH1_PERM] = "path1.perm",
+ [TOMOYO_PATH1_TYPE] = "path1.type",
+ [TOMOYO_PATH1_DEV_MAJOR] = "path1.dev_major",
+ [TOMOYO_PATH1_DEV_MINOR] = "path1.dev_minor",
+ [TOMOYO_PATH2_UID] = "path2.uid",
+ [TOMOYO_PATH2_GID] = "path2.gid",
+ [TOMOYO_PATH2_INO] = "path2.ino",
+ [TOMOYO_PATH2_MAJOR] = "path2.major",
+ [TOMOYO_PATH2_MINOR] = "path2.minor",
+ [TOMOYO_PATH2_PERM] = "path2.perm",
+ [TOMOYO_PATH2_TYPE] = "path2.type",
+ [TOMOYO_PATH2_DEV_MAJOR] = "path2.dev_major",
+ [TOMOYO_PATH2_DEV_MINOR] = "path2.dev_minor",
+ [TOMOYO_PATH1_PARENT_UID] = "path1.parent.uid",
+ [TOMOYO_PATH1_PARENT_GID] = "path1.parent.gid",
+ [TOMOYO_PATH1_PARENT_INO] = "path1.parent.ino",
+ [TOMOYO_PATH1_PARENT_PERM] = "path1.parent.perm",
+ [TOMOYO_PATH2_PARENT_UID] = "path2.parent.uid",
+ [TOMOYO_PATH2_PARENT_GID] = "path2.parent.gid",
+ [TOMOYO_PATH2_PARENT_INO] = "path2.parent.ino",
+ [TOMOYO_PATH2_PARENT_PERM] = "path2.parent.perm",
};
-/*
- * tomoyo_control_array is a static data which contains
- *
- * (1) functionality name used by /sys/kernel/security/tomoyo/profile .
- * (2) initial values for "struct tomoyo_profile".
- * (3) max values for "struct tomoyo_profile".
- */
-static struct {
- const char *keyword;
- unsigned int current_value;
- const unsigned int max_value;
-} tomoyo_control_array[TOMOYO_MAX_CONTROL_INDEX] = {
- [TOMOYO_MAC_FOR_FILE] = { "MAC_FOR_FILE", 0, 3 },
- [TOMOYO_MAX_ACCEPT_ENTRY] = { "MAX_ACCEPT_ENTRY", 2048, INT_MAX },
- [TOMOYO_VERBOSE] = { "TOMOYO_VERBOSE", 1, 1 },
+/* String table for PREFERENCE keyword. */
+static const char * const tomoyo_pref_keywords[TOMOYO_MAX_PREF] = {
+ [TOMOYO_PREF_MAX_AUDIT_LOG] = "max_audit_log",
+ [TOMOYO_PREF_MAX_LEARNING_ENTRY] = "max_learning_entry",
};
-/*
- * tomoyo_profile is a structure which is used for holding the mode of access
- * controls. TOMOYO has 4 modes: disabled, learning, permissive, enforcing.
- * An administrator can define up to 256 profiles.
- * The ->profile of "struct tomoyo_domain_info" is used for remembering
- * the profile's number (0 - 255) assigned to that domain.
- */
-static struct tomoyo_profile {
- unsigned int value[TOMOYO_MAX_CONTROL_INDEX];
- const struct tomoyo_path_info *comment;
-} *tomoyo_profile_ptr[TOMOYO_MAX_PROFILES];
+/* String table for path operation. */
+const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION] = {
+ [TOMOYO_TYPE_EXECUTE] = "execute",
+ [TOMOYO_TYPE_READ] = "read",
+ [TOMOYO_TYPE_WRITE] = "write",
+ [TOMOYO_TYPE_APPEND] = "append",
+ [TOMOYO_TYPE_UNLINK] = "unlink",
+ [TOMOYO_TYPE_GETATTR] = "getattr",
+ [TOMOYO_TYPE_RMDIR] = "rmdir",
+ [TOMOYO_TYPE_TRUNCATE] = "truncate",
+ [TOMOYO_TYPE_SYMLINK] = "symlink",
+ [TOMOYO_TYPE_CHROOT] = "chroot",
+ [TOMOYO_TYPE_UMOUNT] = "unmount",
+};
+
+/* String table for socket's operation. */
+const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION] = {
+ [TOMOYO_NETWORK_BIND] = "bind",
+ [TOMOYO_NETWORK_LISTEN] = "listen",
+ [TOMOYO_NETWORK_CONNECT] = "connect",
+ [TOMOYO_NETWORK_SEND] = "send",
+};
+
+/* String table for categories. */
+static const char * const tomoyo_category_keywords
+[TOMOYO_MAX_MAC_CATEGORY_INDEX] = {
+ [TOMOYO_MAC_CATEGORY_FILE] = "file",
+ [TOMOYO_MAC_CATEGORY_NETWORK] = "network",
+ [TOMOYO_MAC_CATEGORY_MISC] = "misc",
+};
/* Permit policy management by non-root user? */
static bool tomoyo_manage_by_non_root;
/* Utility functions. */
-/* Open operation for /sys/kernel/security/tomoyo/ interface. */
-static int tomoyo_open_control(const u8 type, struct file *file);
-/* Close /sys/kernel/security/tomoyo/ interface. */
-static int tomoyo_close_control(struct file *file);
-/* Read operation for /sys/kernel/security/tomoyo/ interface. */
-static int tomoyo_read_control(struct file *file, char __user *buffer,
- const int buffer_len);
-/* Write operation for /sys/kernel/security/tomoyo/ interface. */
-static int tomoyo_write_control(struct file *file, const char __user *buffer,
- const int buffer_len);
-
-/**
- * tomoyo_is_byte_range - Check whether the string isa \ooo style octal value.
- *
- * @str: Pointer to the string.
- *
- * Returns true if @str is a \ooo style octal value, false otherwise.
- *
- * TOMOYO uses \ooo style representation for 0x01 - 0x20 and 0x7F - 0xFF.
- * This function verifies that \ooo is in valid range.
- */
-static inline bool tomoyo_is_byte_range(const char *str)
-{
- return *str >= '0' && *str++ <= '3' &&
- *str >= '0' && *str++ <= '7' &&
- *str >= '0' && *str <= '7';
-}
-
/**
- * tomoyo_is_alphabet_char - Check whether the character is an alphabet.
- *
- * @c: The character to check.
+ * tomoyo_yesno - Return "yes" or "no".
*
- * Returns true if @c is an alphabet character, false otherwise.
+ * @value: Bool value.
*/
-static inline bool tomoyo_is_alphabet_char(const char c)
+const char *tomoyo_yesno(const unsigned int value)
{
- return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+ return value ? "yes" : "no";
}
/**
- * tomoyo_make_byte - Make byte value from three octal characters.
+ * tomoyo_addprintf - strncat()-like-snprintf().
*
- * @c1: The first character.
- * @c2: The second character.
- * @c3: The third character.
+ * @buffer: Buffer to write to. Must be '\0'-terminated.
+ * @len: Size of @buffer.
+ * @fmt: The printf()'s format string, followed by parameters.
*
- * Returns byte value.
+ * Returns nothing.
*/
-static inline u8 tomoyo_make_byte(const u8 c1, const u8 c2, const u8 c3)
+static void tomoyo_addprintf(char *buffer, int len, const char *fmt, ...)
{
- return ((c1 - '0') << 6) + ((c2 - '0') << 3) + (c3 - '0');
+ va_list args;
+ const int pos = strlen(buffer);
+ va_start(args, fmt);
+ vsnprintf(buffer + pos, len - pos - 1, fmt, args);
+ va_end(args);
}
/**
- * tomoyo_str_starts - Check whether the given string starts with the given keyword.
+ * tomoyo_flush - Flush queued string to userspace's buffer.
*
- * @src: Pointer to pointer to the string.
- * @find: Pointer to the keyword.
+ * @head: Pointer to "struct tomoyo_io_buffer".
*
- * Returns true if @src starts with @find, false otherwise.
- *
- * The @src is updated to point the first character after the @find
- * if @src starts with @find.
+ * Returns true if all data was flushed, false otherwise.
*/
-static bool tomoyo_str_starts(char **src, const char *find)
+static bool tomoyo_flush(struct tomoyo_io_buffer *head)
{
- const int len = strlen(find);
- char *tmp = *src;
-
- if (strncmp(tmp, find, len))
- return false;
- tmp += len;
- *src = tmp;
+ while (head->r.w_pos) {
+ const char *w = head->r.w[0];
+ size_t len = strlen(w);
+ if (len) {
+ if (len > head->read_user_buf_avail)
+ len = head->read_user_buf_avail;
+ if (!len)
+ return false;
+ if (copy_to_user(head->read_user_buf, w, len))
+ return false;
+ head->read_user_buf_avail -= len;
+ head->read_user_buf += len;
+ w += len;
+ }
+ head->r.w[0] = w;
+ if (*w)
+ return false;
+ /* Add '\0' for audit logs and query. */
+ if (head->poll) {
+ if (!head->read_user_buf_avail ||
+ copy_to_user(head->read_user_buf, "", 1))
+ return false;
+ head->read_user_buf_avail--;
+ head->read_user_buf++;
+ }
+ head->r.w_pos--;
+ for (len = 0; len < head->r.w_pos; len++)
+ head->r.w[len] = head->r.w[len + 1];
+ }
+ head->r.avail = 0;
return true;
}
/**
- * tomoyo_normalize_line - Format string.
- *
- * @buffer: The line to normalize.
+ * tomoyo_set_string - Queue string to "struct tomoyo_io_buffer" structure.
*
- * Leading and trailing whitespaces are removed.
- * Multiple whitespaces are packed into single space.
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @string: String to print.
*
- * Returns nothing.
+ * Note that @string has to be kept valid until @head is kfree()d.
+ * This means that char[] allocated on stack memory cannot be passed to
+ * this function. Use tomoyo_io_printf() for char[] allocated on stack memory.
*/
-static void tomoyo_normalize_line(unsigned char *buffer)
+static void tomoyo_set_string(struct tomoyo_io_buffer *head, const char *string)
{
- unsigned char *sp = buffer;
- unsigned char *dp = buffer;
- bool first = true;
-
- while (tomoyo_is_invalid(*sp))
- sp++;
- while (*sp) {
- if (!first)
- *dp++ = ' ';
- first = false;
- while (tomoyo_is_valid(*sp))
- *dp++ = *sp++;
- while (tomoyo_is_invalid(*sp))
- sp++;
- }
- *dp = '\0';
+ if (head->r.w_pos < TOMOYO_MAX_IO_READ_QUEUE) {
+ head->r.w[head->r.w_pos++] = string;
+ tomoyo_flush(head);
+ } else
+ WARN_ON(1);
}
-/**
- * tomoyo_is_correct_path - Validate a pathname.
- * @filename: The pathname to check.
- * @start_type: Should the pathname start with '/'?
- * 1 = must / -1 = must not / 0 = don't care
- * @pattern_type: Can the pathname contain a wildcard?
- * 1 = must / -1 = must not / 0 = don't care
- * @end_type: Should the pathname end with '/'?
- * 1 = must / -1 = must not / 0 = don't care
- * @function: The name of function calling me.
- *
- * Check whether the given filename follows the naming rules.
- * Returns true if @filename follows the naming rules, false otherwise.
- */
-bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
- const s8 pattern_type, const s8 end_type,
- const char *function)
-{
- const char *const start = filename;
- bool in_repetition = false;
- bool contains_pattern = false;
- unsigned char c;
- unsigned char d;
- unsigned char e;
- const char *original_filename = filename;
-
- if (!filename)
- goto out;
- c = *filename;
- if (start_type == 1) { /* Must start with '/' */
- if (c != '/')
- goto out;
- } else if (start_type == -1) { /* Must not start with '/' */
- if (c == '/')
- goto out;
- }
- if (c)
- c = *(filename + strlen(filename) - 1);
- if (end_type == 1) { /* Must end with '/' */
- if (c != '/')
- goto out;
- } else if (end_type == -1) { /* Must not end with '/' */
- if (c == '/')
- goto out;
- }
- while (1) {
- c = *filename++;
- if (!c)
- break;
- if (c == '\\') {
- c = *filename++;
- switch (c) {
- case '\\': /* "\\" */
- continue;
- case '$': /* "\$" */
- case '+': /* "\+" */
- case '?': /* "\?" */
- case '*': /* "\*" */
- case '@': /* "\@" */
- case 'x': /* "\x" */
- case 'X': /* "\X" */
- case 'a': /* "\a" */
- case 'A': /* "\A" */
- case '-': /* "\-" */
- if (pattern_type == -1)
- break; /* Must not contain pattern */
- contains_pattern = true;
- continue;
- case '{': /* "/\{" */
- if (filename - 3 < start ||
- *(filename - 3) != '/')
- break;
- if (pattern_type == -1)
- break; /* Must not contain pattern */
- contains_pattern = true;
- in_repetition = true;
- continue;
- case '}': /* "\}/" */
- if (*filename != '/')
- break;
- if (!in_repetition)
- break;
- in_repetition = false;
- continue;
- case '0': /* "\ooo" */
- case '1':
- case '2':
- case '3':
- d = *filename++;
- if (d < '0' || d > '7')
- break;
- e = *filename++;
- if (e < '0' || e > '7')
- break;
- c = tomoyo_make_byte(c, d, e);
- if (tomoyo_is_invalid(c))
- continue; /* pattern is not \000 */
- }
- goto out;
- } else if (in_repetition && c == '/') {
- goto out;
- } else if (tomoyo_is_invalid(c)) {
- goto out;
- }
- }
- if (pattern_type == 1) { /* Must contain pattern */
- if (!contains_pattern)
- goto out;
- }
- if (in_repetition)
- goto out;
- return true;
- out:
- printk(KERN_DEBUG "%s: Invalid pathname '%s'\n", function,
- original_filename);
- return false;
-}
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...) __printf(2, 3);
/**
- * tomoyo_is_correct_domain - Check whether the given domainname follows the naming rules.
- * @domainname: The domainname to check.
- * @function: The name of function calling me.
+ * tomoyo_io_printf - printf() to "struct tomoyo_io_buffer" structure.
*
- * Returns true if @domainname follows the naming rules, false otherwise.
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @fmt: The printf()'s format string, followed by parameters.
*/
-bool tomoyo_is_correct_domain(const unsigned char *domainname,
- const char *function)
+static void tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt,
+ ...)
{
- unsigned char c;
- unsigned char d;
- unsigned char e;
- const char *org_domainname = domainname;
-
- if (!domainname || strncmp(domainname, TOMOYO_ROOT_NAME,
- TOMOYO_ROOT_NAME_LEN))
- goto out;
- domainname += TOMOYO_ROOT_NAME_LEN;
- if (!*domainname)
- return true;
- do {
- if (*domainname++ != ' ')
- goto out;
- if (*domainname++ != '/')
- goto out;
- while ((c = *domainname) != '\0' && c != ' ') {
- domainname++;
- if (c == '\\') {
- c = *domainname++;
- switch ((c)) {
- case '\\': /* "\\" */
- continue;
- case '0': /* "\ooo" */
- case '1':
- case '2':
- case '3':
- d = *domainname++;
- if (d < '0' || d > '7')
- break;
- e = *domainname++;
- if (e < '0' || e > '7')
- break;
- c = tomoyo_make_byte(c, d, e);
- if (tomoyo_is_invalid(c))
- /* pattern is not \000 */
- continue;
- }
- goto out;
- } else if (tomoyo_is_invalid(c)) {
- goto out;
- }
- }
- } while (*domainname);
- return true;
- out:
- printk(KERN_DEBUG "%s: Invalid domainname '%s'\n", function,
- org_domainname);
- return false;
+ va_list args;
+ size_t len;
+ size_t pos = head->r.avail;
+ int size = head->readbuf_size - pos;
+ if (size <= 0)
+ return;
+ va_start(args, fmt);
+ len = vsnprintf(head->read_buf + pos, size, fmt, args) + 1;
+ va_end(args);
+ if (pos + len >= head->readbuf_size) {
+ WARN_ON(1);
+ return;
+ }
+ head->r.avail += len;
+ tomoyo_set_string(head, head->read_buf + pos);
}
/**
- * tomoyo_is_domain_def - Check whether the given token can be a domainname.
+ * tomoyo_set_space - Put a space to "struct tomoyo_io_buffer" structure.
*
- * @buffer: The token to check.
+ * @head: Pointer to "struct tomoyo_io_buffer".
*
- * Returns true if @buffer possibly be a domainname, false otherwise.
+ * Returns nothing.
*/
-bool tomoyo_is_domain_def(const unsigned char *buffer)
+static void tomoyo_set_space(struct tomoyo_io_buffer *head)
{
- return !strncmp(buffer, TOMOYO_ROOT_NAME, TOMOYO_ROOT_NAME_LEN);
+ tomoyo_set_string(head, " ");
}
/**
- * tomoyo_find_domain - Find a domain by the given name.
- *
- * @domainname: The domainname to find.
+ * tomoyo_set_lf - Put a line feed to "struct tomoyo_io_buffer" structure.
*
- * Caller must call down_read(&tomoyo_domain_list_lock); or
- * down_write(&tomoyo_domain_list_lock); .
+ * @head: Pointer to "struct tomoyo_io_buffer".
*
- * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ * Returns true if all queued data was flushed, false otherwise.
*/
-struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
+static bool tomoyo_set_lf(struct tomoyo_io_buffer *head)
{
- struct tomoyo_domain_info *domain;
- struct tomoyo_path_info name;
-
- name.name = domainname;
- tomoyo_fill_path_info(&name);
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
- if (!domain->is_deleted &&
- !tomoyo_pathcmp(&name, domain->domainname))
- return domain;
- }
- return NULL;
+ tomoyo_set_string(head, "\n");
+ return !head->r.w_pos;
}
/**
- * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token.
+ * tomoyo_set_slash - Put a slash to "struct tomoyo_io_buffer" structure.
*
- * @filename: The string to evaluate.
+ * @head: Pointer to "struct tomoyo_io_buffer".
*
- * Returns the initial length without a pattern in @filename.
+ * Returns nothing.
*/
-static int tomoyo_const_part_length(const char *filename)
+static void tomoyo_set_slash(struct tomoyo_io_buffer *head)
{
- char c;
- int len = 0;
-
- if (!filename)
- return 0;
- while ((c = *filename++) != '\0') {
- if (c != '\\') {
- len++;
- continue;
- }
- c = *filename++;
- switch (c) {
- case '\\': /* "\\" */
- len += 2;
- continue;
- case '0': /* "\ooo" */
- case '1':
- case '2':
- case '3':
- c = *filename++;
- if (c < '0' || c > '7')
- break;
- c = *filename++;
- if (c < '0' || c > '7')
- break;
- len += 4;
- continue;
- }
- break;
- }
- return len;
+ tomoyo_set_string(head, "/");
}
+/* List of namespaces. */
+LIST_HEAD(tomoyo_namespace_list);
+/* True if namespace other than tomoyo_kernel_namespace is defined. */
+static bool tomoyo_namespace_enabled;
+
/**
- * tomoyo_fill_path_info - Fill in "struct tomoyo_path_info" members.
+ * tomoyo_init_policy_namespace - Initialize namespace.
*
- * @ptr: Pointer to "struct tomoyo_path_info" to fill in.
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
*
- * The caller sets "struct tomoyo_path_info"->name.
+ * Returns nothing.
*/
-void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
+void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns)
{
- const char *name = ptr->name;
- const int len = strlen(name);
-
- ptr->const_len = tomoyo_const_part_length(name);
- ptr->is_dir = len && (name[len - 1] == '/');
- ptr->is_patterned = (ptr->const_len < len);
- ptr->hash = full_name_hash(name, len);
+ unsigned int idx;
+ for (idx = 0; idx < TOMOYO_MAX_ACL_GROUPS; idx++)
+ INIT_LIST_HEAD(&ns->acl_group[idx]);
+ for (idx = 0; idx < TOMOYO_MAX_GROUP; idx++)
+ INIT_LIST_HEAD(&ns->group_list[idx]);
+ for (idx = 0; idx < TOMOYO_MAX_POLICY; idx++)
+ INIT_LIST_HEAD(&ns->policy_list[idx]);
+ ns->profile_version = 20110903;
+ tomoyo_namespace_enabled = !list_empty(&tomoyo_namespace_list);
+ list_add_tail_rcu(&ns->namespace_list, &tomoyo_namespace_list);
}
/**
- * tomoyo_file_matches_pattern2 - Pattern matching without '/' character
- * and "\-" pattern.
+ * tomoyo_print_namespace - Print namespace header.
*
- * @filename: The start of string to check.
- * @filename_end: The end of string to check.
- * @pattern: The start of pattern to compare.
- * @pattern_end: The end of pattern to compare.
+ * @head: Pointer to "struct tomoyo_io_buffer".
*
- * Returns true if @filename matches @pattern, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_file_matches_pattern2(const char *filename,
- const char *filename_end,
- const char *pattern,
- const char *pattern_end)
+static void tomoyo_print_namespace(struct tomoyo_io_buffer *head)
{
- while (filename < filename_end && pattern < pattern_end) {
- char c;
- if (*pattern != '\\') {
- if (*filename++ != *pattern++)
- return false;
- continue;
- }
- c = *filename;
- pattern++;
- switch (*pattern) {
- int i;
- int j;
- case '?':
- if (c == '/') {
- return false;
- } else if (c == '\\') {
- if (filename[1] == '\\')
- filename++;
- else if (tomoyo_is_byte_range(filename + 1))
- filename += 3;
- else
- return false;
- }
- break;
- case '\\':
- if (c != '\\')
- return false;
- if (*++filename != '\\')
- return false;
- break;
- case '+':
- if (!isdigit(c))
- return false;
- break;
- case 'x':
- if (!isxdigit(c))
- return false;
- break;
- case 'a':
- if (!tomoyo_is_alphabet_char(c))
- return false;
- break;
- case '0':
- case '1':
- case '2':
- case '3':
- if (c == '\\' && tomoyo_is_byte_range(filename + 1)
- && strncmp(filename + 1, pattern, 3) == 0) {
- filename += 3;
- pattern += 2;
- break;
- }
- return false; /* Not matched. */
- case '*':
- case '@':
- for (i = 0; i <= filename_end - filename; i++) {
- if (tomoyo_file_matches_pattern2(
- filename + i, filename_end,
- pattern + 1, pattern_end))
- return true;
- c = filename[i];
- if (c == '.' && *pattern == '@')
- break;
- if (c != '\\')
- continue;
- if (filename[i + 1] == '\\')
- i++;
- else if (tomoyo_is_byte_range(filename + i + 1))
- i += 3;
- else
- break; /* Bad pattern. */
- }
- return false; /* Not matched. */
- default:
- j = 0;
- c = *pattern;
- if (c == '$') {
- while (isdigit(filename[j]))
- j++;
- } else if (c == 'X') {
- while (isxdigit(filename[j]))
- j++;
- } else if (c == 'A') {
- while (tomoyo_is_alphabet_char(filename[j]))
- j++;
- }
- for (i = 1; i <= j; i++) {
- if (tomoyo_file_matches_pattern2(
- filename + i, filename_end,
- pattern + 1, pattern_end))
- return true;
- }
- return false; /* Not matched or bad pattern. */
- }
- filename++;
- pattern++;
- }
- while (*pattern == '\\' &&
- (*(pattern + 1) == '*' || *(pattern + 1) == '@'))
- pattern += 2;
- return filename == filename_end && pattern == pattern_end;
+ if (!tomoyo_namespace_enabled)
+ return;
+ tomoyo_set_string(head,
+ container_of(head->r.ns,
+ struct tomoyo_policy_namespace,
+ namespace_list)->name);
+ tomoyo_set_space(head);
}
/**
- * tomoyo_file_matches_pattern - Pattern matching without without '/' character.
+ * tomoyo_print_name_union - Print a tomoyo_name_union.
*
- * @filename: The start of string to check.
- * @filename_end: The end of string to check.
- * @pattern: The start of pattern to compare.
- * @pattern_end: The end of pattern to compare.
- *
- * Returns true if @filename matches @pattern, false otherwise.
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr: Pointer to "struct tomoyo_name_union".
*/
-static bool tomoyo_file_matches_pattern(const char *filename,
- const char *filename_end,
- const char *pattern,
- const char *pattern_end)
+static void tomoyo_print_name_union(struct tomoyo_io_buffer *head,
+ const struct tomoyo_name_union *ptr)
{
- const char *pattern_start = pattern;
- bool first = true;
- bool result;
-
- while (pattern < pattern_end - 1) {
- /* Split at "\-" pattern. */
- if (*pattern++ != '\\' || *pattern++ != '-')
- continue;
- result = tomoyo_file_matches_pattern2(filename,
- filename_end,
- pattern_start,
- pattern - 2);
- if (first)
- result = !result;
- if (result)
- return false;
- first = false;
- pattern_start = pattern;
+ tomoyo_set_space(head);
+ if (ptr->group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->group->group_name->name);
+ } else {
+ tomoyo_set_string(head, ptr->filename->name);
}
- result = tomoyo_file_matches_pattern2(filename, filename_end,
- pattern_start, pattern_end);
- return first ? result : !result;
}
/**
- * tomoyo_path_matches_pattern2 - Do pathname pattern matching.
+ * tomoyo_print_name_union_quoted - Print a tomoyo_name_union with a quote.
*
- * @f: The start of string to check.
- * @p: The start of pattern to compare.
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr: Pointer to "struct tomoyo_name_union".
*
- * Returns true if @f matches @p, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_path_matches_pattern2(const char *f, const char *p)
+static void tomoyo_print_name_union_quoted(struct tomoyo_io_buffer *head,
+ const struct tomoyo_name_union *ptr)
{
- const char *f_delimiter;
- const char *p_delimiter;
-
- while (*f && *p) {
- f_delimiter = strchr(f, '/');
- if (!f_delimiter)
- f_delimiter = f + strlen(f);
- p_delimiter = strchr(p, '/');
- if (!p_delimiter)
- p_delimiter = p + strlen(p);
- if (*p == '\\' && *(p + 1) == '{')
- goto recursive;
- if (!tomoyo_file_matches_pattern(f, f_delimiter, p,
- p_delimiter))
- return false;
- f = f_delimiter;
- if (*f)
- f++;
- p = p_delimiter;
- if (*p)
- p++;
+ if (ptr->group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->group->group_name->name);
+ } else {
+ tomoyo_set_string(head, "\"");
+ tomoyo_set_string(head, ptr->filename->name);
+ tomoyo_set_string(head, "\"");
}
- /* Ignore trailing "\*" and "\@" in @pattern. */
- while (*p == '\\' &&
- (*(p + 1) == '*' || *(p + 1) == '@'))
- p += 2;
- return !*f && !*p;
- recursive:
- /*
- * The "\{" pattern is permitted only after '/' character.
- * This guarantees that below "*(p - 1)" is safe.
- * Also, the "\}" pattern is permitted only before '/' character
- * so that "\{" + "\}" pair will not break the "\-" operator.
- */
- if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' ||
- *(p_delimiter - 1) != '}' || *(p_delimiter - 2) != '\\')
- return false; /* Bad pattern. */
- do {
- /* Compare current component with pattern. */
- if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2,
- p_delimiter - 2))
- break;
- /* Proceed to next component. */
- f = f_delimiter;
- if (!*f)
- break;
- f++;
- /* Continue comparison. */
- if (tomoyo_path_matches_pattern2(f, p_delimiter + 1))
- return true;
- f_delimiter = strchr(f, '/');
- } while (f_delimiter);
- return false; /* Not matched. */
}
/**
- * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern.
- *
- * @filename: The filename to check.
- * @pattern: The pattern to compare.
- *
- * Returns true if matches, false otherwise.
- *
- * The following patterns are available.
- * \\ \ itself.
- * \ooo Octal representation of a byte.
- * \* Zero or more repetitions of characters other than '/'.
- * \@ Zero or more repetitions of characters other than '/' or '.'.
- * \? 1 byte character other than '/'.
- * \$ One or more repetitions of decimal digits.
- * \+ 1 decimal digit.
- * \X One or more repetitions of hexadecimal digits.
- * \x 1 hexadecimal digit.
- * \A One or more repetitions of alphabet characters.
- * \a 1 alphabet character.
+ * tomoyo_print_number_union_nospace - Print a tomoyo_number_union without a space.
*
- * \- Subtraction operator.
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @ptr: Pointer to "struct tomoyo_number_union".
*
- * /\{dir\}/ '/' + 'One or more repetitions of dir/' (e.g. /dir/ /dir/dir/
- * /dir/dir/dir/ ).
+ * Returns nothing.
*/
-bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
- const struct tomoyo_path_info *pattern)
+static void tomoyo_print_number_union_nospace
+(struct tomoyo_io_buffer *head, const struct tomoyo_number_union *ptr)
{
- const char *f = filename->name;
- const char *p = pattern->name;
- const int len = pattern->const_len;
-
- /* If @pattern doesn't contain pattern, I can use strcmp(). */
- if (!pattern->is_patterned)
- return !tomoyo_pathcmp(filename, pattern);
- /* Don't compare directory and non-directory. */
- if (filename->is_dir != pattern->is_dir)
- return false;
- /* Compare the initial length without patterns. */
- if (strncmp(f, p, len))
- return false;
- f += len;
- p += len;
- return tomoyo_path_matches_pattern2(f, p);
+ if (ptr->group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->group->group_name->name);
+ } else {
+ int i;
+ unsigned long min = ptr->values[0];
+ const unsigned long max = ptr->values[1];
+ u8 min_type = ptr->value_type[0];
+ const u8 max_type = ptr->value_type[1];
+ char buffer[128];
+ buffer[0] = '\0';
+ for (i = 0; i < 2; i++) {
+ switch (min_type) {
+ case TOMOYO_VALUE_TYPE_HEXADECIMAL:
+ tomoyo_addprintf(buffer, sizeof(buffer),
+ "0x%lX", min);
+ break;
+ case TOMOYO_VALUE_TYPE_OCTAL:
+ tomoyo_addprintf(buffer, sizeof(buffer),
+ "0%lo", min);
+ break;
+ default:
+ tomoyo_addprintf(buffer, sizeof(buffer), "%lu",
+ min);
+ break;
+ }
+ if (min == max && min_type == max_type)
+ break;
+ tomoyo_addprintf(buffer, sizeof(buffer), "-");
+ min_type = max_type;
+ min = max;
+ }
+ tomoyo_io_printf(head, "%s", buffer);
+ }
}
/**
- * tomoyo_io_printf - Transactional printf() to "struct tomoyo_io_buffer" structure.
+ * tomoyo_print_number_union - Print a tomoyo_number_union.
*
* @head: Pointer to "struct tomoyo_io_buffer".
- * @fmt: The printf()'s format string, followed by parameters.
+ * @ptr: Pointer to "struct tomoyo_number_union".
*
- * Returns true if output was written, false otherwise.
- *
- * The snprintf() will truncate, but tomoyo_io_printf() won't.
+ * Returns nothing.
*/
-bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
+static void tomoyo_print_number_union(struct tomoyo_io_buffer *head,
+ const struct tomoyo_number_union *ptr)
{
- va_list args;
- int len;
- int pos = head->read_avail;
- int size = head->readbuf_size - pos;
-
- if (size <= 0)
- return false;
- va_start(args, fmt);
- len = vsnprintf(head->read_buf + pos, size, fmt, args);
- va_end(args);
- if (pos + len >= head->readbuf_size)
- return false;
- head->read_avail += len;
- return true;
+ tomoyo_set_space(head);
+ tomoyo_print_number_union_nospace(head, ptr);
}
/**
- * tomoyo_get_exe - Get tomoyo_realpath() of current process.
+ * tomoyo_assign_profile - Create a new profile.
*
- * Returns the tomoyo_realpath() of current process on success, NULL otherwise.
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number to create.
*
- * This function uses tomoyo_alloc(), so the caller must call tomoyo_free()
- * if this function didn't return NULL.
+ * Returns pointer to "struct tomoyo_profile" on success, NULL otherwise.
*/
-static const char *tomoyo_get_exe(void)
+static struct tomoyo_profile *tomoyo_assign_profile
+(struct tomoyo_policy_namespace *ns, const unsigned int profile)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- const char *cp = NULL;
-
- if (!mm)
+ struct tomoyo_profile *ptr;
+ struct tomoyo_profile *entry;
+ if (profile >= TOMOYO_MAX_PROFILES)
return NULL;
- down_read(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) {
- cp = tomoyo_realpath_from_path(&vma->vm_file->f_path);
- break;
- }
+ ptr = ns->profile_ptr[profile];
+ if (ptr)
+ return ptr;
+ entry = kzalloc(sizeof(*entry), GFP_NOFS);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ ptr = ns->profile_ptr[profile];
+ if (!ptr && tomoyo_memory_ok(entry)) {
+ ptr = entry;
+ ptr->default_config = TOMOYO_CONFIG_DISABLED |
+ TOMOYO_CONFIG_WANT_GRANT_LOG |
+ TOMOYO_CONFIG_WANT_REJECT_LOG;
+ memset(ptr->config, TOMOYO_CONFIG_USE_DEFAULT,
+ sizeof(ptr->config));
+ ptr->pref[TOMOYO_PREF_MAX_AUDIT_LOG] =
+ CONFIG_SECURITY_TOMOYO_MAX_AUDIT_LOG;
+ ptr->pref[TOMOYO_PREF_MAX_LEARNING_ENTRY] =
+ CONFIG_SECURITY_TOMOYO_MAX_ACCEPT_ENTRY;
+ mb(); /* Avoid out-of-order execution. */
+ ns->profile_ptr[profile] = ptr;
+ entry = NULL;
}
- up_read(&mm->mmap_sem);
- return cp;
+ mutex_unlock(&tomoyo_policy_lock);
+ out:
+ kfree(entry);
+ return ptr;
}
/**
- * tomoyo_get_msg - Get warning message.
+ * tomoyo_profile - Find a profile.
*
- * @is_enforce: Is it enforcing mode?
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number to find.
*
- * Returns "ERROR" or "WARNING".
+ * Returns pointer to "struct tomoyo_profile".
*/
-const char *tomoyo_get_msg(const bool is_enforce)
+struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
+ const u8 profile)
{
- if (is_enforce)
- return "ERROR";
- else
- return "WARNING";
+ static struct tomoyo_profile tomoyo_null_profile;
+ struct tomoyo_profile *ptr = ns->profile_ptr[profile];
+ if (!ptr)
+ ptr = &tomoyo_null_profile;
+ return ptr;
}
/**
- * tomoyo_check_flags - Check mode for specified functionality.
+ * tomoyo_find_yesno - Find values for specified keyword.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @index: The functionality to check mode.
+ * @string: String to check.
+ * @find: Name of keyword.
*
- * TOMOYO checks only process context.
- * This code disables TOMOYO's enforcement in case the function is called from
- * interrupt context.
+ * Returns 1 if "@find=yes" was found, 0 if "@find=no" was found, -1 otherwise.
*/
-unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
- const u8 index)
+static s8 tomoyo_find_yesno(const char *string, const char *find)
{
- const u8 profile = domain->profile;
-
- if (WARN_ON(in_interrupt()))
- return 0;
- return tomoyo_policy_loaded && index < TOMOYO_MAX_CONTROL_INDEX
-#if TOMOYO_MAX_PROFILES != 256
- && profile < TOMOYO_MAX_PROFILES
-#endif
- && tomoyo_profile_ptr[profile] ?
- tomoyo_profile_ptr[profile]->value[index] : 0;
+ const char *cp = strstr(string, find);
+ if (cp) {
+ cp += strlen(find);
+ if (!strncmp(cp, "=yes", 4))
+ return 1;
+ else if (!strncmp(cp, "=no", 3))
+ return 0;
+ }
+ return -1;
}
/**
- * tomoyo_verbose_mode - Check whether TOMOYO is verbose mode.
+ * tomoyo_set_uint - Set value for specified preference.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
+ * @i: Pointer to "unsigned int".
+ * @string: String to check.
+ * @find: Name of keyword.
*
- * Returns true if domain policy violation warning should be printed to
- * console.
+ * Returns nothing.
*/
-bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain)
+static void tomoyo_set_uint(unsigned int *i, const char *string,
+ const char *find)
{
- return tomoyo_check_flags(domain, TOMOYO_VERBOSE) != 0;
+ const char *cp = strstr(string, find);
+ if (cp)
+ sscanf(cp + strlen(find), "=%u", i);
}
/**
- * tomoyo_domain_quota_is_ok - Check for domain's quota.
+ * tomoyo_set_mode - Set mode for specified profile.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
+ * @name: Name of functionality.
+ * @value: Mode for @name.
+ * @profile: Pointer to "struct tomoyo_profile".
*
- * Returns true if the domain is not exceeded quota, false otherwise.
+ * Returns 0 on success, negative value otherwise.
*/
-bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain)
+static int tomoyo_set_mode(char *name, const char *value,
+ struct tomoyo_profile *profile)
{
- unsigned int count = 0;
- struct tomoyo_acl_info *ptr;
-
- if (!domain)
- return true;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (ptr->type & TOMOYO_ACL_DELETED)
- continue;
- switch (tomoyo_acl_type2(ptr)) {
- struct tomoyo_single_path_acl_record *acl1;
- struct tomoyo_double_path_acl_record *acl2;
- u16 perm;
- case TOMOYO_TYPE_SINGLE_PATH_ACL:
- acl1 = container_of(ptr,
- struct tomoyo_single_path_acl_record,
- head);
- perm = acl1->perm;
- if (perm & (1 << TOMOYO_TYPE_EXECUTE_ACL))
- count++;
- if (perm &
- ((1 << TOMOYO_TYPE_READ_ACL) |
- (1 << TOMOYO_TYPE_WRITE_ACL)))
- count++;
- if (perm & (1 << TOMOYO_TYPE_CREATE_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_UNLINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKDIR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_RMDIR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKFIFO_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKSOCK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKBLOCK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_MKCHAR_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_TRUNCATE_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_SYMLINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_REWRITE_ACL))
- count++;
- break;
- case TOMOYO_TYPE_DOUBLE_PATH_ACL:
- acl2 = container_of(ptr,
- struct tomoyo_double_path_acl_record,
- head);
- perm = acl2->perm;
- if (perm & (1 << TOMOYO_TYPE_LINK_ACL))
- count++;
- if (perm & (1 << TOMOYO_TYPE_RENAME_ACL))
- count++;
+ u8 i;
+ u8 config;
+ if (!strcmp(name, "CONFIG")) {
+ i = TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX;
+ config = profile->default_config;
+ } else if (tomoyo_str_starts(&name, "CONFIG::")) {
+ config = 0;
+ for (i = 0; i < TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX; i++) {
+ int len = 0;
+ if (i < TOMOYO_MAX_MAC_INDEX) {
+ const u8 c = tomoyo_index2category[i];
+ const char *category =
+ tomoyo_category_keywords[c];
+ len = strlen(category);
+ if (strncmp(name, category, len) ||
+ name[len++] != ':' || name[len++] != ':')
+ continue;
+ }
+ if (strcmp(name + len, tomoyo_mac_keywords[i]))
+ continue;
+ config = profile->config[i];
break;
}
+ if (i == TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX)
+ return -EINVAL;
+ } else {
+ return -EINVAL;
}
- up_read(&tomoyo_domain_acl_info_list_lock);
- if (count < tomoyo_check_flags(domain, TOMOYO_MAX_ACCEPT_ENTRY))
- return true;
- if (!domain->quota_warned) {
- domain->quota_warned = true;
- printk(KERN_WARNING "TOMOYO-WARNING: "
- "Domain '%s' has so many ACLs to hold. "
- "Stopped learning mode.\n", domain->domainname->name);
+ if (strstr(value, "use_default")) {
+ config = TOMOYO_CONFIG_USE_DEFAULT;
+ } else {
+ u8 mode;
+ for (mode = 0; mode < 4; mode++)
+ if (strstr(value, tomoyo_mode[mode]))
+ /*
+ * Update lower 3 bits in order to distinguish
+ * 'config' from 'TOMOYO_CONFIG_USE_DEFAULT'.
+ */
+ config = (config & ~7) | mode;
+ if (config != TOMOYO_CONFIG_USE_DEFAULT) {
+ switch (tomoyo_find_yesno(value, "grant_log")) {
+ case 1:
+ config |= TOMOYO_CONFIG_WANT_GRANT_LOG;
+ break;
+ case 0:
+ config &= ~TOMOYO_CONFIG_WANT_GRANT_LOG;
+ break;
+ }
+ switch (tomoyo_find_yesno(value, "reject_log")) {
+ case 1:
+ config |= TOMOYO_CONFIG_WANT_REJECT_LOG;
+ break;
+ case 0:
+ config &= ~TOMOYO_CONFIG_WANT_REJECT_LOG;
+ break;
+ }
+ }
}
- return false;
-}
-
-/**
- * tomoyo_find_or_assign_new_profile - Create a new profile.
- *
- * @profile: Profile number to create.
- *
- * Returns pointer to "struct tomoyo_profile" on success, NULL otherwise.
- */
-static struct tomoyo_profile *tomoyo_find_or_assign_new_profile(const unsigned
- int profile)
-{
- static DEFINE_MUTEX(lock);
- struct tomoyo_profile *ptr = NULL;
- int i;
-
- if (profile >= TOMOYO_MAX_PROFILES)
- return NULL;
- mutex_lock(&lock);
- ptr = tomoyo_profile_ptr[profile];
- if (ptr)
- goto ok;
- ptr = tomoyo_alloc_element(sizeof(*ptr));
- if (!ptr)
- goto ok;
- for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++)
- ptr->value[i] = tomoyo_control_array[i].current_value;
- mb(); /* Avoid out-of-order execution. */
- tomoyo_profile_ptr[profile] = ptr;
- ok:
- mutex_unlock(&lock);
- return ptr;
+ if (i < TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX)
+ profile->config[i] = config;
+ else if (config != TOMOYO_CONFIG_USE_DEFAULT)
+ profile->default_config = config;
+ return 0;
}
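For orientation, the name/value pairs this parser accepts look roughly like the following; the keyword spellings come from the tomoyo_category_keywords and tomoyo_mac_keywords tables referenced above, so treat these as examples only:

  name = "CONFIG"                  value = "{ mode=learning grant_log=no reject_log=yes }"
  name = "CONFIG::file::execute"   value = "{ mode=enforcing }"
  name = "CONFIG::file::open"      value = "use_default"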
/**
- * tomoyo_write_profile - Write to profile table.
+ * tomoyo_write_profile - Write profile table.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
@@ -946,173 +661,173 @@ static int tomoyo_write_profile(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
unsigned int i;
- unsigned int value;
char *cp;
struct tomoyo_profile *profile;
- unsigned long num;
-
- cp = strchr(data, '-');
- if (cp)
- *cp = '\0';
- if (strict_strtoul(data, 10, &num))
+ if (sscanf(data, "PROFILE_VERSION=%u", &head->w.ns->profile_version)
+ == 1)
+ return 0;
+ i = simple_strtoul(data, &cp, 10);
+ if (*cp != '-')
return -EINVAL;
- if (cp)
- data = cp + 1;
- profile = tomoyo_find_or_assign_new_profile(num);
+ data = cp + 1;
+ profile = tomoyo_assign_profile(head->w.ns, i);
if (!profile)
return -EINVAL;
cp = strchr(data, '=');
if (!cp)
return -EINVAL;
- *cp = '\0';
+ *cp++ = '\0';
if (!strcmp(data, "COMMENT")) {
- profile->comment = tomoyo_save_name(cp + 1);
+ static DEFINE_SPINLOCK(lock);
+ const struct tomoyo_path_info *new_comment
+ = tomoyo_get_name(cp);
+ const struct tomoyo_path_info *old_comment;
+ if (!new_comment)
+ return -ENOMEM;
+ spin_lock(&lock);
+ old_comment = profile->comment;
+ profile->comment = new_comment;
+ spin_unlock(&lock);
+ tomoyo_put_name(old_comment);
return 0;
}
- for (i = 0; i < TOMOYO_MAX_CONTROL_INDEX; i++) {
- if (strcmp(data, tomoyo_control_array[i].keyword))
- continue;
- if (sscanf(cp + 1, "%u", &value) != 1) {
- int j;
- const char **modes;
- switch (i) {
- case TOMOYO_VERBOSE:
- modes = tomoyo_mode_2;
- break;
- default:
- modes = tomoyo_mode_4;
- break;
- }
- for (j = 0; j < 4; j++) {
- if (strcmp(cp + 1, modes[j]))
- continue;
- value = j;
- break;
- }
- if (j == 4)
- return -EINVAL;
- } else if (value > tomoyo_control_array[i].max_value) {
- value = tomoyo_control_array[i].max_value;
- }
- profile->value[i] = value;
+ if (!strcmp(data, "PREFERENCE")) {
+ for (i = 0; i < TOMOYO_MAX_PREF; i++)
+ tomoyo_set_uint(&profile->pref[i], cp,
+ tomoyo_pref_keywords[i]);
return 0;
}
- return -EINVAL;
+ return tomoyo_set_mode(data, cp, profile);
}
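Put together, the lines this handler accepts on /sys/kernel/security/tomoyo/profile look like the following sketch; the profile number, version and preference values are illustrative:

  # echo 'PROFILE_VERSION=20110903' > /sys/kernel/security/tomoyo/profile
  # echo '1-COMMENT=-----Learning Mode-----' > /sys/kernel/security/tomoyo/profile
  # echo '1-PREFERENCE={ max_audit_log=1024 max_learning_entry=2048 }' > /sys/kernel/security/tomoyo/profile
  # echo '1-CONFIG::file::execute={ mode=learning grant_log=yes reject_log=yes }' > /sys/kernel/security/tomoyo/profile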
/**
- * tomoyo_read_profile - Read from profile table.
+ * tomoyo_print_config - Print mode for specified functionality.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @config: Mode for that functionality.
*
- * Returns 0.
+ * Returns nothing.
+ *
+ * Caller prints functionality's name.
*/
-static int tomoyo_read_profile(struct tomoyo_io_buffer *head)
+static void tomoyo_print_config(struct tomoyo_io_buffer *head, const u8 config)
{
- static const int total = TOMOYO_MAX_CONTROL_INDEX + 1;
- int step;
+ tomoyo_io_printf(head, "={ mode=%s grant_log=%s reject_log=%s }\n",
+ tomoyo_mode[config & 3],
+ tomoyo_yesno(config & TOMOYO_CONFIG_WANT_GRANT_LOG),
+ tomoyo_yesno(config & TOMOYO_CONFIG_WANT_REJECT_LOG));
+}
- if (head->read_eof)
- return 0;
- for (step = head->read_step; step < TOMOYO_MAX_PROFILES * total;
- step++) {
- const u8 index = step / total;
- u8 type = step % total;
- const struct tomoyo_profile *profile
- = tomoyo_profile_ptr[index];
- head->read_step = step;
- if (!profile)
- continue;
- if (!type) { /* Print profile' comment tag. */
- if (!tomoyo_io_printf(head, "%u-COMMENT=%s\n",
- index, profile->comment ?
- profile->comment->name : ""))
+/**
+ * tomoyo_read_profile - Read profile table.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_read_profile(struct tomoyo_io_buffer *head)
+{
+ u8 index;
+ struct tomoyo_policy_namespace *ns =
+ container_of(head->r.ns, typeof(*ns), namespace_list);
+ const struct tomoyo_profile *profile;
+ if (head->r.eof)
+ return;
+ next:
+ index = head->r.index;
+ profile = ns->profile_ptr[index];
+ switch (head->r.step) {
+ case 0:
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "PROFILE_VERSION=%u\n",
+ ns->profile_version);
+ head->r.step++;
+ break;
+ case 1:
+ for ( ; head->r.index < TOMOYO_MAX_PROFILES;
+ head->r.index++)
+ if (ns->profile_ptr[head->r.index])
break;
- continue;
+ if (head->r.index == TOMOYO_MAX_PROFILES) {
+ head->r.eof = true;
+ return;
}
- type--;
- if (type < TOMOYO_MAX_CONTROL_INDEX) {
- const unsigned int value = profile->value[type];
- const char **modes = NULL;
- const char *keyword
- = tomoyo_control_array[type].keyword;
- switch (tomoyo_control_array[type].max_value) {
- case 3:
- modes = tomoyo_mode_4;
- break;
- case 1:
- modes = tomoyo_mode_2;
- break;
- }
- if (modes) {
- if (!tomoyo_io_printf(head, "%u-%s=%s\n", index,
- keyword, modes[value]))
- break;
- } else {
- if (!tomoyo_io_printf(head, "%u-%s=%u\n", index,
- keyword, value))
- break;
- }
+ head->r.step++;
+ break;
+ case 2:
+ {
+ u8 i;
+ const struct tomoyo_path_info *comment =
+ profile->comment;
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "%u-COMMENT=", index);
+ tomoyo_set_string(head, comment ? comment->name : "");
+ tomoyo_set_lf(head);
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "%u-PREFERENCE={ ", index);
+ for (i = 0; i < TOMOYO_MAX_PREF; i++)
+ tomoyo_io_printf(head, "%s=%u ",
+ tomoyo_pref_keywords[i],
+ profile->pref[i]);
+ tomoyo_set_string(head, "}\n");
+ head->r.step++;
+ }
+ break;
+ case 3:
+ {
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "%u-%s", index, "CONFIG");
+ tomoyo_print_config(head, profile->default_config);
+ head->r.bit = 0;
+ head->r.step++;
+ }
+ break;
+ case 4:
+ for ( ; head->r.bit < TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX; head->r.bit++) {
+ const u8 i = head->r.bit;
+ const u8 config = profile->config[i];
+ if (config == TOMOYO_CONFIG_USE_DEFAULT)
+ continue;
+ tomoyo_print_namespace(head);
+ if (i < TOMOYO_MAX_MAC_INDEX)
+ tomoyo_io_printf(head, "%u-CONFIG::%s::%s",
+ index,
+ tomoyo_category_keywords
+ [tomoyo_index2category[i]],
+ tomoyo_mac_keywords[i]);
+ else
+ tomoyo_io_printf(head, "%u-CONFIG::%s", index,
+ tomoyo_mac_keywords[i]);
+ tomoyo_print_config(head, config);
+ head->r.bit++;
+ break;
+ }
+ if (head->r.bit == TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX) {
+ head->r.index++;
+ head->r.step = 1;
}
+ break;
}
- if (step == TOMOYO_MAX_PROFILES * total)
- head->read_eof = true;
- return 0;
+ if (tomoyo_flush(head))
+ goto next;
}
-/*
- * tomoyo_policy_manager_entry is a structure which is used for holding list of
- * domainnames or programs which are permitted to modify configuration via
- * /sys/kernel/security/tomoyo/ interface.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_policy_manager_list .
- * (2) "manager" is a domainname or a program's pathname.
- * (3) "is_domain" is a bool which is true if "manager" is a domainname, false
- * otherwise.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_policy_manager_entry {
- struct list_head list;
- /* A path to program or a domainname. */
- const struct tomoyo_path_info *manager;
- bool is_domain; /* True if manager is a domainname. */
- bool is_deleted; /* True if this entry is deleted. */
-};
-
-/*
- * tomoyo_policy_manager_list is used for holding list of domainnames or
- * programs which are permitted to modify configuration via
- * /sys/kernel/security/tomoyo/ interface.
- *
- * An entry is added by
- *
- * # echo '<kernel> /sbin/mingetty /bin/login /bin/bash' > \
- * /sys/kernel/security/tomoyo/manager
- * (if you want to specify by a domainname)
- *
- * or
- *
- * # echo '/usr/lib/ccs/editpolicy' > /sys/kernel/security/tomoyo/manager
- * (if you want to specify by a program's location)
- *
- * and is deleted by
- *
- * # echo 'delete <kernel> /sbin/mingetty /bin/login /bin/bash' > \
- * /sys/kernel/security/tomoyo/manager
- *
- * or
- *
- * # echo 'delete /usr/lib/ccs/editpolicy' > \
- * /sys/kernel/security/tomoyo/manager
+/**
+ * tomoyo_same_manager - Check for duplicated "struct tomoyo_manager" entry.
*
- * and all entries are retrieved by
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
*
- * # cat /sys/kernel/security/tomoyo/manager
+ * Returns true if @a == @b, false otherwise.
*/
-static LIST_HEAD(tomoyo_policy_manager_list);
-static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
+static bool tomoyo_same_manager(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ return container_of(a, struct tomoyo_manager, head)->manager ==
+ container_of(b, struct tomoyo_manager, head)->manager;
+}
/**
* tomoyo_update_manager_entry - Add a manager entry.
@@ -1121,110 +836,88 @@ static DECLARE_RWSEM(tomoyo_policy_manager_list_lock);
* @is_delete: True if it is a delete request.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_update_manager_entry(const char *manager,
const bool is_delete)
{
- struct tomoyo_policy_manager_entry *new_entry;
- struct tomoyo_policy_manager_entry *ptr;
- const struct tomoyo_path_info *saved_manager;
- int error = -ENOMEM;
- bool is_domain = false;
-
- if (tomoyo_is_domain_def(manager)) {
- if (!tomoyo_is_correct_domain(manager, __func__))
- return -EINVAL;
- is_domain = true;
- } else {
- if (!tomoyo_is_correct_path(manager, 1, -1, -1, __func__))
- return -EINVAL;
- }
- saved_manager = tomoyo_save_name(manager);
- if (!saved_manager)
- return -ENOMEM;
- down_write(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
- if (ptr->manager != saved_manager)
- continue;
- ptr->is_deleted = is_delete;
- error = 0;
- goto out;
- }
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ struct tomoyo_manager e = { };
+ struct tomoyo_acl_param param = {
+ /* .ns = &tomoyo_kernel_namespace, */
+ .is_delete = is_delete,
+ .list = &tomoyo_kernel_namespace.
+ policy_list[TOMOYO_ID_MANAGER],
+ };
+ int error = is_delete ? -ENOENT : -ENOMEM;
+ if (!tomoyo_correct_domain(manager) &&
+ !tomoyo_correct_word(manager))
+ return -EINVAL;
+ e.manager = tomoyo_get_name(manager);
+ if (e.manager) {
+ error = tomoyo_update_policy(&e.head, sizeof(e), &param,
+ tomoyo_same_manager);
+ tomoyo_put_name(e.manager);
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->manager = saved_manager;
- new_entry->is_domain = is_domain;
- list_add_tail(&new_entry->list, &tomoyo_policy_manager_list);
- error = 0;
- out:
- up_write(&tomoyo_policy_manager_list_lock);
return error;
}
/**
- * tomoyo_write_manager_policy - Write manager policy.
+ * tomoyo_write_manager - Write manager policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_write_manager_policy(struct tomoyo_io_buffer *head)
+static int tomoyo_write_manager(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
- bool is_delete = tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE);
if (!strcmp(data, "manage_by_non_root")) {
- tomoyo_manage_by_non_root = !is_delete;
+ tomoyo_manage_by_non_root = !head->w.is_delete;
return 0;
}
- return tomoyo_update_manager_entry(data, is_delete);
+ return tomoyo_update_manager_entry(data, head->w.is_delete);
}
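The manager interface still takes the same kind of input as the comment block removed above described: a program path, a domainname, or the manage_by_non_root toggle, with an optional "delete " prefix that is stripped into head->w.is_delete before this handler runs. For example:

  # echo '/usr/lib/ccs/editpolicy' > /sys/kernel/security/tomoyo/manager
  # echo '<kernel> /sbin/mingetty /bin/login /bin/bash' > /sys/kernel/security/tomoyo/manager
  # echo 'delete /usr/lib/ccs/editpolicy' > /sys/kernel/security/tomoyo/manager
  # echo 'manage_by_non_root' > /sys/kernel/security/tomoyo/manager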
/**
- * tomoyo_read_manager_policy - Read manager policy.
+ * tomoyo_read_manager - Read manager policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
- * Returns 0.
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_read_manager_policy(struct tomoyo_io_buffer *head)
+static void tomoyo_read_manager(struct tomoyo_io_buffer *head)
{
- struct list_head *pos;
- bool done = true;
-
- if (head->read_eof)
- return 0;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_cookie(pos, head->read_var2,
- &tomoyo_policy_manager_list) {
- struct tomoyo_policy_manager_entry *ptr;
- ptr = list_entry(pos, struct tomoyo_policy_manager_entry,
- list);
- if (ptr->is_deleted)
+ if (head->r.eof)
+ return;
+ list_for_each_cookie(head->r.acl, &tomoyo_kernel_namespace.
+ policy_list[TOMOYO_ID_MANAGER]) {
+ struct tomoyo_manager *ptr =
+ list_entry(head->r.acl, typeof(*ptr), head.list);
+ if (ptr->head.is_deleted)
continue;
- done = tomoyo_io_printf(head, "%s\n", ptr->manager->name);
- if (!done)
- break;
+ if (!tomoyo_flush(head))
+ return;
+ tomoyo_set_string(head, ptr->manager->name);
+ tomoyo_set_lf(head);
}
- up_read(&tomoyo_policy_manager_list_lock);
- head->read_eof = done;
- return 0;
+ head->r.eof = true;
}
/**
- * tomoyo_is_policy_manager - Check whether the current process is a policy manager.
+ * tomoyo_manager - Check whether the current process is a policy manager.
*
* Returns true if the current process is permitted to modify policy
* via /sys/kernel/security/tomoyo/ interface.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static bool tomoyo_is_policy_manager(void)
+static bool tomoyo_manager(void)
{
- struct tomoyo_policy_manager_entry *ptr;
+ struct tomoyo_manager *ptr;
const char *exe;
const struct task_struct *task = current;
const struct tomoyo_path_info *domainname = tomoyo_domain()->domainname;
@@ -1232,31 +925,22 @@ static bool tomoyo_is_policy_manager(void)
if (!tomoyo_policy_loaded)
return true;
- if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid))
+ if (!tomoyo_manage_by_non_root &&
+ (!uid_eq(task->cred->uid, GLOBAL_ROOT_UID) ||
+ !uid_eq(task->cred->euid, GLOBAL_ROOT_UID)))
return false;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
- if (!ptr->is_deleted && ptr->is_domain
- && !tomoyo_pathcmp(domainname, ptr->manager)) {
- found = true;
- break;
- }
- }
- up_read(&tomoyo_policy_manager_list_lock);
- if (found)
- return true;
exe = tomoyo_get_exe();
if (!exe)
return false;
- down_read(&tomoyo_policy_manager_list_lock);
- list_for_each_entry(ptr, &tomoyo_policy_manager_list, list) {
- if (!ptr->is_deleted && !ptr->is_domain
- && !strcmp(exe, ptr->manager->name)) {
+ list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.
+ policy_list[TOMOYO_ID_MANAGER], head.list) {
+ if (!ptr->head.is_deleted &&
+ (!tomoyo_pathcmp(domainname, ptr->manager) ||
+ !strcmp(exe, ptr->manager->name))) {
found = true;
break;
}
}
- up_read(&tomoyo_policy_manager_list_lock);
if (!found) { /* Reduce error messages. */
static pid_t last_pid;
const pid_t pid = current->pid;
@@ -1266,72 +950,116 @@ static bool tomoyo_is_policy_manager(void)
last_pid = pid;
}
}
- tomoyo_free(exe);
+ kfree(exe);
return found;
}
+static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
+(unsigned int serial);
+
/**
- * tomoyo_is_select_one - Parse select command.
+ * tomoyo_select_domain - Parse select command.
*
* @head: Pointer to "struct tomoyo_io_buffer".
* @data: String to parse.
*
* Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static bool tomoyo_is_select_one(struct tomoyo_io_buffer *head,
+static bool tomoyo_select_domain(struct tomoyo_io_buffer *head,
const char *data)
{
unsigned int pid;
struct tomoyo_domain_info *domain = NULL;
-
- if (sscanf(data, "pid=%u", &pid) == 1) {
+ bool global_pid = false;
+ if (strncmp(data, "select ", 7))
+ return false;
+ data += 7;
+ if (sscanf(data, "pid=%u", &pid) == 1 ||
+ (global_pid = true, sscanf(data, "global-pid=%u", &pid) == 1)) {
struct task_struct *p;
- read_lock(&tasklist_lock);
- p = find_task_by_vpid(pid);
+ rcu_read_lock();
+ if (global_pid)
+ p = find_task_by_pid_ns(pid, &init_pid_ns);
+ else
+ p = find_task_by_vpid(pid);
if (p)
domain = tomoyo_real_domain(p);
- read_unlock(&tasklist_lock);
+ rcu_read_unlock();
} else if (!strncmp(data, "domain=", 7)) {
- if (tomoyo_is_domain_def(data + 7)) {
- down_read(&tomoyo_domain_list_lock);
+ if (tomoyo_domain_def(data + 7))
domain = tomoyo_find_domain(data + 7);
- up_read(&tomoyo_domain_list_lock);
- }
+ } else if (sscanf(data, "Q=%u", &pid) == 1) {
+ domain = tomoyo_find_domain_by_qid(pid);
} else
return false;
- head->write_var1 = domain;
+ head->w.domain = domain;
/* Accessing read_buf is safe because head->io_sem is held. */
if (!head->read_buf)
return true; /* Do nothing if open(O_WRONLY). */
- head->read_avail = 0;
+ memset(&head->r, 0, sizeof(head->r));
+ head->r.print_this_domain_only = true;
+ if (domain)
+ head->r.domain = &domain->list;
+ else
+ head->r.eof = 1;
tomoyo_io_printf(head, "# select %s\n", data);
- head->read_single_domain = true;
- head->read_eof = !domain;
- if (domain) {
- struct tomoyo_domain_info *d;
- head->read_var1 = NULL;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_entry(d, &tomoyo_domain_list, list) {
- if (d == domain)
- break;
- head->read_var1 = &d->list;
- }
- up_read(&tomoyo_domain_list_lock);
- head->read_var2 = NULL;
- head->read_bit = 0;
- head->read_step = 0;
- if (domain->is_deleted)
- tomoyo_io_printf(head, "# This is a deleted domain.\n");
- }
+ if (domain && domain->is_deleted)
+ tomoyo_io_printf(head, "# This is a deleted domain.\n");
return true;
}
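Typical select requests written to the domain policy interface look like the lines below (the PID and domainname are examples; "Q=" names the serial number of a pending query, resolved by tomoyo_find_domain_by_qid(), forward-declared above):

  # echo 'select pid=4242' > /sys/kernel/security/tomoyo/domain_policy
  # echo 'select global-pid=4242' > /sys/kernel/security/tomoyo/domain_policy
  # echo 'select domain=<kernel> /usr/sbin/sshd' > /sys/kernel/security/tomoyo/domain_policy
  # echo 'select Q=10' > /sys/kernel/security/tomoyo/domain_policy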
/**
+ * tomoyo_same_task_acl - Check for duplicated "struct tomoyo_task_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_task_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_task_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_task_acl *p2 = container_of(b, typeof(*p2), head);
+ return p1->domainname == p2->domainname;
+}
+
+/**
+ * tomoyo_write_task - Update task related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_task(struct tomoyo_acl_param *param)
+{
+ int error = -EINVAL;
+ if (tomoyo_str_starts(&param->data, "manual_domain_transition ")) {
+ struct tomoyo_task_acl e = {
+ .head.type = TOMOYO_TYPE_MANUAL_TASK_ACL,
+ .domainname = tomoyo_get_domainname(param),
+ };
+ if (e.domainname)
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_task_acl,
+ NULL);
+ tomoyo_put_name(e.domainname);
+ }
+ return error;
+}
+
+/**
* tomoyo_delete_domain - Delete a domain.
*
* @domainname: The name of domain.
*
- * Returns 0.
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
static int tomoyo_delete_domain(char *domainname)
{
@@ -1340,9 +1068,10 @@ static int tomoyo_delete_domain(char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
- down_write(&tomoyo_domain_list_lock);
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return -EINTR;
/* Is there an active domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
/* Never delete tomoyo_kernel_domain */
if (domain == &tomoyo_kernel_domain)
continue;
@@ -1352,326 +1081,541 @@ static int tomoyo_delete_domain(char *domainname)
domain->is_deleted = true;
break;
}
- up_write(&tomoyo_domain_list_lock);
+ mutex_unlock(&tomoyo_policy_lock);
return 0;
}
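As before, deletion is requested through the domain policy interface; the domain is only marked is_deleted here and is reclaimed later by the garbage collector. An illustrative request:

  # echo 'delete <kernel> /usr/sbin/sshd' > /sys/kernel/security/tomoyo/domain_policy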
/**
- * tomoyo_write_domain_policy - Write domain policy.
+ * tomoyo_write_domain2 - Write domain policy.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @list: Pointer to "struct list_head".
+ * @data: Policy to be interpreted.
+ * @is_delete: True if it is a delete request.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_domain2(struct tomoyo_policy_namespace *ns,
+ struct list_head *list, char *data,
+ const bool is_delete)
+{
+ struct tomoyo_acl_param param = {
+ .ns = ns,
+ .list = list,
+ .data = data,
+ .is_delete = is_delete,
+ };
+ static const struct {
+ const char *keyword;
+ int (*write) (struct tomoyo_acl_param *);
+ } tomoyo_callback[5] = {
+ { "file ", tomoyo_write_file },
+ { "network inet ", tomoyo_write_inet_network },
+ { "network unix ", tomoyo_write_unix_network },
+ { "misc ", tomoyo_write_misc },
+ { "task ", tomoyo_write_task },
+ };
+ u8 i;
+
+ for (i = 0; i < ARRAY_SIZE(tomoyo_callback); i++) {
+ if (!tomoyo_str_starts(&param.data,
+ tomoyo_callback[i].keyword))
+ continue;
+ return tomoyo_callback[i].write(&param);
+ }
+ return -EINVAL;
+}
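A domain's ACL lines are thus dispatched purely on their leading keyword; one illustrative line per handler (paths, addresses and variable names are examples only):

  file read /etc/ld.so.cache
  network inet stream connect 127.0.0.1 8080
  network unix stream connect /var/run/dbus/system_bus_socket
  misc env LD_PRELOAD
  task manual_domain_transition <kernel> /usr/sbin/sshd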
+
+/* String table for domain flags. */
+const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS] = {
+ [TOMOYO_DIF_QUOTA_WARNED] = "quota_exceeded\n",
+ [TOMOYO_DIF_TRANSITION_FAILED] = "transition_failed\n",
+};
+
+/**
+ * tomoyo_write_domain - Write domain policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_write_domain_policy(struct tomoyo_io_buffer *head)
+static int tomoyo_write_domain(struct tomoyo_io_buffer *head)
{
char *data = head->write_buf;
- struct tomoyo_domain_info *domain = head->write_var1;
- bool is_delete = false;
- bool is_select = false;
+ struct tomoyo_policy_namespace *ns;
+ struct tomoyo_domain_info *domain = head->w.domain;
+ const bool is_delete = head->w.is_delete;
+ bool is_select = !is_delete && tomoyo_str_starts(&data, "select ");
unsigned int profile;
-
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE))
- is_delete = true;
- else if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_SELECT))
- is_select = true;
- if (is_select && tomoyo_is_select_one(head, data))
- return 0;
- /* Don't allow updating policies by non manager programs. */
- if (!tomoyo_is_policy_manager())
- return -EPERM;
- if (tomoyo_is_domain_def(data)) {
+ if (*data == '<') {
+ int ret = 0;
domain = NULL;
if (is_delete)
- tomoyo_delete_domain(data);
- else if (is_select) {
- down_read(&tomoyo_domain_list_lock);
+ ret = tomoyo_delete_domain(data);
+ else if (is_select)
domain = tomoyo_find_domain(data);
- up_read(&tomoyo_domain_list_lock);
- } else
- domain = tomoyo_find_or_assign_new_domain(data, 0);
- head->write_var1 = domain;
- return 0;
+ else
+ domain = tomoyo_assign_domain(data, false);
+ head->w.domain = domain;
+ return ret;
}
if (!domain)
return -EINVAL;
-
- if (sscanf(data, TOMOYO_KEYWORD_USE_PROFILE "%u", &profile) == 1
+ ns = domain->ns;
+ if (sscanf(data, "use_profile %u", &profile) == 1
&& profile < TOMOYO_MAX_PROFILES) {
- if (tomoyo_profile_ptr[profile] || !tomoyo_policy_loaded)
+ if (!tomoyo_policy_loaded || ns->profile_ptr[profile])
domain->profile = (u8) profile;
return 0;
}
- if (!strcmp(data, TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ)) {
- tomoyo_set_domain_flag(domain, is_delete,
- TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ);
+ if (sscanf(data, "use_group %u\n", &profile) == 1
+ && profile < TOMOYO_MAX_ACL_GROUPS) {
+ if (!is_delete)
+ domain->group = (u8) profile;
return 0;
}
- return tomoyo_write_file_policy(data, domain, is_delete);
+ for (profile = 0; profile < TOMOYO_MAX_DOMAIN_INFO_FLAGS; profile++) {
+ const char *cp = tomoyo_dif[profile];
+ if (strncmp(data, cp, strlen(cp) - 1))
+ continue;
+ domain->flags[profile] = !is_delete;
+ return 0;
+ }
+ return tomoyo_write_domain2(ns, &domain->acl_info_list, data,
+ is_delete);
}
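A complete block written to /sys/kernel/security/tomoyo/domain_policy therefore starts with a domainname line that selects (or creates) the domain, followed by the per-domain directives handled above and then ACL lines; the profile and group numbers here are examples:

  <kernel> /usr/sbin/sshd
  use_profile 1
  use_group 0
  file read /etc/ssh/sshd_config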
/**
- * tomoyo_print_single_path_acl - Print a single path ACL entry.
+ * tomoyo_print_condition - Print condition part.
*
* @head: Pointer to "struct tomoyo_io_buffer".
- * @ptr: Pointer to "struct tomoyo_single_path_acl_record".
+ * @cond: Pointer to "struct tomoyo_condition".
*
* Returns true on success, false otherwise.
*/
-static bool tomoyo_print_single_path_acl(struct tomoyo_io_buffer *head,
- struct tomoyo_single_path_acl_record *
- ptr)
+static bool tomoyo_print_condition(struct tomoyo_io_buffer *head,
+ const struct tomoyo_condition *cond)
{
- int pos;
- u8 bit;
- const char *atmark = "";
- const char *filename;
- const u16 perm = ptr->perm;
-
- filename = ptr->filename->name;
- for (bit = head->read_bit; bit < TOMOYO_MAX_SINGLE_PATH_OPERATION;
- bit++) {
- const char *msg;
- if (!(perm & (1 << bit)))
- continue;
- /* Print "read/write" instead of "read" and "write". */
- if ((bit == TOMOYO_TYPE_READ_ACL ||
- bit == TOMOYO_TYPE_WRITE_ACL)
- && (perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
- continue;
- msg = tomoyo_sp2keyword(bit);
- pos = head->read_avail;
- if (!tomoyo_io_printf(head, "allow_%s %s%s\n", msg,
- atmark, filename))
- goto out;
+ switch (head->r.cond_step) {
+ case 0:
+ head->r.cond_index = 0;
+ head->r.cond_step++;
+ if (cond->transit) {
+ tomoyo_set_space(head);
+ tomoyo_set_string(head, cond->transit->name);
+ }
+ /* fall through */
+ case 1:
+ {
+ const u16 condc = cond->condc;
+ const struct tomoyo_condition_element *condp =
+ (typeof(condp)) (cond + 1);
+ const struct tomoyo_number_union *numbers_p =
+ (typeof(numbers_p)) (condp + condc);
+ const struct tomoyo_name_union *names_p =
+ (typeof(names_p))
+ (numbers_p + cond->numbers_count);
+ const struct tomoyo_argv *argv =
+ (typeof(argv)) (names_p + cond->names_count);
+ const struct tomoyo_envp *envp =
+ (typeof(envp)) (argv + cond->argc);
+ u16 skip;
+ for (skip = 0; skip < head->r.cond_index; skip++) {
+ const u8 left = condp->left;
+ const u8 right = condp->right;
+ condp++;
+ switch (left) {
+ case TOMOYO_ARGV_ENTRY:
+ argv++;
+ continue;
+ case TOMOYO_ENVP_ENTRY:
+ envp++;
+ continue;
+ case TOMOYO_NUMBER_UNION:
+ numbers_p++;
+ break;
+ }
+ switch (right) {
+ case TOMOYO_NAME_UNION:
+ names_p++;
+ break;
+ case TOMOYO_NUMBER_UNION:
+ numbers_p++;
+ break;
+ }
+ }
+ while (head->r.cond_index < condc) {
+ const u8 match = condp->equals;
+ const u8 left = condp->left;
+ const u8 right = condp->right;
+ if (!tomoyo_flush(head))
+ return false;
+ condp++;
+ head->r.cond_index++;
+ tomoyo_set_space(head);
+ switch (left) {
+ case TOMOYO_ARGV_ENTRY:
+ tomoyo_io_printf(head,
+ "exec.argv[%lu]%s=\"",
+ argv->index, argv->
+ is_not ? "!" : "");
+ tomoyo_set_string(head,
+ argv->value->name);
+ tomoyo_set_string(head, "\"");
+ argv++;
+ continue;
+ case TOMOYO_ENVP_ENTRY:
+ tomoyo_set_string(head,
+ "exec.envp[\"");
+ tomoyo_set_string(head,
+ envp->name->name);
+ tomoyo_io_printf(head, "\"]%s=", envp->
+ is_not ? "!" : "");
+ if (envp->value) {
+ tomoyo_set_string(head, "\"");
+ tomoyo_set_string(head, envp->
+ value->name);
+ tomoyo_set_string(head, "\"");
+ } else {
+ tomoyo_set_string(head,
+ "NULL");
+ }
+ envp++;
+ continue;
+ case TOMOYO_NUMBER_UNION:
+ tomoyo_print_number_union_nospace
+ (head, numbers_p++);
+ break;
+ default:
+ tomoyo_set_string(head,
+ tomoyo_condition_keyword[left]);
+ break;
+ }
+ tomoyo_set_string(head, match ? "=" : "!=");
+ switch (right) {
+ case TOMOYO_NAME_UNION:
+ tomoyo_print_name_union_quoted
+ (head, names_p++);
+ break;
+ case TOMOYO_NUMBER_UNION:
+ tomoyo_print_number_union_nospace
+ (head, numbers_p++);
+ break;
+ default:
+ tomoyo_set_string(head,
+ tomoyo_condition_keyword[right]);
+ break;
+ }
+ }
+ }
+ head->r.cond_step++;
+ /* fall through */
+ case 2:
+ if (!tomoyo_flush(head))
+ break;
+ head->r.cond_step++;
+ /* fall through */
+ case 3:
+ if (cond->grant_log != TOMOYO_GRANTLOG_AUTO)
+ tomoyo_io_printf(head, " grant_log=%s",
+ tomoyo_yesno(cond->grant_log ==
+ TOMOYO_GRANTLOG_YES));
+ tomoyo_set_lf(head);
+ return true;
}
- head->read_bit = 0;
- return true;
- out:
- head->read_bit = bit;
- head->read_avail = pos;
return false;
}
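The condition part is appended to the ACL line it qualifies, so printed entries end up looking roughly like these (pathnames, condition keywords and values are illustrative):

  file read /home/\*/.ssh/config task.uid=path1.uid
  file execute /bin/sh exec.realpath="/bin/dash" exec.argv[0]="sh" grant_log=yes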
/**
- * tomoyo_print_double_path_acl - Print a double path ACL entry.
+ * tomoyo_set_group - Print "acl_group " header keyword and category name.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
- * @ptr: Pointer to "struct tomoyo_double_path_acl_record".
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @category: Category name.
*
- * Returns true on success, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_print_double_path_acl(struct tomoyo_io_buffer *head,
- struct tomoyo_double_path_acl_record *
- ptr)
+static void tomoyo_set_group(struct tomoyo_io_buffer *head,
+ const char *category)
{
- int pos;
- const char *atmark1 = "";
- const char *atmark2 = "";
- const char *filename1;
- const char *filename2;
- const u8 perm = ptr->perm;
- u8 bit;
-
- filename1 = ptr->filename1->name;
- filename2 = ptr->filename2->name;
- for (bit = head->read_bit; bit < TOMOYO_MAX_DOUBLE_PATH_OPERATION;
- bit++) {
- const char *msg;
- if (!(perm & (1 << bit)))
- continue;
- msg = tomoyo_dp2keyword(bit);
- pos = head->read_avail;
- if (!tomoyo_io_printf(head, "allow_%s %s%s %s%s\n", msg,
- atmark1, filename1, atmark2, filename2))
- goto out;
+ if (head->type == TOMOYO_EXCEPTIONPOLICY) {
+ tomoyo_print_namespace(head);
+ tomoyo_io_printf(head, "acl_group %u ",
+ head->r.acl_group_index);
}
- head->read_bit = 0;
- return true;
- out:
- head->read_bit = bit;
- head->read_avail = pos;
- return false;
+ tomoyo_set_string(head, category);
}
/**
* tomoyo_print_entry - Print an ACL entry.
*
* @head: Pointer to "struct tomoyo_io_buffer".
- * @ptr: Pointer to an ACL entry.
+ * @acl: Pointer to an ACL entry.
*
* Returns true on success, false otherwise.
*/
static bool tomoyo_print_entry(struct tomoyo_io_buffer *head,
- struct tomoyo_acl_info *ptr)
+ struct tomoyo_acl_info *acl)
{
- const u8 acl_type = tomoyo_acl_type2(ptr);
+ const u8 acl_type = acl->type;
+ bool first = true;
+ u8 bit;
- if (acl_type & TOMOYO_ACL_DELETED)
+ if (head->r.print_cond_part)
+ goto print_cond_part;
+ if (acl->is_deleted)
return true;
- if (acl_type == TOMOYO_TYPE_SINGLE_PATH_ACL) {
- struct tomoyo_single_path_acl_record *acl
- = container_of(ptr,
- struct tomoyo_single_path_acl_record,
- head);
- return tomoyo_print_single_path_acl(head, acl);
- }
- if (acl_type == TOMOYO_TYPE_DOUBLE_PATH_ACL) {
- struct tomoyo_double_path_acl_record *acl
- = container_of(ptr,
- struct tomoyo_double_path_acl_record,
- head);
- return tomoyo_print_double_path_acl(head, acl);
- }
- BUG(); /* This must not happen. */
- return false;
-}
-
-/**
- * tomoyo_read_domain_policy - Read domain policy.
- *
- * @head: Pointer to "struct tomoyo_io_buffer".
- *
- * Returns 0.
- */
-static int tomoyo_read_domain_policy(struct tomoyo_io_buffer *head)
-{
- struct list_head *dpos;
- struct list_head *apos;
- bool done = true;
-
- if (head->read_eof)
- return 0;
- if (head->read_step == 0)
- head->read_step = 1;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_cookie(dpos, head->read_var1, &tomoyo_domain_list) {
- struct tomoyo_domain_info *domain;
- const char *quota_exceeded = "";
- const char *transition_failed = "";
- const char *ignore_global_allow_read = "";
- domain = list_entry(dpos, struct tomoyo_domain_info, list);
- if (head->read_step != 1)
- goto acl_loop;
- if (domain->is_deleted && !head->read_single_domain)
- continue;
- /* Print domainname and flags. */
- if (domain->quota_warned)
- quota_exceeded = "quota_exceeded\n";
- if (domain->flags & TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED)
- transition_failed = "transition_failed\n";
- if (domain->flags &
- TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ)
- ignore_global_allow_read
- = TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "\n";
- done = tomoyo_io_printf(head, "%s\n" TOMOYO_KEYWORD_USE_PROFILE
- "%u\n%s%s%s\n",
- domain->domainname->name,
- domain->profile, quota_exceeded,
- transition_failed,
- ignore_global_allow_read);
- if (!done)
- break;
- head->read_step = 2;
-acl_loop:
- if (head->read_step == 3)
- goto tail_mark;
- /* Print ACL entries in the domain. */
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_cookie(apos, head->read_var2,
- &domain->acl_info_list) {
- struct tomoyo_acl_info *ptr
- = list_entry(apos, struct tomoyo_acl_info,
- list);
- done = tomoyo_print_entry(head, ptr);
- if (!done)
- break;
+ if (!tomoyo_flush(head))
+ return false;
+ else if (acl_type == TOMOYO_TYPE_PATH_ACL) {
+ struct tomoyo_path_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u16 perm = ptr->perm;
+ for (bit = 0; bit < TOMOYO_MAX_PATH_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (head->r.print_transition_related_only &&
+ bit != TOMOYO_TYPE_EXECUTE)
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "file ");
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_path_keyword[bit]);
}
- up_read(&tomoyo_domain_acl_info_list_lock);
- if (!done)
- break;
- head->read_step = 3;
-tail_mark:
- done = tomoyo_io_printf(head, "\n");
- if (!done)
- break;
- head->read_step = 1;
- if (head->read_single_domain)
- break;
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
+ } else if (acl_type == TOMOYO_TYPE_MANUAL_TASK_ACL) {
+ struct tomoyo_task_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ tomoyo_set_group(head, "task ");
+ tomoyo_set_string(head, "manual_domain_transition ");
+ tomoyo_set_string(head, ptr->domainname->name);
+ } else if (head->r.print_transition_related_only) {
+ return true;
+ } else if (acl_type == TOMOYO_TYPE_PATH2_ACL) {
+ struct tomoyo_path2_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+ for (bit = 0; bit < TOMOYO_MAX_PATH2_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "file ");
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_mac_keywords
+ [tomoyo_pp2mac[bit]]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name1);
+ tomoyo_print_name_union(head, &ptr->name2);
+ } else if (acl_type == TOMOYO_TYPE_PATH_NUMBER_ACL) {
+ struct tomoyo_path_number_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+ for (bit = 0; bit < TOMOYO_MAX_PATH_NUMBER_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "file ");
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_mac_keywords
+ [tomoyo_pn2mac[bit]]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
+ tomoyo_print_number_union(head, &ptr->number);
+ } else if (acl_type == TOMOYO_TYPE_MKDEV_ACL) {
+ struct tomoyo_mkdev_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+ for (bit = 0; bit < TOMOYO_MAX_MKDEV_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "file ");
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_mac_keywords
+ [tomoyo_pnnn2mac[bit]]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
+ tomoyo_print_number_union(head, &ptr->mode);
+ tomoyo_print_number_union(head, &ptr->major);
+ tomoyo_print_number_union(head, &ptr->minor);
+ } else if (acl_type == TOMOYO_TYPE_INET_ACL) {
+ struct tomoyo_inet_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network inet ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_set_space(head);
+ if (ptr->address.group) {
+ tomoyo_set_string(head, "@");
+ tomoyo_set_string(head, ptr->address.group->group_name
+ ->name);
+ } else {
+ char buf[128];
+ tomoyo_print_ip(buf, sizeof(buf), &ptr->address);
+ tomoyo_io_printf(head, "%s", buf);
+ }
+ tomoyo_print_number_union(head, &ptr->port);
+ } else if (acl_type == TOMOYO_TYPE_UNIX_ACL) {
+ struct tomoyo_unix_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ const u8 perm = ptr->perm;
+
+ for (bit = 0; bit < TOMOYO_MAX_NETWORK_OPERATION; bit++) {
+ if (!(perm & (1 << bit)))
+ continue;
+ if (first) {
+ tomoyo_set_group(head, "network unix ");
+ tomoyo_set_string(head, tomoyo_proto_keyword
+ [ptr->protocol]);
+ tomoyo_set_space(head);
+ first = false;
+ } else {
+ tomoyo_set_slash(head);
+ }
+ tomoyo_set_string(head, tomoyo_socket_keyword[bit]);
+ }
+ if (first)
+ return true;
+ tomoyo_print_name_union(head, &ptr->name);
+ } else if (acl_type == TOMOYO_TYPE_MOUNT_ACL) {
+ struct tomoyo_mount_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+ tomoyo_set_group(head, "file mount");
+ tomoyo_print_name_union(head, &ptr->dev_name);
+ tomoyo_print_name_union(head, &ptr->dir_name);
+ tomoyo_print_name_union(head, &ptr->fs_type);
+ tomoyo_print_number_union(head, &ptr->flags);
+ } else if (acl_type == TOMOYO_TYPE_ENV_ACL) {
+ struct tomoyo_env_acl *ptr =
+ container_of(acl, typeof(*ptr), head);
+
+ tomoyo_set_group(head, "misc env ");
+ tomoyo_set_string(head, ptr->env->name);
}
- up_read(&tomoyo_domain_list_lock);
- head->read_eof = done;
- return 0;
+ if (acl->cond) {
+ head->r.print_cond_part = true;
+ head->r.cond_step = 0;
+ if (!tomoyo_flush(head))
+ return false;
+print_cond_part:
+ if (!tomoyo_print_condition(head, acl->cond))
+ return false;
+ head->r.print_cond_part = false;
+ } else {
+ tomoyo_set_lf(head);
+ }
+ return true;
}
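Note how several permission bits against the same object collapse onto a single line joined by '/', for example (paths and numbers illustrative):

  file read/write /var/www/cgi-bin/\*
  file mkchar /dev/null 0666 1 3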
/**
- * tomoyo_write_domain_profile - Assign profile for specified domain.
+ * tomoyo_read_domain2 - Read domain policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
+ * @list: Pointer to "struct list_head".
*
- * Returns 0 on success, -EINVAL otherwise.
- *
- * This is equivalent to doing
+ * Caller holds tomoyo_read_lock().
*
- * ( echo "select " $domainname; echo "use_profile " $profile ) |
- * /usr/lib/ccs/loadpolicy -d
+ * Returns true on success, false otherwise.
*/
-static int tomoyo_write_domain_profile(struct tomoyo_io_buffer *head)
+static bool tomoyo_read_domain2(struct tomoyo_io_buffer *head,
+ struct list_head *list)
{
- char *data = head->write_buf;
- char *cp = strchr(data, ' ');
- struct tomoyo_domain_info *domain;
- unsigned long profile;
-
- if (!cp)
- return -EINVAL;
- *cp = '\0';
- down_read(&tomoyo_domain_list_lock);
- domain = tomoyo_find_domain(cp + 1);
- up_read(&tomoyo_domain_list_lock);
- if (strict_strtoul(data, 10, &profile))
- return -EINVAL;
- if (domain && profile < TOMOYO_MAX_PROFILES
- && (tomoyo_profile_ptr[profile] || !tomoyo_policy_loaded))
- domain->profile = (u8) profile;
- return 0;
+ list_for_each_cookie(head->r.acl, list) {
+ struct tomoyo_acl_info *ptr =
+ list_entry(head->r.acl, typeof(*ptr), list);
+ if (!tomoyo_print_entry(head, ptr))
+ return false;
+ }
+ head->r.acl = NULL;
+ return true;
}
/**
- * tomoyo_read_domain_profile - Read only domainname and profile.
+ * tomoyo_read_domain - Read domain policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
- * Returns list of profile number and domainname pairs.
- *
- * This is equivalent to doing
- *
- * grep -A 1 '^<kernel>' /sys/kernel/security/tomoyo/domain_policy |
- * awk ' { if ( domainname == "" ) { if ( $1 == "<kernel>" )
- * domainname = $0; } else if ( $1 == "use_profile" ) {
- * print $2 " " domainname; domainname = ""; } } ; '
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
+static void tomoyo_read_domain(struct tomoyo_io_buffer *head)
{
- struct list_head *pos;
- bool done = true;
-
- if (head->read_eof)
- return 0;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_cookie(pos, head->read_var1, &tomoyo_domain_list) {
- struct tomoyo_domain_info *domain;
- domain = list_entry(pos, struct tomoyo_domain_info, list);
- if (domain->is_deleted)
- continue;
- done = tomoyo_io_printf(head, "%u %s\n", domain->profile,
- domain->domainname->name);
- if (!done)
- break;
+ if (head->r.eof)
+ return;
+ list_for_each_cookie(head->r.domain, &tomoyo_domain_list) {
+ struct tomoyo_domain_info *domain =
+ list_entry(head->r.domain, typeof(*domain), list);
+ switch (head->r.step) {
+ u8 i;
+ case 0:
+ if (domain->is_deleted &&
+ !head->r.print_this_domain_only)
+ continue;
+ /* Print domainname and flags. */
+ tomoyo_set_string(head, domain->domainname->name);
+ tomoyo_set_lf(head);
+ tomoyo_io_printf(head, "use_profile %u\n",
+ domain->profile);
+ tomoyo_io_printf(head, "use_group %u\n",
+ domain->group);
+ for (i = 0; i < TOMOYO_MAX_DOMAIN_INFO_FLAGS; i++)
+ if (domain->flags[i])
+ tomoyo_set_string(head, tomoyo_dif[i]);
+ head->r.step++;
+ tomoyo_set_lf(head);
+ /* fall through */
+ case 1:
+ if (!tomoyo_read_domain2(head, &domain->acl_info_list))
+ return;
+ head->r.step++;
+ if (!tomoyo_set_lf(head))
+ return;
+ /* fall through */
+ case 2:
+ head->r.step = 0;
+ if (head->r.print_this_domain_only)
+ goto done;
+ }
}
- up_read(&tomoyo_domain_list_lock);
- head->read_eof = done;
- return 0;
+ done:
+ head->r.eof = true;
}
/**
@@ -1683,11 +1627,7 @@ static int tomoyo_read_domain_profile(struct tomoyo_io_buffer *head)
*/
static int tomoyo_write_pid(struct tomoyo_io_buffer *head)
{
- unsigned long pid;
- /* No error check. */
- strict_strtoul(head->write_buf, 10, &pid);
- head->read_step = (int) pid;
- head->read_eof = false;
+ head->r.eof = false;
return 0;
}
@@ -1701,204 +1641,586 @@ static int tomoyo_write_pid(struct tomoyo_io_buffer *head)
 * The PID is specified by tomoyo_write_pid() so that the user can obtain it
 * using the read()/write() interface rather than the sysctl() interface.
*/
-static int tomoyo_read_pid(struct tomoyo_io_buffer *head)
+static void tomoyo_read_pid(struct tomoyo_io_buffer *head)
{
- if (head->read_avail == 0 && !head->read_eof) {
- const int pid = head->read_step;
- struct task_struct *p;
- struct tomoyo_domain_info *domain = NULL;
- read_lock(&tasklist_lock);
- p = find_task_by_vpid(pid);
- if (p)
- domain = tomoyo_real_domain(p);
- read_unlock(&tasklist_lock);
- if (domain)
- tomoyo_io_printf(head, "%d %u %s", pid, domain->profile,
- domain->domainname->name);
- head->read_eof = true;
+ char *buf = head->write_buf;
+ bool global_pid = false;
+ unsigned int pid;
+ struct task_struct *p;
+ struct tomoyo_domain_info *domain = NULL;
+
+ /* Accessing write_buf is safe because head->io_sem is held. */
+ if (!buf) {
+ head->r.eof = true;
+ return; /* Do nothing if open(O_RDONLY). */
}
- return 0;
+ if (head->r.w_pos || head->r.eof)
+ return;
+ head->r.eof = true;
+ if (tomoyo_str_starts(&buf, "global-pid "))
+ global_pid = true;
+ pid = (unsigned int) simple_strtoul(buf, NULL, 10);
+ rcu_read_lock();
+ if (global_pid)
+ p = find_task_by_pid_ns(pid, &init_pid_ns);
+ else
+ p = find_task_by_vpid(pid);
+ if (p)
+ domain = tomoyo_real_domain(p);
+ rcu_read_unlock();
+ if (!domain)
+ return;
+ tomoyo_io_printf(head, "%u %u ", pid, domain->profile);
+ tomoyo_set_string(head, domain->domainname->name);
}
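Because this handler reads back head->write_buf, the interface file has to be opened read/write and queried over the same descriptor: after writing "1" (or "global-pid 1"), a subsequent read() on that descriptor returns a line of the form

  1 0 <kernel> /sbin/init

that is, PID, profile number and domainname (values illustrative).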
+/* String table for domain transition control keywords. */
+static const char *tomoyo_transition_type[TOMOYO_MAX_TRANSITION_TYPE] = {
+ [TOMOYO_TRANSITION_CONTROL_NO_RESET] = "no_reset_domain ",
+ [TOMOYO_TRANSITION_CONTROL_RESET] = "reset_domain ",
+ [TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE] = "no_initialize_domain ",
+ [TOMOYO_TRANSITION_CONTROL_INITIALIZE] = "initialize_domain ",
+ [TOMOYO_TRANSITION_CONTROL_NO_KEEP] = "no_keep_domain ",
+ [TOMOYO_TRANSITION_CONTROL_KEEP] = "keep_domain ",
+};
+
+/* String table for grouping keywords. */
+static const char *tomoyo_group_name[TOMOYO_MAX_GROUP] = {
+ [TOMOYO_PATH_GROUP] = "path_group ",
+ [TOMOYO_NUMBER_GROUP] = "number_group ",
+ [TOMOYO_ADDRESS_GROUP] = "address_group ",
+};
+
/**
- * tomoyo_write_exception_policy - Write exception policy.
+ * tomoyo_write_exception - Write exception policy.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_write_exception_policy(struct tomoyo_io_buffer *head)
+static int tomoyo_write_exception(struct tomoyo_io_buffer *head)
{
- char *data = head->write_buf;
- bool is_delete = tomoyo_str_starts(&data, TOMOYO_KEYWORD_DELETE);
-
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_KEEP_DOMAIN))
- return tomoyo_write_domain_keeper_policy(data, false,
- is_delete);
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_NO_KEEP_DOMAIN))
- return tomoyo_write_domain_keeper_policy(data, true, is_delete);
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_INITIALIZE_DOMAIN))
- return tomoyo_write_domain_initializer_policy(data, false,
- is_delete);
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN))
- return tomoyo_write_domain_initializer_policy(data, true,
- is_delete);
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_ALIAS))
- return tomoyo_write_alias_policy(data, is_delete);
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_ALLOW_READ))
- return tomoyo_write_globally_readable_policy(data, is_delete);
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_FILE_PATTERN))
- return tomoyo_write_pattern_policy(data, is_delete);
- if (tomoyo_str_starts(&data, TOMOYO_KEYWORD_DENY_REWRITE))
- return tomoyo_write_no_rewrite_policy(data, is_delete);
+ const bool is_delete = head->w.is_delete;
+ struct tomoyo_acl_param param = {
+ .ns = head->w.ns,
+ .is_delete = is_delete,
+ .data = head->write_buf,
+ };
+ u8 i;
+ if (tomoyo_str_starts(&param.data, "aggregator "))
+ return tomoyo_write_aggregator(&param);
+ for (i = 0; i < TOMOYO_MAX_TRANSITION_TYPE; i++)
+ if (tomoyo_str_starts(&param.data, tomoyo_transition_type[i]))
+ return tomoyo_write_transition_control(&param, i);
+ for (i = 0; i < TOMOYO_MAX_GROUP; i++)
+ if (tomoyo_str_starts(&param.data, tomoyo_group_name[i]))
+ return tomoyo_write_group(&param, i);
+ if (tomoyo_str_starts(&param.data, "acl_group ")) {
+ unsigned int group;
+ char *data;
+ group = simple_strtoul(param.data, &data, 10);
+ if (group < TOMOYO_MAX_ACL_GROUPS && *data++ == ' ')
+ return tomoyo_write_domain2
+ (head->w.ns, &head->w.ns->acl_group[group],
+ data, is_delete);
+ }
return -EINVAL;
}
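Representative exception policy lines accepted here, one per branch above; the group names, numbers and paths are examples only:

  # echo 'initialize_domain /usr/sbin/sshd from any' > /sys/kernel/security/tomoyo/exception_policy
  # echo 'aggregator /usr/local/sbin/httpd /usr/sbin/httpd' > /sys/kernel/security/tomoyo/exception_policy
  # echo 'path_group WEB-CONTENTS /var/www/html/\*' > /sys/kernel/security/tomoyo/exception_policy
  # echo 'number_group LOCAL-PORTS 1024-65535' > /sys/kernel/security/tomoyo/exception_policy
  # echo 'acl_group 0 file read /etc/ld.so.cache' > /sys/kernel/security/tomoyo/exception_policy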
/**
- * tomoyo_read_exception_policy - Read exception policy.
+ * tomoyo_read_group - Read "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
*
* @head: Pointer to "struct tomoyo_io_buffer".
+ * @idx: Index number.
*
- * Returns 0 on success, -EINVAL otherwise.
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_read_exception_policy(struct tomoyo_io_buffer *head)
+static bool tomoyo_read_group(struct tomoyo_io_buffer *head, const int idx)
{
- if (!head->read_eof) {
- switch (head->read_step) {
- case 0:
- head->read_var2 = NULL;
- head->read_step = 1;
- case 1:
- if (!tomoyo_read_domain_keeper_policy(head))
- break;
- head->read_var2 = NULL;
- head->read_step = 2;
- case 2:
- if (!tomoyo_read_globally_readable_policy(head))
- break;
- head->read_var2 = NULL;
- head->read_step = 3;
- case 3:
- head->read_var2 = NULL;
- head->read_step = 4;
- case 4:
- if (!tomoyo_read_domain_initializer_policy(head))
- break;
- head->read_var2 = NULL;
- head->read_step = 5;
- case 5:
- if (!tomoyo_read_alias_policy(head))
- break;
- head->read_var2 = NULL;
- head->read_step = 6;
- case 6:
- head->read_var2 = NULL;
- head->read_step = 7;
- case 7:
- if (!tomoyo_read_file_pattern(head))
- break;
- head->read_var2 = NULL;
- head->read_step = 8;
- case 8:
- if (!tomoyo_read_no_rewrite_policy(head))
- break;
- head->read_var2 = NULL;
- head->read_step = 9;
- case 9:
- head->read_eof = true;
+ struct tomoyo_policy_namespace *ns =
+ container_of(head->r.ns, typeof(*ns), namespace_list);
+ struct list_head *list = &ns->group_list[idx];
+ list_for_each_cookie(head->r.group, list) {
+ struct tomoyo_group *group =
+ list_entry(head->r.group, typeof(*group), head.list);
+ list_for_each_cookie(head->r.acl, &group->member_list) {
+ struct tomoyo_acl_head *ptr =
+ list_entry(head->r.acl, typeof(*ptr), list);
+ if (ptr->is_deleted)
+ continue;
+ if (!tomoyo_flush(head))
+ return false;
+ tomoyo_print_namespace(head);
+ tomoyo_set_string(head, tomoyo_group_name[idx]);
+ tomoyo_set_string(head, group->group_name->name);
+ if (idx == TOMOYO_PATH_GROUP) {
+ tomoyo_set_space(head);
+ tomoyo_set_string(head, container_of
+ (ptr, struct tomoyo_path_group,
+ head)->member_name->name);
+ } else if (idx == TOMOYO_NUMBER_GROUP) {
+ tomoyo_print_number_union(head, &container_of
+ (ptr,
+ struct tomoyo_number_group,
+ head)->number);
+ } else if (idx == TOMOYO_ADDRESS_GROUP) {
+ char buffer[128];
+
+ struct tomoyo_address_group *member =
+ container_of(ptr, typeof(*member),
+ head);
+ tomoyo_print_ip(buffer, sizeof(buffer),
+ &member->address);
+ tomoyo_io_printf(head, " %s", buffer);
+ }
+ tomoyo_set_lf(head);
+ }
+ head->r.acl = NULL;
+ }
+ head->r.group = NULL;
+ return true;
+}
+
+/**
+ * tomoyo_read_policy - Read "struct tomoyo_..._entry" list.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @idx: Index number.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static bool tomoyo_read_policy(struct tomoyo_io_buffer *head, const int idx)
+{
+ struct tomoyo_policy_namespace *ns =
+ container_of(head->r.ns, typeof(*ns), namespace_list);
+ struct list_head *list = &ns->policy_list[idx];
+ list_for_each_cookie(head->r.acl, list) {
+ struct tomoyo_acl_head *acl =
+ container_of(head->r.acl, typeof(*acl), list);
+ if (acl->is_deleted)
+ continue;
+ if (!tomoyo_flush(head))
+ return false;
+ switch (idx) {
+ case TOMOYO_ID_TRANSITION_CONTROL:
+ {
+ struct tomoyo_transition_control *ptr =
+ container_of(acl, typeof(*ptr), head);
+ tomoyo_print_namespace(head);
+ tomoyo_set_string(head, tomoyo_transition_type
+ [ptr->type]);
+ tomoyo_set_string(head, ptr->program ?
+ ptr->program->name : "any");
+ tomoyo_set_string(head, " from ");
+ tomoyo_set_string(head, ptr->domainname ?
+ ptr->domainname->name :
+ "any");
+ }
+ break;
+ case TOMOYO_ID_AGGREGATOR:
+ {
+ struct tomoyo_aggregator *ptr =
+ container_of(acl, typeof(*ptr), head);
+ tomoyo_print_namespace(head);
+ tomoyo_set_string(head, "aggregator ");
+ tomoyo_set_string(head,
+ ptr->original_name->name);
+ tomoyo_set_space(head);
+ tomoyo_set_string(head,
+ ptr->aggregated_name->name);
+ }
break;
default:
- return -EINVAL;
+ continue;
}
+ tomoyo_set_lf(head);
}
- return 0;
+ head->r.acl = NULL;
+ return true;
}
-/* path to policy loader */
-static const char *tomoyo_loader = "/sbin/tomoyo-init";
+/**
+ * tomoyo_read_exception - Read exception policy.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static void tomoyo_read_exception(struct tomoyo_io_buffer *head)
+{
+ struct tomoyo_policy_namespace *ns =
+ container_of(head->r.ns, typeof(*ns), namespace_list);
+ if (head->r.eof)
+ return;
+ while (head->r.step < TOMOYO_MAX_POLICY &&
+ tomoyo_read_policy(head, head->r.step))
+ head->r.step++;
+ if (head->r.step < TOMOYO_MAX_POLICY)
+ return;
+ while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP &&
+ tomoyo_read_group(head, head->r.step - TOMOYO_MAX_POLICY))
+ head->r.step++;
+ if (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP)
+ return;
+ while (head->r.step < TOMOYO_MAX_POLICY + TOMOYO_MAX_GROUP
+ + TOMOYO_MAX_ACL_GROUPS) {
+ head->r.acl_group_index = head->r.step - TOMOYO_MAX_POLICY
+ - TOMOYO_MAX_GROUP;
+ if (!tomoyo_read_domain2(head, &ns->acl_group
+ [head->r.acl_group_index]))
+ return;
+ head->r.step++;
+ }
+ head->r.eof = true;
+}
+
+/* Wait queue for kernel -> userspace notification. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_query_wait);
+/* Wait queue for userspace -> kernel notification. */
+static DECLARE_WAIT_QUEUE_HEAD(tomoyo_answer_wait);
+
+/* Structure for query. */
+struct tomoyo_query {
+ struct list_head list;
+ struct tomoyo_domain_info *domain;
+ char *query;
+ size_t query_len;
+ unsigned int serial;
+ u8 timer;
+ u8 answer;
+ u8 retry;
+};
+
+/* The list for "struct tomoyo_query". */
+static LIST_HEAD(tomoyo_query_list);
+
+/* Lock for manipulating tomoyo_query_list. */
+static DEFINE_SPINLOCK(tomoyo_query_list_lock);
+
+/*
+ * Number of "struct file" referring /sys/kernel/security/tomoyo/query
+ * interface.
+ */
+static atomic_t tomoyo_query_observers = ATOMIC_INIT(0);
/**
- * tomoyo_policy_loader_exists - Check whether /sbin/tomoyo-init exists.
+ * tomoyo_truncate - Truncate a line.
+ *
+ * @str: String to truncate.
*
- * Returns true if /sbin/tomoyo-init exists, false otherwise.
+ * Returns length of truncated @str.
*/
-static bool tomoyo_policy_loader_exists(void)
+static int tomoyo_truncate(char *str)
{
- /*
- * Don't activate MAC if the policy loader doesn't exist.
- * If the initrd includes /sbin/init but real-root-dev has not
- * mounted on / yet, activating MAC will block the system since
- * policies are not loaded yet.
- * Thus, let do_execve() call this function everytime.
- */
- struct path path;
+ char *start = str;
+ while (*(unsigned char *) str > (unsigned char) ' ')
+ str++;
+ *str = '\0';
+ return strlen(start) + 1;
+}
- if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) {
- printk(KERN_INFO "Not activating Mandatory Access Control now "
- "since %s doesn't exist.\n", tomoyo_loader);
- return false;
+/**
+ * tomoyo_add_entry - Add an ACL to current thread's domain. Used by learning mode.
+ *
+ * @domain: Pointer to "struct tomoyo_domain_info".
+ * @header: Lines containing ACL.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_add_entry(struct tomoyo_domain_info *domain, char *header)
+{
+ char *buffer;
+ char *realpath = NULL;
+ char *argv0 = NULL;
+ char *symlink = NULL;
+ char *cp = strchr(header, '\n');
+ int len;
+ if (!cp)
+ return;
+ cp = strchr(cp + 1, '\n');
+ if (!cp)
+ return;
+ *cp++ = '\0';
+ len = strlen(cp) + 1;
+ /* strstr() will return NULL if ordering is wrong. */
+ if (*cp == 'f') {
+ argv0 = strstr(header, " argv[]={ \"");
+ if (argv0) {
+ argv0 += 10;
+ len += tomoyo_truncate(argv0) + 14;
+ }
+ realpath = strstr(header, " exec={ realpath=\"");
+ if (realpath) {
+ realpath += 8;
+ len += tomoyo_truncate(realpath) + 6;
+ }
+ symlink = strstr(header, " symlink.target=\"");
+ if (symlink)
+ len += tomoyo_truncate(symlink + 1) + 1;
}
- path_put(&path);
- return true;
+ buffer = kmalloc(len, GFP_NOFS);
+ if (!buffer)
+ return;
+ snprintf(buffer, len - 1, "%s", cp);
+ if (realpath)
+ tomoyo_addprintf(buffer, len, " exec.%s", realpath);
+ if (argv0)
+ tomoyo_addprintf(buffer, len, " exec.argv[0]=%s", argv0);
+ if (symlink)
+ tomoyo_addprintf(buffer, len, "%s", symlink);
+ tomoyo_normalize_line(buffer);
+ if (!tomoyo_write_domain2(domain->ns, &domain->acl_info_list, buffer,
+ false))
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+ kfree(buffer);
}
/**
- * tomoyo_load_policy - Run external policy loader to load policy.
+ * tomoyo_supervisor - Ask for the supervisor's decision.
*
- * @filename: The program about to start.
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @fmt: The printf()'s format string, followed by parameters.
*
- * This function checks whether @filename is /sbin/init , and if so
- * invoke /sbin/tomoyo-init and wait for the termination of /sbin/tomoyo-init
- * and then continues invocation of /sbin/init.
- * /sbin/tomoyo-init reads policy files in /etc/tomoyo/ directory and
- * writes to /sys/kernel/security/tomoyo/ interfaces.
+ * Returns 0 if the supervisor decided to permit the access request which
+ * violated the policy in enforcing mode, TOMOYO_RETRY_REQUEST if the
+ * supervisor decided to retry the access request which violated the policy in
+ * enforcing mode, 0 if it is not in enforcing mode, -EPERM otherwise.
+ */
+int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
+{
+ va_list args;
+ int error;
+ int len;
+ static unsigned int tomoyo_serial;
+ struct tomoyo_query entry = { };
+ bool quota_exceeded = false;
+ va_start(args, fmt);
+ len = vsnprintf((char *) &len, 1, fmt, args) + 1;
+ va_end(args);
+ /* Write /sys/kernel/security/tomoyo/audit. */
+ va_start(args, fmt);
+ tomoyo_write_log2(r, len, fmt, args);
+ va_end(args);
+ /* Nothing more to do if granted. */
+ if (r->granted)
+ return 0;
+ if (r->mode)
+ tomoyo_update_stat(r->mode);
+ switch (r->mode) {
+ case TOMOYO_CONFIG_ENFORCING:
+ error = -EPERM;
+ if (atomic_read(&tomoyo_query_observers))
+ break;
+ goto out;
+ case TOMOYO_CONFIG_LEARNING:
+ error = 0;
+ /* Check max_learning_entry parameter. */
+ if (tomoyo_domain_quota_is_ok(r))
+ break;
+ /* fall through */
+ default:
+ return 0;
+ }
+ /* Get message. */
+ va_start(args, fmt);
+ entry.query = tomoyo_init_log(r, len, fmt, args);
+ va_end(args);
+ if (!entry.query)
+ goto out;
+ entry.query_len = strlen(entry.query) + 1;
+ if (!error) {
+ tomoyo_add_entry(r->domain, entry.query);
+ goto out;
+ }
+ len = tomoyo_round2(entry.query_len);
+ entry.domain = r->domain;
+ spin_lock(&tomoyo_query_list_lock);
+ if (tomoyo_memory_quota[TOMOYO_MEMORY_QUERY] &&
+ tomoyo_memory_used[TOMOYO_MEMORY_QUERY] + len
+ >= tomoyo_memory_quota[TOMOYO_MEMORY_QUERY]) {
+ quota_exceeded = true;
+ } else {
+ entry.serial = tomoyo_serial++;
+ entry.retry = r->retry;
+ tomoyo_memory_used[TOMOYO_MEMORY_QUERY] += len;
+ list_add_tail(&entry.list, &tomoyo_query_list);
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ if (quota_exceeded)
+ goto out;
+ /* Give 10 seconds for supervisor's opinion. */
+ while (entry.timer < 10) {
+ wake_up_all(&tomoyo_query_wait);
+ if (wait_event_interruptible_timeout
+ (tomoyo_answer_wait, entry.answer ||
+ !atomic_read(&tomoyo_query_observers), HZ))
+ break;
+ else
+ entry.timer++;
+ }
+ spin_lock(&tomoyo_query_list_lock);
+ list_del(&entry.list);
+ tomoyo_memory_used[TOMOYO_MEMORY_QUERY] -= len;
+ spin_unlock(&tomoyo_query_list_lock);
+ switch (entry.answer) {
+ case 3: /* Asked to retry by administrator. */
+ error = TOMOYO_RETRY_REQUEST;
+ r->retry++;
+ break;
+ case 1:
+ /* Granted by administrator. */
+ error = 0;
+ break;
+ default:
+ /* Timed out or rejected by administrator. */
+ break;
+ }
+out:
+ kfree(entry.query);
+ return error;
+}
+
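The return-value contract above is easiest to see from the caller's side. Below is a minimal sketch, assuming a hypothetical tomoyo_example_evaluate() helper that fills r->granted and a placeholder format string; it is an illustration, not part of this patch. The caller simply loops while tomoyo_supervisor() asks for a retry and otherwise propagates 0 (allowed) or -EPERM (rejected).

/* Hypothetical caller sketch; tomoyo_example_evaluate() and the format are placeholders. */
static int tomoyo_example_check(struct tomoyo_request_info *r)
{
	int error;

	do {
		tomoyo_example_evaluate(r);	/* evaluates ACLs and sets r->granted */
		error = tomoyo_supervisor(r, "example %s\n", "request");
	} while (error == TOMOYO_RETRY_REQUEST);
	return error;	/* 0 = allowed, -EPERM = rejected */
}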
+/**
+ * tomoyo_find_domain_by_qid - Get domain by query id.
*
- * Returns nothing.
+ * @serial: Query ID assigned by tomoyo_supervisor().
+ *
+ * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
*/
-void tomoyo_load_policy(const char *filename)
+static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
+(unsigned int serial)
{
- char *argv[2];
- char *envp[3];
+ struct tomoyo_query *ptr;
+ struct tomoyo_domain_info *domain = NULL;
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each_entry(ptr, &tomoyo_query_list, list) {
+ if (ptr->serial != serial)
+ continue;
+ domain = ptr->domain;
+ break;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ return domain;
+}
+
+/**
+ * tomoyo_poll_query - poll() for /sys/kernel/security/tomoyo/query.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table".
+ *
+ * Returns POLLIN | POLLRDNORM when ready to read, 0 otherwise.
+ *
+ * Waits for access requests which violated policy in enforcing mode.
+ */
+static unsigned int tomoyo_poll_query(struct file *file, poll_table *wait)
+{
+ if (!list_empty(&tomoyo_query_list))
+ return POLLIN | POLLRDNORM;
+ poll_wait(file, &tomoyo_query_wait, wait);
+ if (!list_empty(&tomoyo_query_list))
+ return POLLIN | POLLRDNORM;
+ return 0;
+}
- if (tomoyo_policy_loaded)
+/**
+ * tomoyo_read_query - Read access requests which violated policy in enforcing mode.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ */
+static void tomoyo_read_query(struct tomoyo_io_buffer *head)
+{
+ struct list_head *tmp;
+ unsigned int pos = 0;
+ size_t len = 0;
+ char *buf;
+ if (head->r.w_pos)
return;
- /*
- * Check filename is /sbin/init or /sbin/tomoyo-start.
- * /sbin/tomoyo-start is a dummy filename in case where /sbin/init can't
- * be passed.
- * You can create /sbin/tomoyo-start by
- * "ln -s /bin/true /sbin/tomoyo-start".
- */
- if (strcmp(filename, "/sbin/init") &&
- strcmp(filename, "/sbin/tomoyo-start"))
+ if (head->read_buf) {
+ kfree(head->read_buf);
+ head->read_buf = NULL;
+ }
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each(tmp, &tomoyo_query_list) {
+ struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+ if (pos++ != head->r.query_index)
+ continue;
+ len = ptr->query_len;
+ break;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ if (!len) {
+ head->r.query_index = 0;
return;
- if (!tomoyo_policy_loader_exists())
+ }
+ buf = kzalloc(len + 32, GFP_NOFS);
+ if (!buf)
return;
+ pos = 0;
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each(tmp, &tomoyo_query_list) {
+ struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+ if (pos++ != head->r.query_index)
+ continue;
+ /*
+ * Some queries can be skipped because tomoyo_query_list
+ * can change, but that is acceptable.
+ */
+ if (len == ptr->query_len)
+ snprintf(buf, len + 31, "Q%u-%hu\n%s", ptr->serial,
+ ptr->retry, ptr->query);
+ break;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ if (buf[0]) {
+ head->read_buf = buf;
+ head->r.w[head->r.w_pos++] = buf;
+ head->r.query_index++;
+ } else {
+ kfree(buf);
+ }
+}
- printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
- tomoyo_loader);
- argv[0] = (char *) tomoyo_loader;
- argv[1] = NULL;
- envp[0] = "HOME=/";
- envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
- envp[2] = NULL;
- call_usermodehelper(argv[0], argv, envp, 1);
-
- printk(KERN_INFO "TOMOYO: 2.2.0 2009/04/01\n");
- printk(KERN_INFO "Mandatory Access Control activated.\n");
- tomoyo_policy_loaded = true;
- { /* Check all profiles currently assigned to domains are defined. */
- struct tomoyo_domain_info *domain;
- down_read(&tomoyo_domain_list_lock);
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
- const u8 profile = domain->profile;
- if (tomoyo_profile_ptr[profile])
- continue;
- panic("Profile %u (used by '%s') not defined.\n",
- profile, domain->domainname->name);
- }
- up_read(&tomoyo_domain_list_lock);
+/**
+ * tomoyo_write_answer - Write the supervisor's decision.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0 on success, -EINVAL otherwise.
+ */
+static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
+{
+ char *data = head->write_buf;
+ struct list_head *tmp;
+ unsigned int serial;
+ unsigned int answer;
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each(tmp, &tomoyo_query_list) {
+ struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+ ptr->timer = 0;
+ }
+ spin_unlock(&tomoyo_query_list_lock);
+ if (sscanf(data, "A%u=%u", &serial, &answer) != 2)
+ return -EINVAL;
+ spin_lock(&tomoyo_query_list_lock);
+ list_for_each(tmp, &tomoyo_query_list) {
+ struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
+ if (ptr->serial != serial)
+ continue;
+ ptr->answer = answer;
+ /* Remove from tomoyo_query_list. */
+ if (ptr->answer)
+ list_del_init(&ptr->list);
+ break;
}
+ spin_unlock(&tomoyo_query_list_lock);
+ return 0;
}
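Taken together, tomoyo_poll_query(), tomoyo_read_query() and tomoyo_write_answer() define a small userspace handshake: each read() yields one pending request formatted as "Q<serial>-<retry>\n<query>", and a newline-terminated "A<serial>=<answer>" written back grants (1), retries (3) or rejects (any other value) it. The sketch below is an illustration of such a daemon under the assumption that it runs as a registered manager (see tomoyo_write_control() later in this patch); it is not part of this patch and simply rejects every request.

/* Illustrative userspace supervisor loop (hypothetical; error handling trimmed). */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/security/tomoyo/query", O_RDWR);
	static char buf[8192];

	if (fd < 0)
		return 1;
	for (;;) {
		struct pollfd pfd = { .fd = fd, .events = POLLIN };
		unsigned int serial;
		char answer[32];
		ssize_t len;

		poll(&pfd, 1, -1);		/* wait until a query is pending */
		len = read(fd, buf, sizeof(buf) - 1);
		if (len <= 0)
			continue;
		buf[len] = '\0';
		if (sscanf(buf, "Q%u", &serial) != 1)
			continue;
		/* 1 = grant, 3 = retry; anything else (here 2) rejects. */
		snprintf(answer, sizeof(answer), "A%u=2\n", serial);
		write(fd, answer, strlen(answer));
	}
}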
/**
@@ -1908,33 +2230,109 @@ void tomoyo_load_policy(const char *filename)
*
* Returns version information.
*/
-static int tomoyo_read_version(struct tomoyo_io_buffer *head)
+static void tomoyo_read_version(struct tomoyo_io_buffer *head)
{
- if (!head->read_eof) {
- tomoyo_io_printf(head, "2.2.0");
- head->read_eof = true;
+ if (!head->r.eof) {
+ tomoyo_io_printf(head, "2.5.0");
+ head->r.eof = true;
}
- return 0;
}
+/* String table for /sys/kernel/security/tomoyo/stat interface. */
+static const char * const tomoyo_policy_headers[TOMOYO_MAX_POLICY_STAT] = {
+ [TOMOYO_STAT_POLICY_UPDATES] = "update:",
+ [TOMOYO_STAT_POLICY_LEARNING] = "violation in learning mode:",
+ [TOMOYO_STAT_POLICY_PERMISSIVE] = "violation in permissive mode:",
+ [TOMOYO_STAT_POLICY_ENFORCING] = "violation in enforcing mode:",
+};
+
+/* String table for /sys/kernel/security/tomoyo/stat interface. */
+static const char * const tomoyo_memory_headers[TOMOYO_MAX_MEMORY_STAT] = {
+ [TOMOYO_MEMORY_POLICY] = "policy:",
+ [TOMOYO_MEMORY_AUDIT] = "audit log:",
+ [TOMOYO_MEMORY_QUERY] = "query message:",
+};
+
+/* Timestamp counter for last updated. */
+static unsigned int tomoyo_stat_updated[TOMOYO_MAX_POLICY_STAT];
+/* Counter for number of updates. */
+static unsigned int tomoyo_stat_modified[TOMOYO_MAX_POLICY_STAT];
+
/**
- * tomoyo_read_self_domain - Get the current process's domainname.
+ * tomoyo_update_stat - Update statistic counters.
+ *
+ * @index: Index for policy type.
+ *
+ * Returns nothing.
+ */
+void tomoyo_update_stat(const u8 index)
+{
+ struct timeval tv;
+ do_gettimeofday(&tv);
+ /*
+ * I don't use atomic operations because a race condition here is not fatal.
+ */
+ tomoyo_stat_updated[index]++;
+ tomoyo_stat_modified[index] = tv.tv_sec;
+}
+
+/**
+ * tomoyo_read_stat - Read statistic data.
*
* @head: Pointer to "struct tomoyo_io_buffer".
*
- * Returns the current process's domainname.
+ * Returns nothing.
*/
-static int tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
+static void tomoyo_read_stat(struct tomoyo_io_buffer *head)
{
- if (!head->read_eof) {
- /*
- * tomoyo_domain()->domainname != NULL
- * because every process belongs to a domain and
- * the domain's name cannot be NULL.
- */
- tomoyo_io_printf(head, "%s", tomoyo_domain()->domainname->name);
- head->read_eof = true;
+ u8 i;
+ unsigned int total = 0;
+ if (head->r.eof)
+ return;
+ for (i = 0; i < TOMOYO_MAX_POLICY_STAT; i++) {
+ tomoyo_io_printf(head, "Policy %-30s %10u",
+ tomoyo_policy_headers[i],
+ tomoyo_stat_updated[i]);
+ if (tomoyo_stat_modified[i]) {
+ struct tomoyo_time stamp;
+ tomoyo_convert_time(tomoyo_stat_modified[i], &stamp);
+ tomoyo_io_printf(head, " (Last: %04u/%02u/%02u "
+ "%02u:%02u:%02u)",
+ stamp.year, stamp.month, stamp.day,
+ stamp.hour, stamp.min, stamp.sec);
+ }
+ tomoyo_set_lf(head);
+ }
+ for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++) {
+ unsigned int used = tomoyo_memory_used[i];
+ total += used;
+ tomoyo_io_printf(head, "Memory used by %-22s %10u",
+ tomoyo_memory_headers[i], used);
+ used = tomoyo_memory_quota[i];
+ if (used)
+ tomoyo_io_printf(head, " (Quota: %10u)", used);
+ tomoyo_set_lf(head);
}
+ tomoyo_io_printf(head, "Total memory used: %10u\n",
+ total);
+ head->r.eof = true;
+}
+
+/**
+ * tomoyo_write_stat - Set memory quota.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns 0.
+ */
+static int tomoyo_write_stat(struct tomoyo_io_buffer *head)
+{
+ char *data = head->write_buf;
+ u8 i;
+ if (tomoyo_str_starts(&data, "Memory used by "))
+ for (i = 0; i < TOMOYO_MAX_MEMORY_STAT; i++)
+ if (tomoyo_str_starts(&data, tomoyo_memory_headers[i]))
+ sscanf(data, "%u", &tomoyo_memory_quota[i]);
return 0;
}
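Since tomoyo_write_stat() only parses lines of the form "Memory used by <header> <limit>", setting a quota from userspace is a one-line write. A minimal sketch follows, assuming the writer is a registered manager and using 1048576 bytes purely as an example value.

/* Illustrative sketch: cap query-message memory (1048576 is an example value). */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static const char line[] = "Memory used by query message: 1048576\n";
	int fd = open("/sys/kernel/security/tomoyo/stat", O_WRONLY);

	if (fd < 0)
		return 1;
	/* The line must end with '\n' so that the write handler is dispatched. */
	if (write(fd, line, strlen(line)) != (ssize_t) strlen(line))
		return 1;
	return close(fd);
}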
@@ -1944,34 +2342,31 @@ static int tomoyo_read_self_domain(struct tomoyo_io_buffer *head)
* @type: Type of interface.
* @file: Pointer to "struct file".
*
- * Associates policy handler and returns 0 on success, -ENOMEM otherwise.
+ * Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_open_control(const u8 type, struct file *file)
+int tomoyo_open_control(const u8 type, struct file *file)
{
- struct tomoyo_io_buffer *head = tomoyo_alloc(sizeof(*head));
+ struct tomoyo_io_buffer *head = kzalloc(sizeof(*head), GFP_NOFS);
if (!head)
return -ENOMEM;
mutex_init(&head->io_sem);
+ head->type = type;
switch (type) {
case TOMOYO_DOMAINPOLICY:
/* /sys/kernel/security/tomoyo/domain_policy */
- head->write = tomoyo_write_domain_policy;
- head->read = tomoyo_read_domain_policy;
+ head->write = tomoyo_write_domain;
+ head->read = tomoyo_read_domain;
break;
case TOMOYO_EXCEPTIONPOLICY:
/* /sys/kernel/security/tomoyo/exception_policy */
- head->write = tomoyo_write_exception_policy;
- head->read = tomoyo_read_exception_policy;
+ head->write = tomoyo_write_exception;
+ head->read = tomoyo_read_exception;
break;
- case TOMOYO_SELFDOMAIN:
- /* /sys/kernel/security/tomoyo/self_domain */
- head->read = tomoyo_read_self_domain;
- break;
- case TOMOYO_DOMAIN_STATUS:
- /* /sys/kernel/security/tomoyo/.domain_status */
- head->write = tomoyo_write_domain_profile;
- head->read = tomoyo_read_domain_profile;
+ case TOMOYO_AUDIT:
+ /* /sys/kernel/security/tomoyo/audit */
+ head->poll = tomoyo_poll_log;
+ head->read = tomoyo_read_log;
break;
case TOMOYO_PROCESS_STATUS:
/* /sys/kernel/security/tomoyo/.process_status */
@@ -1983,21 +2378,26 @@ static int tomoyo_open_control(const u8 type, struct file *file)
head->read = tomoyo_read_version;
head->readbuf_size = 128;
break;
- case TOMOYO_MEMINFO:
- /* /sys/kernel/security/tomoyo/meminfo */
- head->write = tomoyo_write_memory_quota;
- head->read = tomoyo_read_memory_counter;
- head->readbuf_size = 512;
+ case TOMOYO_STAT:
+ /* /sys/kernel/security/tomoyo/stat */
+ head->write = tomoyo_write_stat;
+ head->read = tomoyo_read_stat;
+ head->readbuf_size = 1024;
break;
case TOMOYO_PROFILE:
/* /sys/kernel/security/tomoyo/profile */
head->write = tomoyo_write_profile;
head->read = tomoyo_read_profile;
break;
+ case TOMOYO_QUERY: /* /sys/kernel/security/tomoyo/query */
+ head->poll = tomoyo_poll_query;
+ head->write = tomoyo_write_answer;
+ head->read = tomoyo_read_query;
+ break;
case TOMOYO_MANAGER:
/* /sys/kernel/security/tomoyo/manager */
- head->write = tomoyo_write_manager_policy;
- head->read = tomoyo_read_manager_policy;
+ head->write = tomoyo_write_manager;
+ head->read = tomoyo_read_manager;
break;
}
if (!(file->f_mode & FMODE_READ)) {
@@ -2006,12 +2406,14 @@ static int tomoyo_open_control(const u8 type, struct file *file)
* for reading.
*/
head->read = NULL;
- } else {
+ head->poll = NULL;
+ } else if (!head->poll) {
+ /* Don't allocate read_buf for poll() access. */
if (!head->readbuf_size)
head->readbuf_size = 4096 * 2;
- head->read_buf = tomoyo_alloc(head->readbuf_size);
+ head->read_buf = kzalloc(head->readbuf_size, GFP_NOFS);
if (!head->read_buf) {
- tomoyo_free(head);
+ kfree(head);
return -ENOMEM;
}
}
@@ -2023,117 +2425,254 @@ static int tomoyo_open_control(const u8 type, struct file *file)
head->write = NULL;
} else if (head->write) {
head->writebuf_size = 4096 * 2;
- head->write_buf = tomoyo_alloc(head->writebuf_size);
+ head->write_buf = kzalloc(head->writebuf_size, GFP_NOFS);
if (!head->write_buf) {
- tomoyo_free(head->read_buf);
- tomoyo_free(head);
+ kfree(head->read_buf);
+ kfree(head);
return -ENOMEM;
}
}
- file->private_data = head;
/*
- * Call the handler now if the file is
- * /sys/kernel/security/tomoyo/self_domain
- * so that the user can use
- * cat < /sys/kernel/security/tomoyo/self_domain"
- * to know the current process's domainname.
+ * If the file is /sys/kernel/security/tomoyo/query , increment the
+ * observer counter.
+ * The observer counter is used by tomoyo_supervisor() to see if
+ * there is some process monitoring /sys/kernel/security/tomoyo/query.
*/
- if (type == TOMOYO_SELFDOMAIN)
- tomoyo_read_control(file, NULL, 0);
+ if (type == TOMOYO_QUERY)
+ atomic_inc(&tomoyo_query_observers);
+ file->private_data = head;
+ tomoyo_notify_gc(head, true);
return 0;
}
/**
+ * tomoyo_poll_control - poll() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". Maybe NULL.
+ *
+ * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
+ * POLLOUT | POLLWRNORM otherwise.
+ */
+unsigned int tomoyo_poll_control(struct file *file, poll_table *wait)
+{
+ struct tomoyo_io_buffer *head = file->private_data;
+ if (head->poll)
+ return head->poll(file, wait) | POLLOUT | POLLWRNORM;
+ return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
+}
+
+/**
+ * tomoyo_set_namespace_cursor - Set namespace to read.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_set_namespace_cursor(struct tomoyo_io_buffer *head)
+{
+ struct list_head *ns;
+ if (head->type != TOMOYO_EXCEPTIONPOLICY &&
+ head->type != TOMOYO_PROFILE)
+ return;
+ /*
+ * If this is the first read, or the previous namespace has been fully
+ * read and more namespaces remain, update the namespace cursor.
+ */
+ ns = head->r.ns;
+ if (!ns || (head->r.eof && ns->next != &tomoyo_namespace_list)) {
+ /* Clearing is OK because tomoyo_flush() returned true. */
+ memset(&head->r, 0, sizeof(head->r));
+ head->r.ns = ns ? ns->next : tomoyo_namespace_list.next;
+ }
+}
+
+/**
+ * tomoyo_has_more_namespace - Check for unread namespaces.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ *
+ * Returns true if we have more entries to print, false otherwise.
+ */
+static inline bool tomoyo_has_more_namespace(struct tomoyo_io_buffer *head)
+{
+ return (head->type == TOMOYO_EXCEPTIONPOLICY ||
+ head->type == TOMOYO_PROFILE) && head->r.eof &&
+ head->r.ns->next != &tomoyo_namespace_list;
+}
+
+/**
* tomoyo_read_control - read() for /sys/kernel/security/tomoyo/ interface.
*
- * @file: Pointer to "struct file".
+ * @head: Pointer to "struct tomoyo_io_buffer".
 * @buffer: Pointer to buffer to write to.
* @buffer_len: Size of @buffer.
*
* Returns bytes read on success, negative value otherwise.
*/
-static int tomoyo_read_control(struct file *file, char __user *buffer,
- const int buffer_len)
+ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
+ const int buffer_len)
{
- int len = 0;
- struct tomoyo_io_buffer *head = file->private_data;
- char *cp;
+ int len;
+ int idx;
if (!head->read)
return -ENOSYS;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
- /* Call the policy handler. */
- len = head->read(head);
- if (len < 0)
- goto out;
- /* Write to buffer. */
- len = head->read_avail;
- if (len > buffer_len)
- len = buffer_len;
- if (!len)
- goto out;
- /* head->read_buf changes by some functions. */
- cp = head->read_buf;
- if (copy_to_user(buffer, cp, len)) {
- len = -EFAULT;
- goto out;
- }
- head->read_avail -= len;
- memmove(cp, cp + len, head->read_avail);
- out:
+ head->read_user_buf = buffer;
+ head->read_user_buf_avail = buffer_len;
+ idx = tomoyo_read_lock();
+ if (tomoyo_flush(head))
+ /* Call the policy handler. */
+ do {
+ tomoyo_set_namespace_cursor(head);
+ head->read(head);
+ } while (tomoyo_flush(head) &&
+ tomoyo_has_more_namespace(head));
+ tomoyo_read_unlock(idx);
+ len = head->read_user_buf - buffer;
mutex_unlock(&head->io_sem);
return len;
}
/**
+ * tomoyo_parse_policy - Parse a policy line.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @line: Line to parse.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_parse_policy(struct tomoyo_io_buffer *head, char *line)
+{
+ /* Delete request? */
+ head->w.is_delete = !strncmp(line, "delete ", 7);
+ if (head->w.is_delete)
+ memmove(line, line + 7, strlen(line + 7) + 1);
+ /* Selecting namespace to update. */
+ if (head->type == TOMOYO_EXCEPTIONPOLICY ||
+ head->type == TOMOYO_PROFILE) {
+ if (*line == '<') {
+ char *cp = strchr(line, ' ');
+ if (cp) {
+ *cp++ = '\0';
+ head->w.ns = tomoyo_assign_namespace(line);
+ memmove(line, cp, strlen(cp) + 1);
+ } else
+ head->w.ns = NULL;
+ } else
+ head->w.ns = &tomoyo_kernel_namespace;
+ /* Don't allow updating if namespace is invalid. */
+ if (!head->w.ns)
+ return -ENOENT;
+ }
+ /* Do the update. */
+ return head->write(head);
+}
+
+/**
* tomoyo_write_control - write() for /sys/kernel/security/tomoyo/ interface.
*
- * @file: Pointer to "struct file".
+ * @head: Pointer to "struct tomoyo_io_buffer".
* @buffer: Pointer to buffer to read from.
* @buffer_len: Size of @buffer.
*
* Returns @buffer_len on success, negative value otherwise.
*/
-static int tomoyo_write_control(struct file *file, const char __user *buffer,
- const int buffer_len)
+ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+ const char __user *buffer, const int buffer_len)
{
- struct tomoyo_io_buffer *head = file->private_data;
int error = buffer_len;
- int avail_len = buffer_len;
+ size_t avail_len = buffer_len;
char *cp0 = head->write_buf;
-
+ int idx;
if (!head->write)
return -ENOSYS;
if (!access_ok(VERIFY_READ, buffer, buffer_len))
return -EFAULT;
- /* Don't allow updating policies by non manager programs. */
- if (head->write != tomoyo_write_pid &&
- head->write != tomoyo_write_domain_policy &&
- !tomoyo_is_policy_manager())
- return -EPERM;
if (mutex_lock_interruptible(&head->io_sem))
return -EINTR;
+ head->read_user_buf_avail = 0;
+ idx = tomoyo_read_lock();
/* Read a line and dispatch it to the policy handler. */
while (avail_len > 0) {
char c;
- if (head->write_avail >= head->writebuf_size - 1) {
- error = -ENOMEM;
- break;
- } else if (get_user(c, buffer)) {
+ if (head->w.avail >= head->writebuf_size - 1) {
+ const int len = head->writebuf_size * 2;
+ char *cp = kzalloc(len, GFP_NOFS);
+ if (!cp) {
+ error = -ENOMEM;
+ break;
+ }
+ memmove(cp, cp0, head->w.avail);
+ kfree(cp0);
+ head->write_buf = cp;
+ cp0 = cp;
+ head->writebuf_size = len;
+ }
+ if (get_user(c, buffer)) {
error = -EFAULT;
break;
}
buffer++;
avail_len--;
- cp0[head->write_avail++] = c;
+ cp0[head->w.avail++] = c;
if (c != '\n')
continue;
- cp0[head->write_avail - 1] = '\0';
- head->write_avail = 0;
+ cp0[head->w.avail - 1] = '\0';
+ head->w.avail = 0;
tomoyo_normalize_line(cp0);
- head->write(head);
+ if (!strcmp(cp0, "reset")) {
+ head->w.ns = &tomoyo_kernel_namespace;
+ head->w.domain = NULL;
+ memset(&head->r, 0, sizeof(head->r));
+ continue;
+ }
+ /* Don't allow updating policies by non manager programs. */
+ switch (head->type) {
+ case TOMOYO_PROCESS_STATUS:
+ /* This does not write anything. */
+ break;
+ case TOMOYO_DOMAINPOLICY:
+ if (tomoyo_select_domain(head, cp0))
+ continue;
+ /* fall through */
+ case TOMOYO_EXCEPTIONPOLICY:
+ if (!strcmp(cp0, "select transition_only")) {
+ head->r.print_transition_related_only = true;
+ continue;
+ }
+ /* fall through */
+ default:
+ if (!tomoyo_manager()) {
+ error = -EPERM;
+ goto out;
+ }
+ }
+ switch (tomoyo_parse_policy(head, cp0)) {
+ case -EPERM:
+ error = -EPERM;
+ goto out;
+ case 0:
+ switch (head->type) {
+ case TOMOYO_DOMAINPOLICY:
+ case TOMOYO_EXCEPTIONPOLICY:
+ case TOMOYO_STAT:
+ case TOMOYO_PROFILE:
+ case TOMOYO_MANAGER:
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+ break;
+ default:
+ break;
+ }
+ break;
+ }
}
+out:
+ tomoyo_read_unlock(idx);
mutex_unlock(&head->io_sem);
return error;
}
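As the loop above shows, every control file speaks a newline-delimited protocol: tomoyo_write_control() buffers bytes until '\n', normalizes the line, handles the special "reset" and "select transition_only" commands, and otherwise requires the writer to be a registered manager before handing the line (optionally prefixed with "delete ") to tomoyo_parse_policy(). The sketch below illustrates that contract from userspace using only the commands visible in this hunk; it is an illustration, not part of this patch.

/* Illustrative sketch of the newline-delimited control protocol (hypothetical). */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>

static int tomoyo_send_line(int fd, const char *line)
{
	/* Each command must be terminated by '\n' to be dispatched. */
	return write(fd, line, strlen(line)) == (ssize_t) strlen(line) ? 0 : -1;
}

int main(void)
{
	int fd = open("/sys/kernel/security/tomoyo/exception_policy", O_RDWR);

	if (fd < 0)
		return 1;
	tomoyo_send_line(fd, "reset\n");			/* clear parser state */
	tomoyo_send_line(fd, "select transition_only\n");	/* filter later reads */
	return close(fd);
}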
@@ -2141,178 +2680,112 @@ static int tomoyo_write_control(struct file *file, const char __user *buffer,
/**
* tomoyo_close_control - close() for /sys/kernel/security/tomoyo/ interface.
*
- * @file: Pointer to "struct file".
- *
- * Releases memory and returns 0.
+ * @head: Pointer to "struct tomoyo_io_buffer".
*/
-static int tomoyo_close_control(struct file *file)
+void tomoyo_close_control(struct tomoyo_io_buffer *head)
{
- struct tomoyo_io_buffer *head = file->private_data;
-
- /* Release memory used for policy I/O. */
- tomoyo_free(head->read_buf);
- head->read_buf = NULL;
- tomoyo_free(head->write_buf);
- head->write_buf = NULL;
- tomoyo_free(head);
- head = NULL;
- file->private_data = NULL;
- return 0;
+ /*
+ * If the file is /sys/kernel/security/tomoyo/query , decrement the
+ * observer counter.
+ */
+ if (head->type == TOMOYO_QUERY &&
+ atomic_dec_and_test(&tomoyo_query_observers))
+ wake_up_all(&tomoyo_answer_wait);
+ tomoyo_notify_gc(head, false);
}
/**
- * tomoyo_alloc_acl_element - Allocate permanent memory for ACL entry.
- *
- * @acl_type: Type of ACL entry.
- *
- * Returns pointer to the ACL entry on success, NULL otherwise.
+ * tomoyo_check_profile - Check all profiles currently assigned to domains are defined.
*/
-void *tomoyo_alloc_acl_element(const u8 acl_type)
+void tomoyo_check_profile(void)
{
- int len;
- struct tomoyo_acl_info *ptr;
-
- switch (acl_type) {
- case TOMOYO_TYPE_SINGLE_PATH_ACL:
- len = sizeof(struct tomoyo_single_path_acl_record);
- break;
- case TOMOYO_TYPE_DOUBLE_PATH_ACL:
- len = sizeof(struct tomoyo_double_path_acl_record);
- break;
- default:
- return NULL;
+ struct tomoyo_domain_info *domain;
+ const int idx = tomoyo_read_lock();
+ tomoyo_policy_loaded = true;
+ printk(KERN_INFO "TOMOYO: 2.5.0\n");
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ const u8 profile = domain->profile;
+ const struct tomoyo_policy_namespace *ns = domain->ns;
+ if (ns->profile_version != 20110903)
+ printk(KERN_ERR
+ "Profile version %u is not supported.\n",
+ ns->profile_version);
+ else if (!ns->profile_ptr[profile])
+ printk(KERN_ERR
+ "Profile %u (used by '%s') is not defined.\n",
+ profile, domain->domainname->name);
+ else
+ continue;
+ printk(KERN_ERR
+ "Userland tools for TOMOYO 2.5 must be installed and "
+ "policy must be initialized.\n");
+ printk(KERN_ERR "Please see http://tomoyo.sourceforge.jp/2.5/ "
+ "for more information.\n");
+ panic("STOP!");
}
- ptr = tomoyo_alloc_element(len);
- if (!ptr)
- return NULL;
- ptr->type = acl_type;
- return ptr;
-}
-
-/**
- * tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
- *
- * @inode: Pointer to "struct inode".
- * @file: Pointer to "struct file".
- *
- * Returns 0 on success, negative value otherwise.
- */
-static int tomoyo_open(struct inode *inode, struct file *file)
-{
- const int key = ((u8 *) file->f_path.dentry->d_inode->i_private)
- - ((u8 *) NULL);
- return tomoyo_open_control(key, file);
-}
-
-/**
- * tomoyo_release - close() for /sys/kernel/security/tomoyo/ interface.
- *
- * @inode: Pointer to "struct inode".
- * @file: Pointer to "struct file".
- *
- * Returns 0 on success, negative value otherwise.
- */
-static int tomoyo_release(struct inode *inode, struct file *file)
-{
- return tomoyo_close_control(file);
-}
-
-/**
- * tomoyo_read - read() for /sys/kernel/security/tomoyo/ interface.
- *
- * @file: Pointer to "struct file".
- * @buf: Pointer to buffer.
- * @count: Size of @buf.
- * @ppos: Unused.
- *
- * Returns bytes read on success, negative value otherwise.
- */
-static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count,
- loff_t *ppos)
-{
- return tomoyo_read_control(file, buf, count);
-}
-
-/**
- * tomoyo_write - write() for /sys/kernel/security/tomoyo/ interface.
- *
- * @file: Pointer to "struct file".
- * @buf: Pointer to buffer.
- * @count: Size of @buf.
- * @ppos: Unused.
- *
- * Returns @count on success, negative value otherwise.
- */
-static ssize_t tomoyo_write(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
-{
- return tomoyo_write_control(file, buf, count);
+ tomoyo_read_unlock(idx);
+ printk(KERN_INFO "Mandatory Access Control activated.\n");
}
-/*
- * tomoyo_operations is a "struct file_operations" which is used for handling
- * /sys/kernel/security/tomoyo/ interface.
- *
- * Some files under /sys/kernel/security/tomoyo/ directory accept open(O_RDWR).
- * See tomoyo_io_buffer for internals.
- */
-static const struct file_operations tomoyo_operations = {
- .open = tomoyo_open,
- .release = tomoyo_release,
- .read = tomoyo_read,
- .write = tomoyo_write,
-};
-
/**
- * tomoyo_create_entry - Create interface files under /sys/kernel/security/tomoyo/ directory.
- *
- * @name: The name of the interface file.
- * @mode: The permission of the interface file.
- * @parent: The parent directory.
- * @key: Type of interface.
+ * tomoyo_load_builtin_policy - Load built-in policy.
*
* Returns nothing.
*/
-static void __init tomoyo_create_entry(const char *name, const mode_t mode,
- struct dentry *parent, const u8 key)
+void __init tomoyo_load_builtin_policy(void)
{
- securityfs_create_file(name, mode, parent, ((u8 *) NULL) + key,
- &tomoyo_operations);
-}
-
-/**
- * tomoyo_initerface_init - Initialize /sys/kernel/security/tomoyo/ interface.
- *
- * Returns 0.
- */
-static int __init tomoyo_initerface_init(void)
-{
- struct dentry *tomoyo_dir;
-
- /* Don't create securityfs entries unless registered. */
- if (current_cred()->security != &tomoyo_kernel_domain)
- return 0;
-
- tomoyo_dir = securityfs_create_dir("tomoyo", NULL);
- tomoyo_create_entry("domain_policy", 0600, tomoyo_dir,
- TOMOYO_DOMAINPOLICY);
- tomoyo_create_entry("exception_policy", 0600, tomoyo_dir,
- TOMOYO_EXCEPTIONPOLICY);
- tomoyo_create_entry("self_domain", 0400, tomoyo_dir,
- TOMOYO_SELFDOMAIN);
- tomoyo_create_entry(".domain_status", 0600, tomoyo_dir,
- TOMOYO_DOMAIN_STATUS);
- tomoyo_create_entry(".process_status", 0600, tomoyo_dir,
- TOMOYO_PROCESS_STATUS);
- tomoyo_create_entry("meminfo", 0600, tomoyo_dir,
- TOMOYO_MEMINFO);
- tomoyo_create_entry("profile", 0600, tomoyo_dir,
- TOMOYO_PROFILE);
- tomoyo_create_entry("manager", 0600, tomoyo_dir,
- TOMOYO_MANAGER);
- tomoyo_create_entry("version", 0400, tomoyo_dir,
- TOMOYO_VERSION);
- return 0;
+ /*
+ * This include file is manually created and contains built-in policy
+ * named "tomoyo_builtin_profile", "tomoyo_builtin_exception_policy",
+ * "tomoyo_builtin_domain_policy", "tomoyo_builtin_manager",
+ * "tomoyo_builtin_stat" in the form of "static char [] __initdata".
+ */
+#include "builtin-policy.h"
+ u8 i;
+ const int idx = tomoyo_read_lock();
+ for (i = 0; i < 5; i++) {
+ struct tomoyo_io_buffer head = { };
+ char *start = "";
+ switch (i) {
+ case 0:
+ start = tomoyo_builtin_profile;
+ head.type = TOMOYO_PROFILE;
+ head.write = tomoyo_write_profile;
+ break;
+ case 1:
+ start = tomoyo_builtin_exception_policy;
+ head.type = TOMOYO_EXCEPTIONPOLICY;
+ head.write = tomoyo_write_exception;
+ break;
+ case 2:
+ start = tomoyo_builtin_domain_policy;
+ head.type = TOMOYO_DOMAINPOLICY;
+ head.write = tomoyo_write_domain;
+ break;
+ case 3:
+ start = tomoyo_builtin_manager;
+ head.type = TOMOYO_MANAGER;
+ head.write = tomoyo_write_manager;
+ break;
+ case 4:
+ start = tomoyo_builtin_stat;
+ head.type = TOMOYO_STAT;
+ head.write = tomoyo_write_stat;
+ break;
+ }
+ while (1) {
+ char *end = strchr(start, '\n');
+ if (!end)
+ break;
+ *end = '\0';
+ tomoyo_normalize_line(start);
+ head.write_buf = start;
+ tomoyo_parse_policy(&head, start);
+ start = end + 1;
+ }
+ }
+ tomoyo_read_unlock(idx);
+#ifdef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+ tomoyo_check_profile();
+#endif
}
-
-fs_initcall(tomoyo_initerface_init);
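For reference, the comment in tomoyo_load_builtin_policy() implies that the generated builtin-policy.h provides five __initdata strings whose lines are fed to tomoyo_parse_policy() one by one. The excerpt below is a rough, hypothetical illustration of that form: the PROFILE_VERSION line is inferred from the tomoyo_check_profile() check above, and all real policy lines are omitted as placeholders.

/* Hypothetical excerpt of a generated builtin-policy.h; contents are placeholders. */
static char tomoyo_builtin_profile[] __initdata =
	"PROFILE_VERSION=20110903\n";
static char tomoyo_builtin_exception_policy[] __initdata = "";
static char tomoyo_builtin_domain_policy[] __initdata = "";
static char tomoyo_builtin_manager[] __initdata = "";
static char tomoyo_builtin_stat[] __initdata = "";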
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 92169d29b2d..b897d486201 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -1,12 +1,9 @@
/*
* security/tomoyo/common.h
*
- * Common functions for TOMOYO.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
+ * Header file for TOMOYO.
*
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#ifndef _SECURITY_TOMOYO_COMMON_H
@@ -22,41 +19,484 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/list.h>
+#include <linux/cred.h>
+#include <linux/poll.h>
+#include <linux/binfmts.h>
+#include <linux/highmem.h>
+#include <linux/net.h>
+#include <linux/inet.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/un.h>
+#include <net/sock.h>
+#include <net/af_unix.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <net/udp.h>
+
+/********** Constants definitions. **********/
-struct dentry;
-struct vfsmount;
+/*
+ * TOMOYO uses this hash only when appending a string into the string
+ * table. Frequency of appending strings is very low. So we don't need
+ * large (e.g. 64k) hash size. 256 will be sufficient.
+ */
+#define TOMOYO_HASH_BITS 8
+#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
/*
- * tomoyo_page_buffer is a structure which is used for holding a pathname
- * obtained from "struct dentry" and "struct vfsmount" pair.
- * As of now, it is 4096 bytes. If users complain that 4096 bytes is too small
- * (because TOMOYO escapes non ASCII printable characters using \ooo format),
- * we will make the buffer larger.
+ * TOMOYO checks only SOCK_STREAM, SOCK_DGRAM, SOCK_RAW, SOCK_SEQPACKET.
+ * Therefore, we don't need SOCK_MAX.
*/
-struct tomoyo_page_buffer {
- char buffer[4096];
+#define TOMOYO_SOCK_MAX 6
+
+#define TOMOYO_EXEC_TMPSIZE 4096
+
+/* Garbage collector is trying to kfree() this element. */
+#define TOMOYO_GC_IN_PROGRESS -1
+
+/* Profile number is an integer between 0 and 255. */
+#define TOMOYO_MAX_PROFILES 256
+
+/* Group number is an integer between 0 and 255. */
+#define TOMOYO_MAX_ACL_GROUPS 256
+
+/* Index numbers for "struct tomoyo_condition". */
+enum tomoyo_conditions_index {
+ TOMOYO_TASK_UID, /* current_uid() */
+ TOMOYO_TASK_EUID, /* current_euid() */
+ TOMOYO_TASK_SUID, /* current_suid() */
+ TOMOYO_TASK_FSUID, /* current_fsuid() */
+ TOMOYO_TASK_GID, /* current_gid() */
+ TOMOYO_TASK_EGID, /* current_egid() */
+ TOMOYO_TASK_SGID, /* current_sgid() */
+ TOMOYO_TASK_FSGID, /* current_fsgid() */
+ TOMOYO_TASK_PID, /* sys_getpid() */
+ TOMOYO_TASK_PPID, /* sys_getppid() */
+ TOMOYO_EXEC_ARGC, /* "struct linux_binprm *"->argc */
+ TOMOYO_EXEC_ENVC, /* "struct linux_binprm *"->envc */
+ TOMOYO_TYPE_IS_SOCKET, /* S_IFSOCK */
+ TOMOYO_TYPE_IS_SYMLINK, /* S_IFLNK */
+ TOMOYO_TYPE_IS_FILE, /* S_IFREG */
+ TOMOYO_TYPE_IS_BLOCK_DEV, /* S_IFBLK */
+ TOMOYO_TYPE_IS_DIRECTORY, /* S_IFDIR */
+ TOMOYO_TYPE_IS_CHAR_DEV, /* S_IFCHR */
+ TOMOYO_TYPE_IS_FIFO, /* S_IFIFO */
+ TOMOYO_MODE_SETUID, /* S_ISUID */
+ TOMOYO_MODE_SETGID, /* S_ISGID */
+ TOMOYO_MODE_STICKY, /* S_ISVTX */
+ TOMOYO_MODE_OWNER_READ, /* S_IRUSR */
+ TOMOYO_MODE_OWNER_WRITE, /* S_IWUSR */
+ TOMOYO_MODE_OWNER_EXECUTE, /* S_IXUSR */
+ TOMOYO_MODE_GROUP_READ, /* S_IRGRP */
+ TOMOYO_MODE_GROUP_WRITE, /* S_IWGRP */
+ TOMOYO_MODE_GROUP_EXECUTE, /* S_IXGRP */
+ TOMOYO_MODE_OTHERS_READ, /* S_IROTH */
+ TOMOYO_MODE_OTHERS_WRITE, /* S_IWOTH */
+ TOMOYO_MODE_OTHERS_EXECUTE, /* S_IXOTH */
+ TOMOYO_EXEC_REALPATH,
+ TOMOYO_SYMLINK_TARGET,
+ TOMOYO_PATH1_UID,
+ TOMOYO_PATH1_GID,
+ TOMOYO_PATH1_INO,
+ TOMOYO_PATH1_MAJOR,
+ TOMOYO_PATH1_MINOR,
+ TOMOYO_PATH1_PERM,
+ TOMOYO_PATH1_TYPE,
+ TOMOYO_PATH1_DEV_MAJOR,
+ TOMOYO_PATH1_DEV_MINOR,
+ TOMOYO_PATH2_UID,
+ TOMOYO_PATH2_GID,
+ TOMOYO_PATH2_INO,
+ TOMOYO_PATH2_MAJOR,
+ TOMOYO_PATH2_MINOR,
+ TOMOYO_PATH2_PERM,
+ TOMOYO_PATH2_TYPE,
+ TOMOYO_PATH2_DEV_MAJOR,
+ TOMOYO_PATH2_DEV_MINOR,
+ TOMOYO_PATH1_PARENT_UID,
+ TOMOYO_PATH1_PARENT_GID,
+ TOMOYO_PATH1_PARENT_INO,
+ TOMOYO_PATH1_PARENT_PERM,
+ TOMOYO_PATH2_PARENT_UID,
+ TOMOYO_PATH2_PARENT_GID,
+ TOMOYO_PATH2_PARENT_INO,
+ TOMOYO_PATH2_PARENT_PERM,
+ TOMOYO_MAX_CONDITION_KEYWORD,
+ TOMOYO_NUMBER_UNION,
+ TOMOYO_NAME_UNION,
+ TOMOYO_ARGV_ENTRY,
+ TOMOYO_ENVP_ENTRY,
+};
+
+
+/* Index numbers for stat(). */
+enum tomoyo_path_stat_index {
+ /* Do not change this order. */
+ TOMOYO_PATH1,
+ TOMOYO_PATH1_PARENT,
+ TOMOYO_PATH2,
+ TOMOYO_PATH2_PARENT,
+ TOMOYO_MAX_PATH_STAT
+};
+
+/* Index numbers for operation mode. */
+enum tomoyo_mode_index {
+ TOMOYO_CONFIG_DISABLED,
+ TOMOYO_CONFIG_LEARNING,
+ TOMOYO_CONFIG_PERMISSIVE,
+ TOMOYO_CONFIG_ENFORCING,
+ TOMOYO_CONFIG_MAX_MODE,
+ TOMOYO_CONFIG_WANT_REJECT_LOG = 64,
+ TOMOYO_CONFIG_WANT_GRANT_LOG = 128,
+ TOMOYO_CONFIG_USE_DEFAULT = 255,
+};
+
+/* Index numbers for entry type. */
+enum tomoyo_policy_id {
+ TOMOYO_ID_GROUP,
+ TOMOYO_ID_ADDRESS_GROUP,
+ TOMOYO_ID_PATH_GROUP,
+ TOMOYO_ID_NUMBER_GROUP,
+ TOMOYO_ID_TRANSITION_CONTROL,
+ TOMOYO_ID_AGGREGATOR,
+ TOMOYO_ID_MANAGER,
+ TOMOYO_ID_CONDITION,
+ TOMOYO_ID_NAME,
+ TOMOYO_ID_ACL,
+ TOMOYO_ID_DOMAIN,
+ TOMOYO_MAX_POLICY
+};
+
+/* Index numbers for domain's attributes. */
+enum tomoyo_domain_info_flags_index {
+ /* Quota warning flag. */
+ TOMOYO_DIF_QUOTA_WARNED,
+ /*
+ * This domain was unable to create a new domain at
+ * tomoyo_find_next_domain() because the name of the domain to be
+ * created was too long or it could not allocate memory.
+ * More than one process continued execve() without domain transition.
+ */
+ TOMOYO_DIF_TRANSITION_FAILED,
+ TOMOYO_MAX_DOMAIN_INFO_FLAGS
+};
+
+/* Index numbers for audit type. */
+enum tomoyo_grant_log {
+ /* Follow profile's configuration. */
+ TOMOYO_GRANTLOG_AUTO,
+ /* Do not generate grant log. */
+ TOMOYO_GRANTLOG_NO,
+ /* Generate grant_log. */
+ TOMOYO_GRANTLOG_YES,
+};
+
+/* Index numbers for group entries. */
+enum tomoyo_group_id {
+ TOMOYO_PATH_GROUP,
+ TOMOYO_NUMBER_GROUP,
+ TOMOYO_ADDRESS_GROUP,
+ TOMOYO_MAX_GROUP
+};
+
+/* Index numbers for type of numeric values. */
+enum tomoyo_value_type {
+ TOMOYO_VALUE_TYPE_INVALID,
+ TOMOYO_VALUE_TYPE_DECIMAL,
+ TOMOYO_VALUE_TYPE_OCTAL,
+ TOMOYO_VALUE_TYPE_HEXADECIMAL,
+};
+
+/* Index numbers for domain transition control keywords. */
+enum tomoyo_transition_type {
+ /* Do not change this order. */
+ TOMOYO_TRANSITION_CONTROL_NO_RESET,
+ TOMOYO_TRANSITION_CONTROL_RESET,
+ TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE,
+ TOMOYO_TRANSITION_CONTROL_INITIALIZE,
+ TOMOYO_TRANSITION_CONTROL_NO_KEEP,
+ TOMOYO_TRANSITION_CONTROL_KEEP,
+ TOMOYO_MAX_TRANSITION_TYPE
+};
+
+/* Index numbers for Access Controls. */
+enum tomoyo_acl_entry_type_index {
+ TOMOYO_TYPE_PATH_ACL,
+ TOMOYO_TYPE_PATH2_ACL,
+ TOMOYO_TYPE_PATH_NUMBER_ACL,
+ TOMOYO_TYPE_MKDEV_ACL,
+ TOMOYO_TYPE_MOUNT_ACL,
+ TOMOYO_TYPE_INET_ACL,
+ TOMOYO_TYPE_UNIX_ACL,
+ TOMOYO_TYPE_ENV_ACL,
+ TOMOYO_TYPE_MANUAL_TASK_ACL,
+};
+
+/* Index numbers for access controls with one pathname. */
+enum tomoyo_path_acl_index {
+ TOMOYO_TYPE_EXECUTE,
+ TOMOYO_TYPE_READ,
+ TOMOYO_TYPE_WRITE,
+ TOMOYO_TYPE_APPEND,
+ TOMOYO_TYPE_UNLINK,
+ TOMOYO_TYPE_GETATTR,
+ TOMOYO_TYPE_RMDIR,
+ TOMOYO_TYPE_TRUNCATE,
+ TOMOYO_TYPE_SYMLINK,
+ TOMOYO_TYPE_CHROOT,
+ TOMOYO_TYPE_UMOUNT,
+ TOMOYO_MAX_PATH_OPERATION
+};
+
+/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */
+enum tomoyo_memory_stat_type {
+ TOMOYO_MEMORY_POLICY,
+ TOMOYO_MEMORY_AUDIT,
+ TOMOYO_MEMORY_QUERY,
+ TOMOYO_MAX_MEMORY_STAT
+};
+
+enum tomoyo_mkdev_acl_index {
+ TOMOYO_TYPE_MKBLOCK,
+ TOMOYO_TYPE_MKCHAR,
+ TOMOYO_MAX_MKDEV_OPERATION
+};
+
+/* Index numbers for socket operations. */
+enum tomoyo_network_acl_index {
+ TOMOYO_NETWORK_BIND, /* bind() operation. */
+ TOMOYO_NETWORK_LISTEN, /* listen() operation. */
+ TOMOYO_NETWORK_CONNECT, /* connect() operation. */
+ TOMOYO_NETWORK_SEND, /* send() operation. */
+ TOMOYO_MAX_NETWORK_OPERATION
+};
+
+/* Index numbers for access controls with two pathnames. */
+enum tomoyo_path2_acl_index {
+ TOMOYO_TYPE_LINK,
+ TOMOYO_TYPE_RENAME,
+ TOMOYO_TYPE_PIVOT_ROOT,
+ TOMOYO_MAX_PATH2_OPERATION
+};
+
+/* Index numbers for access controls with one pathname and one number. */
+enum tomoyo_path_number_acl_index {
+ TOMOYO_TYPE_CREATE,
+ TOMOYO_TYPE_MKDIR,
+ TOMOYO_TYPE_MKFIFO,
+ TOMOYO_TYPE_MKSOCK,
+ TOMOYO_TYPE_IOCTL,
+ TOMOYO_TYPE_CHMOD,
+ TOMOYO_TYPE_CHOWN,
+ TOMOYO_TYPE_CHGRP,
+ TOMOYO_MAX_PATH_NUMBER_OPERATION
+};
+
+/* Index numbers for /sys/kernel/security/tomoyo/ interfaces. */
+enum tomoyo_securityfs_interface_index {
+ TOMOYO_DOMAINPOLICY,
+ TOMOYO_EXCEPTIONPOLICY,
+ TOMOYO_PROCESS_STATUS,
+ TOMOYO_STAT,
+ TOMOYO_AUDIT,
+ TOMOYO_VERSION,
+ TOMOYO_PROFILE,
+ TOMOYO_QUERY,
+ TOMOYO_MANAGER
+};
+
+/* Index numbers for special mount operations. */
+enum tomoyo_special_mount {
+ TOMOYO_MOUNT_BIND, /* mount --bind /source /dest */
+ TOMOYO_MOUNT_MOVE, /* mount --move /old /new */
+ TOMOYO_MOUNT_REMOUNT, /* mount -o remount /dir */
+ TOMOYO_MOUNT_MAKE_UNBINDABLE, /* mount --make-unbindable /dir */
+ TOMOYO_MOUNT_MAKE_PRIVATE, /* mount --make-private /dir */
+ TOMOYO_MOUNT_MAKE_SLAVE, /* mount --make-slave /dir */
+ TOMOYO_MOUNT_MAKE_SHARED, /* mount --make-shared /dir */
+ TOMOYO_MAX_SPECIAL_MOUNT
+};
+
+/* Index numbers for functionality. */
+enum tomoyo_mac_index {
+ TOMOYO_MAC_FILE_EXECUTE,
+ TOMOYO_MAC_FILE_OPEN,
+ TOMOYO_MAC_FILE_CREATE,
+ TOMOYO_MAC_FILE_UNLINK,
+ TOMOYO_MAC_FILE_GETATTR,
+ TOMOYO_MAC_FILE_MKDIR,
+ TOMOYO_MAC_FILE_RMDIR,
+ TOMOYO_MAC_FILE_MKFIFO,
+ TOMOYO_MAC_FILE_MKSOCK,
+ TOMOYO_MAC_FILE_TRUNCATE,
+ TOMOYO_MAC_FILE_SYMLINK,
+ TOMOYO_MAC_FILE_MKBLOCK,
+ TOMOYO_MAC_FILE_MKCHAR,
+ TOMOYO_MAC_FILE_LINK,
+ TOMOYO_MAC_FILE_RENAME,
+ TOMOYO_MAC_FILE_CHMOD,
+ TOMOYO_MAC_FILE_CHOWN,
+ TOMOYO_MAC_FILE_CHGRP,
+ TOMOYO_MAC_FILE_IOCTL,
+ TOMOYO_MAC_FILE_CHROOT,
+ TOMOYO_MAC_FILE_MOUNT,
+ TOMOYO_MAC_FILE_UMOUNT,
+ TOMOYO_MAC_FILE_PIVOT_ROOT,
+ TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ TOMOYO_MAC_ENVIRON,
+ TOMOYO_MAX_MAC_INDEX
+};
+
+/* Index numbers for category of functionality. */
+enum tomoyo_mac_category_index {
+ TOMOYO_MAC_CATEGORY_FILE,
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ TOMOYO_MAC_CATEGORY_MISC,
+ TOMOYO_MAX_MAC_CATEGORY_INDEX
};
/*
- * tomoyo_path_info is a structure which is used for holding a string data
- * used by TOMOYO.
- * This structure has several fields for supporting pattern matching.
- *
- * (1) "name" is the '\0' terminated string data.
- * (2) "hash" is full_name_hash(name, strlen(name)).
- * This allows tomoyo_pathcmp() to compare by hash before actually compare
- * using strcmp().
- * (3) "const_len" is the length of the initial segment of "name" which
- * consists entirely of non wildcard characters. In other words, the length
- * which we can compare two strings using strncmp().
- * (4) "is_dir" is a bool which is true if "name" ends with "/",
- * false otherwise.
- * TOMOYO distinguishes directory and non-directory. A directory ends with
- * "/" and non-directory does not end with "/".
- * (5) "is_patterned" is a bool which is true if "name" contains wildcard
- * characters, false otherwise. This allows TOMOYO to use "hash" and
- * strcmp() for string comparison if "is_patterned" is false.
+ * Retry this request. Returned by tomoyo_supervisor() if a policy violation
+ * occurred in enforcing mode and the userspace daemon decided to retry.
+ *
+ * We must choose a positive value in order to distinguish "granted" (which is
+ * 0) and "rejected" (which is a negative value) from "retry".
*/
+#define TOMOYO_RETRY_REQUEST 1
+
+/* Index numbers for /sys/kernel/security/tomoyo/stat interface. */
+enum tomoyo_policy_stat_type {
+ /* Do not change this order. */
+ TOMOYO_STAT_POLICY_UPDATES,
+ TOMOYO_STAT_POLICY_LEARNING, /* == TOMOYO_CONFIG_LEARNING */
+ TOMOYO_STAT_POLICY_PERMISSIVE, /* == TOMOYO_CONFIG_PERMISSIVE */
+ TOMOYO_STAT_POLICY_ENFORCING, /* == TOMOYO_CONFIG_ENFORCING */
+ TOMOYO_MAX_POLICY_STAT
+};
+
+/* Index numbers for profile's PREFERENCE values. */
+enum tomoyo_pref_index {
+ TOMOYO_PREF_MAX_AUDIT_LOG,
+ TOMOYO_PREF_MAX_LEARNING_ENTRY,
+ TOMOYO_MAX_PREF
+};
+
+/********** Structure definitions. **********/
+
+/* Common header for holding ACL entries. */
+struct tomoyo_acl_head {
+ struct list_head list;
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
+} __packed;
+
+/* Common header for shared entries. */
+struct tomoyo_shared_acl_head {
+ struct list_head list;
+ atomic_t users;
+} __packed;
+
+struct tomoyo_policy_namespace;
+
+/* Structure for request info. */
+struct tomoyo_request_info {
+ /*
+ * For holding parameters specific to operations which deal with files.
+ * NULL if not dealing with files.
+ */
+ struct tomoyo_obj_info *obj;
+ /*
+ * For holding parameters specific to an execve() request.
+ * NULL if not dealing with do_execve().
+ */
+ struct tomoyo_execve *ee;
+ struct tomoyo_domain_info *domain;
+ /* For holding parameters. */
+ union {
+ struct {
+ const struct tomoyo_path_info *filename;
+ /* For using wildcards at tomoyo_find_next_domain(). */
+ const struct tomoyo_path_info *matched_path;
+ /* One of values in "enum tomoyo_path_acl_index". */
+ u8 operation;
+ } path;
+ struct {
+ const struct tomoyo_path_info *filename1;
+ const struct tomoyo_path_info *filename2;
+ /* One of values in "enum tomoyo_path2_acl_index". */
+ u8 operation;
+ } path2;
+ struct {
+ const struct tomoyo_path_info *filename;
+ unsigned int mode;
+ unsigned int major;
+ unsigned int minor;
+ /* One of values in "enum tomoyo_mkdev_acl_index". */
+ u8 operation;
+ } mkdev;
+ struct {
+ const struct tomoyo_path_info *filename;
+ unsigned long number;
+ /*
+ * One of values in
+ * "enum tomoyo_path_number_acl_index".
+ */
+ u8 operation;
+ } path_number;
+ struct {
+ const struct tomoyo_path_info *name;
+ } environ;
+ struct {
+ const __be32 *address;
+ u16 port;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ bool is_ipv6;
+ } inet_network;
+ struct {
+ const struct tomoyo_path_info *address;
+ /* One of values smaller than TOMOYO_SOCK_MAX. */
+ u8 protocol;
+ /* One of values in "enum tomoyo_network_acl_index". */
+ u8 operation;
+ } unix_network;
+ struct {
+ const struct tomoyo_path_info *type;
+ const struct tomoyo_path_info *dir;
+ const struct tomoyo_path_info *dev;
+ unsigned long flags;
+ int need_dev;
+ } mount;
+ struct {
+ const struct tomoyo_path_info *domainname;
+ } task;
+ } param;
+ struct tomoyo_acl_info *matched_acl;
+ u8 param_type;
+ bool granted;
+ u8 retry;
+ u8 profile;
+ u8 mode; /* One of tomoyo_mode_index . */
+ u8 type;
+};
+
+/* Structure for holding a token. */
struct tomoyo_path_info {
const char *name;
u32 hash; /* = full_name_hash(name, strlen(name)) */
@@ -65,393 +505,826 @@ struct tomoyo_path_info {
bool is_patterned; /* = tomoyo_path_contains_pattern(name) */
};
-/*
- * This is the max length of a token.
- *
- * A token consists of only ASCII printable characters.
- * Non printable characters in a token is represented in \ooo style
- * octal string. Thus, \ itself is represented as \\.
- */
-#define TOMOYO_MAX_PATHNAME_LEN 4000
+/* Structure for holding string data. */
+struct tomoyo_name {
+ struct tomoyo_shared_acl_head head;
+ struct tomoyo_path_info entry;
+};
-/*
- * tomoyo_path_info_with_data is a structure which is used for holding a
- * pathname obtained from "struct dentry" and "struct vfsmount" pair.
- *
- * "struct tomoyo_path_info_with_data" consists of "struct tomoyo_path_info"
- * and buffer for the pathname, while "struct tomoyo_page_buffer" consists of
- * buffer for the pathname only.
- *
- * "struct tomoyo_path_info_with_data" is intended to allow TOMOYO to release
- * both "struct tomoyo_path_info" and buffer for the pathname by single kfree()
- * so that we don't need to return two pointers to the caller. If the caller
- * puts "struct tomoyo_path_info" on stack memory, we will be able to remove
- * "struct tomoyo_path_info_with_data".
- */
-struct tomoyo_path_info_with_data {
- /* Keep "head" first, for this pointer is passed to tomoyo_free(). */
- struct tomoyo_path_info head;
- char barrier1[16]; /* Safeguard for overrun. */
- char body[TOMOYO_MAX_PATHNAME_LEN];
- char barrier2[16]; /* Safeguard for overrun. */
+/* Structure for holding a word. */
+struct tomoyo_name_union {
+ /* Either @filename or @group is NULL. */
+ const struct tomoyo_path_info *filename;
+ struct tomoyo_group *group;
};
-/*
- * tomoyo_acl_info is a structure which is used for holding
- *
- * (1) "list" which is linked to the ->acl_info_list of
- * "struct tomoyo_domain_info"
- * (2) "type" which tells
- * (a) type & 0x7F : type of the entry (either
- * "struct tomoyo_single_path_acl_record" or
- * "struct tomoyo_double_path_acl_record")
- * (b) type & 0x80 : whether the entry is marked as "deleted".
- *
- * Packing "struct tomoyo_acl_info" allows
- * "struct tomoyo_single_path_acl_record" to embed "u16" and
- * "struct tomoyo_double_path_acl_record" to embed "u8"
- * without enlarging their structure size.
- */
-struct tomoyo_acl_info {
- struct list_head list;
+/* Structure for holding a number. */
+struct tomoyo_number_union {
+ unsigned long values[2];
+ struct tomoyo_group *group; /* Maybe NULL. */
+ /* One of values in "enum tomoyo_value_type". */
+ u8 value_type[2];
+};
+
+/* Structure for holding an IP address. */
+struct tomoyo_ipaddr_union {
+ struct in6_addr ip[2]; /* Big endian. */
+ struct tomoyo_group *group; /* Pointer to address group. */
+ bool is_ipv6; /* Valid only if @group == NULL. */
+};
+
+/* Structure for "path_group"/"number_group"/"address_group" directive. */
+struct tomoyo_group {
+ struct tomoyo_shared_acl_head head;
+ const struct tomoyo_path_info *group_name;
+ struct list_head member_list;
+};
+
+/* Structure for "path_group" directive. */
+struct tomoyo_path_group {
+ struct tomoyo_acl_head head;
+ const struct tomoyo_path_info *member_name;
+};
+
+/* Structure for "number_group" directive. */
+struct tomoyo_number_group {
+ struct tomoyo_acl_head head;
+ struct tomoyo_number_union number;
+};
+
+/* Structure for "address_group" directive. */
+struct tomoyo_address_group {
+ struct tomoyo_acl_head head;
+ /* Structure for holding an IP address. */
+ struct tomoyo_ipaddr_union address;
+};
+
+/* Subset of "struct stat". Used by conditional ACL and audit logs. */
+struct tomoyo_mini_stat {
+ kuid_t uid;
+ kgid_t gid;
+ ino_t ino;
+ umode_t mode;
+ dev_t dev;
+ dev_t rdev;
+};
+
+/* Structure for dumping argv[] and envp[] of "struct linux_binprm". */
+struct tomoyo_page_dump {
+ struct page *page; /* Previously dumped page. */
+ char *data; /* Contents of "page". Size is PAGE_SIZE. */
+};
+
+/* Structure for attribute checks in addition to pathname checks. */
+struct tomoyo_obj_info {
/*
- * Type of this ACL entry.
- *
- * MSB is is_deleted flag.
+ * True if tomoyo_get_attributes() was already called, false otherwise.
*/
- u8 type;
-} __packed;
+ bool validate_done;
+ /* True if @stat[] is valid. */
+ bool stat_valid[TOMOYO_MAX_PATH_STAT];
+ /* First pathname. Initialized with { NULL, NULL } if no path. */
+ struct path path1;
+ /* Second pathname. Initialized with { NULL, NULL } if no path. */
+ struct path path2;
+ /*
+ * Information on @path1, @path1's parent directory, @path2, @path2's
+ * parent directory.
+ */
+ struct tomoyo_mini_stat stat[TOMOYO_MAX_PATH_STAT];
+ /*
+ * Content of symbolic link to be created. NULL for operations other
+ * than symlink().
+ */
+ struct tomoyo_path_info *symlink_target;
+};
-/* This ACL entry is deleted. */
-#define TOMOYO_ACL_DELETED 0x80
+/* Structure for argv[]. */
+struct tomoyo_argv {
+ unsigned long index;
+ const struct tomoyo_path_info *value;
+ bool is_not;
+};
-/*
- * tomoyo_domain_info is a structure which is used for holding permissions
- * (e.g. "allow_read /lib/libc-2.5.so") given to each domain.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_domain_list .
- * (2) "acl_info_list" which is linked to "struct tomoyo_acl_info".
- * (3) "domainname" which holds the name of the domain.
- * (4) "profile" which remembers profile number assigned to this domain.
- * (5) "is_deleted" is a bool which is true if this domain is marked as
- * "deleted", false otherwise.
- * (6) "quota_warned" is a bool which is used for suppressing warning message
- * when learning mode learned too much entries.
- * (7) "flags" which remembers this domain's attributes.
- *
- * A domain's lifecycle is an analogy of files on / directory.
- * Multiple domains with the same domainname cannot be created (as with
- * creating files with the same filename fails with -EEXIST).
- * If a process reached a domain, that process can reside in that domain after
- * that domain is marked as "deleted" (as with a process can access an already
- * open()ed file after that file was unlink()ed).
- */
+/* Structure for envp[]. */
+struct tomoyo_envp {
+ const struct tomoyo_path_info *name;
+ const struct tomoyo_path_info *value;
+ bool is_not;
+};
+
+/* Structure for execve() operation. */
+struct tomoyo_execve {
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj;
+ struct linux_binprm *bprm;
+ const struct tomoyo_path_info *transition;
+ /* For dumping argv[] and envp[]. */
+ struct tomoyo_page_dump dump;
+ /* For temporary use. */
+ char *tmp; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+};
+
+/* Structure for entries which follows "struct tomoyo_condition". */
+struct tomoyo_condition_element {
+ /*
+ * Left hand operand. A "struct tomoyo_argv" for TOMOYO_ARGV_ENTRY, a
+ * "struct tomoyo_envp" for TOMOYO_ENVP_ENTRY is attached to the tail
+ * of the array of this struct.
+ */
+ u8 left;
+ /*
+ * Right hand operand. A "struct tomoyo_number_union" for
+ * TOMOYO_NUMBER_UNION, a "struct tomoyo_name_union" for
+ * TOMOYO_NAME_UNION is attached to the tail of the array of this
+ * struct.
+ */
+ u8 right;
+ /* Equation operator. True if equals or overlaps, false otherwise. */
+ bool equals;
+};
+
+/* Structure for optional arguments. */
+struct tomoyo_condition {
+ struct tomoyo_shared_acl_head head;
+ u32 size; /* Memory size allocated for this entry. */
+ u16 condc; /* Number of conditions in this struct. */
+ u16 numbers_count; /* Number of "struct tomoyo_number_union values". */
+ u16 names_count; /* Number of "struct tomoyo_name_union names". */
+ u16 argc; /* Number of "struct tomoyo_argv". */
+ u16 envc; /* Number of "struct tomoyo_envp". */
+ u8 grant_log; /* One of values in "enum tomoyo_grant_log". */
+ const struct tomoyo_path_info *transit; /* Maybe NULL. */
+ /*
+ * struct tomoyo_condition_element condition[condc];
+ * struct tomoyo_number_union values[numbers_count];
+ * struct tomoyo_name_union names[names_count];
+ * struct tomoyo_argv argv[argc];
+ * struct tomoyo_envp envp[envc];
+ */
+};
+
+/* Common header for individual entries. */
+struct tomoyo_acl_info {
+ struct list_head list;
+ struct tomoyo_condition *cond; /* Maybe NULL. */
+ s8 is_deleted; /* true or false or TOMOYO_GC_IN_PROGRESS */
+ u8 type; /* One of values in "enum tomoyo_acl_entry_type_index". */
+} __packed;
+
+/* Structure for domain information. */
struct tomoyo_domain_info {
struct list_head list;
struct list_head acl_info_list;
/* Name of this domain. Never NULL. */
const struct tomoyo_path_info *domainname;
+ /* Namespace for this domain. Never NULL. */
+ struct tomoyo_policy_namespace *ns;
u8 profile; /* Profile number to use. */
+ u8 group; /* Group number to use. */
bool is_deleted; /* Delete flag. */
- bool quota_warned; /* Quota warnning flag. */
- /* DOMAIN_FLAGS_*. Use tomoyo_set_domain_flag() to modify. */
- u8 flags;
+ bool flags[TOMOYO_MAX_DOMAIN_INFO_FLAGS];
+ atomic_t users; /* Number of referring credentials. */
};
-/* Profile number is an integer between 0 and 255. */
-#define TOMOYO_MAX_PROFILES 256
-
-/* Ignore "allow_read" directive in exception policy. */
-#define TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ 1
/*
- * This domain was unable to create a new domain at tomoyo_find_next_domain()
- * because the name of the domain to be created was too long or
- * it could not allocate memory.
- * More than one process continued execve() without domain transition.
+ * Structure for "task manual_domain_transition" directive.
*/
-#define TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED 2
+struct tomoyo_task_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MANUAL_TASK_ACL */
+ /* Pointer to domainname. */
+ const struct tomoyo_path_info *domainname;
+};
/*
- * tomoyo_single_path_acl_record is a structure which is used for holding an
- * entry with one pathname operation (e.g. open(), mkdir()).
- * It has following fields.
- *
- * (1) "head" which is a "struct tomoyo_acl_info".
- * (2) "perm" which is a bitmask of permitted operations.
- * (3) "filename" is the pathname.
- *
- * Directives held by this structure are "allow_read/write", "allow_execute",
- * "allow_read", "allow_write", "allow_create", "allow_unlink", "allow_mkdir",
- * "allow_rmdir", "allow_mkfifo", "allow_mksock", "allow_mkblock",
- * "allow_mkchar", "allow_truncate", "allow_symlink" and "allow_rewrite".
+ * Structure for "file execute", "file read", "file write", "file append",
+ * "file unlink", "file getattr", "file rmdir", "file truncate",
+ * "file symlink", "file chroot" and "file unmount" directive.
*/
-struct tomoyo_single_path_acl_record {
- struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_SINGLE_PATH_ACL */
- u16 perm;
- /* Pointer to single pathname. */
- const struct tomoyo_path_info *filename;
+struct tomoyo_path_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_ACL */
+ u16 perm; /* Bitmask of values in "enum tomoyo_path_acl_index". */
+ struct tomoyo_name_union name;
};
/*
- * tomoyo_double_path_acl_record is a structure which is used for holding an
- * entry with two pathnames operation (i.e. link() and rename()).
- * It has following fields.
- *
- * (1) "head" which is a "struct tomoyo_acl_info".
- * (2) "perm" which is a bitmask of permitted operations.
- * (3) "filename1" is the source/old pathname.
- * (4) "filename2" is the destination/new pathname.
- *
- * Directives held by this structure are "allow_rename" and "allow_link".
+ * Structure for "file create", "file mkdir", "file mkfifo", "file mksock",
+ * "file ioctl", "file chmod", "file chown" and "file chgrp" directive.
*/
-struct tomoyo_double_path_acl_record {
- struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_DOUBLE_PATH_ACL */
+struct tomoyo_path_number_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH_NUMBER_ACL */
+ /* Bitmask of values in "enum tomoyo_path_number_acl_index". */
u8 perm;
- /* Pointer to single pathname. */
- const struct tomoyo_path_info *filename1;
- /* Pointer to single pathname. */
- const struct tomoyo_path_info *filename2;
-};
-
-/* Keywords for ACLs. */
-#define TOMOYO_KEYWORD_ALIAS "alias "
-#define TOMOYO_KEYWORD_ALLOW_READ "allow_read "
-#define TOMOYO_KEYWORD_DELETE "delete "
-#define TOMOYO_KEYWORD_DENY_REWRITE "deny_rewrite "
-#define TOMOYO_KEYWORD_FILE_PATTERN "file_pattern "
-#define TOMOYO_KEYWORD_INITIALIZE_DOMAIN "initialize_domain "
-#define TOMOYO_KEYWORD_KEEP_DOMAIN "keep_domain "
-#define TOMOYO_KEYWORD_NO_INITIALIZE_DOMAIN "no_initialize_domain "
-#define TOMOYO_KEYWORD_NO_KEEP_DOMAIN "no_keep_domain "
-#define TOMOYO_KEYWORD_SELECT "select "
-#define TOMOYO_KEYWORD_USE_PROFILE "use_profile "
-#define TOMOYO_KEYWORD_IGNORE_GLOBAL_ALLOW_READ "ignore_global_allow_read"
-/* A domain definition starts with <kernel>. */
-#define TOMOYO_ROOT_NAME "<kernel>"
-#define TOMOYO_ROOT_NAME_LEN (sizeof(TOMOYO_ROOT_NAME) - 1)
+ struct tomoyo_name_union name;
+ struct tomoyo_number_union number;
+};
-/* Index numbers for Access Controls. */
-#define TOMOYO_MAC_FOR_FILE 0 /* domain_policy.conf */
-#define TOMOYO_MAX_ACCEPT_ENTRY 1
-#define TOMOYO_VERBOSE 2
-#define TOMOYO_MAX_CONTROL_INDEX 3
+/* Structure for "file mkblock" and "file mkchar" directive. */
+struct tomoyo_mkdev_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MKDEV_ACL */
+ u8 perm; /* Bitmask of values in "enum tomoyo_mkdev_acl_index". */
+ struct tomoyo_name_union name;
+ struct tomoyo_number_union mode;
+ struct tomoyo_number_union major;
+ struct tomoyo_number_union minor;
+};
+
+/*
+ * Structure for "file rename", "file link" and "file pivot_root" directive.
+ */
+struct tomoyo_path2_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_PATH2_ACL */
+ u8 perm; /* Bitmask of values in "enum tomoyo_path2_acl_index". */
+ struct tomoyo_name_union name1;
+ struct tomoyo_name_union name2;
+};
+
+/* Structure for "file mount" directive. */
+struct tomoyo_mount_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_MOUNT_ACL */
+ struct tomoyo_name_union dev_name;
+ struct tomoyo_name_union dir_name;
+ struct tomoyo_name_union fs_type;
+ struct tomoyo_number_union flags;
+};
+
+/* Structure for "misc env" directive in domain policy. */
+struct tomoyo_env_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_ENV_ACL */
+ const struct tomoyo_path_info *env; /* environment variable */
+};
+
+/* Structure for "network inet" directive. */
+struct tomoyo_inet_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_INET_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_ipaddr_union address;
+ struct tomoyo_number_union port;
+};
+
+/* Structure for "network unix" directive. */
+struct tomoyo_unix_acl {
+ struct tomoyo_acl_info head; /* type = TOMOYO_TYPE_UNIX_ACL */
+ u8 protocol;
+ u8 perm; /* Bitmask of values in "enum tomoyo_network_acl_index" */
+ struct tomoyo_name_union name;
+};
+
+/* Structure for holding a line from /sys/kernel/security/tomoyo/ interface. */
+struct tomoyo_acl_param {
+ char *data;
+ struct list_head *list;
+ struct tomoyo_policy_namespace *ns;
+ bool is_delete;
+};
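+/*
+ * Example (sketch): for a domain policy line such as
+ * "delete file read /etc/fstab", ->is_delete is set from the leading
+ * "delete " keyword and ->data points at the remainder of the line that
+ * the individual parsers consume token by token.
+ */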
+
+#define TOMOYO_MAX_IO_READ_QUEUE 64
/*
- * tomoyo_io_buffer is a structure which is used for reading and modifying
- * configuration via /sys/kernel/security/tomoyo/ interface.
- * It has many fields. ->read_var1 , ->read_var2 , ->write_var1 are used as
- * cursors.
- *
- * Since the content of /sys/kernel/security/tomoyo/domain_policy is a list of
- * "struct tomoyo_domain_info" entries and each "struct tomoyo_domain_info"
- * entry has a list of "struct tomoyo_acl_info", we need two cursors when
- * reading (one is for traversing tomoyo_domain_list and the other is for
- * traversing "struct tomoyo_acl_info"->acl_info_list ).
- *
- * If a line written to /sys/kernel/security/tomoyo/domain_policy starts with
- * "select ", TOMOYO seeks the cursor ->read_var1 and ->write_var1 to the
- * domain with the domainname specified by the rest of that line (NULL is set
- * if seek failed).
- * If a line written to /sys/kernel/security/tomoyo/domain_policy starts with
- * "delete ", TOMOYO deletes an entry or a domain specified by the rest of that
- * line (->write_var1 is set to NULL if a domain was deleted).
- * If a line written to /sys/kernel/security/tomoyo/domain_policy starts with
- * neither "select " nor "delete ", an entry or a domain specified by that line
- * is appended.
+ * Structure for reading/writing policy via /sys/kernel/security/tomoyo
+ * interfaces.
*/
struct tomoyo_io_buffer {
- int (*read) (struct tomoyo_io_buffer *);
+ void (*read) (struct tomoyo_io_buffer *);
int (*write) (struct tomoyo_io_buffer *);
+ unsigned int (*poll) (struct file *file, poll_table *wait);
/* Exclusive lock for this structure. */
struct mutex io_sem;
- /* The position currently reading from. */
- struct list_head *read_var1;
- /* Extra variables for reading. */
- struct list_head *read_var2;
- /* The position currently writing to. */
- struct tomoyo_domain_info *write_var1;
- /* The step for reading. */
- int read_step;
+ char __user *read_user_buf;
+ size_t read_user_buf_avail;
+ struct {
+ struct list_head *ns;
+ struct list_head *domain;
+ struct list_head *group;
+ struct list_head *acl;
+ size_t avail;
+ unsigned int step;
+ unsigned int query_index;
+ u16 index;
+ u16 cond_index;
+ u8 acl_group_index;
+ u8 cond_step;
+ u8 bit;
+ u8 w_pos;
+ bool eof;
+ bool print_this_domain_only;
+ bool print_transition_related_only;
+ bool print_cond_part;
+ const char *w[TOMOYO_MAX_IO_READ_QUEUE];
+ } r;
+ struct {
+ struct tomoyo_policy_namespace *ns;
+ /* The position currently writing to. */
+ struct tomoyo_domain_info *domain;
+ /* Bytes available for writing. */
+ size_t avail;
+ bool is_delete;
+ } w;
/* Buffer for reading. */
char *read_buf;
- /* EOF flag for reading. */
- bool read_eof;
- /* Read domain ACL of specified PID? */
- bool read_single_domain;
- /* Extra variable for reading. */
- u8 read_bit;
- /* Bytes available for reading. */
- int read_avail;
/* Size of read buffer. */
- int readbuf_size;
+ size_t readbuf_size;
/* Buffer for writing. */
char *write_buf;
- /* Bytes available for writing. */
- int write_avail;
/* Size of write buffer. */
- int writebuf_size;
-};
-
-/* Check whether the domain has too many ACL entries to hold. */
-bool tomoyo_domain_quota_is_ok(struct tomoyo_domain_info * const domain);
-/* Transactional sprintf() for policy dump. */
-bool tomoyo_io_printf(struct tomoyo_io_buffer *head, const char *fmt, ...)
- __attribute__ ((format(printf, 2, 3)));
-/* Check whether the domainname is correct. */
-bool tomoyo_is_correct_domain(const unsigned char *domainname,
- const char *function);
-/* Check whether the token is correct. */
-bool tomoyo_is_correct_path(const char *filename, const s8 start_type,
- const s8 pattern_type, const s8 end_type,
- const char *function);
-/* Check whether the token can be a domainname. */
-bool tomoyo_is_domain_def(const unsigned char *buffer);
-/* Check whether the given filename matches the given pattern. */
-bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
- const struct tomoyo_path_info *pattern);
-/* Read "alias" entry in exception policy. */
-bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head);
-/*
- * Read "initialize_domain" and "no_initialize_domain" entry
- * in exception policy.
- */
-bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head);
-/* Read "keep_domain" and "no_keep_domain" entry in exception policy. */
-bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head);
-/* Read "file_pattern" entry in exception policy. */
-bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head);
-/* Read "allow_read" entry in exception policy. */
-bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head);
-/* Read "deny_rewrite" entry in exception policy. */
-bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head);
-/* Write domain policy violation warning message to console? */
-bool tomoyo_verbose_mode(const struct tomoyo_domain_info *domain);
-/* Convert double path operation to operation name. */
-const char *tomoyo_dp2keyword(const u8 operation);
-/* Get the last component of the given domainname. */
-const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain);
-/* Get warning message. */
-const char *tomoyo_get_msg(const bool is_enforce);
-/* Convert single path operation to operation name. */
-const char *tomoyo_sp2keyword(const u8 operation);
-/* Create "alias" entry in exception policy. */
-int tomoyo_write_alias_policy(char *data, const bool is_delete);
-/*
- * Create "initialize_domain" and "no_initialize_domain" entry
- * in exception policy.
- */
-int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
- const bool is_delete);
-/* Create "keep_domain" and "no_keep_domain" entry in exception policy. */
-int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
- const bool is_delete);
+ size_t writebuf_size;
+ /* Type of this interface. */
+ enum tomoyo_securityfs_interface_index type;
+ /* Users counter protected by tomoyo_io_buffer_list_lock. */
+ u8 users;
+ /* List for telling GC not to kfree() elements. */
+ struct list_head list;
+};
+
/*
- * Create "allow_read/write", "allow_execute", "allow_read", "allow_write",
- * "allow_create", "allow_unlink", "allow_mkdir", "allow_rmdir",
- * "allow_mkfifo", "allow_mksock", "allow_mkblock", "allow_mkchar",
- * "allow_truncate", "allow_symlink", "allow_rewrite", "allow_rename" and
- * "allow_link" entry in domain policy.
+ * Structure for "initialize_domain"/"no_initialize_domain"/"keep_domain"/
+ * "no_keep_domain" keyword.
*/
-int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
- const bool is_delete);
-/* Create "allow_read" entry in exception policy. */
-int tomoyo_write_globally_readable_policy(char *data, const bool is_delete);
-/* Create "deny_rewrite" entry in exception policy. */
-int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete);
-/* Create "file_pattern" entry in exception policy. */
-int tomoyo_write_pattern_policy(char *data, const bool is_delete);
-/* Find a domain by the given name. */
+struct tomoyo_transition_control {
+ struct tomoyo_acl_head head;
+ u8 type; /* One of values in "enum tomoyo_transition_type". */
+ /* True if the domainname is tomoyo_get_last_name(). */
+ bool is_last_name;
+ const struct tomoyo_path_info *domainname; /* Maybe NULL */
+ const struct tomoyo_path_info *program; /* Maybe NULL */
+};
+
+/* Structure for "aggregator" keyword. */
+struct tomoyo_aggregator {
+ struct tomoyo_acl_head head;
+ const struct tomoyo_path_info *original_name;
+ const struct tomoyo_path_info *aggregated_name;
+};
+
+/* Structure for policy manager. */
+struct tomoyo_manager {
+ struct tomoyo_acl_head head;
+ /* A path to program or a domainname. */
+ const struct tomoyo_path_info *manager;
+};
+
+struct tomoyo_preference {
+ unsigned int learning_max_entry;
+ bool enforcing_verbose;
+ bool learning_verbose;
+ bool permissive_verbose;
+};
+
+/* Structure for /sys/kernel/security/tomoyo/profile interface. */
+struct tomoyo_profile {
+ const struct tomoyo_path_info *comment;
+ struct tomoyo_preference *learning;
+ struct tomoyo_preference *permissive;
+ struct tomoyo_preference *enforcing;
+ struct tomoyo_preference preference;
+ u8 default_config;
+ u8 config[TOMOYO_MAX_MAC_INDEX + TOMOYO_MAX_MAC_CATEGORY_INDEX];
+ unsigned int pref[TOMOYO_MAX_PREF];
+};
+
+/* Structure for representing YYYY/MM/DD hh:mm:ss. */
+struct tomoyo_time {
+ u16 year;
+ u8 month;
+ u8 day;
+ u8 hour;
+ u8 min;
+ u8 sec;
+};
+
+/* Structure for policy namespace. */
+struct tomoyo_policy_namespace {
+ /* Profile table. Memory is allocated as needed. */
+ struct tomoyo_profile *profile_ptr[TOMOYO_MAX_PROFILES];
+ /* List of "struct tomoyo_group". */
+ struct list_head group_list[TOMOYO_MAX_GROUP];
+ /* List of policy. */
+ struct list_head policy_list[TOMOYO_MAX_POLICY];
+ /* The global ACL referred to by the "use_group" keyword. */
+ struct list_head acl_group[TOMOYO_MAX_ACL_GROUPS];
+ /* List for connecting to tomoyo_namespace_list list. */
+ struct list_head namespace_list;
+ /* Profile version. Currently only 20110903 is defined. */
+ unsigned int profile_version;
+ /* Name of this namespace (e.g. "<kernel>", "</usr/sbin/httpd>" ). */
+ const char *name;
+};
+
+/********** Function prototypes. **********/
+
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group);
+bool tomoyo_compare_number_union(const unsigned long value,
+ const struct tomoyo_number_union *ptr);
+bool tomoyo_condition(struct tomoyo_request_info *r,
+ const struct tomoyo_condition *cond);
+bool tomoyo_correct_domain(const unsigned char *domainname);
+bool tomoyo_correct_path(const char *filename);
+bool tomoyo_correct_word(const char *string);
+bool tomoyo_domain_def(const unsigned char *buffer);
+bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r);
+bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
+ struct tomoyo_page_dump *dump);
+bool tomoyo_memory_ok(void *ptr);
+bool tomoyo_number_matches_group(const unsigned long min,
+ const unsigned long max,
+ const struct tomoyo_group *group);
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr);
+bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
+ struct tomoyo_name_union *ptr);
+bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
+ struct tomoyo_number_union *ptr);
+bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
+ const struct tomoyo_path_info *pattern);
+bool tomoyo_permstr(const char *string, const char *keyword);
+bool tomoyo_str_starts(char **src, const char *find);
+char *tomoyo_encode(const char *str);
+char *tomoyo_encode2(const char *str, int str_len);
+char *tomoyo_init_log(struct tomoyo_request_info *r, int len, const char *fmt,
+ va_list args);
+char *tomoyo_read_token(struct tomoyo_acl_param *param);
+char *tomoyo_realpath_from_path(struct path *path);
+char *tomoyo_realpath_nofollow(const char *pathname);
+const char *tomoyo_get_exe(void);
+const char *tomoyo_yesno(const unsigned int value);
+const struct tomoyo_path_info *tomoyo_compare_name_union
+(const struct tomoyo_path_info *name, const struct tomoyo_name_union *ptr);
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param);
+const struct tomoyo_path_info *tomoyo_get_name(const char *name);
+const struct tomoyo_path_info *tomoyo_path_matches_group
+(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group);
+int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
+ struct path *path, const int flag);
+void tomoyo_close_control(struct tomoyo_io_buffer *head);
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env);
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename);
+int tomoyo_find_next_domain(struct linux_binprm *bprm);
+int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
+ const u8 index);
+int tomoyo_init_request_info(struct tomoyo_request_info *r,
+ struct tomoyo_domain_info *domain,
+ const u8 index);
+int tomoyo_mkdev_perm(const u8 operation, struct path *path,
+ const unsigned int mode, unsigned int dev);
+int tomoyo_mount_permission(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags,
+ void *data_page);
+int tomoyo_open_control(const u8 type, struct file *file);
+int tomoyo_path2_perm(const u8 operation, struct path *path1,
+ struct path *path2);
+int tomoyo_path_number_perm(const u8 operation, struct path *path,
+ unsigned long number);
+int tomoyo_path_perm(const u8 operation, struct path *path,
+ const char *target);
+unsigned int tomoyo_poll_control(struct file *file, poll_table *wait);
+unsigned int tomoyo_poll_log(struct file *file, poll_table *wait);
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len);
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len);
+int tomoyo_socket_listen_permission(struct socket *sock);
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size);
+int tomoyo_supervisor(struct tomoyo_request_info *r, const char *fmt, ...)
+ __printf(2, 3);
+int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
+ struct tomoyo_acl_param *param,
+ bool (*check_duplicate)
+ (const struct tomoyo_acl_info *,
+ const struct tomoyo_acl_info *),
+ bool (*merge_duplicate)
+ (struct tomoyo_acl_info *, struct tomoyo_acl_info *,
+ const bool));
+int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
+ struct tomoyo_acl_param *param,
+ bool (*check_duplicate)
+ (const struct tomoyo_acl_head *,
+ const struct tomoyo_acl_head *));
+int tomoyo_write_aggregator(struct tomoyo_acl_param *param);
+int tomoyo_write_file(struct tomoyo_acl_param *param);
+int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type);
+int tomoyo_write_misc(struct tomoyo_acl_param *param);
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param);
+int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
+ const u8 type);
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param);
+ssize_t tomoyo_read_control(struct tomoyo_io_buffer *head, char __user *buffer,
+ const int buffer_len);
+ssize_t tomoyo_write_control(struct tomoyo_io_buffer *head,
+ const char __user *buffer, const int buffer_len);
+struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param);
+struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
+ const bool transit);
struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname);
-/* Find or create a domain by the given name. */
-struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
- domainname,
- const u8 profile);
-/* Check mode for specified functionality. */
+struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
+ const u8 idx);
+struct tomoyo_policy_namespace *tomoyo_assign_namespace
+(const char *domainname);
+struct tomoyo_profile *tomoyo_profile(const struct tomoyo_policy_namespace *ns,
+ const u8 profile);
unsigned int tomoyo_check_flags(const struct tomoyo_domain_info *domain,
const u8 index);
-/* Allocate memory for structures. */
-void *tomoyo_alloc_acl_element(const u8 acl_type);
-/* Fill in "struct tomoyo_path_info" members. */
+u8 tomoyo_parse_ulong(unsigned long *result, char **str);
+void *tomoyo_commit_ok(void *data, const unsigned int size);
+void __init tomoyo_load_builtin_policy(void);
+void __init tomoyo_mm_init(void);
+void tomoyo_check_acl(struct tomoyo_request_info *r,
+ bool (*check_entry) (struct tomoyo_request_info *,
+ const struct tomoyo_acl_info *));
+void tomoyo_check_profile(void);
+void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp);
+void tomoyo_del_condition(struct list_head *element);
void tomoyo_fill_path_info(struct tomoyo_path_info *ptr);
-/* Run policy loader when /sbin/init starts. */
+void tomoyo_get_attributes(struct tomoyo_obj_info *obj);
+void tomoyo_init_policy_namespace(struct tomoyo_policy_namespace *ns);
void tomoyo_load_policy(const char *filename);
-/* Change "struct tomoyo_domain_info"->flags. */
-void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
- const bool is_delete, const u8 flags);
+void tomoyo_normalize_line(unsigned char *buffer);
+void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register);
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr);
+void tomoyo_print_ulong(char *buffer, const int buffer_len,
+ const unsigned long value, const u8 type);
+void tomoyo_put_name_union(struct tomoyo_name_union *ptr);
+void tomoyo_put_number_union(struct tomoyo_number_union *ptr);
+void tomoyo_read_log(struct tomoyo_io_buffer *head);
+void tomoyo_update_stat(const u8 index);
+void tomoyo_warn_oom(const char *function);
+void tomoyo_write_log(struct tomoyo_request_info *r, const char *fmt, ...)
+ __printf(2, 3);
+void tomoyo_write_log2(struct tomoyo_request_info *r, int len, const char *fmt,
+ va_list args);
+
+/********** External variable definitions. **********/
+
+extern bool tomoyo_policy_loaded;
+extern const char * const tomoyo_condition_keyword
+[TOMOYO_MAX_CONDITION_KEYWORD];
+extern const char * const tomoyo_dif[TOMOYO_MAX_DOMAIN_INFO_FLAGS];
+extern const char * const tomoyo_mac_keywords[TOMOYO_MAX_MAC_INDEX
+ + TOMOYO_MAX_MAC_CATEGORY_INDEX];
+extern const char * const tomoyo_mode[TOMOYO_CONFIG_MAX_MODE];
+extern const char * const tomoyo_path_keyword[TOMOYO_MAX_PATH_OPERATION];
+extern const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX];
+extern const char * const tomoyo_socket_keyword[TOMOYO_MAX_NETWORK_OPERATION];
+extern const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX];
+extern const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION];
+extern const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION];
+extern const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION];
+extern struct list_head tomoyo_condition_list;
+extern struct list_head tomoyo_domain_list;
+extern struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+extern struct list_head tomoyo_namespace_list;
+extern struct mutex tomoyo_policy_lock;
+extern struct srcu_struct tomoyo_ss;
+extern struct tomoyo_domain_info tomoyo_kernel_domain;
+extern struct tomoyo_policy_namespace tomoyo_kernel_namespace;
+extern unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
+extern unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
+
+/********** Inlined functions. **********/
+
+/**
+ * tomoyo_read_lock - Take lock for protecting policy.
+ *
+ * Returns index number for tomoyo_read_unlock().
+ */
+static inline int tomoyo_read_lock(void)
+{
+ return srcu_read_lock(&tomoyo_ss);
+}
+
+/**
+ * tomoyo_read_unlock - Release lock for protecting policy.
+ *
+ * @idx: Index number returned by tomoyo_read_lock().
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_read_unlock(int idx)
+{
+ srcu_read_unlock(&tomoyo_ss, idx);
+}
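+
+/*
+ * Usage sketch: readers bracket policy traversal with an SRCU section,
+ * e.g.:
+ *
+ *   int idx = tomoyo_read_lock();
+ *   ... walk tomoyo_domain_list or a domain's acl_info_list ...
+ *   tomoyo_read_unlock(idx);
+ */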
+
+/**
+ * tomoyo_sys_getppid - Copy of getppid().
+ *
+ * Returns parent process's PID.
+ *
+ * Alpha does not have getppid() defined. To be able to build this module on
+ * Alpha, I have to copy getppid() from kernel/timer.c.
+ */
+static inline pid_t tomoyo_sys_getppid(void)
+{
+ pid_t pid;
+ rcu_read_lock();
+ pid = task_tgid_vnr(rcu_dereference(current->real_parent));
+ rcu_read_unlock();
+ return pid;
+}
+
+/**
+ * tomoyo_sys_getpid - Copy of getpid().
+ *
+ * Returns current thread's PID.
+ *
+ * Alpha does not have getpid() defined. To be able to build this module on
+ * Alpha, I have to copy getpid() from kernel/timer.c.
+ */
+static inline pid_t tomoyo_sys_getpid(void)
+{
+ return task_tgid_vnr(current);
+}
-/* strcmp() for "struct tomoyo_path_info" structure. */
+/**
+ * tomoyo_pathcmp - strcmp() for "struct tomoyo_path_info" structure.
+ *
+ * @a: Pointer to "struct tomoyo_path_info".
+ * @b: Pointer to "struct tomoyo_path_info".
+ *
+ * Returns true if @a != @b, false otherwise.
+ */
static inline bool tomoyo_pathcmp(const struct tomoyo_path_info *a,
const struct tomoyo_path_info *b)
{
return a->hash != b->hash || strcmp(a->name, b->name);
}
-/* Get type of an ACL entry. */
-static inline u8 tomoyo_acl_type1(struct tomoyo_acl_info *ptr)
+/**
+ * tomoyo_put_name - Drop reference on "struct tomoyo_name".
+ *
+ * @name: Pointer to "struct tomoyo_path_info". Maybe NULL.
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_put_name(const struct tomoyo_path_info *name)
{
- return ptr->type & ~TOMOYO_ACL_DELETED;
+ if (name) {
+ struct tomoyo_name *ptr =
+ container_of(name, typeof(*ptr), entry);
+ atomic_dec(&ptr->head.users);
+ }
}
-/* Get type of an ACL entry. */
-static inline u8 tomoyo_acl_type2(struct tomoyo_acl_info *ptr)
+/**
+ * tomoyo_put_condition - Drop reference on "struct tomoyo_condition".
+ *
+ * @cond: Pointer to "struct tomoyo_condition". Maybe NULL.
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_put_condition(struct tomoyo_condition *cond)
{
- return ptr->type;
+ if (cond)
+ atomic_dec(&cond->head.users);
}
/**
- * tomoyo_is_valid - Check whether the character is a valid char.
+ * tomoyo_put_group - Drop reference on "struct tomoyo_group".
*
- * @c: The character to check.
+ * @group: Pointer to "struct tomoyo_group". Maybe NULL.
*
- * Returns true if @c is a valid character, false otherwise.
+ * Returns nothing.
*/
-static inline bool tomoyo_is_valid(const unsigned char c)
+static inline void tomoyo_put_group(struct tomoyo_group *group)
{
- return c > ' ' && c < 127;
+ if (group)
+ atomic_dec(&group->head.users);
}
/**
- * tomoyo_is_invalid - Check whether the character is an invalid char.
+ * tomoyo_domain - Get "struct tomoyo_domain_info" for current thread.
*
- * @c: The character to check.
+ * Returns pointer to "struct tomoyo_domain_info" for current thread.
+ */
+static inline struct tomoyo_domain_info *tomoyo_domain(void)
+{
+ return current_cred()->security;
+}
+
+/**
+ * tomoyo_real_domain - Get "struct tomoyo_domain_info" for specified thread.
*
- * Returns true if @c is an invalid character, false otherwise.
+ * @task: Pointer to "struct task_struct".
+ *
+ * Returns pointer to "struct tomoyo_domain_info" for specified thread.
*/
-static inline bool tomoyo_is_invalid(const unsigned char c)
+static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
+ *task)
{
- return c && (c <= ' ' || c >= 127);
+ return task_cred_xxx(task, security);
}
-/* The list for "struct tomoyo_domain_info". */
-extern struct list_head tomoyo_domain_list;
-extern struct rw_semaphore tomoyo_domain_list_lock;
+/**
+ * tomoyo_same_name_union - Check for duplicated "struct tomoyo_name_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_name_union".
+ * @b: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_name_union
+(const struct tomoyo_name_union *a, const struct tomoyo_name_union *b)
+{
+ return a->filename == b->filename && a->group == b->group;
+}
+
+/**
+ * tomoyo_same_number_union - Check for duplicated "struct tomoyo_number_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_number_union".
+ * @b: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_number_union
+(const struct tomoyo_number_union *a, const struct tomoyo_number_union *b)
+{
+ return a->values[0] == b->values[0] && a->values[1] == b->values[1] &&
+ a->group == b->group && a->value_type[0] == b->value_type[0] &&
+ a->value_type[1] == b->value_type[1];
+}
-/* Lock for domain->acl_info_list. */
-extern struct rw_semaphore tomoyo_domain_acl_info_list_lock;
+/**
+ * tomoyo_same_ipaddr_union - Check for duplicated "struct tomoyo_ipaddr_union" entry.
+ *
+ * @a: Pointer to "struct tomoyo_ipaddr_union".
+ * @b: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_ipaddr_union
+(const struct tomoyo_ipaddr_union *a, const struct tomoyo_ipaddr_union *b)
+{
+ return !memcmp(a->ip, b->ip, sizeof(a->ip)) && a->group == b->group &&
+ a->is_ipv6 == b->is_ipv6;
+}
-/* Has /sbin/init started? */
-extern bool tomoyo_policy_loaded;
+/**
+ * tomoyo_current_namespace - Get "struct tomoyo_policy_namespace" for current thread.
+ *
+ * Returns pointer to "struct tomoyo_policy_namespace" for current thread.
+ */
+static inline struct tomoyo_policy_namespace *tomoyo_current_namespace(void)
+{
+ return tomoyo_domain()->ns;
+}
-/* The kernel's domain. */
-extern struct tomoyo_domain_info tomoyo_kernel_domain;
+#if defined(CONFIG_SLOB)
+
+/**
+ * tomoyo_round2 - Round up to power of 2 for calculating memory usage.
+ *
+ * @size: Size to be rounded up.
+ *
+ * Returns @size.
+ *
+ * Since SLOB does not round up, this function simply returns @size.
+ */
+static inline int tomoyo_round2(size_t size)
+{
+ return size;
+}
+
+#else
+
+/**
+ * tomoyo_round2 - Round up to power of 2 for calculating memory usage.
+ *
+ * @size: Size to be rounded up.
+ *
+ * Returns rounded size.
+ *
+ * Strictly speaking, SLAB may be able to allocate (e.g.) 96 bytes instead of
+ * (e.g.) 128 bytes.
+ */
+static inline int tomoyo_round2(size_t size)
+{
+#if PAGE_SIZE == 4096
+ size_t bsize = 32;
+#else
+ size_t bsize = 64;
+#endif
+ if (!size)
+ return 0;
+ while (size > bsize)
+ bsize <<= 1;
+ return bsize;
+}
+
+#endif
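+
+/*
+ * For example, with 4096-byte pages a 100-byte request is accounted as
+ * tomoyo_round2(100) == 128, whereas the SLOB variant above accounts the
+ * requested 100 bytes as-is.
+ */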
/**
* list_for_each_cookie - iterate over a list with cookie.
* @pos: the &struct list_head to use as a loop cursor.
- * @cookie: the &struct list_head to use as a cookie.
* @head: the head for your list.
- *
- * Same with list_for_each() except that this primitive uses @cookie
- * so that we can continue iteration.
- * @cookie must be NULL when iteration starts, and @cookie will become
- * NULL when iteration finishes.
*/
-#define list_for_each_cookie(pos, cookie, head) \
- for (({ if (!cookie) \
- cookie = head; }), \
- pos = (cookie)->next; \
- prefetch(pos->next), pos != (head) || ((cookie) = NULL); \
- (cookie) = pos, pos = pos->next)
+#define list_for_each_cookie(pos, head) \
+ if (!pos) \
+ pos = srcu_dereference((head)->next, &tomoyo_ss); \
+ for ( ; pos != (head); pos = srcu_dereference(pos->next, &tomoyo_ss))
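+
+/*
+ * Usage sketch (caller must be inside tomoyo_read_lock()): @pos doubles as
+ * the cookie, so it must be NULL before the first call and iteration can
+ * be resumed later with the same @pos:
+ *
+ *   struct list_head *pos = NULL;
+ *   list_for_each_cookie(pos, &tomoyo_domain_list) {
+ *           ...
+ *   }
+ */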
#endif /* !defined(_SECURITY_TOMOYO_COMMON_H) */
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
new file mode 100644
index 00000000000..63681e8be62
--- /dev/null
+++ b/security/tomoyo/condition.c
@@ -0,0 +1,1094 @@
+/*
+ * security/tomoyo/condition.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* List of "struct tomoyo_condition". */
+LIST_HEAD(tomoyo_condition_list);
+
+/**
+ * tomoyo_argv - Check argv[] in "struct linux_binprm".
+ *
+ * @index: Index number of @arg_ptr.
+ * @arg_ptr: Contents of argv[@index].
+ * @argc: Length of @argv.
+ * @argv: Pointer to "struct tomoyo_argv".
+ * @checked: Set to true if @argv[@index] was found.
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_argv(const unsigned int index, const char *arg_ptr,
+ const int argc, const struct tomoyo_argv *argv,
+ u8 *checked)
+{
+ int i;
+ struct tomoyo_path_info arg;
+ arg.name = arg_ptr;
+ for (i = 0; i < argc; argv++, checked++, i++) {
+ bool result;
+ if (index != argv->index)
+ continue;
+ *checked = 1;
+ tomoyo_fill_path_info(&arg);
+ result = tomoyo_path_matches_pattern(&arg, argv->value);
+ if (argv->is_not)
+ result = !result;
+ if (!result)
+ return false;
+ }
+ return true;
+}
+
+/**
+ * tomoyo_envp - Check envp[] in "struct linux_binprm".
+ *
+ * @env_name: The name of environment variable.
+ * @env_value: The value of environment variable.
+ * @envc: Length of @envp.
+ * @envp: Pointer to "struct tomoyo_envp".
+ * @checked: Set to true if @envp[@env_name] was found.
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_envp(const char *env_name, const char *env_value,
+ const int envc, const struct tomoyo_envp *envp,
+ u8 *checked)
+{
+ int i;
+ struct tomoyo_path_info name;
+ struct tomoyo_path_info value;
+ name.name = env_name;
+ tomoyo_fill_path_info(&name);
+ value.name = env_value;
+ tomoyo_fill_path_info(&value);
+ for (i = 0; i < envc; envp++, checked++, i++) {
+ bool result;
+ if (!tomoyo_path_matches_pattern(&name, envp->name))
+ continue;
+ *checked = 1;
+ if (envp->value) {
+ result = tomoyo_path_matches_pattern(&value,
+ envp->value);
+ if (envp->is_not)
+ result = !result;
+ } else {
+ result = true;
+ if (!envp->is_not)
+ result = !result;
+ }
+ if (!result)
+ return false;
+ }
+ return true;
+}
+
+/**
+ * tomoyo_scan_bprm - Scan "struct linux_binprm".
+ *
+ * @ee: Pointer to "struct tomoyo_execve".
+ * @argc: Length of @argv.
+ * @argv: Pointer to "struct tomoyo_argv".
+ * @envc: Length of @envp.
+ * @envp: Pointer to "struct tomoyo_envp".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_scan_bprm(struct tomoyo_execve *ee,
+ const u16 argc, const struct tomoyo_argv *argv,
+ const u16 envc, const struct tomoyo_envp *envp)
+{
+ struct linux_binprm *bprm = ee->bprm;
+ struct tomoyo_page_dump *dump = &ee->dump;
+ char *arg_ptr = ee->tmp;
+ int arg_len = 0;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ bool result = true;
+ u8 local_checked[32];
+ u8 *checked;
+ if (argc + envc <= sizeof(local_checked)) {
+ checked = local_checked;
+ memset(local_checked, 0, sizeof(local_checked));
+ } else {
+ checked = kzalloc(argc + envc, GFP_NOFS);
+ if (!checked)
+ return false;
+ }
+ while (argv_count || envp_count) {
+ if (!tomoyo_dump_page(bprm, pos, dump)) {
+ result = false;
+ goto out;
+ }
+ pos += PAGE_SIZE - offset;
+ while (offset < PAGE_SIZE) {
+ /* Read. */
+ const char *kaddr = dump->data;
+ const unsigned char c = kaddr[offset++];
+ if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+ if (c == '\\') {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = '\\';
+ } else if (c > ' ' && c < 127) {
+ arg_ptr[arg_len++] = c;
+ } else {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = (c >> 6) + '0';
+ arg_ptr[arg_len++] =
+ ((c >> 3) & 7) + '0';
+ arg_ptr[arg_len++] = (c & 7) + '0';
+ }
+ } else {
+ arg_ptr[arg_len] = '\0';
+ }
+ if (c)
+ continue;
+ /* Check. */
+ if (argv_count) {
+ if (!tomoyo_argv(bprm->argc - argv_count,
+ arg_ptr, argc, argv,
+ checked)) {
+ result = false;
+ break;
+ }
+ argv_count--;
+ } else if (envp_count) {
+ char *cp = strchr(arg_ptr, '=');
+ if (cp) {
+ *cp = '\0';
+ if (!tomoyo_envp(arg_ptr, cp + 1,
+ envc, envp,
+ checked + argc)) {
+ result = false;
+ break;
+ }
+ }
+ envp_count--;
+ } else {
+ break;
+ }
+ arg_len = 0;
+ }
+ offset = 0;
+ if (!result)
+ break;
+ }
+out:
+ if (result) {
+ int i;
+ /* Check not-yet-checked entries. */
+ for (i = 0; i < argc; i++) {
+ if (checked[i])
+ continue;
+ /*
+ * Return true only if all unchecked indexes in
+ * bprm->argv[] are not matched.
+ */
+ if (argv[i].is_not)
+ continue;
+ result = false;
+ break;
+ }
+ for (i = 0; i < envc; envp++, i++) {
+ if (checked[argc + i])
+ continue;
+ /*
+ * Return true only if all unchecked environ variables
+ * in bprm->envp[] are either undefined or not matched.
+ */
+ if ((!envp->value && !envp->is_not) ||
+ (envp->value && envp->is_not))
+ continue;
+ result = false;
+ break;
+ }
+ }
+ if (checked != local_checked)
+ kfree(checked);
+ return result;
+}
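+
+/*
+ * Note: tomoyo_scan_bprm() re-encodes bytes read from bprm memory before
+ * pattern matching; a backslash becomes "\\" and a non-printable byte such
+ * as a newline (0x0a) becomes its octal form "\012".
+ */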
+
+/**
+ * tomoyo_scan_exec_realpath - Check "exec.realpath" parameter of "struct tomoyo_condition".
+ *
+ * @file: Pointer to "struct file".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ * @match: True if "exec.realpath=", false if "exec.realpath!=".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_scan_exec_realpath(struct file *file,
+ const struct tomoyo_name_union *ptr,
+ const bool match)
+{
+ bool result;
+ struct tomoyo_path_info exe;
+ if (!file)
+ return false;
+ exe.name = tomoyo_realpath_from_path(&file->f_path);
+ if (!exe.name)
+ return false;
+ tomoyo_fill_path_info(&exe);
+ result = tomoyo_compare_name_union(&exe, ptr);
+ kfree(exe.name);
+ return result == match;
+}
+
+/**
+ * tomoyo_get_dqword - tomoyo_get_name() for a quoted string.
+ *
+ * @start: String to save.
+ *
+ * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
+ */
+static const struct tomoyo_path_info *tomoyo_get_dqword(char *start)
+{
+ char *cp = start + strlen(start) - 1;
+ if (cp == start || *start++ != '"' || *cp != '"')
+ return NULL;
+ *cp = '\0';
+ if (*start && !tomoyo_correct_word(start))
+ return NULL;
+ return tomoyo_get_name(start);
+}
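+
+/*
+ * For example, the pathname of an "exec.realpath" condition arrives as a
+ * double-quoted token; tomoyo_get_dqword() strips the surrounding quotes
+ * and interns the remainder via tomoyo_get_name(), returning NULL when the
+ * token is not properly quoted.
+ */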
+
+/**
+ * tomoyo_parse_name_union_quoted - Parse a quoted word.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_name_union_quoted(struct tomoyo_acl_param *param,
+ struct tomoyo_name_union *ptr)
+{
+ char *filename = param->data;
+ if (*filename == '@')
+ return tomoyo_parse_name_union(param, ptr);
+ ptr->filename = tomoyo_get_dqword(filename);
+ return ptr->filename != NULL;
+}
+
+/**
+ * tomoyo_parse_argv - Parse an argv[] condition part.
+ *
+ * @left: Lefthand value.
+ * @right: Righthand value.
+ * @argv: Pointer to "struct tomoyo_argv".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_argv(char *left, char *right,
+ struct tomoyo_argv *argv)
+{
+ if (tomoyo_parse_ulong(&argv->index, &left) !=
+ TOMOYO_VALUE_TYPE_DECIMAL || *left++ != ']' || *left)
+ return false;
+ argv->value = tomoyo_get_dqword(right);
+ return argv->value != NULL;
+}
+
+/**
+ * tomoyo_parse_envp - Parse an envp[] condition part.
+ *
+ * @left: Lefthand value.
+ * @right: Righthand value.
+ * @envp: Pointer to "struct tomoyo_envp".
+ *
+ * Returns true on success, false otherwise.
+ */
+static bool tomoyo_parse_envp(char *left, char *right,
+ struct tomoyo_envp *envp)
+{
+ const struct tomoyo_path_info *name;
+ const struct tomoyo_path_info *value;
+ char *cp = left + strlen(left) - 1;
+ if (*cp-- != ']' || *cp != '"')
+ goto out;
+ *cp = '\0';
+ if (!tomoyo_correct_word(left))
+ goto out;
+ name = tomoyo_get_name(left);
+ if (!name)
+ goto out;
+ if (!strcmp(right, "NULL")) {
+ value = NULL;
+ } else {
+ value = tomoyo_get_dqword(right);
+ if (!value) {
+ tomoyo_put_name(name);
+ goto out;
+ }
+ }
+ envp->name = name;
+ envp->value = value;
+ return true;
+out:
+ return false;
+}
+
+/**
+ * tomoyo_same_condition - Check for duplicated "struct tomoyo_condition" entry.
+ *
+ * @a: Pointer to "struct tomoyo_condition".
+ * @b: Pointer to "struct tomoyo_condition".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static inline bool tomoyo_same_condition(const struct tomoyo_condition *a,
+ const struct tomoyo_condition *b)
+{
+ return a->size == b->size && a->condc == b->condc &&
+ a->numbers_count == b->numbers_count &&
+ a->names_count == b->names_count &&
+ a->argc == b->argc && a->envc == b->envc &&
+ a->grant_log == b->grant_log && a->transit == b->transit &&
+ !memcmp(a + 1, b + 1, a->size - sizeof(*a));
+}
+
+/**
+ * tomoyo_condition_type - Get condition type.
+ *
+ * @word: Keyword string.
+ *
+ * Returns one of values in "enum tomoyo_conditions_index" on success,
+ * TOMOYO_MAX_CONDITION_KEYWORD otherwise.
+ */
+static u8 tomoyo_condition_type(const char *word)
+{
+ u8 i;
+ for (i = 0; i < TOMOYO_MAX_CONDITION_KEYWORD; i++) {
+ if (!strcmp(word, tomoyo_condition_keyword[i]))
+ break;
+ }
+ return i;
+}
+
+/* Define this to enable debug mode. */
+/* #define DEBUG_CONDITION */
+
+#ifdef DEBUG_CONDITION
+#define dprintk printk
+#else
+#define dprintk(...) do { } while (0)
+#endif
+
+/**
+ * tomoyo_commit_condition - Commit "struct tomoyo_condition".
+ *
+ * @entry: Pointer to "struct tomoyo_condition".
+ *
+ * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise.
+ *
+ * This function merges duplicated entries. This function returns NULL if
+ * @entry is not duplicated but memory quota for policy has exceeded.
+ */
+static struct tomoyo_condition *tomoyo_commit_condition
+(struct tomoyo_condition *entry)
+{
+ struct tomoyo_condition *ptr;
+ bool found = false;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock)) {
+ dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__);
+ ptr = NULL;
+ found = true;
+ goto out;
+ }
+ list_for_each_entry(ptr, &tomoyo_condition_list, head.list) {
+ if (!tomoyo_same_condition(ptr, entry) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ /* Same entry found. Share this entry. */
+ atomic_inc(&ptr->head.users);
+ found = true;
+ break;
+ }
+ if (!found) {
+ if (tomoyo_memory_ok(entry)) {
+ atomic_set(&entry->head.users, 1);
+ list_add(&entry->head.list, &tomoyo_condition_list);
+ } else {
+ found = true;
+ ptr = NULL;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ if (found) {
+ tomoyo_del_condition(&entry->head.list);
+ kfree(entry);
+ entry = ptr;
+ }
+ return entry;
+}
+
+/**
+ * tomoyo_get_transit_preference - Parse domain transition preference for execve().
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @e: Pointer to "struct tomoyo_condition".
+ *
+ * Returns the condition string part.
+ */
+static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param,
+ struct tomoyo_condition *e)
+{
+ char * const pos = param->data;
+ bool flag;
+ if (*pos == '<') {
+ e->transit = tomoyo_get_domainname(param);
+ goto done;
+ }
+ {
+ char *cp = strchr(pos, ' ');
+ if (cp)
+ *cp = '\0';
+ flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") ||
+ !strcmp(pos, "initialize") || !strcmp(pos, "reset") ||
+ !strcmp(pos, "child") || !strcmp(pos, "parent");
+ if (cp)
+ *cp = ' ';
+ }
+ if (!flag)
+ return pos;
+ e->transit = tomoyo_get_name(tomoyo_read_token(param));
+done:
+ if (e->transit)
+ return param->data;
+ /*
+ * Return a bad read-only condition string that will let
+ * tomoyo_get_condition() return NULL.
+ */
+ return "/";
+}
+
+/**
+ * tomoyo_get_condition - Parse condition part.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise.
+ */
+struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_condition *entry = NULL;
+ struct tomoyo_condition_element *condp = NULL;
+ struct tomoyo_number_union *numbers_p = NULL;
+ struct tomoyo_name_union *names_p = NULL;
+ struct tomoyo_argv *argv = NULL;
+ struct tomoyo_envp *envp = NULL;
+ struct tomoyo_condition e = { };
+ char * const start_of_string =
+ tomoyo_get_transit_preference(param, &e);
+ char * const end_of_string = start_of_string + strlen(start_of_string);
+ char *pos;
+rerun:
+ pos = start_of_string;
+ while (1) {
+ u8 left = -1;
+ u8 right = -1;
+ char *left_word = pos;
+ char *cp;
+ char *right_word;
+ bool is_not;
+ if (!*left_word)
+ break;
+ /*
+ * Since left-hand condition does not allow use of "path_group"
+ * or "number_group" and environment variable's names do not
+ * accept '=', it is guaranteed that the original line consists
+ * of one or more repetition of $left$operator$right blocks
+ * where "$left is free from '=' and ' '" and "$operator is
+ * either '=' or '!='" and "$right is free from ' '".
+ * Therefore, we can reconstruct the original line at the end
+ * of dry run even if we overwrite $operator with '\0'.
+ */
+ cp = strchr(pos, ' ');
+ if (cp) {
+ *cp = '\0'; /* Will restore later. */
+ pos = cp + 1;
+ } else {
+ pos = "";
+ }
+ right_word = strchr(left_word, '=');
+ if (!right_word || right_word == left_word)
+ goto out;
+ is_not = *(right_word - 1) == '!';
+ if (is_not)
+ *(right_word++ - 1) = '\0'; /* Will restore later. */
+ else if (*(right_word + 1) != '=')
+ *right_word++ = '\0'; /* Will restore later. */
+ else
+ goto out;
+ dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word,
+ is_not ? "!" : "", right_word);
+ if (!strcmp(left_word, "grant_log")) {
+ if (entry) {
+ if (is_not ||
+ entry->grant_log != TOMOYO_GRANTLOG_AUTO)
+ goto out;
+ else if (!strcmp(right_word, "yes"))
+ entry->grant_log = TOMOYO_GRANTLOG_YES;
+ else if (!strcmp(right_word, "no"))
+ entry->grant_log = TOMOYO_GRANTLOG_NO;
+ else
+ goto out;
+ }
+ continue;
+ }
+ if (!strncmp(left_word, "exec.argv[", 10)) {
+ if (!argv) {
+ e.argc++;
+ e.condc++;
+ } else {
+ e.argc--;
+ e.condc--;
+ left = TOMOYO_ARGV_ENTRY;
+ argv->is_not = is_not;
+ if (!tomoyo_parse_argv(left_word + 10,
+ right_word, argv++))
+ goto out;
+ }
+ goto store_value;
+ }
+ if (!strncmp(left_word, "exec.envp[\"", 11)) {
+ if (!envp) {
+ e.envc++;
+ e.condc++;
+ } else {
+ e.envc--;
+ e.condc--;
+ left = TOMOYO_ENVP_ENTRY;
+ envp->is_not = is_not;
+ if (!tomoyo_parse_envp(left_word + 11,
+ right_word, envp++))
+ goto out;
+ }
+ goto store_value;
+ }
+ left = tomoyo_condition_type(left_word);
+ dprintk(KERN_WARNING "%u: <%s> left=%u\n", __LINE__, left_word,
+ left);
+ if (left == TOMOYO_MAX_CONDITION_KEYWORD) {
+ if (!numbers_p) {
+ e.numbers_count++;
+ } else {
+ e.numbers_count--;
+ left = TOMOYO_NUMBER_UNION;
+ param->data = left_word;
+ if (*left_word == '@' ||
+ !tomoyo_parse_number_union(param,
+ numbers_p++))
+ goto out;
+ }
+ }
+ if (!condp)
+ e.condc++;
+ else
+ e.condc--;
+ if (left == TOMOYO_EXEC_REALPATH ||
+ left == TOMOYO_SYMLINK_TARGET) {
+ if (!names_p) {
+ e.names_count++;
+ } else {
+ e.names_count--;
+ right = TOMOYO_NAME_UNION;
+ param->data = right_word;
+ if (!tomoyo_parse_name_union_quoted(param,
+ names_p++))
+ goto out;
+ }
+ goto store_value;
+ }
+ right = tomoyo_condition_type(right_word);
+ if (right == TOMOYO_MAX_CONDITION_KEYWORD) {
+ if (!numbers_p) {
+ e.numbers_count++;
+ } else {
+ e.numbers_count--;
+ right = TOMOYO_NUMBER_UNION;
+ param->data = right_word;
+ if (!tomoyo_parse_number_union(param,
+ numbers_p++))
+ goto out;
+ }
+ }
+store_value:
+ if (!condp) {
+ dprintk(KERN_WARNING "%u: dry_run left=%u right=%u "
+ "match=%u\n", __LINE__, left, right, !is_not);
+ continue;
+ }
+ condp->left = left;
+ condp->right = right;
+ condp->equals = !is_not;
+ dprintk(KERN_WARNING "%u: left=%u right=%u match=%u\n",
+ __LINE__, condp->left, condp->right,
+ condp->equals);
+ condp++;
+ }
+ dprintk(KERN_INFO "%u: cond=%u numbers=%u names=%u ac=%u ec=%u\n",
+ __LINE__, e.condc, e.numbers_count, e.names_count, e.argc,
+ e.envc);
+ if (entry) {
+ BUG_ON(e.names_count | e.numbers_count | e.argc | e.envc |
+ e.condc);
+ return tomoyo_commit_condition(entry);
+ }
+ e.size = sizeof(*entry)
+ + e.condc * sizeof(struct tomoyo_condition_element)
+ + e.numbers_count * sizeof(struct tomoyo_number_union)
+ + e.names_count * sizeof(struct tomoyo_name_union)
+ + e.argc * sizeof(struct tomoyo_argv)
+ + e.envc * sizeof(struct tomoyo_envp);
+ entry = kzalloc(e.size, GFP_NOFS);
+ if (!entry)
+ goto out2;
+ *entry = e;
+ e.transit = NULL;
+ condp = (struct tomoyo_condition_element *) (entry + 1);
+ numbers_p = (struct tomoyo_number_union *) (condp + e.condc);
+ names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count);
+ argv = (struct tomoyo_argv *) (names_p + e.names_count);
+ envp = (struct tomoyo_envp *) (argv + e.argc);
+ {
+ bool flag = false;
+ for (pos = start_of_string; pos < end_of_string; pos++) {
+ if (*pos)
+ continue;
+ if (flag) /* Restore " ". */
+ *pos = ' ';
+ else if (*(pos + 1) == '=') /* Restore "!=". */
+ *pos = '!';
+ else /* Restore "=". */
+ *pos = '=';
+ flag = !flag;
+ }
+ }
+ goto rerun;
+out:
+ dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__);
+ if (entry) {
+ tomoyo_del_condition(&entry->head.list);
+ kfree(entry);
+ }
+out2:
+ tomoyo_put_name(e.transit);
+ return NULL;
+}
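+
+/*
+ * Syntax sketch (assuming the usual "task.uid"/"task.euid" keyword
+ * spelling): a condition part such as
+ *   "task.uid=0 task.euid=0 grant_log=yes"
+ * yields a "struct tomoyo_condition" with condc == 2 and
+ * numbers_count == 2, and with grant_log set to TOMOYO_GRANTLOG_YES.
+ */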
+
+/**
+ * tomoyo_get_attributes - Revalidate "struct inode".
+ *
+ * @obj: Pointer to "struct tomoyo_obj_info".
+ *
+ * Returns nothing.
+ */
+void tomoyo_get_attributes(struct tomoyo_obj_info *obj)
+{
+ u8 i;
+ struct dentry *dentry = NULL;
+
+ for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) {
+ struct inode *inode;
+ switch (i) {
+ case TOMOYO_PATH1:
+ dentry = obj->path1.dentry;
+ if (!dentry)
+ continue;
+ break;
+ case TOMOYO_PATH2:
+ dentry = obj->path2.dentry;
+ if (!dentry)
+ continue;
+ break;
+ default:
+ if (!dentry)
+ continue;
+ dentry = dget_parent(dentry);
+ break;
+ }
+ inode = dentry->d_inode;
+ if (inode) {
+ struct tomoyo_mini_stat *stat = &obj->stat[i];
+ stat->uid = inode->i_uid;
+ stat->gid = inode->i_gid;
+ stat->ino = inode->i_ino;
+ stat->mode = inode->i_mode;
+ stat->dev = inode->i_sb->s_dev;
+ stat->rdev = inode->i_rdev;
+ obj->stat_valid[i] = true;
+ }
+ if (i & 1) /* i == TOMOYO_PATH1_PARENT ||
+ i == TOMOYO_PATH2_PARENT */
+ dput(dentry);
+ }
+}
+
+/**
+ * tomoyo_condition - Check condition part.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @cond: Pointer to "struct tomoyo_condition". Maybe NULL.
+ *
+ * Returns true on success, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_condition(struct tomoyo_request_info *r,
+ const struct tomoyo_condition *cond)
+{
+ u32 i;
+ unsigned long min_v[2] = { 0, 0 };
+ unsigned long max_v[2] = { 0, 0 };
+ const struct tomoyo_condition_element *condp;
+ const struct tomoyo_number_union *numbers_p;
+ const struct tomoyo_name_union *names_p;
+ const struct tomoyo_argv *argv;
+ const struct tomoyo_envp *envp;
+ struct tomoyo_obj_info *obj;
+ u16 condc;
+ u16 argc;
+ u16 envc;
+ struct linux_binprm *bprm = NULL;
+ if (!cond)
+ return true;
+ condc = cond->condc;
+ argc = cond->argc;
+ envc = cond->envc;
+ obj = r->obj;
+ if (r->ee)
+ bprm = r->ee->bprm;
+ if (!bprm && (argc || envc))
+ return false;
+ condp = (struct tomoyo_condition_element *) (cond + 1);
+ numbers_p = (const struct tomoyo_number_union *) (condp + condc);
+ names_p = (const struct tomoyo_name_union *)
+ (numbers_p + cond->numbers_count);
+ argv = (const struct tomoyo_argv *) (names_p + cond->names_count);
+ envp = (const struct tomoyo_envp *) (argv + argc);
+ for (i = 0; i < condc; i++) {
+ const bool match = condp->equals;
+ const u8 left = condp->left;
+ const u8 right = condp->right;
+ bool is_bitop[2] = { false, false };
+ u8 j;
+ condp++;
+ /* Check argv[] and envp[] later. */
+ if (left == TOMOYO_ARGV_ENTRY || left == TOMOYO_ENVP_ENTRY)
+ continue;
+ /* Check string expressions. */
+ if (right == TOMOYO_NAME_UNION) {
+ const struct tomoyo_name_union *ptr = names_p++;
+ switch (left) {
+ struct tomoyo_path_info *symlink;
+ struct tomoyo_execve *ee;
+ struct file *file;
+ case TOMOYO_SYMLINK_TARGET:
+ symlink = obj ? obj->symlink_target : NULL;
+ if (!symlink ||
+ !tomoyo_compare_name_union(symlink, ptr)
+ == match)
+ goto out;
+ break;
+ case TOMOYO_EXEC_REALPATH:
+ ee = r->ee;
+ file = ee ? ee->bprm->file : NULL;
+ if (!tomoyo_scan_exec_realpath(file, ptr,
+ match))
+ goto out;
+ break;
+ }
+ continue;
+ }
+ /* Check numeric or bit-op expressions. */
+ for (j = 0; j < 2; j++) {
+ const u8 index = j ? right : left;
+ unsigned long value = 0;
+ switch (index) {
+ case TOMOYO_TASK_UID:
+ value = from_kuid(&init_user_ns, current_uid());
+ break;
+ case TOMOYO_TASK_EUID:
+ value = from_kuid(&init_user_ns, current_euid());
+ break;
+ case TOMOYO_TASK_SUID:
+ value = from_kuid(&init_user_ns, current_suid());
+ break;
+ case TOMOYO_TASK_FSUID:
+ value = from_kuid(&init_user_ns, current_fsuid());
+ break;
+ case TOMOYO_TASK_GID:
+ value = from_kgid(&init_user_ns, current_gid());
+ break;
+ case TOMOYO_TASK_EGID:
+ value = from_kgid(&init_user_ns, current_egid());
+ break;
+ case TOMOYO_TASK_SGID:
+ value = from_kgid(&init_user_ns, current_sgid());
+ break;
+ case TOMOYO_TASK_FSGID:
+ value = from_kgid(&init_user_ns, current_fsgid());
+ break;
+ case TOMOYO_TASK_PID:
+ value = tomoyo_sys_getpid();
+ break;
+ case TOMOYO_TASK_PPID:
+ value = tomoyo_sys_getppid();
+ break;
+ case TOMOYO_TYPE_IS_SOCKET:
+ value = S_IFSOCK;
+ break;
+ case TOMOYO_TYPE_IS_SYMLINK:
+ value = S_IFLNK;
+ break;
+ case TOMOYO_TYPE_IS_FILE:
+ value = S_IFREG;
+ break;
+ case TOMOYO_TYPE_IS_BLOCK_DEV:
+ value = S_IFBLK;
+ break;
+ case TOMOYO_TYPE_IS_DIRECTORY:
+ value = S_IFDIR;
+ break;
+ case TOMOYO_TYPE_IS_CHAR_DEV:
+ value = S_IFCHR;
+ break;
+ case TOMOYO_TYPE_IS_FIFO:
+ value = S_IFIFO;
+ break;
+ case TOMOYO_MODE_SETUID:
+ value = S_ISUID;
+ break;
+ case TOMOYO_MODE_SETGID:
+ value = S_ISGID;
+ break;
+ case TOMOYO_MODE_STICKY:
+ value = S_ISVTX;
+ break;
+ case TOMOYO_MODE_OWNER_READ:
+ value = S_IRUSR;
+ break;
+ case TOMOYO_MODE_OWNER_WRITE:
+ value = S_IWUSR;
+ break;
+ case TOMOYO_MODE_OWNER_EXECUTE:
+ value = S_IXUSR;
+ break;
+ case TOMOYO_MODE_GROUP_READ:
+ value = S_IRGRP;
+ break;
+ case TOMOYO_MODE_GROUP_WRITE:
+ value = S_IWGRP;
+ break;
+ case TOMOYO_MODE_GROUP_EXECUTE:
+ value = S_IXGRP;
+ break;
+ case TOMOYO_MODE_OTHERS_READ:
+ value = S_IROTH;
+ break;
+ case TOMOYO_MODE_OTHERS_WRITE:
+ value = S_IWOTH;
+ break;
+ case TOMOYO_MODE_OTHERS_EXECUTE:
+ value = S_IXOTH;
+ break;
+ case TOMOYO_EXEC_ARGC:
+ if (!bprm)
+ goto out;
+ value = bprm->argc;
+ break;
+ case TOMOYO_EXEC_ENVC:
+ if (!bprm)
+ goto out;
+ value = bprm->envc;
+ break;
+ case TOMOYO_NUMBER_UNION:
+ /* Fetch values later. */
+ break;
+ default:
+ if (!obj)
+ goto out;
+ if (!obj->validate_done) {
+ tomoyo_get_attributes(obj);
+ obj->validate_done = true;
+ }
+ {
+ u8 stat_index;
+ struct tomoyo_mini_stat *stat;
+ switch (index) {
+ case TOMOYO_PATH1_UID:
+ case TOMOYO_PATH1_GID:
+ case TOMOYO_PATH1_INO:
+ case TOMOYO_PATH1_MAJOR:
+ case TOMOYO_PATH1_MINOR:
+ case TOMOYO_PATH1_TYPE:
+ case TOMOYO_PATH1_DEV_MAJOR:
+ case TOMOYO_PATH1_DEV_MINOR:
+ case TOMOYO_PATH1_PERM:
+ stat_index = TOMOYO_PATH1;
+ break;
+ case TOMOYO_PATH2_UID:
+ case TOMOYO_PATH2_GID:
+ case TOMOYO_PATH2_INO:
+ case TOMOYO_PATH2_MAJOR:
+ case TOMOYO_PATH2_MINOR:
+ case TOMOYO_PATH2_TYPE:
+ case TOMOYO_PATH2_DEV_MAJOR:
+ case TOMOYO_PATH2_DEV_MINOR:
+ case TOMOYO_PATH2_PERM:
+ stat_index = TOMOYO_PATH2;
+ break;
+ case TOMOYO_PATH1_PARENT_UID:
+ case TOMOYO_PATH1_PARENT_GID:
+ case TOMOYO_PATH1_PARENT_INO:
+ case TOMOYO_PATH1_PARENT_PERM:
+ stat_index =
+ TOMOYO_PATH1_PARENT;
+ break;
+ case TOMOYO_PATH2_PARENT_UID:
+ case TOMOYO_PATH2_PARENT_GID:
+ case TOMOYO_PATH2_PARENT_INO:
+ case TOMOYO_PATH2_PARENT_PERM:
+ stat_index =
+ TOMOYO_PATH2_PARENT;
+ break;
+ default:
+ goto out;
+ }
+ if (!obj->stat_valid[stat_index])
+ goto out;
+ stat = &obj->stat[stat_index];
+ switch (index) {
+ case TOMOYO_PATH1_UID:
+ case TOMOYO_PATH2_UID:
+ case TOMOYO_PATH1_PARENT_UID:
+ case TOMOYO_PATH2_PARENT_UID:
+ value = from_kuid(&init_user_ns, stat->uid);
+ break;
+ case TOMOYO_PATH1_GID:
+ case TOMOYO_PATH2_GID:
+ case TOMOYO_PATH1_PARENT_GID:
+ case TOMOYO_PATH2_PARENT_GID:
+ value = from_kgid(&init_user_ns, stat->gid);
+ break;
+ case TOMOYO_PATH1_INO:
+ case TOMOYO_PATH2_INO:
+ case TOMOYO_PATH1_PARENT_INO:
+ case TOMOYO_PATH2_PARENT_INO:
+ value = stat->ino;
+ break;
+ case TOMOYO_PATH1_MAJOR:
+ case TOMOYO_PATH2_MAJOR:
+ value = MAJOR(stat->dev);
+ break;
+ case TOMOYO_PATH1_MINOR:
+ case TOMOYO_PATH2_MINOR:
+ value = MINOR(stat->dev);
+ break;
+ case TOMOYO_PATH1_TYPE:
+ case TOMOYO_PATH2_TYPE:
+ value = stat->mode & S_IFMT;
+ break;
+ case TOMOYO_PATH1_DEV_MAJOR:
+ case TOMOYO_PATH2_DEV_MAJOR:
+ value = MAJOR(stat->rdev);
+ break;
+ case TOMOYO_PATH1_DEV_MINOR:
+ case TOMOYO_PATH2_DEV_MINOR:
+ value = MINOR(stat->rdev);
+ break;
+ case TOMOYO_PATH1_PERM:
+ case TOMOYO_PATH2_PERM:
+ case TOMOYO_PATH1_PARENT_PERM:
+ case TOMOYO_PATH2_PARENT_PERM:
+ value = stat->mode & S_IALLUGO;
+ break;
+ }
+ }
+ break;
+ }
+ max_v[j] = value;
+ min_v[j] = value;
+ switch (index) {
+ case TOMOYO_MODE_SETUID:
+ case TOMOYO_MODE_SETGID:
+ case TOMOYO_MODE_STICKY:
+ case TOMOYO_MODE_OWNER_READ:
+ case TOMOYO_MODE_OWNER_WRITE:
+ case TOMOYO_MODE_OWNER_EXECUTE:
+ case TOMOYO_MODE_GROUP_READ:
+ case TOMOYO_MODE_GROUP_WRITE:
+ case TOMOYO_MODE_GROUP_EXECUTE:
+ case TOMOYO_MODE_OTHERS_READ:
+ case TOMOYO_MODE_OTHERS_WRITE:
+ case TOMOYO_MODE_OTHERS_EXECUTE:
+ is_bitop[j] = true;
+ }
+ }
+ if (left == TOMOYO_NUMBER_UNION) {
+ /* Fetch values now. */
+ const struct tomoyo_number_union *ptr = numbers_p++;
+ min_v[0] = ptr->values[0];
+ max_v[0] = ptr->values[1];
+ }
+ if (right == TOMOYO_NUMBER_UNION) {
+ /* Fetch values now. */
+ const struct tomoyo_number_union *ptr = numbers_p++;
+ if (ptr->group) {
+ if (tomoyo_number_matches_group(min_v[0],
+ max_v[0],
+ ptr->group)
+ == match)
+ continue;
+ } else {
+ if ((min_v[0] <= ptr->values[1] &&
+ max_v[0] >= ptr->values[0]) == match)
+ continue;
+ }
+ goto out;
+ }
+ /*
+ * Bit operation is valid only when counterpart value
+ * represents permission.
+ */
+ if (is_bitop[0] && is_bitop[1]) {
+ goto out;
+ } else if (is_bitop[0]) {
+ switch (right) {
+ case TOMOYO_PATH1_PERM:
+ case TOMOYO_PATH1_PARENT_PERM:
+ case TOMOYO_PATH2_PERM:
+ case TOMOYO_PATH2_PARENT_PERM:
+ if (!(max_v[0] & max_v[1]) == !match)
+ continue;
+ }
+ goto out;
+ } else if (is_bitop[1]) {
+ switch (left) {
+ case TOMOYO_PATH1_PERM:
+ case TOMOYO_PATH1_PARENT_PERM:
+ case TOMOYO_PATH2_PERM:
+ case TOMOYO_PATH2_PARENT_PERM:
+ if (!(max_v[0] & max_v[1]) == !match)
+ continue;
+ }
+ goto out;
+ }
+ /* Normal value range comparison. */
+ if ((min_v[0] <= max_v[1] && max_v[0] >= min_v[1]) == match)
+ continue;
+out:
+ return false;
+ }
+ /* Check argv[] and envp[] now. */
+ if (r->ee && (argc || envc))
+ return tomoyo_scan_bprm(r->ee, argc, argv, envc, envp);
+ return true;
+}
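
The condition-matching code above ends with two different comparisons: a plain inclusive range overlap for numeric values, and a bit-AND test when one operand denotes permission bits (the is_bitop case). The standalone sketch below restates just those two checks in userspace C; ranges_overlap() and perm_bits_match() are illustrative names, not TOMOYO functions.

/* Minimal userspace sketch of the two comparison modes used above. */
#include <stdbool.h>
#include <stdio.h>

static bool ranges_overlap(unsigned long min1, unsigned long max1,
                           unsigned long min2, unsigned long max2)
{
	/* Two inclusive ranges match when they overlap. */
	return min1 <= max2 && max1 >= min2;
}

static bool perm_bits_match(unsigned long mode_bit, unsigned long perm)
{
	/* Bit operation: the mode bit must be set in the permission value. */
	return (perm & mode_bit) != 0;
}

int main(void)
{
	/* "path1.uid=0-100" style check: request uid 42 against range 0-100. */
	printf("range: %d\n", ranges_overlap(42, 42, 0, 100));
	/* "owner_read against mode 0644" style bit check. */
	printf("bitop: %d\n", perm_bits_match(0400, 0644));
	return 0;
}
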
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index fcf52accce2..38651454ed0 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -1,786 +1,667 @@
/*
* security/tomoyo/domain.c
*
- * Implementation of the Domain-Based Mandatory Access Control.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
#include <linux/binfmts.h>
+#include <linux/slab.h>
/* Variables definitions.*/
/* The initial domain. */
struct tomoyo_domain_info tomoyo_kernel_domain;
-/*
- * tomoyo_domain_list is used for holding list of domains.
- * The ->acl_info_list of "struct tomoyo_domain_info" is used for holding
- * permissions (e.g. "allow_read /lib/libc-2.5.so") given to each domain.
- *
- * An entry is added by
- *
- * # ( echo "<kernel>"; echo "allow_execute /sbin/init" ) > \
- * /sys/kernel/security/tomoyo/domain_policy
- *
- * and is deleted by
- *
- * # ( echo "<kernel>"; echo "delete allow_execute /sbin/init" ) > \
- * /sys/kernel/security/tomoyo/domain_policy
- *
- * and all entries are retrieved by
- *
- * # cat /sys/kernel/security/tomoyo/domain_policy
- *
- * A domain is added by
- *
- * # echo "<kernel>" > /sys/kernel/security/tomoyo/domain_policy
- *
- * and is deleted by
- *
- * # echo "delete <kernel>" > /sys/kernel/security/tomoyo/domain_policy
+/**
+ * tomoyo_update_policy - Update an entry for exception policy.
*
- * and all domains are retrieved by
+ * @new_entry: Pointer to "struct tomoyo_acl_info".
+ * @size: Size of @new_entry in bytes.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @check_duplicate: Callback function to find duplicated entry.
*
- * # grep '^<kernel>' /sys/kernel/security/tomoyo/domain_policy
+ * Returns 0 on success, negative value otherwise.
*
- * Normally, a domainname is monotonically getting longer because a domainname
- * which the process will belong to if an execve() operation succeeds is
- * defined as a concatenation of "current domainname" + "pathname passed to
- * execve()".
- * See tomoyo_domain_initializer_list and tomoyo_domain_keeper_list for
- * exceptions.
- */
-LIST_HEAD(tomoyo_domain_list);
-DECLARE_RWSEM(tomoyo_domain_list_lock);
-
-/*
- * tomoyo_domain_initializer_entry is a structure which is used for holding
- * "initialize_domain" and "no_initialize_domain" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_domain_initializer_list .
- * (2) "domainname" which is "a domainname" or "the last component of a
- * domainname". This field is NULL if "from" clause is not specified.
- * (3) "program" which is a program's pathname.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- * (5) "is_not" is a bool which is true if "no_initialize_domain", false
- * otherwise.
- * (6) "is_last_name" is a bool which is true if "domainname" is "the last
- * component of a domainname", false otherwise.
+ * Caller holds tomoyo_read_lock().
*/
-struct tomoyo_domain_initializer_entry {
- struct list_head list;
- const struct tomoyo_path_info *domainname; /* This may be NULL */
- const struct tomoyo_path_info *program;
- bool is_deleted;
- bool is_not; /* True if this entry is "no_initialize_domain". */
- /* True if the domainname is tomoyo_get_last_name(). */
- bool is_last_name;
-};
-
-/*
- * tomoyo_domain_keeper_entry is a structure which is used for holding
- * "keep_domain" and "no_keep_domain" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_domain_keeper_list .
- * (2) "domainname" which is "a domainname" or "the last component of a
- * domainname".
- * (3) "program" which is a program's pathname.
- * This field is NULL if "from" clause is not specified.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- * (5) "is_not" is a bool which is true if "no_initialize_domain", false
- * otherwise.
- * (6) "is_last_name" is a bool which is true if "domainname" is "the last
- * component of a domainname", false otherwise.
- */
-struct tomoyo_domain_keeper_entry {
- struct list_head list;
- const struct tomoyo_path_info *domainname;
- const struct tomoyo_path_info *program; /* This may be NULL */
- bool is_deleted;
- bool is_not; /* True if this entry is "no_keep_domain". */
- /* True if the domainname is tomoyo_get_last_name(). */
- bool is_last_name;
-};
+int tomoyo_update_policy(struct tomoyo_acl_head *new_entry, const int size,
+ struct tomoyo_acl_param *param,
+ bool (*check_duplicate) (const struct tomoyo_acl_head
+ *,
+ const struct tomoyo_acl_head
+ *))
+{
+ int error = param->is_delete ? -ENOENT : -ENOMEM;
+ struct tomoyo_acl_head *entry;
+ struct list_head *list = param->list;
-/*
- * tomoyo_alias_entry is a structure which is used for holding "alias" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_alias_list .
- * (2) "original_name" which is a dereferenced pathname.
- * (3) "aliased_name" which is a symlink's pathname.
- * (4) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
- */
-struct tomoyo_alias_entry {
- struct list_head list;
- const struct tomoyo_path_info *original_name;
- const struct tomoyo_path_info *aliased_name;
- bool is_deleted;
-};
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return -ENOMEM;
+ list_for_each_entry_rcu(entry, list, list) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ if (!check_duplicate(entry, new_entry))
+ continue;
+ entry->is_deleted = param->is_delete;
+ error = 0;
+ break;
+ }
+ if (error && !param->is_delete) {
+ entry = tomoyo_commit_ok(new_entry, size);
+ if (entry) {
+ list_add_tail_rcu(&entry->list, list);
+ error = 0;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+ return error;
+}
/**
- * tomoyo_set_domain_flag - Set or clear domain's attribute flags.
+ * tomoyo_same_acl_head - Check for duplicated "struct tomoyo_acl_info" entry.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @is_delete: True if it is a delete request.
- * @flags: Flags to set or clear.
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
*
- * Returns nothing.
+ * Returns true if @a == @b, false otherwise.
*/
-void tomoyo_set_domain_flag(struct tomoyo_domain_info *domain,
- const bool is_delete, const u8 flags)
+static inline bool tomoyo_same_acl_head(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
{
- /* We need to serialize because this is bitfield operation. */
- static DEFINE_SPINLOCK(lock);
- spin_lock(&lock);
- if (!is_delete)
- domain->flags |= flags;
- else
- domain->flags &= ~flags;
- spin_unlock(&lock);
+ return a->type == b->type && a->cond == b->cond;
}
/**
- * tomoyo_get_last_name - Get last component of a domainname.
+ * tomoyo_update_domain - Update an entry for domain policy.
+ *
+ * @new_entry: Pointer to "struct tomoyo_acl_info".
+ * @size: Size of @new_entry in bytes.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @check_duplicate: Callback function to find duplicated entry.
+ * @merge_duplicate: Callback function to merge duplicated entry.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
+ * Returns 0 on success, negative value otherwise.
*
- * Returns the last component of the domainname.
+ * Caller holds tomoyo_read_lock().
*/
-const char *tomoyo_get_last_name(const struct tomoyo_domain_info *domain)
+int tomoyo_update_domain(struct tomoyo_acl_info *new_entry, const int size,
+ struct tomoyo_acl_param *param,
+ bool (*check_duplicate) (const struct tomoyo_acl_info
+ *,
+ const struct tomoyo_acl_info
+ *),
+ bool (*merge_duplicate) (struct tomoyo_acl_info *,
+ struct tomoyo_acl_info *,
+ const bool))
{
- const char *cp0 = domain->domainname->name;
- const char *cp1 = strrchr(cp0, ' ');
-
- if (cp1)
- return cp1 + 1;
- return cp0;
+ const bool is_delete = param->is_delete;
+ int error = is_delete ? -ENOENT : -ENOMEM;
+ struct tomoyo_acl_info *entry;
+ struct list_head * const list = param->list;
+
+ if (param->data[0]) {
+ new_entry->cond = tomoyo_get_condition(param);
+ if (!new_entry->cond)
+ return -EINVAL;
+ /*
+ * Domain transition preference is allowed only for
+ * "file execute" entries.
+ */
+ if (new_entry->cond->transit &&
+ !(new_entry->type == TOMOYO_TYPE_PATH_ACL &&
+ container_of(new_entry, struct tomoyo_path_acl, head)
+ ->perm == 1 << TOMOYO_TYPE_EXECUTE))
+ goto out;
+ }
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ list_for_each_entry_rcu(entry, list, list) {
+ if (entry->is_deleted == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ if (!tomoyo_same_acl_head(entry, new_entry) ||
+ !check_duplicate(entry, new_entry))
+ continue;
+ if (merge_duplicate)
+ entry->is_deleted = merge_duplicate(entry, new_entry,
+ is_delete);
+ else
+ entry->is_deleted = is_delete;
+ error = 0;
+ break;
+ }
+ if (error && !is_delete) {
+ entry = tomoyo_commit_ok(new_entry, size);
+ if (entry) {
+ list_add_tail_rcu(&entry->list, list);
+ error = 0;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ tomoyo_put_condition(new_entry->cond);
+ return error;
}
-/*
- * tomoyo_domain_initializer_list is used for holding list of programs which
- * triggers reinitialization of domainname. Normally, a domainname is
- * monotonically getting longer. But sometimes, we restart daemon programs.
- * It would be convenient for us that "a daemon started upon system boot" and
- * "the daemon restarted from console" belong to the same domain. Thus, TOMOYO
- * provides a way to shorten domainnames.
- *
- * An entry is added by
- *
- * # echo 'initialize_domain /usr/sbin/httpd' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and is deleted by
- *
- * # echo 'delete initialize_domain /usr/sbin/httpd' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and all entries are retrieved by
- *
- * # grep ^initialize_domain /sys/kernel/security/tomoyo/exception_policy
+/**
+ * tomoyo_check_acl - Do permission check.
*
- * In the example above, /usr/sbin/httpd will belong to
- * "<kernel> /usr/sbin/httpd" domain.
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @check_entry: Callback function to check type specific parameters.
*
- * You may specify a domainname using "from" keyword.
- * "initialize_domain /usr/sbin/httpd from <kernel> /etc/rc.d/init.d/httpd"
- * will cause "/usr/sbin/httpd" executed from "<kernel> /etc/rc.d/init.d/httpd"
- * domain to belong to "<kernel> /usr/sbin/httpd" domain.
+ * Returns 0 on success, negative value otherwise.
*
- * You may add "no_" prefix to "initialize_domain".
- * "initialize_domain /usr/sbin/httpd" and
- * "no_initialize_domain /usr/sbin/httpd from <kernel> /etc/rc.d/init.d/httpd"
- * will cause "/usr/sbin/httpd" to belong to "<kernel> /usr/sbin/httpd" domain
- * unless executed from "<kernel> /etc/rc.d/init.d/httpd" domain.
+ * Caller holds tomoyo_read_lock().
*/
-static LIST_HEAD(tomoyo_domain_initializer_list);
-static DECLARE_RWSEM(tomoyo_domain_initializer_list_lock);
+void tomoyo_check_acl(struct tomoyo_request_info *r,
+ bool (*check_entry) (struct tomoyo_request_info *,
+ const struct tomoyo_acl_info *))
+{
+ const struct tomoyo_domain_info *domain = r->domain;
+ struct tomoyo_acl_info *ptr;
+ bool retried = false;
+ const struct list_head *list = &domain->acl_info_list;
+
+retry:
+ list_for_each_entry_rcu(ptr, list, list) {
+ if (ptr->is_deleted || ptr->type != r->param_type)
+ continue;
+ if (!check_entry(r, ptr))
+ continue;
+ if (!tomoyo_condition(r, ptr->cond))
+ continue;
+ r->matched_acl = ptr;
+ r->granted = true;
+ return;
+ }
+ if (!retried) {
+ retried = true;
+ list = &domain->ns->acl_group[domain->group];
+ goto retry;
+ }
+ r->granted = false;
+}
+
+/* The list for "struct tomoyo_domain_info". */
+LIST_HEAD(tomoyo_domain_list);
/**
- * tomoyo_update_domain_initializer_entry - Update "struct tomoyo_domain_initializer_entry" list.
+ * tomoyo_last_word - Get last component of a domainname.
*
- * @domainname: The name of domain. May be NULL.
- * @program: The name of program.
- * @is_not: True if it is "no_initialize_domain" entry.
- * @is_delete: True if it is a delete request.
+ * @name: Domainname to check.
*
- * Returns 0 on success, negative value otherwise.
+ * Returns the last word of @name.
*/
-static int tomoyo_update_domain_initializer_entry(const char *domainname,
- const char *program,
- const bool is_not,
- const bool is_delete)
+static const char *tomoyo_last_word(const char *name)
{
- struct tomoyo_domain_initializer_entry *new_entry;
- struct tomoyo_domain_initializer_entry *ptr;
- const struct tomoyo_path_info *saved_program;
- const struct tomoyo_path_info *saved_domainname = NULL;
- int error = -ENOMEM;
- bool is_last_name = false;
-
- if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
- return -EINVAL; /* No patterns allowed. */
- if (domainname) {
- if (!tomoyo_is_domain_def(domainname) &&
- tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
- is_last_name = true;
- else if (!tomoyo_is_correct_domain(domainname, __func__))
- return -EINVAL;
- saved_domainname = tomoyo_save_name(domainname);
- if (!saved_domainname)
- return -ENOMEM;
- }
- saved_program = tomoyo_save_name(program);
- if (!saved_program)
- return -ENOMEM;
- down_write(&tomoyo_domain_initializer_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
- if (ptr->is_not != is_not ||
- ptr->domainname != saved_domainname ||
- ptr->program != saved_program)
- continue;
- ptr->is_deleted = is_delete;
- error = 0;
- goto out;
- }
- if (is_delete) {
- error = -ENOENT;
- goto out;
- }
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->domainname = saved_domainname;
- new_entry->program = saved_program;
- new_entry->is_not = is_not;
- new_entry->is_last_name = is_last_name;
- list_add_tail(&new_entry->list, &tomoyo_domain_initializer_list);
- error = 0;
- out:
- up_write(&tomoyo_domain_initializer_list_lock);
- return error;
+ const char *cp = strrchr(name, ' ');
+ if (cp)
+ return cp + 1;
+ return name;
}
/**
- * tomoyo_read_domain_initializer_policy - Read "struct tomoyo_domain_initializer_entry" list.
+ * tomoyo_same_transition_control - Check for duplicated "struct tomoyo_transition_control" entry.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
*
- * Returns true on success, false otherwise.
+ * Returns true if @a == @b, false otherwise.
*/
-bool tomoyo_read_domain_initializer_policy(struct tomoyo_io_buffer *head)
+static bool tomoyo_same_transition_control(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
{
- struct list_head *pos;
- bool done = true;
-
- down_read(&tomoyo_domain_initializer_list_lock);
- list_for_each_cookie(pos, head->read_var2,
- &tomoyo_domain_initializer_list) {
- const char *no;
- const char *from = "";
- const char *domain = "";
- struct tomoyo_domain_initializer_entry *ptr;
- ptr = list_entry(pos, struct tomoyo_domain_initializer_entry,
- list);
- if (ptr->is_deleted)
- continue;
- no = ptr->is_not ? "no_" : "";
- if (ptr->domainname) {
- from = " from ";
- domain = ptr->domainname->name;
- }
- done = tomoyo_io_printf(head,
- "%s" TOMOYO_KEYWORD_INITIALIZE_DOMAIN
- "%s%s%s\n", no, ptr->program->name,
- from, domain);
- if (!done)
- break;
- }
- up_read(&tomoyo_domain_initializer_list_lock);
- return done;
+ const struct tomoyo_transition_control *p1 = container_of(a,
+ typeof(*p1),
+ head);
+ const struct tomoyo_transition_control *p2 = container_of(b,
+ typeof(*p2),
+ head);
+ return p1->type == p2->type && p1->is_last_name == p2->is_last_name
+ && p1->domainname == p2->domainname
+ && p1->program == p2->program;
}
/**
- * tomoyo_write_domain_initializer_policy - Write "struct tomoyo_domain_initializer_entry" list.
+ * tomoyo_write_transition_control - Write "struct tomoyo_transition_control" list.
*
- * @data: String to parse.
- * @is_not: True if it is "no_initialize_domain" entry.
- * @is_delete: True if it is a delete request.
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @type: Type of this entry.
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_write_domain_initializer_policy(char *data, const bool is_not,
- const bool is_delete)
+int tomoyo_write_transition_control(struct tomoyo_acl_param *param,
+ const u8 type)
{
- char *cp = strstr(data, " from ");
-
- if (cp) {
- *cp = '\0';
- return tomoyo_update_domain_initializer_entry(cp + 6, data,
- is_not,
- is_delete);
+ struct tomoyo_transition_control e = { .type = type };
+ int error = param->is_delete ? -ENOENT : -ENOMEM;
+ char *program = param->data;
+ char *domainname = strstr(program, " from ");
+ if (domainname) {
+ *domainname = '\0';
+ domainname += 6;
+ } else if (type == TOMOYO_TRANSITION_CONTROL_NO_KEEP ||
+ type == TOMOYO_TRANSITION_CONTROL_KEEP) {
+ domainname = program;
+ program = NULL;
+ }
+ if (program && strcmp(program, "any")) {
+ if (!tomoyo_correct_path(program))
+ return -EINVAL;
+ e.program = tomoyo_get_name(program);
+ if (!e.program)
+ goto out;
}
- return tomoyo_update_domain_initializer_entry(NULL, data, is_not,
- is_delete);
+ if (domainname && strcmp(domainname, "any")) {
+ if (!tomoyo_correct_domain(domainname)) {
+ if (!tomoyo_correct_path(domainname))
+ goto out;
+ e.is_last_name = true;
+ }
+ e.domainname = tomoyo_get_name(domainname);
+ if (!e.domainname)
+ goto out;
+ }
+ param->list = &param->ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL];
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_transition_control);
+out:
+ tomoyo_put_name(e.domainname);
+ tomoyo_put_name(e.program);
+ return error;
}
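
tomoyo_write_transition_control() above splits a policy line of the form "PROGRAM from DOMAIN" on the literal " from " keyword. Below is a minimal userspace sketch of that split, assuming the same keyword layout; split_from() is an illustrative helper, not part of the patch.

/* Minimal sketch of the "PROGRAM from DOMAIN" split shown above. */
#include <stdio.h>
#include <string.h>

static void split_from(char *line, char **program, char **domainname)
{
	char *from = strstr(line, " from ");

	*program = line;
	*domainname = NULL;
	if (from) {
		*from = '\0';           /* terminate the program part */
		*domainname = from + 6; /* skip the " from " keyword */
	}
}

int main(void)
{
	char line[] = "/usr/sbin/httpd from <kernel> /etc/rc.d/init.d/httpd";
	char *program;
	char *domainname;

	split_from(line, &program, &domainname);
	printf("program=%s\ndomain=%s\n", program,
	       domainname ? domainname : "any");
	return 0;
}
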
/**
- * tomoyo_is_domain_initializer - Check whether the given program causes domainname reinitialization.
+ * tomoyo_scan_transition - Try to find specific domain transition type.
*
- * @domainname: The name of domain.
- * @program: The name of program.
+ * @list: Pointer to "struct list_head".
+ * @domainname: The name of current domain.
+ * @program: The name of requested program.
* @last_name: The last component of @domainname.
+ * @type: One of values in "enum tomoyo_transition_type".
*
- * Returns true if executing @program reinitializes domain transition,
- * false otherwise.
+ * Returns true if found one, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static bool tomoyo_is_domain_initializer(const struct tomoyo_path_info *
- domainname,
- const struct tomoyo_path_info *program,
- const struct tomoyo_path_info *
- last_name)
+static inline bool tomoyo_scan_transition
+(const struct list_head *list, const struct tomoyo_path_info *domainname,
+ const struct tomoyo_path_info *program, const char *last_name,
+ const enum tomoyo_transition_type type)
{
- struct tomoyo_domain_initializer_entry *ptr;
- bool flag = false;
-
- down_read(&tomoyo_domain_initializer_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_initializer_list, list) {
- if (ptr->is_deleted)
+ const struct tomoyo_transition_control *ptr;
+ list_for_each_entry_rcu(ptr, list, head.list) {
+ if (ptr->head.is_deleted || ptr->type != type)
continue;
if (ptr->domainname) {
if (!ptr->is_last_name) {
if (ptr->domainname != domainname)
continue;
} else {
- if (tomoyo_pathcmp(ptr->domainname, last_name))
+ /*
+ * Use direct strcmp() since this is
+ * unlikely to be used.
+ */
+ if (strcmp(ptr->domainname->name, last_name))
continue;
}
}
- if (tomoyo_pathcmp(ptr->program, program))
+ if (ptr->program && tomoyo_pathcmp(ptr->program, program))
continue;
- if (ptr->is_not) {
- flag = false;
- break;
- }
- flag = true;
+ return true;
}
- up_read(&tomoyo_domain_initializer_list_lock);
- return flag;
+ return false;
}
-/*
- * tomoyo_domain_keeper_list is used for holding list of domainnames which
- * suppresses domain transition. Normally, a domainname is monotonically
- * getting longer. But sometimes, we want to suppress domain transition.
- * It would be convenient for us that programs executed from a login session
- * belong to the same domain. Thus, TOMOYO provides a way to suppress domain
- * transition.
- *
- * An entry is added by
- *
- * # echo 'keep_domain <kernel> /usr/sbin/sshd /bin/bash' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and is deleted by
- *
- * # echo 'delete keep_domain <kernel> /usr/sbin/sshd /bin/bash' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and all entries are retrieved by
- *
- * # grep ^keep_domain /sys/kernel/security/tomoyo/exception_policy
- *
- * In the example above, any process which belongs to
- * "<kernel> /usr/sbin/sshd /bin/bash" domain will remain in that domain,
- * unless explicitly specified by "initialize_domain" or "no_keep_domain".
- *
- * You may specify a program using "from" keyword.
- * "keep_domain /bin/pwd from <kernel> /usr/sbin/sshd /bin/bash"
- * will cause "/bin/pwd" executed from "<kernel> /usr/sbin/sshd /bin/bash"
- * domain to remain in "<kernel> /usr/sbin/sshd /bin/bash" domain.
- *
- * You may add "no_" prefix to "keep_domain".
- * "keep_domain <kernel> /usr/sbin/sshd /bin/bash" and
- * "no_keep_domain /usr/bin/passwd from <kernel> /usr/sbin/sshd /bin/bash" will
- * cause "/usr/bin/passwd" to belong to
- * "<kernel> /usr/sbin/sshd /bin/bash /usr/bin/passwd" domain, unless
- * explicitly specified by "initialize_domain".
- */
-static LIST_HEAD(tomoyo_domain_keeper_list);
-static DECLARE_RWSEM(tomoyo_domain_keeper_list_lock);
-
/**
- * tomoyo_update_domain_keeper_entry - Update "struct tomoyo_domain_keeper_entry" list.
+ * tomoyo_transition_type - Get domain transition type.
*
- * @domainname: The name of domain.
- * @program: The name of program. May be NULL.
- * @is_not: True if it is "no_keep_domain" entry.
- * @is_delete: True if it is a delete request.
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @domainname: The name of current domain.
+ * @program: The name of requested program.
*
- * Returns 0 on success, negative value otherwise.
+ * Returns TOMOYO_TRANSITION_CONTROL_TRANSIT if executing @program causes
+ * domain transition across namespaces, TOMOYO_TRANSITION_CONTROL_INITIALIZE if
+ * executing @program reinitializes domain transition within that namespace,
+ * TOMOYO_TRANSITION_CONTROL_KEEP if executing @program stays at @domainname,
+ * and other values otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_update_domain_keeper_entry(const char *domainname,
- const char *program,
- const bool is_not,
- const bool is_delete)
+static enum tomoyo_transition_type tomoyo_transition_type
+(const struct tomoyo_policy_namespace *ns,
+ const struct tomoyo_path_info *domainname,
+ const struct tomoyo_path_info *program)
{
- struct tomoyo_domain_keeper_entry *new_entry;
- struct tomoyo_domain_keeper_entry *ptr;
- const struct tomoyo_path_info *saved_domainname;
- const struct tomoyo_path_info *saved_program = NULL;
- int error = -ENOMEM;
- bool is_last_name = false;
-
- if (!tomoyo_is_domain_def(domainname) &&
- tomoyo_is_correct_path(domainname, 1, -1, -1, __func__))
- is_last_name = true;
- else if (!tomoyo_is_correct_domain(domainname, __func__))
- return -EINVAL;
- if (program) {
- if (!tomoyo_is_correct_path(program, 1, -1, -1, __func__))
- return -EINVAL;
- saved_program = tomoyo_save_name(program);
- if (!saved_program)
- return -ENOMEM;
- }
- saved_domainname = tomoyo_save_name(domainname);
- if (!saved_domainname)
- return -ENOMEM;
- down_write(&tomoyo_domain_keeper_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
- if (ptr->is_not != is_not ||
- ptr->domainname != saved_domainname ||
- ptr->program != saved_program)
+ const char *last_name = tomoyo_last_word(domainname->name);
+ enum tomoyo_transition_type type = TOMOYO_TRANSITION_CONTROL_NO_RESET;
+ while (type < TOMOYO_MAX_TRANSITION_TYPE) {
+ const struct list_head * const list =
+ &ns->policy_list[TOMOYO_ID_TRANSITION_CONTROL];
+ if (!tomoyo_scan_transition(list, domainname, program,
+ last_name, type)) {
+ type++;
continue;
- ptr->is_deleted = is_delete;
- error = 0;
- goto out;
- }
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ }
+ if (type != TOMOYO_TRANSITION_CONTROL_NO_RESET &&
+ type != TOMOYO_TRANSITION_CONTROL_NO_INITIALIZE)
+ break;
+ /*
+ * Do not check for reset_domain if no_reset_domain matched.
+ * Do not check for initialize_domain if no_initialize_domain
+ * matched.
+ */
+ type++;
+ type++;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->domainname = saved_domainname;
- new_entry->program = saved_program;
- new_entry->is_not = is_not;
- new_entry->is_last_name = is_last_name;
- list_add_tail(&new_entry->list, &tomoyo_domain_keeper_list);
- error = 0;
- out:
- up_write(&tomoyo_domain_keeper_list_lock);
- return error;
+ return type;
}
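
The loop in tomoyo_transition_type() above walks the transition types in order and, when a "no_xxx" entry matches, skips its positive counterpart as well (the double increment). The sketch below mirrors only that control flow; the enum ordering and names are assumptions made for illustration, not values taken from the patch.

/* Minimal userspace sketch of the transition-type precedence loop above. */
#include <stdbool.h>
#include <stdio.h>

enum transition_type {
	NO_RESET, RESET, NO_INITIALIZE, INITIALIZE, NO_KEEP, KEEP, MAX_TYPE
};

/* matched[t] stands in for "an exception-policy entry of type t matched". */
static enum transition_type pick_type(const bool matched[MAX_TYPE])
{
	enum transition_type type = NO_RESET;

	while (type < MAX_TYPE) {
		if (!matched[type]) {
			type++;
			continue;
		}
		if (type != NO_RESET && type != NO_INITIALIZE)
			break;
		/* A "no_xxx" match suppresses only its counterpart, so skip it too. */
		type += 2;
	}
	return type;
}

int main(void)
{
	bool matched[MAX_TYPE] = {
		[NO_INITIALIZE] = true, [INITIALIZE] = true, [KEEP] = true
	};

	/* initialize_domain is suppressed, so keep_domain wins here. */
	printf("%d\n", pick_type(matched) == KEEP);
	return 0;
}
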
/**
- * tomoyo_write_domain_keeper_policy - Write "struct tomoyo_domain_keeper_entry" list.
+ * tomoyo_same_aggregator - Check for duplicated "struct tomoyo_aggregator" entry.
*
- * @data: String to parse.
- * @is_not: True if it is "no_keep_domain" entry.
- * @is_delete: True if it is a delete request.
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
*
+ * Returns true if @a == @b, false otherwise.
*/
-int tomoyo_write_domain_keeper_policy(char *data, const bool is_not,
- const bool is_delete)
+static bool tomoyo_same_aggregator(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
{
- char *cp = strstr(data, " from ");
-
- if (cp) {
- *cp = '\0';
- return tomoyo_update_domain_keeper_entry(cp + 6, data, is_not,
- is_delete);
- }
- return tomoyo_update_domain_keeper_entry(data, NULL, is_not, is_delete);
+ const struct tomoyo_aggregator *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_aggregator *p2 = container_of(b, typeof(*p2),
+ head);
+ return p1->original_name == p2->original_name &&
+ p1->aggregated_name == p2->aggregated_name;
}
/**
- * tomoyo_read_domain_keeper_policy - Read "struct tomoyo_domain_keeper_entry" list.
+ * tomoyo_write_aggregator - Write "struct tomoyo_aggregator" list.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * @param: Pointer to "struct tomoyo_acl_param".
*
- * Returns true on success, false otherwise.
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-bool tomoyo_read_domain_keeper_policy(struct tomoyo_io_buffer *head)
+int tomoyo_write_aggregator(struct tomoyo_acl_param *param)
{
- struct list_head *pos;
- bool done = true;
-
- down_read(&tomoyo_domain_keeper_list_lock);
- list_for_each_cookie(pos, head->read_var2,
- &tomoyo_domain_keeper_list) {
- struct tomoyo_domain_keeper_entry *ptr;
- const char *no;
- const char *from = "";
- const char *program = "";
-
- ptr = list_entry(pos, struct tomoyo_domain_keeper_entry, list);
- if (ptr->is_deleted)
- continue;
- no = ptr->is_not ? "no_" : "";
- if (ptr->program) {
- from = " from ";
- program = ptr->program->name;
- }
- done = tomoyo_io_printf(head,
- "%s" TOMOYO_KEYWORD_KEEP_DOMAIN
- "%s%s%s\n", no, program, from,
- ptr->domainname->name);
- if (!done)
- break;
- }
- up_read(&tomoyo_domain_keeper_list_lock);
- return done;
+ struct tomoyo_aggregator e = { };
+ int error = param->is_delete ? -ENOENT : -ENOMEM;
+ const char *original_name = tomoyo_read_token(param);
+ const char *aggregated_name = tomoyo_read_token(param);
+ if (!tomoyo_correct_word(original_name) ||
+ !tomoyo_correct_path(aggregated_name))
+ return -EINVAL;
+ e.original_name = tomoyo_get_name(original_name);
+ e.aggregated_name = tomoyo_get_name(aggregated_name);
+ if (!e.original_name || !e.aggregated_name ||
+ e.aggregated_name->is_patterned) /* No patterns allowed. */
+ goto out;
+ param->list = &param->ns->policy_list[TOMOYO_ID_AGGREGATOR];
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_aggregator);
+out:
+ tomoyo_put_name(e.original_name);
+ tomoyo_put_name(e.aggregated_name);
+ return error;
}
/**
- * tomoyo_is_domain_keeper - Check whether the given program causes domain transition suppression.
+ * tomoyo_find_namespace - Find specified namespace.
*
- * @domainname: The name of domain.
- * @program: The name of program.
- * @last_name: The last component of @domainname.
+ * @name: Name of namespace to find.
+ * @len: Length of @name.
+ *
+ * Returns pointer to "struct tomoyo_policy_namespace" if found,
+ * NULL otherwise.
*
- * Returns true if executing @program supresses domain transition,
- * false otherwise.
+ * Caller holds tomoyo_read_lock().
*/
-static bool tomoyo_is_domain_keeper(const struct tomoyo_path_info *domainname,
- const struct tomoyo_path_info *program,
- const struct tomoyo_path_info *last_name)
+static struct tomoyo_policy_namespace *tomoyo_find_namespace
+(const char *name, const unsigned int len)
{
- struct tomoyo_domain_keeper_entry *ptr;
- bool flag = false;
-
- down_read(&tomoyo_domain_keeper_list_lock);
- list_for_each_entry(ptr, &tomoyo_domain_keeper_list, list) {
- if (ptr->is_deleted)
- continue;
- if (!ptr->is_last_name) {
- if (ptr->domainname != domainname)
- continue;
- } else {
- if (tomoyo_pathcmp(ptr->domainname, last_name))
- continue;
- }
- if (ptr->program && tomoyo_pathcmp(ptr->program, program))
+ struct tomoyo_policy_namespace *ns;
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+ if (strncmp(name, ns->name, len) ||
+ (name[len] && name[len] != ' '))
continue;
- if (ptr->is_not) {
- flag = false;
- break;
- }
- flag = true;
+ return ns;
}
- up_read(&tomoyo_domain_keeper_list_lock);
- return flag;
+ return NULL;
}
-/*
- * tomoyo_alias_list is used for holding list of symlink's pathnames which are
- * allowed to be passed to an execve() request. Normally, the domainname which
- * the current process will belong to after execve() succeeds is calculated
- * using dereferenced pathnames. But some programs behave differently depending
- * on the name passed to argv[0]. For busybox, calculating domainname using
- * dereferenced pathnames will cause all programs in the busybox to belong to
- * the same domain. Thus, TOMOYO provides a way to allow use of symlink's
- * pathname for checking execve()'s permission and calculating domainname which
- * the current process will belong to after execve() succeeds.
- *
- * An entry is added by
- *
- * # echo 'alias /bin/busybox /bin/cat' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and is deleted by
- *
- * # echo 'delete alias /bin/busybox /bin/cat' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and all entries are retrieved by
- *
- * # grep ^alias /sys/kernel/security/tomoyo/exception_policy
- *
- * In the example above, if /bin/cat is a symlink to /bin/busybox and execution
- * of /bin/cat is requested, permission is checked for /bin/cat rather than
- * /bin/busybox and domainname which the current process will belong to after
- * execve() succeeds is calculated using /bin/cat rather than /bin/busybox .
- */
-static LIST_HEAD(tomoyo_alias_list);
-static DECLARE_RWSEM(tomoyo_alias_list_lock);
-
/**
- * tomoyo_update_alias_entry - Update "struct tomoyo_alias_entry" list.
+ * tomoyo_assign_namespace - Create a new namespace.
*
- * @original_name: The original program's real name.
- * @aliased_name: The symbolic program's symbolic link's name.
- * @is_delete: True if it is a delete request.
+ * @domainname: Name of namespace to create.
*
- * Returns 0 on success, negative value otherwise.
+ * Returns pointer to "struct tomoyo_policy_namespace" on success,
+ * NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_update_alias_entry(const char *original_name,
- const char *aliased_name,
- const bool is_delete)
+struct tomoyo_policy_namespace *tomoyo_assign_namespace(const char *domainname)
{
- struct tomoyo_alias_entry *new_entry;
- struct tomoyo_alias_entry *ptr;
- const struct tomoyo_path_info *saved_original_name;
- const struct tomoyo_path_info *saved_aliased_name;
- int error = -ENOMEM;
-
- if (!tomoyo_is_correct_path(original_name, 1, -1, -1, __func__) ||
- !tomoyo_is_correct_path(aliased_name, 1, -1, -1, __func__))
- return -EINVAL; /* No patterns allowed. */
- saved_original_name = tomoyo_save_name(original_name);
- saved_aliased_name = tomoyo_save_name(aliased_name);
- if (!saved_original_name || !saved_aliased_name)
- return -ENOMEM;
- down_write(&tomoyo_alias_list_lock);
- list_for_each_entry(ptr, &tomoyo_alias_list, list) {
- if (ptr->original_name != saved_original_name ||
- ptr->aliased_name != saved_aliased_name)
- continue;
- ptr->is_deleted = is_delete;
- error = 0;
- goto out;
- }
- if (is_delete) {
- error = -ENOENT;
+ struct tomoyo_policy_namespace *ptr;
+ struct tomoyo_policy_namespace *entry;
+ const char *cp = domainname;
+ unsigned int len = 0;
+ while (*cp && *cp++ != ' ')
+ len++;
+ ptr = tomoyo_find_namespace(domainname, len);
+ if (ptr)
+ return ptr;
+ if (len >= TOMOYO_EXEC_TMPSIZE - 10 || !tomoyo_domain_def(domainname))
+ return NULL;
+ entry = kzalloc(sizeof(*entry) + len + 1, GFP_NOFS);
+ if (!entry)
+ return NULL;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
goto out;
+ ptr = tomoyo_find_namespace(domainname, len);
+ if (!ptr && tomoyo_memory_ok(entry)) {
+ char *name = (char *) (entry + 1);
+ ptr = entry;
+ memmove(name, domainname, len);
+ name[len] = '\0';
+ entry->name = name;
+ tomoyo_init_policy_namespace(entry);
+ entry = NULL;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->original_name = saved_original_name;
- new_entry->aliased_name = saved_aliased_name;
- list_add_tail(&new_entry->list, &tomoyo_alias_list);
- error = 0;
- out:
- up_write(&tomoyo_alias_list_lock);
- return error;
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ kfree(entry);
+ return ptr;
}
/**
- * tomoyo_read_alias_policy - Read "struct tomoyo_alias_entry" list.
+ * tomoyo_namespace_jump - Check for namespace jump.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * @domainname: Name of domain.
*
- * Returns true on success, false otherwise.
+ * Returns true if namespace differs, false otherwise.
*/
-bool tomoyo_read_alias_policy(struct tomoyo_io_buffer *head)
+static bool tomoyo_namespace_jump(const char *domainname)
{
- struct list_head *pos;
- bool done = true;
-
- down_read(&tomoyo_alias_list_lock);
- list_for_each_cookie(pos, head->read_var2, &tomoyo_alias_list) {
- struct tomoyo_alias_entry *ptr;
-
- ptr = list_entry(pos, struct tomoyo_alias_entry, list);
- if (ptr->is_deleted)
- continue;
- done = tomoyo_io_printf(head, TOMOYO_KEYWORD_ALIAS "%s %s\n",
- ptr->original_name->name,
- ptr->aliased_name->name);
- if (!done)
- break;
- }
- up_read(&tomoyo_alias_list_lock);
- return done;
+ const char *namespace = tomoyo_current_namespace()->name;
+ const int len = strlen(namespace);
+ return strncmp(domainname, namespace, len) ||
+ (domainname[len] && domainname[len] != ' ');
}
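
tomoyo_find_namespace() and tomoyo_namespace_jump() above both treat a domainname as belonging to a namespace when the namespace name is a prefix that is followed by either a space or the end of the string. A minimal userspace sketch of that test; in_namespace() is an illustrative helper, not a TOMOYO function.

/* Minimal sketch of the namespace prefix test used above. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool in_namespace(const char *domainname, const char *ns)
{
	const size_t len = strlen(ns);

	return !strncmp(domainname, ns, len) &&
	       (domainname[len] == '\0' || domainname[len] == ' ');
}

int main(void)
{
	printf("%d\n", in_namespace("<kernel> /usr/sbin/sshd", "<kernel>")); /* 1 */
	printf("%d\n", in_namespace("<kernel-2> /bin/sh", "<kernel>"));      /* 0 */
	return 0;
}
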
/**
- * tomoyo_write_alias_policy - Write "struct tomoyo_alias_entry" list.
+ * tomoyo_assign_domain - Create a domain or a namespace.
*
- * @data: String to parse.
- * @is_delete: True if it is a delete request.
+ * @domainname: The name of domain.
+ * @transit: True if the caller will transit to the found or created domain.
*
- * Returns 0 on success, negative value otherwise.
+ * Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-int tomoyo_write_alias_policy(char *data, const bool is_delete)
+struct tomoyo_domain_info *tomoyo_assign_domain(const char *domainname,
+ const bool transit)
{
- char *cp = strchr(data, ' ');
-
- if (!cp)
- return -EINVAL;
- *cp++ = '\0';
- return tomoyo_update_alias_entry(data, cp, is_delete);
+ struct tomoyo_domain_info e = { };
+ struct tomoyo_domain_info *entry = tomoyo_find_domain(domainname);
+ bool created = false;
+ if (entry) {
+ if (transit) {
+ /*
+ * Since namespaces are created at runtime, the profile may
+ * not exist yet at the moment the process transits to
+ * that domain. Do not perform the domain transition if
+ * the profile for that domain has not been created.
+ */
+ if (tomoyo_policy_loaded &&
+ !entry->ns->profile_ptr[entry->profile])
+ return NULL;
+ }
+ return entry;
+ }
+ /* Requested domain does not exist. */
+ /* Don't create requested domain if domainname is invalid. */
+ if (strlen(domainname) >= TOMOYO_EXEC_TMPSIZE - 10 ||
+ !tomoyo_correct_domain(domainname))
+ return NULL;
+ /*
+ * Since definition of profiles and acl_groups may differ across
+ * namespaces, do not inherit "use_profile" and "use_group" settings
+ * by automatically creating requested domain upon domain transition.
+ */
+ if (transit && tomoyo_namespace_jump(domainname))
+ return NULL;
+ e.ns = tomoyo_assign_namespace(domainname);
+ if (!e.ns)
+ return NULL;
+ /*
+ * "use_profile" and "use_group" settings for automatically created
+ * domains are inherited from current domain. These are 0 for manually
+ * created domains.
+ */
+ if (transit) {
+ const struct tomoyo_domain_info *domain = tomoyo_domain();
+ e.profile = domain->profile;
+ e.group = domain->group;
+ }
+ e.domainname = tomoyo_get_name(domainname);
+ if (!e.domainname)
+ return NULL;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ entry = tomoyo_find_domain(domainname);
+ if (!entry) {
+ entry = tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ INIT_LIST_HEAD(&entry->acl_info_list);
+ list_add_tail_rcu(&entry->list, &tomoyo_domain_list);
+ created = true;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ tomoyo_put_name(e.domainname);
+ if (entry && transit) {
+ if (created) {
+ struct tomoyo_request_info r;
+ tomoyo_init_request_info(&r, entry,
+ TOMOYO_MAC_FILE_EXECUTE);
+ r.granted = false;
+ tomoyo_write_log(&r, "use_profile %u\n",
+ entry->profile);
+ tomoyo_write_log(&r, "use_group %u\n", entry->group);
+ tomoyo_update_stat(TOMOYO_STAT_POLICY_UPDATES);
+ }
+ }
+ return entry;
}
/**
- * tomoyo_find_or_assign_new_domain - Create a domain.
+ * tomoyo_environ - Check permission for environment variable names.
*
- * @domainname: The name of domain.
- * @profile: Profile number to assign if the domain was newly created.
+ * @ee: Pointer to "struct tomoyo_execve".
*
- * Returns pointer to "struct tomoyo_domain_info" on success, NULL otherwise.
+ * Returns 0 on success, negative value otherwise.
*/
-struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
- domainname,
- const u8 profile)
+static int tomoyo_environ(struct tomoyo_execve *ee)
{
- struct tomoyo_domain_info *domain = NULL;
- const struct tomoyo_path_info *saved_domainname;
+ struct tomoyo_request_info *r = &ee->r;
+ struct linux_binprm *bprm = ee->bprm;
+ /* env_page.data is allocated by tomoyo_dump_page(). */
+ struct tomoyo_page_dump env_page = { };
+ char *arg_ptr; /* Size is TOMOYO_EXEC_TMPSIZE bytes */
+ int arg_len = 0;
+ unsigned long pos = bprm->p;
+ int offset = pos % PAGE_SIZE;
+ int argv_count = bprm->argc;
+ int envp_count = bprm->envc;
+ int error = -ENOMEM;
- down_write(&tomoyo_domain_list_lock);
- domain = tomoyo_find_domain(domainname);
- if (domain)
- goto out;
- if (!tomoyo_is_correct_domain(domainname, __func__))
+ ee->r.type = TOMOYO_MAC_ENVIRON;
+ ee->r.profile = r->domain->profile;
+ ee->r.mode = tomoyo_get_mode(r->domain->ns, ee->r.profile,
+ TOMOYO_MAC_ENVIRON);
+ if (!r->mode || !envp_count)
+ return 0;
+ arg_ptr = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+ if (!arg_ptr)
goto out;
- saved_domainname = tomoyo_save_name(domainname);
- if (!saved_domainname)
- goto out;
- /* Can I reuse memory of deleted domain? */
- list_for_each_entry(domain, &tomoyo_domain_list, list) {
- struct task_struct *p;
- struct tomoyo_acl_info *ptr;
- bool flag;
- if (!domain->is_deleted ||
- domain->domainname != saved_domainname)
- continue;
- flag = false;
- read_lock(&tasklist_lock);
- for_each_process(p) {
- if (tomoyo_real_domain(p) != domain)
- continue;
- flag = true;
- break;
+ while (error == -ENOMEM) {
+ if (!tomoyo_dump_page(bprm, pos, &env_page))
+ goto out;
+ pos += PAGE_SIZE - offset;
+ /* Read. */
+ while (argv_count && offset < PAGE_SIZE) {
+ if (!env_page.data[offset++])
+ argv_count--;
}
- read_unlock(&tasklist_lock);
- if (flag)
+ if (argv_count) {
+ offset = 0;
continue;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- ptr->type |= TOMOYO_ACL_DELETED;
}
- tomoyo_set_domain_flag(domain, true, domain->flags);
- domain->profile = profile;
- domain->quota_warned = false;
- mb(); /* Avoid out-of-order execution. */
- domain->is_deleted = false;
- goto out;
- }
- /* No memory reusable. Create using new memory. */
- domain = tomoyo_alloc_element(sizeof(*domain));
- if (domain) {
- INIT_LIST_HEAD(&domain->acl_info_list);
- domain->domainname = saved_domainname;
- domain->profile = profile;
- list_add_tail(&domain->list, &tomoyo_domain_list);
+ while (offset < PAGE_SIZE) {
+ const unsigned char c = env_page.data[offset++];
+
+ if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) {
+ if (c == '=') {
+ arg_ptr[arg_len++] = '\0';
+ } else if (c == '\\') {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = '\\';
+ } else if (c > ' ' && c < 127) {
+ arg_ptr[arg_len++] = c;
+ } else {
+ arg_ptr[arg_len++] = '\\';
+ arg_ptr[arg_len++] = (c >> 6) + '0';
+ arg_ptr[arg_len++]
+ = ((c >> 3) & 7) + '0';
+ arg_ptr[arg_len++] = (c & 7) + '0';
+ }
+ } else {
+ arg_ptr[arg_len] = '\0';
+ }
+ if (c)
+ continue;
+ if (tomoyo_env_perm(r, arg_ptr)) {
+ error = -EPERM;
+ break;
+ }
+ if (!--envp_count) {
+ error = 0;
+ break;
+ }
+ arg_len = 0;
+ }
+ offset = 0;
}
- out:
- up_write(&tomoyo_domain_list_lock);
- return domain;
+out:
+ if (r->mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ kfree(env_page.data);
+ kfree(arg_ptr);
+ return error;
}
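
While scanning envp[], tomoyo_environ() above copies printable ASCII as-is, doubles backslashes, and encodes every other byte as a backslash followed by three octal digits (the '=' handling that terminates the variable name is omitted here). Below is a minimal userspace sketch of that byte encoding; escape_env_char() is an illustrative helper, not a TOMOYO function.

/* Minimal sketch of the byte escaping used while scanning envp[] above. */
#include <stdio.h>

static int escape_env_char(unsigned char c, char *out)
{
	if (c == '\\') {
		out[0] = '\\';
		out[1] = '\\';
		return 2;
	}
	if (c > ' ' && c < 127) {
		out[0] = c;	/* printable ASCII is copied verbatim */
		return 1;
	}
	out[0] = '\\';		/* everything else becomes \ooo */
	out[1] = (c >> 6) + '0';
	out[2] = ((c >> 3) & 7) + '0';
	out[3] = (c & 7) + '0';
	return 4;
}

int main(void)
{
	char buf[8];
	int n = escape_env_char('\n', buf); /* newline becomes "\012" */

	buf[n] = '\0';
	printf("%s\n", buf);
	return 0;
}
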
/**
@@ -789,134 +670,232 @@ struct tomoyo_domain_info *tomoyo_find_or_assign_new_domain(const char *
* @bprm: Pointer to "struct linux_binprm".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
int tomoyo_find_next_domain(struct linux_binprm *bprm)
{
- /*
- * This function assumes that the size of buffer returned by
- * tomoyo_realpath() = TOMOYO_MAX_PATHNAME_LEN.
- */
- struct tomoyo_page_buffer *tmp = tomoyo_alloc(sizeof(*tmp));
struct tomoyo_domain_info *old_domain = tomoyo_domain();
struct tomoyo_domain_info *domain = NULL;
- const char *old_domain_name = old_domain->domainname->name;
const char *original_name = bprm->filename;
- char *new_domain_name = NULL;
- char *real_program_name = NULL;
- char *symlink_program_name = NULL;
- const u8 mode = tomoyo_check_flags(old_domain, TOMOYO_MAC_FOR_FILE);
- const bool is_enforce = (mode == 3);
int retval = -ENOMEM;
- struct tomoyo_path_info r; /* real name */
- struct tomoyo_path_info s; /* symlink name */
- struct tomoyo_path_info l; /* last name */
- static bool initialized;
-
- if (!tmp)
- goto out;
+ bool reject_on_transition_failure = false;
+ const struct tomoyo_path_info *candidate;
+ struct tomoyo_path_info exename;
+ struct tomoyo_execve *ee = kzalloc(sizeof(*ee), GFP_NOFS);
- if (!initialized) {
- /*
- * Built-in initializers. This is needed because policies are
- * not loaded until starting /sbin/init.
- */
- tomoyo_update_domain_initializer_entry(NULL, "/sbin/hotplug",
- false, false);
- tomoyo_update_domain_initializer_entry(NULL, "/sbin/modprobe",
- false, false);
- initialized = true;
+ if (!ee)
+ return -ENOMEM;
+ ee->tmp = kzalloc(TOMOYO_EXEC_TMPSIZE, GFP_NOFS);
+ if (!ee->tmp) {
+ kfree(ee);
+ return -ENOMEM;
}
-
- /* Get tomoyo_realpath of program. */
+ /* ee->dump->data is allocated by tomoyo_dump_page(). */
+ tomoyo_init_request_info(&ee->r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+ ee->r.ee = ee;
+ ee->bprm = bprm;
+ ee->r.obj = &ee->obj;
+ ee->obj.path1 = bprm->file->f_path;
+ /* Get symlink's pathname of program. */
retval = -ENOENT;
- /* I hope tomoyo_realpath() won't fail with -ENOMEM. */
- real_program_name = tomoyo_realpath(original_name);
- if (!real_program_name)
+ exename.name = tomoyo_realpath_nofollow(original_name);
+ if (!exename.name)
goto out;
- /* Get tomoyo_realpath of symbolic link. */
- symlink_program_name = tomoyo_realpath_nofollow(original_name);
- if (!symlink_program_name)
- goto out;
-
- r.name = real_program_name;
- tomoyo_fill_path_info(&r);
- s.name = symlink_program_name;
- tomoyo_fill_path_info(&s);
- l.name = tomoyo_get_last_name(old_domain);
- tomoyo_fill_path_info(&l);
-
- /* Check 'alias' directive. */
- if (tomoyo_pathcmp(&r, &s)) {
- struct tomoyo_alias_entry *ptr;
- /* Is this program allowed to be called via symbolic links? */
- down_read(&tomoyo_alias_list_lock);
- list_for_each_entry(ptr, &tomoyo_alias_list, list) {
- if (ptr->is_deleted ||
- tomoyo_pathcmp(&r, ptr->original_name) ||
- tomoyo_pathcmp(&s, ptr->aliased_name))
+ tomoyo_fill_path_info(&exename);
+retry:
+ /* Check 'aggregator' directive. */
+ {
+ struct tomoyo_aggregator *ptr;
+ struct list_head *list =
+ &old_domain->ns->policy_list[TOMOYO_ID_AGGREGATOR];
+ /* Check 'aggregator' directive. */
+ candidate = &exename;
+ list_for_each_entry_rcu(ptr, list, head.list) {
+ if (ptr->head.is_deleted ||
+ !tomoyo_path_matches_pattern(&exename,
+ ptr->original_name))
continue;
- memset(real_program_name, 0, TOMOYO_MAX_PATHNAME_LEN);
- strncpy(real_program_name, ptr->aliased_name->name,
- TOMOYO_MAX_PATHNAME_LEN - 1);
- tomoyo_fill_path_info(&r);
+ candidate = ptr->aggregated_name;
break;
}
- up_read(&tomoyo_alias_list_lock);
}
/* Check execute permission. */
- retval = tomoyo_check_exec_perm(old_domain, &r);
+ retval = tomoyo_execute_permission(&ee->r, candidate);
+ if (retval == TOMOYO_RETRY_REQUEST)
+ goto retry;
if (retval < 0)
goto out;
+ /*
+ * To be able to specify domainnames with wildcards, use the
+ * pathname specified in the policy (which may contain
+ * wildcard) rather than the pathname passed to execve()
+ * (which never contains wildcard).
+ */
+ if (ee->r.param.path.matched_path)
+ candidate = ee->r.param.path.matched_path;
- new_domain_name = tmp->buffer;
- if (tomoyo_is_domain_initializer(old_domain->domainname, &r, &l)) {
- /* Transit to the child of tomoyo_kernel_domain domain. */
- snprintf(new_domain_name, TOMOYO_MAX_PATHNAME_LEN + 1,
- TOMOYO_ROOT_NAME " " "%s", real_program_name);
- } else if (old_domain == &tomoyo_kernel_domain &&
- !tomoyo_policy_loaded) {
+ /*
+ * Check for domain transition preference if "file execute" matched.
+ * If a preference is given, make do_execve() fail when the domain
+ * transition fails, because a domain transition preference should only
+ * be used with the destination domain already defined.
+ */
+ if (ee->transition) {
+ const char *domainname = ee->transition->name;
+ reject_on_transition_failure = true;
+ if (!strcmp(domainname, "keep"))
+ goto force_keep_domain;
+ if (!strcmp(domainname, "child"))
+ goto force_child_domain;
+ if (!strcmp(domainname, "reset"))
+ goto force_reset_domain;
+ if (!strcmp(domainname, "initialize"))
+ goto force_initialize_domain;
+ if (!strcmp(domainname, "parent")) {
+ char *cp;
+ strncpy(ee->tmp, old_domain->domainname->name,
+ TOMOYO_EXEC_TMPSIZE - 1);
+ cp = strrchr(ee->tmp, ' ');
+ if (cp)
+ *cp = '\0';
+ } else if (*domainname == '<')
+ strncpy(ee->tmp, domainname, TOMOYO_EXEC_TMPSIZE - 1);
+ else
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, domainname);
+ goto force_jump_domain;
+ }
+ /*
+ * No domain transition preference specified.
+ * Calculate domain to transit to.
+ */
+ switch (tomoyo_transition_type(old_domain->ns, old_domain->domainname,
+ candidate)) {
+ case TOMOYO_TRANSITION_CONTROL_RESET:
+force_reset_domain:
+ /* Transit to the root of specified namespace. */
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "<%s>",
+ candidate->name);
/*
- * Needn't to transit from kernel domain before starting
- * /sbin/init. But transit from kernel domain if executing
- * initializers because they might start before /sbin/init.
+ * Make do_execve() fail if domain transition across namespaces
+ * has failed.
*/
- domain = old_domain;
- } else if (tomoyo_is_domain_keeper(old_domain->domainname, &r, &l)) {
+ reject_on_transition_failure = true;
+ break;
+ case TOMOYO_TRANSITION_CONTROL_INITIALIZE:
+force_initialize_domain:
+ /* Transit to the child of current namespace's root. */
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->ns->name, candidate->name);
+ break;
+ case TOMOYO_TRANSITION_CONTROL_KEEP:
+force_keep_domain:
/* Keep current domain. */
domain = old_domain;
- } else {
+ break;
+ default:
+ if (old_domain == &tomoyo_kernel_domain &&
+ !tomoyo_policy_loaded) {
+ /*
+ * There is no need to transit from the kernel domain
+ * before starting /sbin/init. But do transit from the
+ * kernel domain when executing initializers, because
+ * they might start before /sbin/init.
+ */
+ domain = old_domain;
+ break;
+ }
+force_child_domain:
/* Normal domain transition. */
- snprintf(new_domain_name, TOMOYO_MAX_PATHNAME_LEN + 1,
- "%s %s", old_domain_name, real_program_name);
+ snprintf(ee->tmp, TOMOYO_EXEC_TMPSIZE - 1, "%s %s",
+ old_domain->domainname->name, candidate->name);
+ break;
}
- if (domain || strlen(new_domain_name) >= TOMOYO_MAX_PATHNAME_LEN)
- goto done;
- down_read(&tomoyo_domain_list_lock);
- domain = tomoyo_find_domain(new_domain_name);
- up_read(&tomoyo_domain_list_lock);
- if (domain)
- goto done;
- if (is_enforce)
- goto done;
- domain = tomoyo_find_or_assign_new_domain(new_domain_name,
- old_domain->profile);
- done:
+force_jump_domain:
+ if (!domain)
+ domain = tomoyo_assign_domain(ee->tmp, true);
if (domain)
- goto out;
- printk(KERN_WARNING "TOMOYO-ERROR: Domain '%s' not defined.\n",
- new_domain_name);
- if (is_enforce)
- retval = -EPERM;
- else
- tomoyo_set_domain_flag(old_domain, false,
- TOMOYO_DOMAIN_FLAGS_TRANSITION_FAILED);
+ retval = 0;
+ else if (reject_on_transition_failure) {
+ printk(KERN_WARNING "ERROR: Domain '%s' not ready.\n",
+ ee->tmp);
+ retval = -ENOMEM;
+ } else if (ee->r.mode == TOMOYO_CONFIG_ENFORCING)
+ retval = -ENOMEM;
+ else {
+ retval = 0;
+ if (!old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED]) {
+ old_domain->flags[TOMOYO_DIF_TRANSITION_FAILED] = true;
+ ee->r.granted = false;
+ tomoyo_write_log(&ee->r, "%s", tomoyo_dif
+ [TOMOYO_DIF_TRANSITION_FAILED]);
+ printk(KERN_WARNING
+ "ERROR: Domain '%s' not defined.\n", ee->tmp);
+ }
+ }
out:
if (!domain)
domain = old_domain;
+ /* Update reference count on "struct tomoyo_domain_info". */
+ atomic_inc(&domain->users);
bprm->cred->security = domain;
- tomoyo_free(real_program_name);
- tomoyo_free(symlink_program_name);
- tomoyo_free(tmp);
+ kfree(exename.name);
+ if (!retval) {
+ ee->r.domain = domain;
+ retval = tomoyo_environ(ee);
+ }
+ kfree(ee->tmp);
+ kfree(ee->dump.data);
+ kfree(ee);
return retval;
}
+
+/**
+ * tomoyo_dump_page - Dump a page to buffer.
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ * @pos: Location to dump.
+ * @dump: Pointer to "struct tomoyo_page_dump".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
+ struct tomoyo_page_dump *dump)
+{
+ struct page *page;
+
+ /* dump->data is released by tomoyo_find_next_domain(). */
+ if (!dump->data) {
+ dump->data = kzalloc(PAGE_SIZE, GFP_NOFS);
+ if (!dump->data)
+ return false;
+ }
+ /* Same with get_arg_page(bprm, pos, 0) in fs/exec.c */
+#ifdef CONFIG_MMU
+ if (get_user_pages(current, bprm->mm, pos, 1, 0, 1, &page, NULL) <= 0)
+ return false;
+#else
+ page = bprm->page[pos / PAGE_SIZE];
+#endif
+ if (page != dump->page) {
+ const unsigned int offset = pos % PAGE_SIZE;
+ /*
+ * Maybe kmap()/kunmap() should be used here.
+ * But remove_arg_zero() uses kmap_atomic()/kunmap_atomic().
+ * So we do the same here.
+ */
+ char *kaddr = kmap_atomic(page);
+
+ dump->page = page;
+ memcpy(dump->data + offset, kaddr + offset,
+ PAGE_SIZE - offset);
+ kunmap_atomic(kaddr);
+ }
+ /* Same with put_arg_page(page) in fs/exec.c */
+#ifdef CONFIG_MMU
+ put_page(page);
+#endif
+ return true;
+}
diff --git a/security/tomoyo/environ.c b/security/tomoyo/environ.c
new file mode 100644
index 00000000000..ad4c6e18a43
--- /dev/null
+++ b/security/tomoyo/environ.c
@@ -0,0 +1,122 @@
+/*
+ * security/tomoyo/environ.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+/**
+ * tomoyo_check_env_acl - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_env_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_env_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return tomoyo_path_matches_pattern(r->param.environ.name, acl->env);
+}
+
+/**
+ * tomoyo_audit_env_log - Audit environment variable name log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_env_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "misc env %s\n",
+ r->param.environ.name->name);
+}
+
+/**
+ * tomoyo_env_perm - Check permission for environment variable's name.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @env: The name of environment variable.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env)
+{
+ struct tomoyo_path_info environ;
+ int error;
+
+ if (!env || !*env)
+ return 0;
+ environ.name = env;
+ tomoyo_fill_path_info(&environ);
+ r->param_type = TOMOYO_TYPE_ENV_ACL;
+ r->param.environ.name = &environ;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_env_acl);
+ error = tomoyo_audit_env_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ return error;
+}
+
+/**
+ * tomoyo_same_env_acl - Check for duplicated "struct tomoyo_env_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_env_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_env_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_env_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->env == p2->env;
+}
+
+/**
+ * tomoyo_write_env - Write "struct tomoyo_env_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_write_env(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_env_acl e = { .head.type = TOMOYO_TYPE_ENV_ACL };
+ int error = -ENOMEM;
+ const char *data = tomoyo_read_token(param);
+
+ if (!tomoyo_correct_word(data) || strchr(data, '='))
+ return -EINVAL;
+ e.env = tomoyo_get_name(data);
+ if (!e.env)
+ return error;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_env_acl, NULL);
+ tomoyo_put_name(e.env);
+ return error;
+}
+
+/**
+ * tomoyo_write_misc - Update environment variable list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_misc(struct tomoyo_acl_param *param)
+{
+ if (tomoyo_str_starts(&param->data, "env "))
+ return tomoyo_write_env(param);
+ return -EINVAL;
+}
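/*
 * Illustrative user-space sketch (not part of the patch): per
 * tomoyo_write_env() above, an "env" entry is a single non-empty token
 * that must not contain '=' -- only the variable's name is matched,
 * never its value -- and the resulting domain policy line has the form
 * "misc env <name-pattern>", as printed by tomoyo_audit_env_log().
 * The check below only mimics the '=' restriction; tomoyo_correct_word()
 * enforces additional escaping rules not reproduced here.
 */
#include <stdio.h>
#include <string.h>

static int env_token_ok(const char *token)
{
	/* Reject empty tokens and anything containing '='. */
	return token && *token && !strchr(token, '=');
}

int main(void)
{
	printf("%d\n", env_token_ok("LD_\\*"));	/* 1: pattern on the name */
	printf("%d\n", env_token_ok("PATH=/bin"));	/* 0: '=' is rejected */
	return 0;
}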
diff --git a/security/tomoyo/file.c b/security/tomoyo/file.c
index 9a6c58881c0..40039079074 100644
--- a/security/tomoyo/file.c
+++ b/security/tomoyo/file.c
@@ -1,1115 +1,726 @@
/*
* security/tomoyo/file.c
*
- * Implementation of the Domain-Based Mandatory Access Control.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
+#include <linux/slab.h>
/*
- * tomoyo_globally_readable_file_entry is a structure which is used for holding
- * "allow_read" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_globally_readable_list .
- * (2) "filename" is a pathname which is allowed to open(O_RDONLY).
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
+ * Mapping table from "enum tomoyo_path_acl_index" to "enum tomoyo_mac_index".
*/
-struct tomoyo_globally_readable_file_entry {
- struct list_head list;
- const struct tomoyo_path_info *filename;
- bool is_deleted;
+static const u8 tomoyo_p2mac[TOMOYO_MAX_PATH_OPERATION] = {
+ [TOMOYO_TYPE_EXECUTE] = TOMOYO_MAC_FILE_EXECUTE,
+ [TOMOYO_TYPE_READ] = TOMOYO_MAC_FILE_OPEN,
+ [TOMOYO_TYPE_WRITE] = TOMOYO_MAC_FILE_OPEN,
+ [TOMOYO_TYPE_APPEND] = TOMOYO_MAC_FILE_OPEN,
+ [TOMOYO_TYPE_UNLINK] = TOMOYO_MAC_FILE_UNLINK,
+ [TOMOYO_TYPE_GETATTR] = TOMOYO_MAC_FILE_GETATTR,
+ [TOMOYO_TYPE_RMDIR] = TOMOYO_MAC_FILE_RMDIR,
+ [TOMOYO_TYPE_TRUNCATE] = TOMOYO_MAC_FILE_TRUNCATE,
+ [TOMOYO_TYPE_SYMLINK] = TOMOYO_MAC_FILE_SYMLINK,
+ [TOMOYO_TYPE_CHROOT] = TOMOYO_MAC_FILE_CHROOT,
+ [TOMOYO_TYPE_UMOUNT] = TOMOYO_MAC_FILE_UMOUNT,
};
/*
- * tomoyo_pattern_entry is a structure which is used for holding
- * "tomoyo_pattern_list" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_pattern_list .
- * (2) "pattern" is a pathname pattern which is used for converting pathnames
- * to pathname patterns during learning mode.
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
+ * Mapping table from "enum tomoyo_mkdev_acl_index" to "enum tomoyo_mac_index".
*/
-struct tomoyo_pattern_entry {
- struct list_head list;
- const struct tomoyo_path_info *pattern;
- bool is_deleted;
+const u8 tomoyo_pnnn2mac[TOMOYO_MAX_MKDEV_OPERATION] = {
+ [TOMOYO_TYPE_MKBLOCK] = TOMOYO_MAC_FILE_MKBLOCK,
+ [TOMOYO_TYPE_MKCHAR] = TOMOYO_MAC_FILE_MKCHAR,
};
/*
- * tomoyo_no_rewrite_entry is a structure which is used for holding
- * "deny_rewrite" entries.
- * It has following fields.
- *
- * (1) "list" which is linked to tomoyo_no_rewrite_list .
- * (2) "pattern" is a pathname which is by default not permitted to modify
- * already existing content.
- * (3) "is_deleted" is a bool which is true if marked as deleted, false
- * otherwise.
+ * Mapping table from "enum tomoyo_path2_acl_index" to "enum tomoyo_mac_index".
*/
-struct tomoyo_no_rewrite_entry {
- struct list_head list;
- const struct tomoyo_path_info *pattern;
- bool is_deleted;
-};
-
-/* Keyword array for single path operations. */
-static const char *tomoyo_sp_keyword[TOMOYO_MAX_SINGLE_PATH_OPERATION] = {
- [TOMOYO_TYPE_READ_WRITE_ACL] = "read/write",
- [TOMOYO_TYPE_EXECUTE_ACL] = "execute",
- [TOMOYO_TYPE_READ_ACL] = "read",
- [TOMOYO_TYPE_WRITE_ACL] = "write",
- [TOMOYO_TYPE_CREATE_ACL] = "create",
- [TOMOYO_TYPE_UNLINK_ACL] = "unlink",
- [TOMOYO_TYPE_MKDIR_ACL] = "mkdir",
- [TOMOYO_TYPE_RMDIR_ACL] = "rmdir",
- [TOMOYO_TYPE_MKFIFO_ACL] = "mkfifo",
- [TOMOYO_TYPE_MKSOCK_ACL] = "mksock",
- [TOMOYO_TYPE_MKBLOCK_ACL] = "mkblock",
- [TOMOYO_TYPE_MKCHAR_ACL] = "mkchar",
- [TOMOYO_TYPE_TRUNCATE_ACL] = "truncate",
- [TOMOYO_TYPE_SYMLINK_ACL] = "symlink",
- [TOMOYO_TYPE_REWRITE_ACL] = "rewrite",
+const u8 tomoyo_pp2mac[TOMOYO_MAX_PATH2_OPERATION] = {
+ [TOMOYO_TYPE_LINK] = TOMOYO_MAC_FILE_LINK,
+ [TOMOYO_TYPE_RENAME] = TOMOYO_MAC_FILE_RENAME,
+ [TOMOYO_TYPE_PIVOT_ROOT] = TOMOYO_MAC_FILE_PIVOT_ROOT,
};
-/* Keyword array for double path operations. */
-static const char *tomoyo_dp_keyword[TOMOYO_MAX_DOUBLE_PATH_OPERATION] = {
- [TOMOYO_TYPE_LINK_ACL] = "link",
- [TOMOYO_TYPE_RENAME_ACL] = "rename",
+/*
+ * Mapping table from "enum tomoyo_path_number_acl_index" to
+ * "enum tomoyo_mac_index".
+ */
+const u8 tomoyo_pn2mac[TOMOYO_MAX_PATH_NUMBER_OPERATION] = {
+ [TOMOYO_TYPE_CREATE] = TOMOYO_MAC_FILE_CREATE,
+ [TOMOYO_TYPE_MKDIR] = TOMOYO_MAC_FILE_MKDIR,
+ [TOMOYO_TYPE_MKFIFO] = TOMOYO_MAC_FILE_MKFIFO,
+ [TOMOYO_TYPE_MKSOCK] = TOMOYO_MAC_FILE_MKSOCK,
+ [TOMOYO_TYPE_IOCTL] = TOMOYO_MAC_FILE_IOCTL,
+ [TOMOYO_TYPE_CHMOD] = TOMOYO_MAC_FILE_CHMOD,
+ [TOMOYO_TYPE_CHOWN] = TOMOYO_MAC_FILE_CHOWN,
+ [TOMOYO_TYPE_CHGRP] = TOMOYO_MAC_FILE_CHGRP,
};
/**
- * tomoyo_sp2keyword - Get the name of single path operation.
+ * tomoyo_put_name_union - Drop reference on "struct tomoyo_name_union".
*
- * @operation: Type of operation.
+ * @ptr: Pointer to "struct tomoyo_name_union".
*
- * Returns the name of single path operation.
+ * Returns nothing.
*/
-const char *tomoyo_sp2keyword(const u8 operation)
+void tomoyo_put_name_union(struct tomoyo_name_union *ptr)
{
- return (operation < TOMOYO_MAX_SINGLE_PATH_OPERATION)
- ? tomoyo_sp_keyword[operation] : NULL;
+ tomoyo_put_group(ptr->group);
+ tomoyo_put_name(ptr->filename);
}
/**
- * tomoyo_dp2keyword - Get the name of double path operation.
+ * tomoyo_compare_name_union - Check whether a name matches "struct tomoyo_name_union" or not.
*
- * @operation: Type of operation.
+ * @name: Pointer to "struct tomoyo_path_info".
+ * @ptr: Pointer to "struct tomoyo_name_union".
*
- * Returns the name of double path operation.
+ * Returns "struct tomoyo_path_info" if @name matches @ptr, NULL otherwise.
*/
-const char *tomoyo_dp2keyword(const u8 operation)
+const struct tomoyo_path_info *
+tomoyo_compare_name_union(const struct tomoyo_path_info *name,
+ const struct tomoyo_name_union *ptr)
{
- return (operation < TOMOYO_MAX_DOUBLE_PATH_OPERATION)
- ? tomoyo_dp_keyword[operation] : NULL;
+ if (ptr->group)
+ return tomoyo_path_matches_group(name, ptr->group);
+ if (tomoyo_path_matches_pattern(name, ptr->filename))
+ return ptr->filename;
+ return NULL;
}
/**
- * tomoyo_strendswith - Check whether the token ends with the given token.
+ * tomoyo_put_number_union - Drop reference on "struct tomoyo_number_union".
*
- * @name: The token to check.
- * @tail: The token to find.
+ * @ptr: Pointer to "struct tomoyo_number_union".
*
- * Returns true if @name ends with @tail, false otherwise.
+ * Returns nothing.
*/
-static bool tomoyo_strendswith(const char *name, const char *tail)
+void tomoyo_put_number_union(struct tomoyo_number_union *ptr)
{
- int len;
-
- if (!name || !tail)
- return false;
- len = strlen(name) - strlen(tail);
- return len >= 0 && !strcmp(name + len, tail);
+ tomoyo_put_group(ptr->group);
}
/**
- * tomoyo_get_path - Get realpath.
+ * tomoyo_compare_number_union - Check whether a value matches "struct tomoyo_number_union" or not.
*
- * @path: Pointer to "struct path".
+ * @value: Number to check.
+ * @ptr: Pointer to "struct tomoyo_number_union".
*
- * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
+ * Returns true if @value matches @ptr, false otherwise.
*/
-static struct tomoyo_path_info *tomoyo_get_path(struct path *path)
+bool tomoyo_compare_number_union(const unsigned long value,
+ const struct tomoyo_number_union *ptr)
{
- int error;
- struct tomoyo_path_info_with_data *buf = tomoyo_alloc(sizeof(*buf));
-
- if (!buf)
- return NULL;
- /* Reserve one byte for appending "/". */
- error = tomoyo_realpath_from_path2(path, buf->body,
- sizeof(buf->body) - 2);
- if (!error) {
- buf->head.name = buf->body;
- tomoyo_fill_path_info(&buf->head);
- return &buf->head;
- }
- tomoyo_free(buf);
- return NULL;
+ if (ptr->group)
+ return tomoyo_number_matches_group(value, value, ptr->group);
+ return value >= ptr->values[0] && value <= ptr->values[1];
}
-/* Lock for domain->acl_info_list. */
-DECLARE_RWSEM(tomoyo_domain_acl_info_list_lock);
-
-static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
- const char *filename2,
- struct tomoyo_domain_info *
- const domain, const bool is_delete);
-static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
- struct tomoyo_domain_info *
- const domain, const bool is_delete);
-
-/*
- * tomoyo_globally_readable_list is used for holding list of pathnames which
- * are by default allowed to be open()ed for reading by any process.
- *
- * An entry is added by
- *
- * # echo 'allow_read /lib/libc-2.5.so' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and is deleted by
- *
- * # echo 'delete allow_read /lib/libc-2.5.so' > \
- * /sys/kernel/security/tomoyo/exception_policy
+/**
+ * tomoyo_add_slash - Add trailing '/' if needed.
*
- * and all entries are retrieved by
+ * @buf: Pointer to "struct tomoyo_path_info".
*
- * # grep ^allow_read /sys/kernel/security/tomoyo/exception_policy
+ * Returns nothing.
*
- * In the example above, any process is allowed to
- * open("/lib/libc-2.5.so", O_RDONLY).
- * One exception is, if the domain which current process belongs to is marked
- * as "ignore_global_allow_read", current process can't do so unless explicitly
- * given "allow_read /lib/libc-2.5.so" to the domain which current process
- * belongs to.
+ * @buf must be generated by tomoyo_encode() because this function does not
+ * allocate memory for adding '/'.
*/
-static LIST_HEAD(tomoyo_globally_readable_list);
-static DECLARE_RWSEM(tomoyo_globally_readable_list_lock);
+static void tomoyo_add_slash(struct tomoyo_path_info *buf)
+{
+ if (buf->is_dir)
+ return;
+ /*
+ * This is OK because tomoyo_encode() reserves space for appending "/".
+ */
+ strcat((char *) buf->name, "/");
+ tomoyo_fill_path_info(buf);
+}
/**
- * tomoyo_update_globally_readable_entry - Update "struct tomoyo_globally_readable_file_entry" list.
+ * tomoyo_get_realpath - Get realpath.
*
- * @filename: Filename unconditionally permitted to open() for reading.
- * @is_delete: True if it is a delete request.
+ * @buf: Pointer to "struct tomoyo_path_info".
+ * @path: Pointer to "struct path".
*
- * Returns 0 on success, negative value otherwise.
+ * Returns true on success, false otherwise.
*/
-static int tomoyo_update_globally_readable_entry(const char *filename,
- const bool is_delete)
+static bool tomoyo_get_realpath(struct tomoyo_path_info *buf, struct path *path)
{
- struct tomoyo_globally_readable_file_entry *new_entry;
- struct tomoyo_globally_readable_file_entry *ptr;
- const struct tomoyo_path_info *saved_filename;
- int error = -ENOMEM;
-
- if (!tomoyo_is_correct_path(filename, 1, 0, -1, __func__))
- return -EINVAL;
- saved_filename = tomoyo_save_name(filename);
- if (!saved_filename)
- return -ENOMEM;
- down_write(&tomoyo_globally_readable_list_lock);
- list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
- if (ptr->filename != saved_filename)
- continue;
- ptr->is_deleted = is_delete;
- error = 0;
- goto out;
+ buf->name = tomoyo_realpath_from_path(path);
+ if (buf->name) {
+ tomoyo_fill_path_info(buf);
+ return true;
}
- if (is_delete) {
- error = -ENOENT;
- goto out;
- }
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->filename = saved_filename;
- list_add_tail(&new_entry->list, &tomoyo_globally_readable_list);
- error = 0;
- out:
- up_write(&tomoyo_globally_readable_list_lock);
- return error;
+ return false;
}
/**
- * tomoyo_is_globally_readable_file - Check if the file is unconditionally permitted to be open()ed for reading.
+ * tomoyo_audit_path_log - Audit path request log.
*
- * @filename: The filename to check.
+ * @r: Pointer to "struct tomoyo_request_info".
*
- * Returns true if any domain can open @filename for reading, false otherwise.
+ * Returns 0 on success, negative value otherwise.
*/
-static bool tomoyo_is_globally_readable_file(const struct tomoyo_path_info *
- filename)
+static int tomoyo_audit_path_log(struct tomoyo_request_info *r)
{
- struct tomoyo_globally_readable_file_entry *ptr;
- bool found = false;
- down_read(&tomoyo_globally_readable_list_lock);
- list_for_each_entry(ptr, &tomoyo_globally_readable_list, list) {
- if (!ptr->is_deleted &&
- tomoyo_path_matches_pattern(filename, ptr->filename)) {
- found = true;
- break;
- }
- }
- up_read(&tomoyo_globally_readable_list_lock);
- return found;
+ return tomoyo_supervisor(r, "file %s %s\n", tomoyo_path_keyword
+ [r->param.path.operation],
+ r->param.path.filename->name);
}
/**
- * tomoyo_write_globally_readable_policy - Write "struct tomoyo_globally_readable_file_entry" list.
+ * tomoyo_audit_path2_log - Audit path/path request log.
*
- * @data: String to parse.
- * @is_delete: True if it is a delete request.
+ * @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_write_globally_readable_policy(char *data, const bool is_delete)
+static int tomoyo_audit_path2_log(struct tomoyo_request_info *r)
{
- return tomoyo_update_globally_readable_entry(data, is_delete);
+ return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords
+ [tomoyo_pp2mac[r->param.path2.operation]],
+ r->param.path2.filename1->name,
+ r->param.path2.filename2->name);
}
/**
- * tomoyo_read_globally_readable_policy - Read "struct tomoyo_globally_readable_file_entry" list.
+ * tomoyo_audit_mkdev_log - Audit path/number/number/number request log.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * @r: Pointer to "struct tomoyo_request_info".
*
- * Returns true on success, false otherwise.
+ * Returns 0 on success, negative value otherwise.
*/
-bool tomoyo_read_globally_readable_policy(struct tomoyo_io_buffer *head)
+static int tomoyo_audit_mkdev_log(struct tomoyo_request_info *r)
{
- struct list_head *pos;
- bool done = true;
-
- down_read(&tomoyo_globally_readable_list_lock);
- list_for_each_cookie(pos, head->read_var2,
- &tomoyo_globally_readable_list) {
- struct tomoyo_globally_readable_file_entry *ptr;
- ptr = list_entry(pos,
- struct tomoyo_globally_readable_file_entry,
- list);
- if (ptr->is_deleted)
- continue;
- done = tomoyo_io_printf(head, TOMOYO_KEYWORD_ALLOW_READ "%s\n",
- ptr->filename->name);
- if (!done)
- break;
- }
- up_read(&tomoyo_globally_readable_list_lock);
- return done;
+ return tomoyo_supervisor(r, "file %s %s 0%o %u %u\n",
+ tomoyo_mac_keywords
+ [tomoyo_pnnn2mac[r->param.mkdev.operation]],
+ r->param.mkdev.filename->name,
+ r->param.mkdev.mode, r->param.mkdev.major,
+ r->param.mkdev.minor);
}
-/* tomoyo_pattern_list is used for holding list of pathnames which are used for
- * converting pathnames to pathname patterns during learning mode.
- *
- * An entry is added by
- *
- * # echo 'file_pattern /proc/\$/mounts' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and is deleted by
- *
- * # echo 'delete file_pattern /proc/\$/mounts' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and all entries are retrieved by
- *
- * # grep ^file_pattern /sys/kernel/security/tomoyo/exception_policy
- *
- * In the example above, if a process which belongs to a domain which is in
- * learning mode requested open("/proc/1/mounts", O_RDONLY),
- * "allow_read /proc/\$/mounts" is automatically added to the domain which that
- * process belongs to.
- *
- * It is not a desirable behavior that we have to use /proc/\$/ instead of
- * /proc/self/ when current process needs to access only current process's
- * information. As of now, LSM version of TOMOYO is using __d_path() for
- * calculating pathname. Non LSM version of TOMOYO is using its own function
- * which pretends as if /proc/self/ is not a symlink; so that we can forbid
- * current process from accessing other process's information.
- */
-static LIST_HEAD(tomoyo_pattern_list);
-static DECLARE_RWSEM(tomoyo_pattern_list_lock);
-
/**
- * tomoyo_update_file_pattern_entry - Update "struct tomoyo_pattern_entry" list.
+ * tomoyo_audit_path_number_log - Audit path/number request log.
*
- * @pattern: Pathname pattern.
- * @is_delete: True if it is a delete request.
+ * @r: Pointer to "struct tomoyo_request_info".
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_update_file_pattern_entry(const char *pattern,
- const bool is_delete)
+static int tomoyo_audit_path_number_log(struct tomoyo_request_info *r)
{
- struct tomoyo_pattern_entry *new_entry;
- struct tomoyo_pattern_entry *ptr;
- const struct tomoyo_path_info *saved_pattern;
- int error = -ENOMEM;
-
- if (!tomoyo_is_correct_path(pattern, 0, 1, 0, __func__))
- return -EINVAL;
- saved_pattern = tomoyo_save_name(pattern);
- if (!saved_pattern)
- return -ENOMEM;
- down_write(&tomoyo_pattern_list_lock);
- list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
- if (saved_pattern != ptr->pattern)
- continue;
- ptr->is_deleted = is_delete;
- error = 0;
- goto out;
- }
- if (is_delete) {
- error = -ENOENT;
- goto out;
+ const u8 type = r->param.path_number.operation;
+ u8 radix;
+ char buffer[64];
+ switch (type) {
+ case TOMOYO_TYPE_CREATE:
+ case TOMOYO_TYPE_MKDIR:
+ case TOMOYO_TYPE_MKFIFO:
+ case TOMOYO_TYPE_MKSOCK:
+ case TOMOYO_TYPE_CHMOD:
+ radix = TOMOYO_VALUE_TYPE_OCTAL;
+ break;
+ case TOMOYO_TYPE_IOCTL:
+ radix = TOMOYO_VALUE_TYPE_HEXADECIMAL;
+ break;
+ default:
+ radix = TOMOYO_VALUE_TYPE_DECIMAL;
+ break;
}
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->pattern = saved_pattern;
- list_add_tail(&new_entry->list, &tomoyo_pattern_list);
- error = 0;
- out:
- up_write(&tomoyo_pattern_list_lock);
- return error;
+ tomoyo_print_ulong(buffer, sizeof(buffer), r->param.path_number.number,
+ radix);
+ return tomoyo_supervisor(r, "file %s %s %s\n", tomoyo_mac_keywords
+ [tomoyo_pn2mac[type]],
+ r->param.path_number.filename->name, buffer);
}
/**
- * tomoyo_get_file_pattern - Get patterned pathname.
+ * tomoyo_check_path_acl - Check permission for path operation.
*
- * @filename: The filename to find patterned pathname.
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
*
- * Returns pointer to pathname pattern if matched, @filename otherwise.
+ * Returns true if granted, false otherwise.
+ *
+ * To be able to use wildcard for domain transition, this function sets
+ * matching entry on success. Since the caller holds tomoyo_read_lock(),
+ * it is safe to set matching entry.
*/
-static const struct tomoyo_path_info *
-tomoyo_get_file_pattern(const struct tomoyo_path_info *filename)
+static bool tomoyo_check_path_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
{
- struct tomoyo_pattern_entry *ptr;
- const struct tomoyo_path_info *pattern = NULL;
-
- down_read(&tomoyo_pattern_list_lock);
- list_for_each_entry(ptr, &tomoyo_pattern_list, list) {
- if (ptr->is_deleted)
- continue;
- if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
- continue;
- pattern = ptr->pattern;
- if (tomoyo_strendswith(pattern->name, "/\\*")) {
- /* Do nothing. Try to find the better match. */
- } else {
- /* This would be the better match. Use this. */
- break;
- }
+ const struct tomoyo_path_acl *acl = container_of(ptr, typeof(*acl),
+ head);
+ if (acl->perm & (1 << r->param.path.operation)) {
+ r->param.path.matched_path =
+ tomoyo_compare_name_union(r->param.path.filename,
+ &acl->name);
+ return r->param.path.matched_path != NULL;
}
- up_read(&tomoyo_pattern_list_lock);
- if (pattern)
- filename = pattern;
- return filename;
+ return false;
}
/**
- * tomoyo_write_pattern_policy - Write "struct tomoyo_pattern_entry" list.
+ * tomoyo_check_path_number_acl - Check permission for path number operation.
*
- * @data: String to parse.
- * @is_delete: True if it is a delete request.
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
*
- * Returns 0 on success, negative value otherwise.
+ * Returns true if granted, false otherwise.
*/
-int tomoyo_write_pattern_policy(char *data, const bool is_delete)
+static bool tomoyo_check_path_number_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
{
- return tomoyo_update_file_pattern_entry(data, is_delete);
+ const struct tomoyo_path_number_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ return (acl->perm & (1 << r->param.path_number.operation)) &&
+ tomoyo_compare_number_union(r->param.path_number.number,
+ &acl->number) &&
+ tomoyo_compare_name_union(r->param.path_number.filename,
+ &acl->name);
}
/**
- * tomoyo_read_file_pattern - Read "struct tomoyo_pattern_entry" list.
+ * tomoyo_check_path2_acl - Check permission for path/path operation.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
*
- * Returns true on success, false otherwise.
+ * Returns true if granted, false otherwise.
*/
-bool tomoyo_read_file_pattern(struct tomoyo_io_buffer *head)
+static bool tomoyo_check_path2_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
{
- struct list_head *pos;
- bool done = true;
-
- down_read(&tomoyo_pattern_list_lock);
- list_for_each_cookie(pos, head->read_var2, &tomoyo_pattern_list) {
- struct tomoyo_pattern_entry *ptr;
- ptr = list_entry(pos, struct tomoyo_pattern_entry, list);
- if (ptr->is_deleted)
- continue;
- done = tomoyo_io_printf(head, TOMOYO_KEYWORD_FILE_PATTERN
- "%s\n", ptr->pattern->name);
- if (!done)
- break;
- }
- up_read(&tomoyo_pattern_list_lock);
- return done;
+ const struct tomoyo_path2_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ return (acl->perm & (1 << r->param.path2.operation)) &&
+ tomoyo_compare_name_union(r->param.path2.filename1, &acl->name1)
+ && tomoyo_compare_name_union(r->param.path2.filename2,
+ &acl->name2);
}
-/*
- * tomoyo_no_rewrite_list is used for holding list of pathnames which are by
- * default forbidden to modify already written content of a file.
- *
- * An entry is added by
- *
- * # echo 'deny_rewrite /var/log/messages' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and is deleted by
- *
- * # echo 'delete deny_rewrite /var/log/messages' > \
- * /sys/kernel/security/tomoyo/exception_policy
- *
- * and all entries are retrieved by
- *
- * # grep ^deny_rewrite /sys/kernel/security/tomoyo/exception_policy
+/**
+ * tomoyo_check_mkdev_acl - Check permission for path/number/number/number operation.
*
- * In the example above, if a process requested to rewrite /var/log/messages ,
- * the process can't rewrite unless the domain which that process belongs to
- * has "allow_rewrite /var/log/messages" entry.
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
*
- * It is not a desirable behavior that we have to add "\040(deleted)" suffix
- * when we want to allow rewriting already unlink()ed file. As of now,
- * LSM version of TOMOYO is using __d_path() for calculating pathname.
- * Non LSM version of TOMOYO is using its own function which doesn't append
- * " (deleted)" suffix if the file is already unlink()ed; so that we don't
- * need to worry whether the file is already unlink()ed or not.
+ * Returns true if granted, false otherwise.
*/
-static LIST_HEAD(tomoyo_no_rewrite_list);
-static DECLARE_RWSEM(tomoyo_no_rewrite_list_lock);
+static bool tomoyo_check_mkdev_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_mkdev_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ return (acl->perm & (1 << r->param.mkdev.operation)) &&
+ tomoyo_compare_number_union(r->param.mkdev.mode,
+ &acl->mode) &&
+ tomoyo_compare_number_union(r->param.mkdev.major,
+ &acl->major) &&
+ tomoyo_compare_number_union(r->param.mkdev.minor,
+ &acl->minor) &&
+ tomoyo_compare_name_union(r->param.mkdev.filename,
+ &acl->name);
+}
/**
- * tomoyo_update_no_rewrite_entry - Update "struct tomoyo_no_rewrite_entry" list.
+ * tomoyo_same_path_acl - Check for duplicated "struct tomoyo_path_acl" entry.
*
- * @pattern: Pathname pattern that are not rewritable by default.
- * @is_delete: True if it is a delete request.
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
*
- * Returns 0 on success, negative value otherwise.
+ * Returns true if @a == @b except permission bits, false otherwise.
*/
-static int tomoyo_update_no_rewrite_entry(const char *pattern,
- const bool is_delete)
+static bool tomoyo_same_path_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
{
- struct tomoyo_no_rewrite_entry *new_entry, *ptr;
- const struct tomoyo_path_info *saved_pattern;
- int error = -ENOMEM;
-
- if (!tomoyo_is_correct_path(pattern, 0, 0, 0, __func__))
- return -EINVAL;
- saved_pattern = tomoyo_save_name(pattern);
- if (!saved_pattern)
- return -ENOMEM;
- down_write(&tomoyo_no_rewrite_list_lock);
- list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
- if (ptr->pattern != saved_pattern)
- continue;
- ptr->is_deleted = is_delete;
- error = 0;
- goto out;
- }
- if (is_delete) {
- error = -ENOENT;
- goto out;
- }
- new_entry = tomoyo_alloc_element(sizeof(*new_entry));
- if (!new_entry)
- goto out;
- new_entry->pattern = saved_pattern;
- list_add_tail(&new_entry->list, &tomoyo_no_rewrite_list);
- error = 0;
- out:
- up_write(&tomoyo_no_rewrite_list_lock);
- return error;
+ const struct tomoyo_path_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_path_acl *p2 = container_of(b, typeof(*p2), head);
+ return tomoyo_same_name_union(&p1->name, &p2->name);
}
/**
- * tomoyo_is_no_rewrite_file - Check if the given pathname is not permitted to be rewritten.
+ * tomoyo_merge_path_acl - Merge duplicated "struct tomoyo_path_acl" entry.
*
- * @filename: Filename to check.
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
*
- * Returns true if @filename is specified by "deny_rewrite" directive,
- * false otherwise.
+ * Returns true if @a is empty, false otherwise.
*/
-static bool tomoyo_is_no_rewrite_file(const struct tomoyo_path_info *filename)
+static bool tomoyo_merge_path_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
{
- struct tomoyo_no_rewrite_entry *ptr;
- bool found = false;
-
- down_read(&tomoyo_no_rewrite_list_lock);
- list_for_each_entry(ptr, &tomoyo_no_rewrite_list, list) {
- if (ptr->is_deleted)
- continue;
- if (!tomoyo_path_matches_pattern(filename, ptr->pattern))
- continue;
- found = true;
- break;
- }
- up_read(&tomoyo_no_rewrite_list_lock);
- return found;
+ u16 * const a_perm = &container_of(a, struct tomoyo_path_acl, head)
+ ->perm;
+ u16 perm = *a_perm;
+ const u16 b_perm = container_of(b, struct tomoyo_path_acl, head)->perm;
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
}
/**
- * tomoyo_write_no_rewrite_policy - Write "struct tomoyo_no_rewrite_entry" list.
+ * tomoyo_update_path_acl - Update "struct tomoyo_path_acl" list.
*
- * @data: String to parse.
- * @is_delete: True if it is a delete request.
+ * @perm: Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-int tomoyo_write_no_rewrite_policy(char *data, const bool is_delete)
+static int tomoyo_update_path_acl(const u16 perm,
+ struct tomoyo_acl_param *param)
{
- return tomoyo_update_no_rewrite_entry(data, is_delete);
+ struct tomoyo_path_acl e = {
+ .head.type = TOMOYO_TYPE_PATH_ACL,
+ .perm = perm
+ };
+ int error;
+ if (!tomoyo_parse_name_union(param, &e.name))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_path_acl,
+ tomoyo_merge_path_acl);
+ tomoyo_put_name_union(&e.name);
+ return error;
}
/**
- * tomoyo_read_no_rewrite_policy - Read "struct tomoyo_no_rewrite_entry" list.
+ * tomoyo_same_mkdev_acl - Check for duplicated "struct tomoyo_mkdev_acl" entry.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
*
- * Returns true on success, false otherwise.
+ * Returns true if @a == @b except permission bits, false otherwise.
*/
-bool tomoyo_read_no_rewrite_policy(struct tomoyo_io_buffer *head)
+static bool tomoyo_same_mkdev_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
{
- struct list_head *pos;
- bool done = true;
-
- down_read(&tomoyo_no_rewrite_list_lock);
- list_for_each_cookie(pos, head->read_var2, &tomoyo_no_rewrite_list) {
- struct tomoyo_no_rewrite_entry *ptr;
- ptr = list_entry(pos, struct tomoyo_no_rewrite_entry, list);
- if (ptr->is_deleted)
- continue;
- done = tomoyo_io_printf(head, TOMOYO_KEYWORD_DENY_REWRITE
- "%s\n", ptr->pattern->name);
- if (!done)
- break;
- }
- up_read(&tomoyo_no_rewrite_list_lock);
- return done;
+ const struct tomoyo_mkdev_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_mkdev_acl *p2 = container_of(b, typeof(*p2), head);
+ return tomoyo_same_name_union(&p1->name, &p2->name) &&
+ tomoyo_same_number_union(&p1->mode, &p2->mode) &&
+ tomoyo_same_number_union(&p1->major, &p2->major) &&
+ tomoyo_same_number_union(&p1->minor, &p2->minor);
}
/**
- * tomoyo_update_file_acl - Update file's read/write/execute ACL.
+ * tomoyo_merge_mkdev_acl - Merge duplicated "struct tomoyo_mkdev_acl" entry.
*
- * @filename: Filename.
- * @perm: Permission (between 1 and 7).
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @is_delete: True if it is a delete request.
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
*
- * Returns 0 on success, negative value otherwise.
- *
- * This is legacy support interface for older policy syntax.
- * Current policy syntax uses "allow_read/write" instead of "6",
- * "allow_read" instead of "4", "allow_write" instead of "2",
- * "allow_execute" instead of "1".
+ * Returns true if @a is empty, false otherwise.
*/
-static int tomoyo_update_file_acl(const char *filename, u8 perm,
- struct tomoyo_domain_info * const domain,
- const bool is_delete)
+static bool tomoyo_merge_mkdev_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
{
- if (perm > 7 || !perm) {
- printk(KERN_DEBUG "%s: Invalid permission '%d %s'\n",
- __func__, perm, filename);
- return -EINVAL;
- }
- if (filename[0] != '@' && tomoyo_strendswith(filename, "/"))
- /*
- * Only 'allow_mkdir' and 'allow_rmdir' are valid for
- * directory permissions.
- */
- return 0;
- if (perm & 4)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_READ_ACL, filename,
- domain, is_delete);
- if (perm & 2)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_WRITE_ACL, filename,
- domain, is_delete);
- if (perm & 1)
- tomoyo_update_single_path_acl(TOMOYO_TYPE_EXECUTE_ACL,
- filename, domain, is_delete);
- return 0;
+ u8 *const a_perm = &container_of(a, struct tomoyo_mkdev_acl,
+ head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_mkdev_acl, head)
+ ->perm;
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
}
/**
- * tomoyo_check_single_path_acl2 - Check permission for single path operation.
+ * tomoyo_update_mkdev_acl - Update "struct tomoyo_mkdev_acl" list.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @filename: Filename to check.
- * @perm: Permission.
- * @may_use_pattern: True if patterned ACL is permitted.
+ * @perm: Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
*
- * Returns 0 on success, -EPERM otherwise.
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_check_single_path_acl2(const struct tomoyo_domain_info *
- domain,
- const struct tomoyo_path_info *
- filename,
- const u16 perm,
- const bool may_use_pattern)
+static int tomoyo_update_mkdev_acl(const u8 perm,
+ struct tomoyo_acl_param *param)
{
- struct tomoyo_acl_info *ptr;
- int error = -EPERM;
-
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- struct tomoyo_single_path_acl_record *acl;
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
- if (!(acl->perm & perm))
- continue;
- if (may_use_pattern || !acl->filename->is_patterned) {
- if (!tomoyo_path_matches_pattern(filename,
- acl->filename))
- continue;
- } else {
- continue;
- }
- error = 0;
- break;
- }
- up_read(&tomoyo_domain_acl_info_list_lock);
+ struct tomoyo_mkdev_acl e = {
+ .head.type = TOMOYO_TYPE_MKDEV_ACL,
+ .perm = perm
+ };
+ int error;
+ if (!tomoyo_parse_name_union(param, &e.name) ||
+ !tomoyo_parse_number_union(param, &e.mode) ||
+ !tomoyo_parse_number_union(param, &e.major) ||
+ !tomoyo_parse_number_union(param, &e.minor))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_mkdev_acl,
+ tomoyo_merge_mkdev_acl);
+ tomoyo_put_name_union(&e.name);
+ tomoyo_put_number_union(&e.mode);
+ tomoyo_put_number_union(&e.major);
+ tomoyo_put_number_union(&e.minor);
return error;
}
/**
- * tomoyo_check_file_acl - Check permission for opening files.
+ * tomoyo_same_path2_acl - Check for duplicated "struct tomoyo_path2_acl" entry.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @filename: Filename to check.
- * @operation: Mode ("read" or "write" or "read/write" or "execute").
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
*
- * Returns 0 on success, -EPERM otherwise.
+ * Returns true if @a == @b except permission bits, false otherwise.
*/
-static int tomoyo_check_file_acl(const struct tomoyo_domain_info *domain,
- const struct tomoyo_path_info *filename,
- const u8 operation)
+static bool tomoyo_same_path2_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
{
- u16 perm = 0;
-
- if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
- return 0;
- if (operation == 6)
- perm = 1 << TOMOYO_TYPE_READ_WRITE_ACL;
- else if (operation == 4)
- perm = 1 << TOMOYO_TYPE_READ_ACL;
- else if (operation == 2)
- perm = 1 << TOMOYO_TYPE_WRITE_ACL;
- else if (operation == 1)
- perm = 1 << TOMOYO_TYPE_EXECUTE_ACL;
- else
- BUG();
- return tomoyo_check_single_path_acl2(domain, filename, perm,
- operation != 1);
+ const struct tomoyo_path2_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_path2_acl *p2 = container_of(b, typeof(*p2), head);
+ return tomoyo_same_name_union(&p1->name1, &p2->name1) &&
+ tomoyo_same_name_union(&p1->name2, &p2->name2);
}
/**
- * tomoyo_check_file_perm2 - Check permission for opening files.
+ * tomoyo_merge_path2_acl - Merge duplicated "struct tomoyo_path2_acl" entry.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @filename: Filename to check.
- * @perm: Mode ("read" or "write" or "read/write" or "execute").
- * @operation: Operation name passed used for verbose mode.
- * @mode: Access control mode.
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
*
- * Returns 0 on success, negative value otherwise.
+ * Returns true if @a is empty, false otherwise.
*/
-static int tomoyo_check_file_perm2(struct tomoyo_domain_info * const domain,
- const struct tomoyo_path_info *filename,
- const u8 perm, const char *operation,
- const u8 mode)
+static bool tomoyo_merge_path2_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
{
- const bool is_enforce = (mode == 3);
- const char *msg = "<unknown>";
- int error = 0;
-
- if (!filename)
- return 0;
- error = tomoyo_check_file_acl(domain, filename, perm);
- if (error && perm == 4 &&
- (domain->flags & TOMOYO_DOMAIN_FLAGS_IGNORE_GLOBAL_ALLOW_READ) == 0
- && tomoyo_is_globally_readable_file(filename))
- error = 0;
- if (perm == 6)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_READ_WRITE_ACL);
- else if (perm == 4)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_READ_ACL);
- else if (perm == 2)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_WRITE_ACL);
- else if (perm == 1)
- msg = tomoyo_sp2keyword(TOMOYO_TYPE_EXECUTE_ACL);
+ u8 * const a_perm = &container_of(a, struct tomoyo_path2_acl, head)
+ ->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_path2_acl, head)->perm;
+ if (is_delete)
+ perm &= ~b_perm;
else
- BUG();
- if (!error)
- return 0;
- if (tomoyo_verbose_mode(domain))
- printk(KERN_WARNING "TOMOYO-%s: Access '%s(%s) %s' denied "
- "for %s\n", tomoyo_get_msg(is_enforce), msg, operation,
- filename->name, tomoyo_get_last_name(domain));
- if (is_enforce)
- return error;
- if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
- /* Don't use patterns for execute permission. */
- const struct tomoyo_path_info *patterned_file = (perm != 1) ?
- tomoyo_get_file_pattern(filename) : filename;
- tomoyo_update_file_acl(patterned_file->name, perm,
- domain, false);
- }
- return 0;
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
}
/**
- * tomoyo_write_file_policy - Update file related list.
+ * tomoyo_update_path2_acl - Update "struct tomoyo_path2_acl" list.
*
- * @data: String to parse.
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @is_delete: True if it is a delete request.
+ * @perm: Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-int tomoyo_write_file_policy(char *data, struct tomoyo_domain_info *domain,
- const bool is_delete)
+static int tomoyo_update_path2_acl(const u8 perm,
+ struct tomoyo_acl_param *param)
{
- char *filename = strchr(data, ' ');
- char *filename2;
- unsigned int perm;
- u8 type;
-
- if (!filename)
- return -EINVAL;
- *filename++ = '\0';
- if (sscanf(data, "%u", &perm) == 1)
- return tomoyo_update_file_acl(filename, (u8) perm, domain,
- is_delete);
- if (strncmp(data, "allow_", 6))
- goto out;
- data += 6;
- for (type = 0; type < TOMOYO_MAX_SINGLE_PATH_OPERATION; type++) {
- if (strcmp(data, tomoyo_sp_keyword[type]))
- continue;
- return tomoyo_update_single_path_acl(type, filename,
- domain, is_delete);
- }
- filename2 = strchr(filename, ' ');
- if (!filename2)
- goto out;
- *filename2++ = '\0';
- for (type = 0; type < TOMOYO_MAX_DOUBLE_PATH_OPERATION; type++) {
- if (strcmp(data, tomoyo_dp_keyword[type]))
- continue;
- return tomoyo_update_double_path_acl(type, filename, filename2,
- domain, is_delete);
- }
- out:
- return -EINVAL;
+ struct tomoyo_path2_acl e = {
+ .head.type = TOMOYO_TYPE_PATH2_ACL,
+ .perm = perm
+ };
+ int error;
+ if (!tomoyo_parse_name_union(param, &e.name1) ||
+ !tomoyo_parse_name_union(param, &e.name2))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_path2_acl,
+ tomoyo_merge_path2_acl);
+ tomoyo_put_name_union(&e.name1);
+ tomoyo_put_name_union(&e.name2);
+ return error;
}
/**
- * tomoyo_update_single_path_acl - Update "struct tomoyo_single_path_acl_record" list.
+ * tomoyo_path_permission - Check permission for single path operation.
*
- * @type: Type of operation.
- * @filename: Filename.
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @is_delete: True if it is a delete request.
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @operation: Type of operation.
+ * @filename: Filename to check.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_update_single_path_acl(const u8 type, const char *filename,
- struct tomoyo_domain_info *
- const domain, const bool is_delete)
+static int tomoyo_path_permission(struct tomoyo_request_info *r, u8 operation,
+ const struct tomoyo_path_info *filename)
{
- static const u16 rw_mask =
- (1 << TOMOYO_TYPE_READ_ACL) | (1 << TOMOYO_TYPE_WRITE_ACL);
- const struct tomoyo_path_info *saved_filename;
- struct tomoyo_acl_info *ptr;
- struct tomoyo_single_path_acl_record *acl;
- int error = -ENOMEM;
- const u16 perm = 1 << type;
+ int error;
- if (!domain)
- return -EINVAL;
- if (!tomoyo_is_correct_path(filename, 0, 0, 0, __func__))
- return -EINVAL;
- saved_filename = tomoyo_save_name(filename);
- if (!saved_filename)
- return -ENOMEM;
- down_write(&tomoyo_domain_acl_info_list_lock);
- if (is_delete)
- goto delete;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
- if (acl->filename != saved_filename)
- continue;
- /* Special case. Clear all bits if marked as deleted. */
- if (ptr->type & TOMOYO_ACL_DELETED)
- acl->perm = 0;
- acl->perm |= perm;
- if ((acl->perm & rw_mask) == rw_mask)
- acl->perm |= 1 << TOMOYO_TYPE_READ_WRITE_ACL;
- else if (acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL))
- acl->perm |= rw_mask;
- ptr->type &= ~TOMOYO_ACL_DELETED;
- error = 0;
- goto out;
- }
- /* Not found. Append it to the tail. */
- acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_SINGLE_PATH_ACL);
- if (!acl)
- goto out;
- acl->perm = perm;
- if (perm == (1 << TOMOYO_TYPE_READ_WRITE_ACL))
- acl->perm |= rw_mask;
- acl->filename = saved_filename;
- list_add_tail(&acl->head.list, &domain->acl_info_list);
- error = 0;
- goto out;
- delete:
- error = -ENOENT;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_SINGLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_single_path_acl_record,
- head);
- if (acl->filename != saved_filename)
- continue;
- acl->perm &= ~perm;
- if ((acl->perm & rw_mask) != rw_mask)
- acl->perm &= ~(1 << TOMOYO_TYPE_READ_WRITE_ACL);
- else if (!(acl->perm & (1 << TOMOYO_TYPE_READ_WRITE_ACL)))
- acl->perm &= ~rw_mask;
- if (!acl->perm)
- ptr->type |= TOMOYO_ACL_DELETED;
- error = 0;
- break;
- }
- out:
- up_write(&tomoyo_domain_acl_info_list_lock);
+ r->type = tomoyo_p2mac[operation];
+ r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+ if (r->mode == TOMOYO_CONFIG_DISABLED)
+ return 0;
+ r->param_type = TOMOYO_TYPE_PATH_ACL;
+ r->param.path.filename = filename;
+ r->param.path.operation = operation;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_path_acl);
+ error = tomoyo_audit_path_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
return error;
}
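/*
 * Illustrative user-space sketch (not part of the patch): the
 * check-then-audit loop in tomoyo_path_permission() above (also used by
 * tomoyo_env_perm() and tomoyo_path_number_perm()) simply repeats the
 * ACL check for as long as the audit/supervisor step answers "retry".
 * RETRY_REQUEST and the helpers below are stand-ins for
 * TOMOYO_RETRY_REQUEST, tomoyo_check_acl() and tomoyo_supervisor().
 */
#include <stdio.h>

#define RETRY_REQUEST 1

static int attempts;

static void check_acl(void)
{
	attempts++;	/* the kernel walks the domain's ACL list here */
}

static int ask_supervisor(void)
{
	/* Pretend the supervisor asks for one retry, then grants access. */
	return attempts < 2 ? RETRY_REQUEST : 0;
}

int main(void)
{
	int error;

	do {
		check_acl();
		error = ask_supervisor();
	} while (error == RETRY_REQUEST);
	printf("granted after %d attempt(s), error=%d\n", attempts, error);
	return 0;
}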
/**
- * tomoyo_update_double_path_acl - Update "struct tomoyo_double_path_acl_record" list.
+ * tomoyo_execute_permission - Check permission for execute operation.
*
- * @type: Type of operation.
- * @filename1: First filename.
- * @filename2: Second filename.
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @is_delete: True if it is a delete request.
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @filename: Filename to check.
*
* Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
*/
-static int tomoyo_update_double_path_acl(const u8 type, const char *filename1,
- const char *filename2,
- struct tomoyo_domain_info *
- const domain, const bool is_delete)
+int tomoyo_execute_permission(struct tomoyo_request_info *r,
+ const struct tomoyo_path_info *filename)
{
- const struct tomoyo_path_info *saved_filename1;
- const struct tomoyo_path_info *saved_filename2;
- struct tomoyo_acl_info *ptr;
- struct tomoyo_double_path_acl_record *acl;
- int error = -ENOMEM;
- const u8 perm = 1 << type;
-
- if (!domain)
- return -EINVAL;
- if (!tomoyo_is_correct_path(filename1, 0, 0, 0, __func__) ||
- !tomoyo_is_correct_path(filename2, 0, 0, 0, __func__))
- return -EINVAL;
- saved_filename1 = tomoyo_save_name(filename1);
- saved_filename2 = tomoyo_save_name(filename2);
- if (!saved_filename1 || !saved_filename2)
- return -ENOMEM;
- down_write(&tomoyo_domain_acl_info_list_lock);
- if (is_delete)
- goto delete;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type1(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
- if (acl->filename1 != saved_filename1 ||
- acl->filename2 != saved_filename2)
- continue;
- /* Special case. Clear all bits if marked as deleted. */
- if (ptr->type & TOMOYO_ACL_DELETED)
- acl->perm = 0;
- acl->perm |= perm;
- ptr->type &= ~TOMOYO_ACL_DELETED;
- error = 0;
- goto out;
- }
- /* Not found. Append it to the tail. */
- acl = tomoyo_alloc_acl_element(TOMOYO_TYPE_DOUBLE_PATH_ACL);
- if (!acl)
- goto out;
- acl->perm = perm;
- acl->filename1 = saved_filename1;
- acl->filename2 = saved_filename2;
- list_add_tail(&acl->head.list, &domain->acl_info_list);
- error = 0;
- goto out;
- delete:
- error = -ENOENT;
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
- if (acl->filename1 != saved_filename1 ||
- acl->filename2 != saved_filename2)
- continue;
- acl->perm &= ~perm;
- if (!acl->perm)
- ptr->type |= TOMOYO_ACL_DELETED;
- error = 0;
- break;
- }
- out:
- up_write(&tomoyo_domain_acl_info_list_lock);
- return error;
+ /*
+ * Unlike other permission checks, this check is done regardless of
+ * profile mode settings in order to check for domain transition
+ * preference.
+ */
+ r->type = TOMOYO_MAC_FILE_EXECUTE;
+ r->mode = tomoyo_get_mode(r->domain->ns, r->profile, r->type);
+ r->param_type = TOMOYO_TYPE_PATH_ACL;
+ r->param.path.filename = filename;
+ r->param.path.operation = TOMOYO_TYPE_EXECUTE;
+ tomoyo_check_acl(r, tomoyo_check_path_acl);
+ r->ee->transition = r->matched_acl && r->matched_acl->cond ?
+ r->matched_acl->cond->transit : NULL;
+ if (r->mode != TOMOYO_CONFIG_DISABLED)
+ return tomoyo_audit_path_log(r);
+ return 0;
}
/**
- * tomoyo_check_single_path_acl - Check permission for single path operation.
+ * tomoyo_same_path_number_acl - Check for duplicated "struct tomoyo_path_number_acl" entry.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @type: Type of operation.
- * @filename: Filename to check.
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
*
- * Returns 0 on success, negative value otherwise.
+ * Returns true if @a == @b except permission bits, false otherwise.
*/
-static int tomoyo_check_single_path_acl(struct tomoyo_domain_info *domain,
- const u8 type,
- const struct tomoyo_path_info *filename)
+static bool tomoyo_same_path_number_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
{
- if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
- return 0;
- return tomoyo_check_single_path_acl2(domain, filename, 1 << type, 1);
+ const struct tomoyo_path_number_acl *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_path_number_acl *p2 = container_of(b, typeof(*p2),
+ head);
+ return tomoyo_same_name_union(&p1->name, &p2->name) &&
+ tomoyo_same_number_union(&p1->number, &p2->number);
}
/**
- * tomoyo_check_double_path_acl - Check permission for double path operation.
+ * tomoyo_merge_path_number_acl - Merge duplicated "struct tomoyo_path_number_acl" entry.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @type: Type of operation.
- * @filename1: First filename to check.
- * @filename2: Second filename to check.
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
*
- * Returns 0 on success, -EPERM otherwise.
+ * Returns true if @a is empty, false otherwise.
*/
-static int tomoyo_check_double_path_acl(const struct tomoyo_domain_info *domain,
- const u8 type,
- const struct tomoyo_path_info *
- filename1,
- const struct tomoyo_path_info *
- filename2)
+static bool tomoyo_merge_path_number_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
{
- struct tomoyo_acl_info *ptr;
- const u8 perm = 1 << type;
- int error = -EPERM;
-
- if (!tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE))
- return 0;
- down_read(&tomoyo_domain_acl_info_list_lock);
- list_for_each_entry(ptr, &domain->acl_info_list, list) {
- struct tomoyo_double_path_acl_record *acl;
- if (tomoyo_acl_type2(ptr) != TOMOYO_TYPE_DOUBLE_PATH_ACL)
- continue;
- acl = container_of(ptr, struct tomoyo_double_path_acl_record,
- head);
- if (!(acl->perm & perm))
- continue;
- if (!tomoyo_path_matches_pattern(filename1, acl->filename1))
- continue;
- if (!tomoyo_path_matches_pattern(filename2, acl->filename2))
- continue;
- error = 0;
- break;
- }
- up_read(&tomoyo_domain_acl_info_list_lock);
- return error;
+ u8 * const a_perm = &container_of(a, struct tomoyo_path_number_acl,
+ head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_path_number_acl, head)
+ ->perm;
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
}
/**
- * tomoyo_check_single_path_permission2 - Check permission for single path operation.
+ * tomoyo_update_path_number_acl - Update ioctl/chmod/chown/chgrp ACL.
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @operation: Type of operation.
- * @filename: Filename to check.
- * @mode: Access control mode.
+ * @perm: Permission.
+ * @param: Pointer to "struct tomoyo_acl_param".
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_check_single_path_permission2(struct tomoyo_domain_info *
- const domain, u8 operation,
- const struct tomoyo_path_info *
- filename, const u8 mode)
+static int tomoyo_update_path_number_acl(const u8 perm,
+ struct tomoyo_acl_param *param)
{
- const char *msg;
+ struct tomoyo_path_number_acl e = {
+ .head.type = TOMOYO_TYPE_PATH_NUMBER_ACL,
+ .perm = perm
+ };
int error;
- const bool is_enforce = (mode == 3);
-
- if (!mode)
- return 0;
- next:
- error = tomoyo_check_single_path_acl(domain, operation, filename);
- msg = tomoyo_sp2keyword(operation);
- if (!error)
- goto ok;
- if (tomoyo_verbose_mode(domain))
- printk(KERN_WARNING "TOMOYO-%s: Access '%s %s' denied for %s\n",
- tomoyo_get_msg(is_enforce), msg, filename->name,
- tomoyo_get_last_name(domain));
- if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
- const char *name = tomoyo_get_file_pattern(filename)->name;
- tomoyo_update_single_path_acl(operation, name, domain, false);
- }
- if (!is_enforce)
- error = 0;
- ok:
- /*
- * Since "allow_truncate" doesn't imply "allow_rewrite" permission,
- * we need to check "allow_rewrite" permission if the filename is
- * specified by "deny_rewrite" keyword.
- */
- if (!error && operation == TOMOYO_TYPE_TRUNCATE_ACL &&
- tomoyo_is_no_rewrite_file(filename)) {
- operation = TOMOYO_TYPE_REWRITE_ACL;
- goto next;
- }
+ if (!tomoyo_parse_name_union(param, &e.name) ||
+ !tomoyo_parse_number_union(param, &e.number))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_path_number_acl,
+ tomoyo_merge_path_number_acl);
+ tomoyo_put_name_union(&e.name);
+ tomoyo_put_number_union(&e.number);
return error;
}
/**
- * tomoyo_check_exec_perm - Check permission for "execute".
+ * tomoyo_path_number_perm - Check permission for "create", "mkdir", "mkfifo", "mksock", "ioctl", "chmod", "chown", "chgrp".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @filename: Check permission for "execute".
+ * @type: Type of operation.
+ * @path: Pointer to "struct path".
+ * @number: Number.
*
- * Returns 0 on success, negative value otherwise.
+ * Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
- const struct tomoyo_path_info *filename)
+int tomoyo_path_number_perm(const u8 type, struct path *path,
+ unsigned long number)
{
- const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = *path,
+ };
+ int error = -ENOMEM;
+ struct tomoyo_path_info buf;
+ int idx;
- if (!mode)
+ if (tomoyo_init_request_info(&r, NULL, tomoyo_pn2mac[type])
+ == TOMOYO_CONFIG_DISABLED || !path->dentry)
return 0;
- return tomoyo_check_file_perm2(domain, filename, 1, "do_execve", mode);
+ idx = tomoyo_read_lock();
+ if (!tomoyo_get_realpath(&buf, path))
+ goto out;
+ r.obj = &obj;
+ if (type == TOMOYO_TYPE_MKDIR)
+ tomoyo_add_slash(&buf);
+ r.param_type = TOMOYO_TYPE_PATH_NUMBER_ACL;
+ r.param.path_number.operation = type;
+ r.param.path_number.filename = &buf;
+ r.param.path_number.number = number;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_path_number_acl);
+ error = tomoyo_audit_path_number_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ kfree(buf.name);
+ out:
+ tomoyo_read_unlock(idx);
+ if (r.mode != TOMOYO_CONFIG_ENFORCING)
+ error = 0;
+ return error;
}
/**
@@ -1125,189 +736,291 @@ int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag)
{
const u8 acc_mode = ACC_MODE(flag);
- int error = -ENOMEM;
- struct tomoyo_path_info *buf;
- const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
- const bool is_enforce = (mode == 3);
-
- if (!mode || !path->mnt)
- return 0;
- if (acc_mode == 0)
- return 0;
- if (path->dentry->d_inode && S_ISDIR(path->dentry->d_inode->i_mode))
- /*
- * I don't check directories here because mkdir() and rmdir()
- * don't call me.
- */
- return 0;
- buf = tomoyo_get_path(path);
- if (!buf)
- goto out;
- error = 0;
- /*
- * If the filename is specified by "deny_rewrite" keyword,
- * we need to check "allow_rewrite" permission when the filename is not
- * opened for append mode or the filename is truncated at open time.
- */
- if ((acc_mode & MAY_WRITE) &&
- ((flag & O_TRUNC) || !(flag & O_APPEND)) &&
- (tomoyo_is_no_rewrite_file(buf))) {
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_REWRITE_ACL,
- buf, mode);
+ int error = 0;
+ struct tomoyo_path_info buf;
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = *path,
+ };
+ int idx;
+
+ buf.name = NULL;
+ r.mode = TOMOYO_CONFIG_DISABLED;
+ idx = tomoyo_read_lock();
+ if (acc_mode &&
+ tomoyo_init_request_info(&r, domain, TOMOYO_MAC_FILE_OPEN)
+ != TOMOYO_CONFIG_DISABLED) {
+ if (!tomoyo_get_realpath(&buf, path)) {
+ error = -ENOMEM;
+ goto out;
+ }
+ r.obj = &obj;
+ if (acc_mode & MAY_READ)
+ error = tomoyo_path_permission(&r, TOMOYO_TYPE_READ,
+ &buf);
+ if (!error && (acc_mode & MAY_WRITE))
+ error = tomoyo_path_permission(&r, (flag & O_APPEND) ?
+ TOMOYO_TYPE_APPEND :
+ TOMOYO_TYPE_WRITE,
+ &buf);
}
- if (!error)
- error = tomoyo_check_file_perm2(domain, buf, acc_mode, "open",
- mode);
- if (!error && (flag & O_TRUNC))
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_TRUNCATE_ACL,
- buf, mode);
out:
- tomoyo_free(buf);
- if (!is_enforce)
+ kfree(buf.name);
+ tomoyo_read_unlock(idx);
+ if (r.mode != TOMOYO_CONFIG_ENFORCING)
error = 0;
return error;
}
/**
- * tomoyo_check_1path_perm - Check permission for "create", "unlink", "mkdir", "rmdir", "mkfifo", "mksock", "mkblock", "mkchar", "truncate" and "symlink".
+ * tomoyo_path_perm - Check permission for "unlink", "rmdir", "truncate", "symlink", "append", "chroot" and "unmount".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
* @path: Pointer to "struct path".
+ * @target: Symlink's target if @operation is TOMOYO_TYPE_SYMLINK,
+ * NULL otherwise.
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path)
+int tomoyo_path_perm(const u8 operation, struct path *path, const char *target)
{
- int error = -ENOMEM;
- struct tomoyo_path_info *buf;
- const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
- const bool is_enforce = (mode == 3);
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = *path,
+ };
+ int error;
+ struct tomoyo_path_info buf;
+ bool is_enforce;
+ struct tomoyo_path_info symlink_target;
+ int idx;
- if (!mode || !path->mnt)
+ if (tomoyo_init_request_info(&r, NULL, tomoyo_p2mac[operation])
+ == TOMOYO_CONFIG_DISABLED)
return 0;
- buf = tomoyo_get_path(path);
- if (!buf)
+ is_enforce = (r.mode == TOMOYO_CONFIG_ENFORCING);
+ error = -ENOMEM;
+ buf.name = NULL;
+ idx = tomoyo_read_lock();
+ if (!tomoyo_get_realpath(&buf, path))
goto out;
+ r.obj = &obj;
switch (operation) {
- case TOMOYO_TYPE_MKDIR_ACL:
- case TOMOYO_TYPE_RMDIR_ACL:
- if (!buf->is_dir) {
- /*
- * tomoyo_get_path() reserves space for appending "/."
- */
- strcat((char *) buf->name, "/");
- tomoyo_fill_path_info(buf);
- }
+ case TOMOYO_TYPE_RMDIR:
+ case TOMOYO_TYPE_CHROOT:
+ tomoyo_add_slash(&buf);
+ break;
+ case TOMOYO_TYPE_SYMLINK:
+ symlink_target.name = tomoyo_encode(target);
+ if (!symlink_target.name)
+ goto out;
+ tomoyo_fill_path_info(&symlink_target);
+ obj.symlink_target = &symlink_target;
+ break;
}
- error = tomoyo_check_single_path_permission2(domain, operation, buf,
- mode);
+ error = tomoyo_path_permission(&r, operation, &buf);
+ if (operation == TOMOYO_TYPE_SYMLINK)
+ kfree(symlink_target.name);
out:
- tomoyo_free(buf);
+ kfree(buf.name);
+ tomoyo_read_unlock(idx);
if (!is_enforce)
error = 0;
return error;
}
/**
- * tomoyo_check_rewrite_permission - Check permission for "rewrite".
+ * tomoyo_mkdev_perm - Check permission for "mkblock" and "mkchar".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
- * @filp: Pointer to "struct file".
+ * @operation: Type of operation. (TOMOYO_TYPE_MKCHAR or TOMOYO_TYPE_MKBLOCK)
+ * @path: Pointer to "struct path".
+ * @mode: Create mode.
+ * @dev: Device number.
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
- struct file *filp)
+int tomoyo_mkdev_perm(const u8 operation, struct path *path,
+ const unsigned int mode, unsigned int dev)
{
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = *path,
+ };
int error = -ENOMEM;
- const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
- const bool is_enforce = (mode == 3);
- struct tomoyo_path_info *buf;
+ struct tomoyo_path_info buf;
+ int idx;
- if (!mode || !filp->f_path.mnt)
+ if (tomoyo_init_request_info(&r, NULL, tomoyo_pnnn2mac[operation])
+ == TOMOYO_CONFIG_DISABLED)
return 0;
- buf = tomoyo_get_path(&filp->f_path);
- if (!buf)
- goto out;
- if (!tomoyo_is_no_rewrite_file(buf)) {
- error = 0;
- goto out;
+ idx = tomoyo_read_lock();
+ error = -ENOMEM;
+ if (tomoyo_get_realpath(&buf, path)) {
+ r.obj = &obj;
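+		/*
+		 * Decode the user-space dev_t encoding before extracting
+		 * major/minor numbers.
+		 */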
+ dev = new_decode_dev(dev);
+ r.param_type = TOMOYO_TYPE_MKDEV_ACL;
+ r.param.mkdev.filename = &buf;
+ r.param.mkdev.operation = operation;
+ r.param.mkdev.mode = mode;
+ r.param.mkdev.major = MAJOR(dev);
+ r.param.mkdev.minor = MINOR(dev);
+ tomoyo_check_acl(&r, tomoyo_check_mkdev_acl);
+ error = tomoyo_audit_mkdev_log(&r);
+ kfree(buf.name);
}
- error = tomoyo_check_single_path_permission2(domain,
- TOMOYO_TYPE_REWRITE_ACL,
- buf, mode);
- out:
- tomoyo_free(buf);
- if (!is_enforce)
+ tomoyo_read_unlock(idx);
+ if (r.mode != TOMOYO_CONFIG_ENFORCING)
error = 0;
return error;
}
/**
- * tomoyo_check_2path_perm - Check permission for "rename" and "link".
+ * tomoyo_path2_perm - Check permission for "rename", "link" and "pivot_root".
*
- * @domain: Pointer to "struct tomoyo_domain_info".
* @operation: Type of operation.
* @path1: Pointer to "struct path".
* @path2: Pointer to "struct path".
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_check_2path_perm(struct tomoyo_domain_info * const domain,
- const u8 operation, struct path *path1,
- struct path *path2)
+int tomoyo_path2_perm(const u8 operation, struct path *path1,
+ struct path *path2)
{
int error = -ENOMEM;
- struct tomoyo_path_info *buf1, *buf2;
- const u8 mode = tomoyo_check_flags(domain, TOMOYO_MAC_FOR_FILE);
- const bool is_enforce = (mode == 3);
- const char *msg;
-
- if (!mode || !path1->mnt || !path2->mnt)
+ struct tomoyo_path_info buf1;
+ struct tomoyo_path_info buf2;
+ struct tomoyo_request_info r;
+ struct tomoyo_obj_info obj = {
+ .path1 = *path1,
+ .path2 = *path2,
+ };
+ int idx;
+
+ if (tomoyo_init_request_info(&r, NULL, tomoyo_pp2mac[operation])
+ == TOMOYO_CONFIG_DISABLED)
return 0;
- buf1 = tomoyo_get_path(path1);
- buf2 = tomoyo_get_path(path2);
- if (!buf1 || !buf2)
- goto out;
- {
- struct dentry *dentry = path1->dentry;
- if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) {
- /*
- * tomoyo_get_path() reserves space for appending "/."
- */
- if (!buf1->is_dir) {
- strcat((char *) buf1->name, "/");
- tomoyo_fill_path_info(buf1);
- }
- if (!buf2->is_dir) {
- strcat((char *) buf2->name, "/");
- tomoyo_fill_path_info(buf2);
- }
- }
- }
- error = tomoyo_check_double_path_acl(domain, operation, buf1, buf2);
- msg = tomoyo_dp2keyword(operation);
- if (!error)
+ buf1.name = NULL;
+ buf2.name = NULL;
+ idx = tomoyo_read_lock();
+ if (!tomoyo_get_realpath(&buf1, path1) ||
+ !tomoyo_get_realpath(&buf2, path2))
goto out;
- if (tomoyo_verbose_mode(domain))
- printk(KERN_WARNING "TOMOYO-%s: Access '%s %s %s' "
- "denied for %s\n", tomoyo_get_msg(is_enforce),
- msg, buf1->name, buf2->name,
- tomoyo_get_last_name(domain));
- if (mode == 1 && tomoyo_domain_quota_is_ok(domain)) {
- const char *name1 = tomoyo_get_file_pattern(buf1)->name;
- const char *name2 = tomoyo_get_file_pattern(buf2)->name;
- tomoyo_update_double_path_acl(operation, name1, name2, domain,
- false);
+ switch (operation) {
+ struct dentry *dentry;
+ case TOMOYO_TYPE_RENAME:
+ case TOMOYO_TYPE_LINK:
+ dentry = path1->dentry;
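+		/*
+		 * When the source is a directory, fall through so that a
+		 * trailing '/' is added to both pathnames.
+		 */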
+ if (!dentry->d_inode || !S_ISDIR(dentry->d_inode->i_mode))
+ break;
+ /* fall through */
+ case TOMOYO_TYPE_PIVOT_ROOT:
+ tomoyo_add_slash(&buf1);
+ tomoyo_add_slash(&buf2);
+ break;
}
+ r.obj = &obj;
+ r.param_type = TOMOYO_TYPE_PATH2_ACL;
+ r.param.path2.operation = operation;
+ r.param.path2.filename1 = &buf1;
+ r.param.path2.filename2 = &buf2;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_path2_acl);
+ error = tomoyo_audit_path2_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
out:
- tomoyo_free(buf1);
- tomoyo_free(buf2);
- if (!is_enforce)
+ kfree(buf1.name);
+ kfree(buf2.name);
+ tomoyo_read_unlock(idx);
+ if (r.mode != TOMOYO_CONFIG_ENFORCING)
error = 0;
return error;
}
+
+/**
+ * tomoyo_same_mount_acl - Check for duplicated "struct tomoyo_mount_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_mount_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_mount_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_mount_acl *p2 = container_of(b, typeof(*p2), head);
+ return tomoyo_same_name_union(&p1->dev_name, &p2->dev_name) &&
+ tomoyo_same_name_union(&p1->dir_name, &p2->dir_name) &&
+ tomoyo_same_name_union(&p1->fs_type, &p2->fs_type) &&
+ tomoyo_same_number_union(&p1->flags, &p2->flags);
+}
+
+/**
+ * tomoyo_update_mount_acl - Write "struct tomoyo_mount_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_update_mount_acl(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_mount_acl e = { .head.type = TOMOYO_TYPE_MOUNT_ACL };
+ int error;
+ if (!tomoyo_parse_name_union(param, &e.dev_name) ||
+ !tomoyo_parse_name_union(param, &e.dir_name) ||
+ !tomoyo_parse_name_union(param, &e.fs_type) ||
+ !tomoyo_parse_number_union(param, &e.flags))
+ error = -EINVAL;
+ else
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_mount_acl, NULL);
+ tomoyo_put_name_union(&e.dev_name);
+ tomoyo_put_name_union(&e.dir_name);
+ tomoyo_put_name_union(&e.fs_type);
+ tomoyo_put_number_union(&e.flags);
+ return error;
+}
+
+/**
+ * tomoyo_write_file - Update file related list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_file(struct tomoyo_acl_param *param)
+{
+ u16 perm = 0;
+ u8 type;
+ const char *operation = tomoyo_read_token(param);
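+	/*
+	 * Try each class of keywords in turn; the first class that matches
+	 * @operation decides which ACL list is updated.
+	 */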
+ for (type = 0; type < TOMOYO_MAX_PATH_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_path_keyword[type]))
+ perm |= 1 << type;
+ if (perm)
+ return tomoyo_update_path_acl(perm, param);
+ for (type = 0; type < TOMOYO_MAX_PATH2_OPERATION; type++)
+ if (tomoyo_permstr(operation,
+ tomoyo_mac_keywords[tomoyo_pp2mac[type]]))
+ perm |= 1 << type;
+ if (perm)
+ return tomoyo_update_path2_acl(perm, param);
+ for (type = 0; type < TOMOYO_MAX_PATH_NUMBER_OPERATION; type++)
+ if (tomoyo_permstr(operation,
+ tomoyo_mac_keywords[tomoyo_pn2mac[type]]))
+ perm |= 1 << type;
+ if (perm)
+ return tomoyo_update_path_number_acl(perm, param);
+ for (type = 0; type < TOMOYO_MAX_MKDEV_OPERATION; type++)
+ if (tomoyo_permstr(operation,
+ tomoyo_mac_keywords[tomoyo_pnnn2mac[type]]))
+ perm |= 1 << type;
+ if (perm)
+ return tomoyo_update_mkdev_acl(perm, param);
+ if (tomoyo_permstr(operation,
+ tomoyo_mac_keywords[TOMOYO_MAC_FILE_MOUNT]))
+ return tomoyo_update_mount_acl(param);
+ return -EINVAL;
+}
diff --git a/security/tomoyo/gc.c b/security/tomoyo/gc.c
new file mode 100644
index 00000000000..986a6a75686
--- /dev/null
+++ b/security/tomoyo/gc.c
@@ -0,0 +1,655 @@
+/*
+ * security/tomoyo/gc.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/kthread.h>
+#include <linux/slab.h>
+
+/**
+ * tomoyo_memory_free - Free memory for elements.
+ *
+ * @ptr: Pointer to allocated memory.
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_memory_free(void *ptr)
+{
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= ksize(ptr);
+ kfree(ptr);
+}
+
+/* The list for "struct tomoyo_io_buffer". */
+static LIST_HEAD(tomoyo_io_buffer_list);
+/* Lock for protecting tomoyo_io_buffer_list. */
+static DEFINE_SPINLOCK(tomoyo_io_buffer_list_lock);
+
+/**
+ * tomoyo_struct_used_by_io_buffer - Check whether the list element is used by /sys/kernel/security/tomoyo/ users or not.
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns true if @element is used by /sys/kernel/security/tomoyo/ users,
+ * false otherwise.
+ */
+static bool tomoyo_struct_used_by_io_buffer(const struct list_head *element)
+{
+ struct tomoyo_io_buffer *head;
+ bool in_use = false;
+
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
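+		/*
+		 * Take a temporary reference so that this io_buffer cannot be
+		 * released while the list lock is dropped and io_sem is held.
+		 */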
+ head->users++;
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ mutex_lock(&head->io_sem);
+ if (head->r.domain == element || head->r.group == element ||
+ head->r.acl == element || &head->w.domain->list == element)
+ in_use = true;
+ mutex_unlock(&head->io_sem);
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ head->users--;
+ if (in_use)
+ break;
+ }
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ return in_use;
+}
+
+/**
+ * tomoyo_name_used_by_io_buffer - Check whether the string is used by /sys/kernel/security/tomoyo/ users or not.
+ *
+ * @string: String to check.
+ *
+ * Returns true if @string is used by /sys/kernel/security/tomoyo/ users,
+ * false otherwise.
+ */
+static bool tomoyo_name_used_by_io_buffer(const char *string)
+{
+ struct tomoyo_io_buffer *head;
+ const size_t size = strlen(string) + 1;
+ bool in_use = false;
+
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ list_for_each_entry(head, &tomoyo_io_buffer_list, list) {
+ int i;
+ head->users++;
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ mutex_lock(&head->io_sem);
+ for (i = 0; i < TOMOYO_MAX_IO_READ_QUEUE; i++) {
+ const char *w = head->r.w[i];
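+			/*
+			 * A queued read cursor may point into the middle of
+			 * the string, so compare against its whole range.
+			 */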
+ if (w < string || w > string + size)
+ continue;
+ in_use = true;
+ break;
+ }
+ mutex_unlock(&head->io_sem);
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ head->users--;
+ if (in_use)
+ break;
+ }
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ return in_use;
+}
+
+/**
+ * tomoyo_del_transition_control - Delete members in "struct tomoyo_transition_control".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_transition_control(struct list_head *element)
+{
+ struct tomoyo_transition_control *ptr =
+ container_of(element, typeof(*ptr), head.list);
+ tomoyo_put_name(ptr->domainname);
+ tomoyo_put_name(ptr->program);
+}
+
+/**
+ * tomoyo_del_aggregator - Delete members in "struct tomoyo_aggregator".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_aggregator(struct list_head *element)
+{
+ struct tomoyo_aggregator *ptr =
+ container_of(element, typeof(*ptr), head.list);
+ tomoyo_put_name(ptr->original_name);
+ tomoyo_put_name(ptr->aggregated_name);
+}
+
+/**
+ * tomoyo_del_manager - Delete members in "struct tomoyo_manager".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_manager(struct list_head *element)
+{
+ struct tomoyo_manager *ptr =
+ container_of(element, typeof(*ptr), head.list);
+ tomoyo_put_name(ptr->manager);
+}
+
+/**
+ * tomoyo_del_acl - Delete members in "struct tomoyo_acl_info".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_del_acl(struct list_head *element)
+{
+ struct tomoyo_acl_info *acl =
+ container_of(element, typeof(*acl), list);
+ tomoyo_put_condition(acl->cond);
+ switch (acl->type) {
+ case TOMOYO_TYPE_PATH_ACL:
+ {
+ struct tomoyo_path_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->name);
+ }
+ break;
+ case TOMOYO_TYPE_PATH2_ACL:
+ {
+ struct tomoyo_path2_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->name1);
+ tomoyo_put_name_union(&entry->name2);
+ }
+ break;
+ case TOMOYO_TYPE_PATH_NUMBER_ACL:
+ {
+ struct tomoyo_path_number_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->name);
+ tomoyo_put_number_union(&entry->number);
+ }
+ break;
+ case TOMOYO_TYPE_MKDEV_ACL:
+ {
+ struct tomoyo_mkdev_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->name);
+ tomoyo_put_number_union(&entry->mode);
+ tomoyo_put_number_union(&entry->major);
+ tomoyo_put_number_union(&entry->minor);
+ }
+ break;
+ case TOMOYO_TYPE_MOUNT_ACL:
+ {
+ struct tomoyo_mount_acl *entry
+ = container_of(acl, typeof(*entry), head);
+ tomoyo_put_name_union(&entry->dev_name);
+ tomoyo_put_name_union(&entry->dir_name);
+ tomoyo_put_name_union(&entry->fs_type);
+ tomoyo_put_number_union(&entry->flags);
+ }
+ break;
+ case TOMOYO_TYPE_ENV_ACL:
+ {
+ struct tomoyo_env_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name(entry->env);
+ }
+ break;
+ case TOMOYO_TYPE_INET_ACL:
+ {
+ struct tomoyo_inet_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_group(entry->address.group);
+ tomoyo_put_number_union(&entry->port);
+ }
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ {
+ struct tomoyo_unix_acl *entry =
+ container_of(acl, typeof(*entry), head);
+
+ tomoyo_put_name_union(&entry->name);
+ }
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ {
+ struct tomoyo_task_acl *entry =
+ container_of(acl, typeof(*entry), head);
+ tomoyo_put_name(entry->domainname);
+ }
+ break;
+ }
+}
+
+/**
+ * tomoyo_del_domain - Delete members in "struct tomoyo_domain_info".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static inline void tomoyo_del_domain(struct list_head *element)
+{
+ struct tomoyo_domain_info *domain =
+ container_of(element, typeof(*domain), list);
+ struct tomoyo_acl_info *acl;
+ struct tomoyo_acl_info *tmp;
+ /*
+ * Since this domain is referenced from neither
+ * "struct tomoyo_io_buffer" nor "struct cred"->security, we can delete
+ * elements without checking for is_deleted flag.
+ */
+ list_for_each_entry_safe(acl, tmp, &domain->acl_info_list, list) {
+ tomoyo_del_acl(&acl->list);
+ tomoyo_memory_free(acl);
+ }
+ tomoyo_put_name(domain->domainname);
+}
+
+/**
+ * tomoyo_del_condition - Delete members in "struct tomoyo_condition".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+void tomoyo_del_condition(struct list_head *element)
+{
+ struct tomoyo_condition *cond = container_of(element, typeof(*cond),
+ head.list);
+ const u16 condc = cond->condc;
+ const u16 numbers_count = cond->numbers_count;
+ const u16 names_count = cond->names_count;
+ const u16 argc = cond->argc;
+ const u16 envc = cond->envc;
+ unsigned int i;
+ const struct tomoyo_condition_element *condp
+ = (const struct tomoyo_condition_element *) (cond + 1);
+ struct tomoyo_number_union *numbers_p
+ = (struct tomoyo_number_union *) (condp + condc);
+ struct tomoyo_name_union *names_p
+ = (struct tomoyo_name_union *) (numbers_p + numbers_count);
+ const struct tomoyo_argv *argv
+ = (const struct tomoyo_argv *) (names_p + names_count);
+ const struct tomoyo_envp *envp
+ = (const struct tomoyo_envp *) (argv + argc);
+ for (i = 0; i < numbers_count; i++)
+ tomoyo_put_number_union(numbers_p++);
+ for (i = 0; i < names_count; i++)
+ tomoyo_put_name_union(names_p++);
+ for (i = 0; i < argc; argv++, i++)
+ tomoyo_put_name(argv->value);
+ for (i = 0; i < envc; envp++, i++) {
+ tomoyo_put_name(envp->name);
+ tomoyo_put_name(envp->value);
+ }
+}
+
+/**
+ * tomoyo_del_name - Delete members in "struct tomoyo_name".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_name(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_del_path_group - Delete members in "struct tomoyo_path_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_path_group(struct list_head *element)
+{
+ struct tomoyo_path_group *member =
+ container_of(element, typeof(*member), head.list);
+ tomoyo_put_name(member->member_name);
+}
+
+/**
+ * tomoyo_del_group - Delete "struct tomoyo_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_group(struct list_head *element)
+{
+ struct tomoyo_group *group =
+ container_of(element, typeof(*group), head.list);
+ tomoyo_put_name(group->group_name);
+}
+
+/**
+ * tomoyo_del_address_group - Delete members in "struct tomoyo_address_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_address_group(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_del_number_group - Delete members in "struct tomoyo_number_group".
+ *
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static inline void tomoyo_del_number_group(struct list_head *element)
+{
+ /* Nothing to do. */
+}
+
+/**
+ * tomoyo_try_to_gc - Try to kfree() an entry.
+ *
+ * @type: One of values in "enum tomoyo_policy_id".
+ * @element: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+static void tomoyo_try_to_gc(const enum tomoyo_policy_id type,
+ struct list_head *element)
+{
+ /*
+	 * __list_del_entry() guarantees that this list element is no longer
+	 * reachable from the list it was originally on (e.g.
+	 * tomoyo_domain_list). Also, synchronize_srcu() guarantees that the
+	 * list element is no longer referenced by syscall users.
+ */
+ __list_del_entry(element);
+ mutex_unlock(&tomoyo_policy_lock);
+ synchronize_srcu(&tomoyo_ss);
+ /*
+ * However, there are two users which may still be using the list
+ * element. We need to defer until both users forget this element.
+ *
+ * Don't kfree() until "struct tomoyo_io_buffer"->r.{domain,group,acl}
+ * and "struct tomoyo_io_buffer"->w.domain forget this element.
+ */
+ if (tomoyo_struct_used_by_io_buffer(element))
+ goto reinject;
+ switch (type) {
+ case TOMOYO_ID_TRANSITION_CONTROL:
+ tomoyo_del_transition_control(element);
+ break;
+ case TOMOYO_ID_MANAGER:
+ tomoyo_del_manager(element);
+ break;
+ case TOMOYO_ID_AGGREGATOR:
+ tomoyo_del_aggregator(element);
+ break;
+ case TOMOYO_ID_GROUP:
+ tomoyo_del_group(element);
+ break;
+ case TOMOYO_ID_PATH_GROUP:
+ tomoyo_del_path_group(element);
+ break;
+ case TOMOYO_ID_ADDRESS_GROUP:
+ tomoyo_del_address_group(element);
+ break;
+ case TOMOYO_ID_NUMBER_GROUP:
+ tomoyo_del_number_group(element);
+ break;
+ case TOMOYO_ID_CONDITION:
+ tomoyo_del_condition(element);
+ break;
+ case TOMOYO_ID_NAME:
+ /*
+ * Don't kfree() until all "struct tomoyo_io_buffer"->r.w[]
+ * forget this element.
+ */
+ if (tomoyo_name_used_by_io_buffer
+ (container_of(element, typeof(struct tomoyo_name),
+ head.list)->entry.name))
+ goto reinject;
+ tomoyo_del_name(element);
+ break;
+ case TOMOYO_ID_ACL:
+ tomoyo_del_acl(element);
+ break;
+ case TOMOYO_ID_DOMAIN:
+ /*
+ * Don't kfree() until all "struct cred"->security forget this
+ * element.
+ */
+ if (atomic_read(&container_of
+ (element, typeof(struct tomoyo_domain_info),
+ list)->users))
+ goto reinject;
+ break;
+ case TOMOYO_MAX_POLICY:
+ break;
+ }
+ mutex_lock(&tomoyo_policy_lock);
+ if (type == TOMOYO_ID_DOMAIN)
+ tomoyo_del_domain(element);
+ tomoyo_memory_free(element);
+ return;
+reinject:
+ /*
+	 * We can safely reinject this element here because
+	 * (1) appending and removing list elements are protected by
+	 *     tomoyo_policy_lock mutex, and
+	 * (2) only this function removes list elements, and it is serialized
+	 *     by tomoyo_gc_mutex mutex.
+ */
+ mutex_lock(&tomoyo_policy_lock);
+ list_add_rcu(element, element->prev);
+}
+
+/**
+ * tomoyo_collect_member - Delete elements with "struct tomoyo_acl_head".
+ *
+ * @id: One of values in "enum tomoyo_policy_id".
+ * @member_list: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_member(const enum tomoyo_policy_id id,
+ struct list_head *member_list)
+{
+ struct tomoyo_acl_head *member;
+ struct tomoyo_acl_head *tmp;
+ list_for_each_entry_safe(member, tmp, member_list, list) {
+ if (!member->is_deleted)
+ continue;
+ member->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(id, &member->list);
+ }
+}
+
+/**
+ * tomoyo_collect_acl - Delete elements in "struct tomoyo_domain_info".
+ *
+ * @list: Pointer to "struct list_head".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_acl(struct list_head *list)
+{
+ struct tomoyo_acl_info *acl;
+ struct tomoyo_acl_info *tmp;
+ list_for_each_entry_safe(acl, tmp, list, list) {
+ if (!acl->is_deleted)
+ continue;
+ acl->is_deleted = TOMOYO_GC_IN_PROGRESS;
+ tomoyo_try_to_gc(TOMOYO_ID_ACL, &acl->list);
+ }
+}
+
+/**
+ * tomoyo_collect_entry - Try to kfree() deleted elements.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_collect_entry(void)
+{
+ int i;
+ enum tomoyo_policy_id id;
+ struct tomoyo_policy_namespace *ns;
+ mutex_lock(&tomoyo_policy_lock);
+ {
+ struct tomoyo_domain_info *domain;
+ struct tomoyo_domain_info *tmp;
+ list_for_each_entry_safe(domain, tmp, &tomoyo_domain_list,
+ list) {
+ tomoyo_collect_acl(&domain->acl_info_list);
+ if (!domain->is_deleted || atomic_read(&domain->users))
+ continue;
+ tomoyo_try_to_gc(TOMOYO_ID_DOMAIN, &domain->list);
+ }
+ }
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+ for (id = 0; id < TOMOYO_MAX_POLICY; id++)
+ tomoyo_collect_member(id, &ns->policy_list[id]);
+ for (i = 0; i < TOMOYO_MAX_ACL_GROUPS; i++)
+ tomoyo_collect_acl(&ns->acl_group[i]);
+ }
+ {
+ struct tomoyo_shared_acl_head *ptr;
+ struct tomoyo_shared_acl_head *tmp;
+ list_for_each_entry_safe(ptr, tmp, &tomoyo_condition_list,
+ list) {
+ if (atomic_read(&ptr->users) > 0)
+ continue;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_CONDITION, &ptr->list);
+ }
+ }
+ list_for_each_entry(ns, &tomoyo_namespace_list, namespace_list) {
+ for (i = 0; i < TOMOYO_MAX_GROUP; i++) {
+ struct list_head *list = &ns->group_list[i];
+ struct tomoyo_group *group;
+ struct tomoyo_group *tmp;
+ switch (i) {
+ case 0:
+ id = TOMOYO_ID_PATH_GROUP;
+ break;
+ case 1:
+ id = TOMOYO_ID_NUMBER_GROUP;
+ break;
+ default:
+ id = TOMOYO_ID_ADDRESS_GROUP;
+ break;
+ }
+ list_for_each_entry_safe(group, tmp, list, head.list) {
+ tomoyo_collect_member(id, &group->member_list);
+ if (!list_empty(&group->member_list) ||
+ atomic_read(&group->head.users) > 0)
+ continue;
+ atomic_set(&group->head.users,
+ TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_GROUP,
+ &group->head.list);
+ }
+ }
+ }
+ for (i = 0; i < TOMOYO_MAX_HASH; i++) {
+ struct list_head *list = &tomoyo_name_list[i];
+ struct tomoyo_shared_acl_head *ptr;
+ struct tomoyo_shared_acl_head *tmp;
+ list_for_each_entry_safe(ptr, tmp, list, list) {
+ if (atomic_read(&ptr->users) > 0)
+ continue;
+ atomic_set(&ptr->users, TOMOYO_GC_IN_PROGRESS);
+ tomoyo_try_to_gc(TOMOYO_ID_NAME, &ptr->list);
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+}
+
+/**
+ * tomoyo_gc_thread - Garbage collector thread function.
+ *
+ * @unused: Unused.
+ *
+ * Returns 0.
+ */
+static int tomoyo_gc_thread(void *unused)
+{
+ /* Garbage collector thread is exclusive. */
+ static DEFINE_MUTEX(tomoyo_gc_mutex);
+ if (!mutex_trylock(&tomoyo_gc_mutex))
+ goto out;
+ tomoyo_collect_entry();
+ {
+ struct tomoyo_io_buffer *head;
+ struct tomoyo_io_buffer *tmp;
+
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ list_for_each_entry_safe(head, tmp, &tomoyo_io_buffer_list,
+ list) {
+ if (head->users)
+ continue;
+ list_del(&head->list);
+ kfree(head->read_buf);
+ kfree(head->write_buf);
+ kfree(head);
+ }
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ }
+ mutex_unlock(&tomoyo_gc_mutex);
+out:
+ /* This acts as do_exit(0). */
+ return 0;
+}
+
+/**
+ * tomoyo_notify_gc - Register/unregister /sys/kernel/security/tomoyo/ users.
+ *
+ * @head: Pointer to "struct tomoyo_io_buffer".
+ * @is_register: True if register, false if unregister.
+ *
+ * Returns nothing.
+ */
+void tomoyo_notify_gc(struct tomoyo_io_buffer *head, const bool is_register)
+{
+ bool is_write = false;
+
+ spin_lock(&tomoyo_io_buffer_list_lock);
+ if (is_register) {
+ head->users = 1;
+ list_add(&head->list, &tomoyo_io_buffer_list);
+ } else {
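+		/*
+		 * Policy entries can be deleted only via a writable
+		 * interface, so the garbage collector is started only when a
+		 * writer is unregistered.
+		 */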
+ is_write = head->write_buf != NULL;
+ if (!--head->users) {
+ list_del(&head->list);
+ kfree(head->read_buf);
+ kfree(head->write_buf);
+ kfree(head);
+ }
+ }
+ spin_unlock(&tomoyo_io_buffer_list_lock);
+ if (is_write) {
+ struct task_struct *task = kthread_create(tomoyo_gc_thread,
+ NULL,
+ "GC for TOMOYO");
+ if (!IS_ERR(task))
+ wake_up_process(task);
+ }
+}
diff --git a/security/tomoyo/group.c b/security/tomoyo/group.c
new file mode 100644
index 00000000000..50092534ec5
--- /dev/null
+++ b/security/tomoyo/group.c
@@ -0,0 +1,198 @@
+/*
+ * security/tomoyo/group.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include "common.h"
+
+/**
+ * tomoyo_same_path_group - Check for duplicated "struct tomoyo_path_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_path_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ return container_of(a, struct tomoyo_path_group, head)->member_name ==
+ container_of(b, struct tomoyo_path_group, head)->member_name;
+}
+
+/**
+ * tomoyo_same_number_group - Check for duplicated "struct tomoyo_number_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_number_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ return !memcmp(&container_of(a, struct tomoyo_number_group, head)
+ ->number,
+ &container_of(b, struct tomoyo_number_group, head)
+ ->number,
+ sizeof(container_of(a, struct tomoyo_number_group, head)
+ ->number));
+}
+
+/**
+ * tomoyo_same_address_group - Check for duplicated "struct tomoyo_address_group" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_head".
+ * @b: Pointer to "struct tomoyo_acl_head".
+ *
+ * Returns true if @a == @b, false otherwise.
+ */
+static bool tomoyo_same_address_group(const struct tomoyo_acl_head *a,
+ const struct tomoyo_acl_head *b)
+{
+ const struct tomoyo_address_group *p1 = container_of(a, typeof(*p1),
+ head);
+ const struct tomoyo_address_group *p2 = container_of(b, typeof(*p2),
+ head);
+
+ return tomoyo_same_ipaddr_union(&p1->address, &p2->address);
+}
+
+/**
+ * tomoyo_write_group - Write "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group" list.
+ *
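+		/*
+		 * An "@" prefix refers to an address_group; skip it before
+		 * looking the group up.
+		 */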
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @type: Type of this group.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_write_group(struct tomoyo_acl_param *param, const u8 type)
+{
+ struct tomoyo_group *group = tomoyo_get_group(param, type);
+ int error = -EINVAL;
+ if (!group)
+ return -ENOMEM;
+ param->list = &group->member_list;
+ if (type == TOMOYO_PATH_GROUP) {
+ struct tomoyo_path_group e = { };
+ e.member_name = tomoyo_get_name(tomoyo_read_token(param));
+ if (!e.member_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_path_group);
+ tomoyo_put_name(e.member_name);
+ } else if (type == TOMOYO_NUMBER_GROUP) {
+ struct tomoyo_number_group e = { };
+ if (param->data[0] == '@' ||
+ !tomoyo_parse_number_union(param, &e.number))
+ goto out;
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_number_group);
+ /*
+ * tomoyo_put_number_union() is not needed because
+ * param->data[0] != '@'.
+ */
+ } else {
+ struct tomoyo_address_group e = { };
+
+ if (param->data[0] == '@' ||
+ !tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ error = tomoyo_update_policy(&e.head, sizeof(e), param,
+ tomoyo_same_address_group);
+ }
+out:
+ tomoyo_put_group(group);
+ return error;
+}
+
+/**
+ * tomoyo_path_matches_group - Check whether the given pathname matches members of the given pathname group.
+ *
+ * @pathname: Pathname to check.
+ * @group: Pointer to "struct tomoyo_group".
+ *
+ * Returns matched member's pathname if @pathname matches pathnames in @group,
+ * NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+const struct tomoyo_path_info *
+tomoyo_path_matches_group(const struct tomoyo_path_info *pathname,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_path_group *member;
+ list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ if (member->head.is_deleted)
+ continue;
+ if (!tomoyo_path_matches_pattern(pathname, member->member_name))
+ continue;
+ return member->member_name;
+ }
+ return NULL;
+}
+
+/**
+ * tomoyo_number_matches_group - Check whether the given number matches members of the given number group.
+ *
+ * @min: Min number.
+ * @max: Max number.
+ * @group: Pointer to "struct tomoyo_group".
+ *
+ * Returns true if @min and @max partially overlaps @group, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_number_matches_group(const unsigned long min,
+ const unsigned long max,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_number_group *member;
+ bool matched = false;
+ list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ if (member->head.is_deleted)
+ continue;
+ if (min > member->number.values[1] ||
+ max < member->number.values[0])
+ continue;
+ matched = true;
+ break;
+ }
+ return matched;
+}
+
+/**
+ * tomoyo_address_matches_group - Check whether the given address matches members of the given address group.
+ *
+ * @is_ipv6: True if @address is an IPv6 address.
+ * @address: An IPv4 or IPv6 address.
+ * @group: Pointer to "struct tomoyo_group".
+ *
+ * Returns true if @address matches addresses in @group, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_address_matches_group(const bool is_ipv6, const __be32 *address,
+ const struct tomoyo_group *group)
+{
+ struct tomoyo_address_group *member;
+ bool matched = false;
+ const u8 size = is_ipv6 ? 16 : 4;
+
+ list_for_each_entry_rcu(member, &group->member_list, head.list) {
+ if (member->head.is_deleted)
+ continue;
+ if (member->address.is_ipv6 != is_ipv6)
+ continue;
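+		/*
+		 * Addresses are kept in network byte order, so byte-wise
+		 * comparison against the [min, max] pair gives numeric range
+		 * matching.
+		 */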
+ if (memcmp(&member->address.ip[0], address, size) > 0 ||
+ memcmp(address, &member->address.ip[1], size) > 0)
+ continue;
+ matched = true;
+ break;
+ }
+ return matched;
+}
diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c
new file mode 100644
index 00000000000..078fac0bb4c
--- /dev/null
+++ b/security/tomoyo/load_policy.c
@@ -0,0 +1,109 @@
+/*
+ * security/tomoyo/load_policy.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
+
+/*
+ * Path to the policy loader. (default = CONFIG_SECURITY_TOMOYO_POLICY_LOADER)
+ */
+static const char *tomoyo_loader;
+
+/**
+ * tomoyo_loader_setup - Set policy loader.
+ *
+ * @str: Program to use as a policy loader (e.g. /sbin/tomoyo-init ).
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_loader_setup(char *str)
+{
+ tomoyo_loader = str;
+ return 0;
+}
+
+__setup("TOMOYO_loader=", tomoyo_loader_setup);
+
+/**
+ * tomoyo_policy_loader_exists - Check whether /sbin/tomoyo-init exists.
+ *
+ * Returns true if /sbin/tomoyo-init exists, false otherwise.
+ */
+static bool tomoyo_policy_loader_exists(void)
+{
+ struct path path;
+ if (!tomoyo_loader)
+ tomoyo_loader = CONFIG_SECURITY_TOMOYO_POLICY_LOADER;
+ if (kern_path(tomoyo_loader, LOOKUP_FOLLOW, &path)) {
+ printk(KERN_INFO "Not activating Mandatory Access Control "
+ "as %s does not exist.\n", tomoyo_loader);
+ return false;
+ }
+ path_put(&path);
+ return true;
+}
+
+/*
+ * Path to the trigger. (default = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER)
+ */
+static const char *tomoyo_trigger;
+
+/**
+ * tomoyo_trigger_setup - Set trigger for activation.
+ *
+ * @str: Program to use as an activation trigger (e.g. /sbin/init ).
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_trigger_setup(char *str)
+{
+ tomoyo_trigger = str;
+ return 0;
+}
+
+__setup("TOMOYO_trigger=", tomoyo_trigger_setup);
+
+/**
+ * tomoyo_load_policy - Run external policy loader to load policy.
+ *
+ * @filename: The program about to start.
+ *
+ * This function checks whether @filename is /sbin/init , and if so
+ * invokes /sbin/tomoyo-init , waits for its termination, and then
+ * continues the invocation of /sbin/init.
+ * /sbin/tomoyo-init reads policy files in /etc/tomoyo/ directory and
+ * writes to /sys/kernel/security/tomoyo/ interfaces.
+ *
+ * Returns nothing.
+ */
+void tomoyo_load_policy(const char *filename)
+{
+ static bool done;
+ char *argv[2];
+ char *envp[3];
+
+ if (tomoyo_policy_loaded || done)
+ return;
+ if (!tomoyo_trigger)
+ tomoyo_trigger = CONFIG_SECURITY_TOMOYO_ACTIVATION_TRIGGER;
+ if (strcmp(filename, tomoyo_trigger))
+ return;
+ if (!tomoyo_policy_loader_exists())
+ return;
+ done = true;
+ printk(KERN_INFO "Calling %s to load policy. Please wait.\n",
+ tomoyo_loader);
+ argv[0] = (char *) tomoyo_loader;
+ argv[1] = NULL;
+ envp[0] = "HOME=/";
+ envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[2] = NULL;
+ call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ tomoyo_check_profile();
+}
+
+#endif
diff --git a/security/tomoyo/memory.c b/security/tomoyo/memory.c
new file mode 100644
index 00000000000..0e995716cc2
--- /dev/null
+++ b/security/tomoyo/memory.c
@@ -0,0 +1,201 @@
+/*
+ * security/tomoyo/memory.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/hash.h>
+#include <linux/slab.h>
+#include "common.h"
+
+/**
+ * tomoyo_warn_oom - Print out of memory warning message.
+ *
+ * @function: Function's name.
+ */
+void tomoyo_warn_oom(const char *function)
+{
+ /* Reduce error messages. */
+ static pid_t tomoyo_last_pid;
+ const pid_t pid = current->pid;
+ if (tomoyo_last_pid != pid) {
+ printk(KERN_WARNING "ERROR: Out of memory at %s.\n",
+ function);
+ tomoyo_last_pid = pid;
+ }
+ if (!tomoyo_policy_loaded)
+ panic("MAC Initialization failed.\n");
+}
+
+/* Memory currently used by policy/audit log/query. */
+unsigned int tomoyo_memory_used[TOMOYO_MAX_MEMORY_STAT];
+/* Memory quota for "policy"/"audit log"/"query". */
+unsigned int tomoyo_memory_quota[TOMOYO_MAX_MEMORY_STAT];
+
+/**
+ * tomoyo_memory_ok - Check memory quota.
+ *
+ * @ptr: Pointer to allocated memory.
+ *
+ * Returns true if @ptr is not NULL and quota is not exceeded, false otherwise.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+bool tomoyo_memory_ok(void *ptr)
+{
+ if (ptr) {
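+		/*
+		 * Charge the actual size of the slab object rather than the
+		 * requested size.
+		 */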
+ const size_t s = ksize(ptr);
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] += s;
+ if (!tomoyo_memory_quota[TOMOYO_MEMORY_POLICY] ||
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] <=
+ tomoyo_memory_quota[TOMOYO_MEMORY_POLICY])
+ return true;
+ tomoyo_memory_used[TOMOYO_MEMORY_POLICY] -= s;
+ }
+ tomoyo_warn_oom(__func__);
+ return false;
+}
+
+/**
+ * tomoyo_commit_ok - Allocate memory for a new entry, checking memory quota.
+ *
+ * @data: Data to copy from.
+ * @size: Size in byte.
+ *
+ * Returns pointer to allocated memory on success, NULL otherwise.
+ * @data is zero-cleared on success.
+ *
+ * Caller holds tomoyo_policy_lock mutex.
+ */
+void *tomoyo_commit_ok(void *data, const unsigned int size)
+{
+ void *ptr = kzalloc(size, GFP_NOFS);
+ if (tomoyo_memory_ok(ptr)) {
+ memmove(ptr, data, size);
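+		/*
+		 * Zero-clear @data so that references now owned by the new
+		 * entry are not dropped by the caller's cleanup code.
+		 */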
+ memset(data, 0, size);
+ return ptr;
+ }
+ kfree(ptr);
+ return NULL;
+}
+
+/**
+ * tomoyo_get_group - Allocate memory for "struct tomoyo_path_group"/"struct tomoyo_number_group"/"struct tomoyo_address_group".
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @idx: Index number.
+ *
+ * Returns pointer to "struct tomoyo_group" on success, NULL otherwise.
+ */
+struct tomoyo_group *tomoyo_get_group(struct tomoyo_acl_param *param,
+ const u8 idx)
+{
+ struct tomoyo_group e = { };
+ struct tomoyo_group *group = NULL;
+ struct list_head *list;
+ const char *group_name = tomoyo_read_token(param);
+ bool found = false;
+ if (!tomoyo_correct_word(group_name) || idx >= TOMOYO_MAX_GROUP)
+ return NULL;
+ e.group_name = tomoyo_get_name(group_name);
+ if (!e.group_name)
+ return NULL;
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ goto out;
+ list = &param->ns->group_list[idx];
+ list_for_each_entry(group, list, head.list) {
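+		/*
+		 * Reuse an existing group with the same name unless the
+		 * garbage collector has already claimed it.
+		 */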
+ if (e.group_name != group->group_name ||
+ atomic_read(&group->head.users) == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ atomic_inc(&group->head.users);
+ found = true;
+ break;
+ }
+ if (!found) {
+ struct tomoyo_group *entry = tomoyo_commit_ok(&e, sizeof(e));
+ if (entry) {
+ INIT_LIST_HEAD(&entry->member_list);
+ atomic_set(&entry->head.users, 1);
+ list_add_tail_rcu(&entry->head.list, list);
+ group = entry;
+ found = true;
+ }
+ }
+ mutex_unlock(&tomoyo_policy_lock);
+out:
+ tomoyo_put_name(e.group_name);
+ return found ? group : NULL;
+}
+
+/*
+ * tomoyo_name_list is used for holding string data used by TOMOYO.
+ * Since the same string data is likely to be used multiple times (e.g.
+ * "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
+ * "const struct tomoyo_path_info *".
+ */
+struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
+
+/**
+ * tomoyo_get_name - Allocate permanent memory for string data.
+ *
+ * @name: The string to store into the permanent memory.
+ *
+ * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_name(const char *name)
+{
+ struct tomoyo_name *ptr;
+ unsigned int hash;
+ int len;
+ struct list_head *head;
+
+ if (!name)
+ return NULL;
+ len = strlen(name) + 1;
+ hash = full_name_hash((const unsigned char *) name, len - 1);
+ head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
+ if (mutex_lock_interruptible(&tomoyo_policy_lock))
+ return NULL;
+ list_for_each_entry(ptr, head, head.list) {
+ if (hash != ptr->entry.hash || strcmp(name, ptr->entry.name) ||
+ atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS)
+ continue;
+ atomic_inc(&ptr->head.users);
+ goto out;
+ }
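+	/*
+	 * Not interned yet; allocate a new entry with the string stored
+	 * immediately after the structure.
+	 */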
+ ptr = kzalloc(sizeof(*ptr) + len, GFP_NOFS);
+ if (tomoyo_memory_ok(ptr)) {
+ ptr->entry.name = ((char *) ptr) + sizeof(*ptr);
+ memmove((char *) ptr->entry.name, name, len);
+ atomic_set(&ptr->head.users, 1);
+ tomoyo_fill_path_info(&ptr->entry);
+ list_add_tail(&ptr->head.list, head);
+ } else {
+ kfree(ptr);
+ ptr = NULL;
+ }
+out:
+ mutex_unlock(&tomoyo_policy_lock);
+ return ptr ? &ptr->entry : NULL;
+}
+
+/* Initial namespace. */
+struct tomoyo_policy_namespace tomoyo_kernel_namespace;
+
+/**
+ * tomoyo_mm_init - Initialize mm related code.
+ */
+void __init tomoyo_mm_init(void)
+{
+ int idx;
+ for (idx = 0; idx < TOMOYO_MAX_HASH; idx++)
+ INIT_LIST_HEAD(&tomoyo_name_list[idx]);
+ tomoyo_kernel_namespace.name = "<kernel>";
+ tomoyo_init_policy_namespace(&tomoyo_kernel_namespace);
+ tomoyo_kernel_domain.ns = &tomoyo_kernel_namespace;
+ INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
+ tomoyo_kernel_domain.domainname = tomoyo_get_name("<kernel>");
+ list_add_tail_rcu(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
+}
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
new file mode 100644
index 00000000000..390c646013c
--- /dev/null
+++ b/security/tomoyo/mount.c
@@ -0,0 +1,236 @@
+/*
+ * security/tomoyo/mount.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include "common.h"
+
+/* String table for special mount operations. */
+static const char * const tomoyo_mounts[TOMOYO_MAX_SPECIAL_MOUNT] = {
+ [TOMOYO_MOUNT_BIND] = "--bind",
+ [TOMOYO_MOUNT_MOVE] = "--move",
+ [TOMOYO_MOUNT_REMOUNT] = "--remount",
+ [TOMOYO_MOUNT_MAKE_UNBINDABLE] = "--make-unbindable",
+ [TOMOYO_MOUNT_MAKE_PRIVATE] = "--make-private",
+ [TOMOYO_MOUNT_MAKE_SLAVE] = "--make-slave",
+ [TOMOYO_MOUNT_MAKE_SHARED] = "--make-shared",
+};
+
+/**
+ * tomoyo_audit_mount_log - Audit mount log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_mount_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_supervisor(r, "file mount %s %s %s 0x%lX\n",
+ r->param.mount.dev->name,
+ r->param.mount.dir->name,
+ r->param.mount.type->name,
+ r->param.mount.flags);
+}
+
+/**
+ * tomoyo_check_mount_acl - Check permission for mount operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_mount_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ return tomoyo_compare_number_union(r->param.mount.flags,
+ &acl->flags) &&
+ tomoyo_compare_name_union(r->param.mount.type,
+ &acl->fs_type) &&
+ tomoyo_compare_name_union(r->param.mount.dir,
+ &acl->dir_name) &&
+ (!r->param.mount.need_dev ||
+ tomoyo_compare_name_union(r->param.mount.dev,
+ &acl->dev_name));
+}
+
+/**
+ * tomoyo_mount_acl - Check permission for mount() operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @dev_name: Name of device file. May be NULL.
+ * @dir: Pointer to "struct path".
+ * @type: Name of filesystem type.
+ * @flags: Mount options.
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+static int tomoyo_mount_acl(struct tomoyo_request_info *r,
+ const char *dev_name,
+ struct path *dir, const char *type,
+ unsigned long flags)
+{
+ struct tomoyo_obj_info obj = { };
+ struct path path;
+ struct file_system_type *fstype = NULL;
+ const char *requested_type = NULL;
+ const char *requested_dir_name = NULL;
+ const char *requested_dev_name = NULL;
+ struct tomoyo_path_info rtype;
+ struct tomoyo_path_info rdev;
+ struct tomoyo_path_info rdir;
+ int need_dev = 0;
+ int error = -ENOMEM;
+ r->obj = &obj;
+
+ /* Get fstype. */
+ requested_type = tomoyo_encode(type);
+ if (!requested_type)
+ goto out;
+ rtype.name = requested_type;
+ tomoyo_fill_path_info(&rtype);
+
+ /* Get mount point. */
+ obj.path2 = *dir;
+ requested_dir_name = tomoyo_realpath_from_path(dir);
+ if (!requested_dir_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ rdir.name = requested_dir_name;
+ tomoyo_fill_path_info(&rdir);
+
+ /* Compare fs name. */
+ if (type == tomoyo_mounts[TOMOYO_MOUNT_REMOUNT]) {
+ /* dev_name is ignored. */
+ } else if (type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE] ||
+ type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE] ||
+ type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE] ||
+ type == tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED]) {
+ /* dev_name is ignored. */
+ } else if (type == tomoyo_mounts[TOMOYO_MOUNT_BIND] ||
+ type == tomoyo_mounts[TOMOYO_MOUNT_MOVE]) {
+ need_dev = -1; /* dev_name is a directory */
+ } else {
+ fstype = get_fs_type(type);
+ if (!fstype) {
+ error = -ENODEV;
+ goto out;
+ }
+ if (fstype->fs_flags & FS_REQUIRES_DEV)
+ /* dev_name is a block device file. */
+ need_dev = 1;
+ }
+ if (need_dev) {
+ /* Get mount point or device file. */
+ if (!dev_name || kern_path(dev_name, LOOKUP_FOLLOW, &path)) {
+ error = -ENOENT;
+ goto out;
+ }
+ obj.path1 = path;
+ requested_dev_name = tomoyo_realpath_from_path(&path);
+ if (!requested_dev_name) {
+ error = -ENOENT;
+ goto out;
+ }
+ } else {
+ /* Map dev_name to "<NULL>" if no dev_name given. */
+ if (!dev_name)
+ dev_name = "<NULL>";
+ requested_dev_name = tomoyo_encode(dev_name);
+ if (!requested_dev_name) {
+ error = -ENOMEM;
+ goto out;
+ }
+ }
+ rdev.name = requested_dev_name;
+ tomoyo_fill_path_info(&rdev);
+ r->param_type = TOMOYO_TYPE_MOUNT_ACL;
+ r->param.mount.need_dev = need_dev;
+ r->param.mount.dev = &rdev;
+ r->param.mount.dir = &rdir;
+ r->param.mount.type = &rtype;
+ r->param.mount.flags = flags;
+ do {
+ tomoyo_check_acl(r, tomoyo_check_mount_acl);
+ error = tomoyo_audit_mount_log(r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ out:
+ kfree(requested_dev_name);
+ kfree(requested_dir_name);
+ if (fstype)
+ put_filesystem(fstype);
+ kfree(requested_type);
+ /* Drop refcount obtained by kern_path(). */
+ if (obj.path1.dentry)
+ path_put(&obj.path1);
+ return error;
+}
+
+/**
+ * tomoyo_mount_permission - Check permission for mount() operation.
+ *
+ * @dev_name: Name of device file. May be NULL.
+ * @path: Pointer to "struct path".
+ * @type: Name of filesystem type. May be NULL.
+ * @flags: Mount options.
+ * @data_page: Optional data. May be NULL.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_mount_permission(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags,
+ void *data_page)
+{
+ struct tomoyo_request_info r;
+ int error;
+ int idx;
+
+ if (tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_MOUNT)
+ == TOMOYO_CONFIG_DISABLED)
+ return 0;
+ if ((flags & MS_MGC_MSK) == MS_MGC_VAL)
+ flags &= ~MS_MGC_MSK;
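+	/*
+	 * Map well-known mount flags to the pseudo filesystem type names
+	 * used by the policy language, clearing the corresponding bits.
+	 */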
+ if (flags & MS_REMOUNT) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_REMOUNT];
+ flags &= ~MS_REMOUNT;
+ } else if (flags & MS_BIND) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_BIND];
+ flags &= ~MS_BIND;
+ } else if (flags & MS_SHARED) {
+ if (flags & (MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED];
+ flags &= ~MS_SHARED;
+ } else if (flags & MS_PRIVATE) {
+ if (flags & (MS_SHARED | MS_SLAVE | MS_UNBINDABLE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE];
+ flags &= ~MS_PRIVATE;
+ } else if (flags & MS_SLAVE) {
+ if (flags & (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE];
+ flags &= ~MS_SLAVE;
+ } else if (flags & MS_UNBINDABLE) {
+ if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE];
+ flags &= ~MS_UNBINDABLE;
+ } else if (flags & MS_MOVE) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_MOVE];
+ flags &= ~MS_MOVE;
+ }
+ if (!type)
+ type = "<NULL>";
+ idx = tomoyo_read_lock();
+ error = tomoyo_mount_acl(&r, dev_name, path, type, flags);
+ tomoyo_read_unlock(idx);
+ return error;
+}
diff --git a/security/tomoyo/network.c b/security/tomoyo/network.c
new file mode 100644
index 00000000000..97527710a72
--- /dev/null
+++ b/security/tomoyo/network.c
@@ -0,0 +1,771 @@
+/*
+ * security/tomoyo/network.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include "common.h"
+#include <linux/slab.h>
+
+/* Structure for holding inet domain socket's address. */
+struct tomoyo_inet_addr_info {
+ __be16 port; /* In network byte order. */
+ const __be32 *address; /* In network byte order. */
+ bool is_ipv6;
+};
+
+/* Structure for holding unix domain socket's address. */
+struct tomoyo_unix_addr_info {
+ u8 *addr; /* This may not be '\0' terminated string. */
+ unsigned int addr_len;
+};
+
+/* Structure for holding socket address. */
+struct tomoyo_addr_info {
+ u8 protocol;
+ u8 operation;
+ struct tomoyo_inet_addr_info inet;
+ struct tomoyo_unix_addr_info unix0;
+};
+
+/* String table for socket's protocols. */
+const char * const tomoyo_proto_keyword[TOMOYO_SOCK_MAX] = {
+ [SOCK_STREAM] = "stream",
+ [SOCK_DGRAM] = "dgram",
+ [SOCK_RAW] = "raw",
+ [SOCK_SEQPACKET] = "seqpacket",
+ [0] = " ", /* Dummy for avoiding NULL pointer dereference. */
+ [4] = " ", /* Dummy for avoiding NULL pointer dereference. */
+};
+
+/**
+ * tomoyo_parse_ipaddr_union - Parse an IP address.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_ipaddr_union(struct tomoyo_acl_param *param,
+ struct tomoyo_ipaddr_union *ptr)
+{
+ u8 * const min = ptr->ip[0].in6_u.u6_addr8;
+ u8 * const max = ptr->ip[1].in6_u.u6_addr8;
+ char *address = tomoyo_read_token(param);
+ const char *end;
+
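+	/*
+	 * An address is either a single value or a "min-max" range;
+	 * a single value is stored with min == max.
+	 */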
+ if (!strchr(address, ':') &&
+ in4_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = false;
+ if (!*end)
+ ptr->ip[1].s6_addr32[0] = ptr->ip[0].s6_addr32[0];
+ else if (*end++ != '-' ||
+ in4_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ if (in6_pton(address, -1, min, '-', &end) > 0) {
+ ptr->is_ipv6 = true;
+ if (!*end)
+ memmove(max, min, sizeof(u16) * 8);
+ else if (*end++ != '-' ||
+ in6_pton(end, -1, max, '\0', &end) <= 0 || *end)
+ return false;
+ return true;
+ }
+ return false;
+}
+
+/**
+ * tomoyo_print_ipv4 - Print an IPv4 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to __be32.
+ * @max_ip: Pointer to __be32.
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv4(char *buffer, const unsigned int buffer_len,
+ const __be32 *min_ip, const __be32 *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI4%c%pI4", min_ip,
+ *min_ip == *max_ip ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ipv6 - Print an IPv6 address.
+ *
+ * @buffer: Buffer to write to.
+ * @buffer_len: Size of @buffer.
+ * @min_ip: Pointer to "struct in6_addr".
+ * @max_ip: Pointer to "struct in6_addr".
+ *
+ * Returns nothing.
+ */
+static void tomoyo_print_ipv6(char *buffer, const unsigned int buffer_len,
+ const struct in6_addr *min_ip,
+ const struct in6_addr *max_ip)
+{
+ snprintf(buffer, buffer_len, "%pI6c%c%pI6c", min_ip,
+ !memcmp(min_ip, max_ip, 16) ? '\0' : '-', max_ip);
+}
+
+/**
+ * tomoyo_print_ip - Print an IP address.
+ *
+ * @buf: Buffer to write to.
+ * @size: Size of @buf.
+ * @ptr: Pointer to "struct tomoyo_ipaddr_union".
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ip(char *buf, const unsigned int size,
+ const struct tomoyo_ipaddr_union *ptr)
+{
+ if (ptr->is_ipv6)
+ tomoyo_print_ipv6(buf, size, &ptr->ip[0], &ptr->ip[1]);
+ else
+ tomoyo_print_ipv4(buf, size, &ptr->ip[0].s6_addr32[0],
+ &ptr->ip[1].s6_addr32[0]);
+}
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for inet domain socket.
+ */
+static const u8 tomoyo_inet2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_DGRAM_SEND,
+ },
+ [SOCK_RAW] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_INET_RAW_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_INET_RAW_SEND,
+ },
+};
+
+/*
+ * Mapping table from "enum tomoyo_network_acl_index" to
+ * "enum tomoyo_mac_index" for unix domain socket.
+ */
+static const u8 tomoyo_unix2mac
+[TOMOYO_SOCK_MAX][TOMOYO_MAX_NETWORK_OPERATION] = {
+ [SOCK_STREAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT,
+ },
+ [SOCK_DGRAM] = {
+ [TOMOYO_NETWORK_BIND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND,
+ [TOMOYO_NETWORK_SEND] = TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND,
+ },
+ [SOCK_SEQPACKET] = {
+ [TOMOYO_NETWORK_BIND] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND,
+ [TOMOYO_NETWORK_LISTEN] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN,
+ [TOMOYO_NETWORK_CONNECT] =
+ TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT,
+ },
+};
+
+/**
+ * tomoyo_same_inet_acl - Check for duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_inet_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_inet_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_inet_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_ipaddr_union(&p1->address, &p2->address) &&
+ tomoyo_same_number_union(&p1->port, &p2->port);
+}
+
+/**
+ * tomoyo_same_unix_acl - Check for duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if @a == @b except permission bits, false otherwise.
+ */
+static bool tomoyo_same_unix_acl(const struct tomoyo_acl_info *a,
+ const struct tomoyo_acl_info *b)
+{
+ const struct tomoyo_unix_acl *p1 = container_of(a, typeof(*p1), head);
+ const struct tomoyo_unix_acl *p2 = container_of(b, typeof(*p2), head);
+
+ return p1->protocol == p2->protocol &&
+ tomoyo_same_name_union(&p1->name, &p2->name);
+}
+
+/**
+ * tomoyo_merge_inet_acl - Merge duplicated "struct tomoyo_inet_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_inet_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_inet_acl, head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_inet_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
+}
+
+/**
+ * tomoyo_merge_unix_acl - Merge duplicated "struct tomoyo_unix_acl" entry.
+ *
+ * @a: Pointer to "struct tomoyo_acl_info".
+ * @b: Pointer to "struct tomoyo_acl_info".
+ * @is_delete: True for @a &= ~@b, false for @a |= @b.
+ *
+ * Returns true if @a is empty, false otherwise.
+ */
+static bool tomoyo_merge_unix_acl(struct tomoyo_acl_info *a,
+ struct tomoyo_acl_info *b,
+ const bool is_delete)
+{
+ u8 * const a_perm =
+ &container_of(a, struct tomoyo_unix_acl, head)->perm;
+ u8 perm = *a_perm;
+ const u8 b_perm = container_of(b, struct tomoyo_unix_acl, head)->perm;
+
+ if (is_delete)
+ perm &= ~b_perm;
+ else
+ perm |= b_perm;
+ *a_perm = perm;
+ return !perm;
+}
+
+/**
+ * tomoyo_write_inet_network - Write "struct tomoyo_inet_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_inet_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_inet_acl e = { .head.type = TOMOYO_TYPE_INET_ACL };
+ int error = -EINVAL;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (param->data[0] == '@') {
+ param->data++;
+ e.address.group =
+ tomoyo_get_group(param, TOMOYO_ADDRESS_GROUP);
+ if (!e.address.group)
+ return -ENOMEM;
+ } else {
+ if (!tomoyo_parse_ipaddr_union(param, &e.address))
+ goto out;
+ }
+ if (!tomoyo_parse_number_union(param, &e.port) ||
+ e.port.values[1] > 65535)
+ goto out;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_inet_acl,
+ tomoyo_merge_inet_acl);
+out:
+ tomoyo_put_group(e.address.group);
+ tomoyo_put_number_union(&e.port);
+ return error;
+}
+
+/**
+ * tomoyo_write_unix_network - Write "struct tomoyo_unix_acl" list.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns 0 on success, negative value otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+int tomoyo_write_unix_network(struct tomoyo_acl_param *param)
+{
+ struct tomoyo_unix_acl e = { .head.type = TOMOYO_TYPE_UNIX_ACL };
+ int error;
+ u8 type;
+ const char *protocol = tomoyo_read_token(param);
+ const char *operation = tomoyo_read_token(param);
+
+ for (e.protocol = 0; e.protocol < TOMOYO_SOCK_MAX; e.protocol++)
+ if (!strcmp(protocol, tomoyo_proto_keyword[e.protocol]))
+ break;
+ for (type = 0; type < TOMOYO_MAX_NETWORK_OPERATION; type++)
+ if (tomoyo_permstr(operation, tomoyo_socket_keyword[type]))
+ e.perm |= 1 << type;
+ if (e.protocol == TOMOYO_SOCK_MAX || !e.perm)
+ return -EINVAL;
+ if (!tomoyo_parse_name_union(param, &e.name))
+ return -EINVAL;
+ error = tomoyo_update_domain(&e.head, sizeof(e), param,
+ tomoyo_same_unix_acl,
+ tomoyo_merge_unix_acl);
+ tomoyo_put_name_union(&e.name);
+ return error;
+}
+
+/**
+ * tomoyo_audit_net_log - Audit network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @family: Name of socket family ("inet" or "unix").
+ * @protocol: Name of protocol in @family.
+ * @operation: Name of socket operation.
+ * @address: Name of address.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_net_log(struct tomoyo_request_info *r,
+ const char *family, const u8 protocol,
+ const u8 operation, const char *address)
+{
+ return tomoyo_supervisor(r, "network %s %s %s %s\n", family,
+ tomoyo_proto_keyword[protocol],
+ tomoyo_socket_keyword[operation], address);
+}
+
+/**
+ * tomoyo_audit_inet_log - Audit INET network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_inet_log(struct tomoyo_request_info *r)
+{
+ char buf[128];
+ int len;
+ const __be32 *address = r->param.inet_network.address;
+
+ if (r->param.inet_network.is_ipv6)
+ tomoyo_print_ipv6(buf, sizeof(buf), (const struct in6_addr *)
+ address, (const struct in6_addr *) address);
+ else
+ tomoyo_print_ipv4(buf, sizeof(buf), address, address);
+ len = strlen(buf);
+ snprintf(buf + len, sizeof(buf) - len, " %u",
+ r->param.inet_network.port);
+ return tomoyo_audit_net_log(r, "inet", r->param.inet_network.protocol,
+ r->param.inet_network.operation, buf);
+}
+
+/**
+ * tomoyo_audit_unix_log - Audit UNIX network log.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_audit_unix_log(struct tomoyo_request_info *r)
+{
+ return tomoyo_audit_net_log(r, "unix", r->param.unix_network.protocol,
+ r->param.unix_network.operation,
+ r->param.unix_network.address->name);
+}
+
+/**
+ * tomoyo_check_inet_acl - Check permission for inet domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_inet_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_inet_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+ const u8 size = r->param.inet_network.is_ipv6 ? 16 : 4;
+
+ if (!(acl->perm & (1 << r->param.inet_network.operation)) ||
+ !tomoyo_compare_number_union(r->param.inet_network.port,
+ &acl->port))
+ return false;
+ if (acl->address.group)
+ return tomoyo_address_matches_group
+ (r->param.inet_network.is_ipv6,
+ r->param.inet_network.address, acl->address.group);
+ return acl->address.is_ipv6 == r->param.inet_network.is_ipv6 &&
+ memcmp(&acl->address.ip[0],
+ r->param.inet_network.address, size) <= 0 &&
+ memcmp(r->param.inet_network.address,
+ &acl->address.ip[1], size) <= 0;
+}
+
+/**
+ * tomoyo_check_unix_acl - Check permission for unix domain socket operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_unix_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_unix_acl *acl =
+ container_of(ptr, typeof(*acl), head);
+
+ return (acl->perm & (1 << r->param.unix_network.operation)) &&
+ tomoyo_compare_name_union(r->param.unix_network.address,
+ &acl->name);
+}
+
+/**
+ * tomoyo_inet_entry - Check permission for INET network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inet_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_inet2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ r.param_type = TOMOYO_TYPE_INET_ACL;
+ r.param.inet_network.protocol = address->protocol;
+ r.param.inet_network.operation = address->operation;
+ r.param.inet_network.is_ipv6 = address->inet.is_ipv6;
+ r.param.inet_network.address = address->inet.address;
+ r.param.inet_network.port = ntohs(address->inet.port);
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_inet_acl);
+ error = tomoyo_audit_inet_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_inet_address - Check permission for an inet domain socket operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @port: Port number.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_inet_address(const struct sockaddr *addr,
+ const unsigned int addr_len,
+ const u16 port,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_inet_addr_info *i = &address->inet;
+
+ switch (addr->sa_family) {
+ case AF_INET6:
+ if (addr_len < SIN6_LEN_RFC2133)
+ goto skip;
+ i->is_ipv6 = true;
+ i->address = (__be32 *)
+ ((struct sockaddr_in6 *) addr)->sin6_addr.s6_addr;
+ i->port = ((struct sockaddr_in6 *) addr)->sin6_port;
+ break;
+ case AF_INET:
+ if (addr_len < sizeof(struct sockaddr_in))
+ goto skip;
+ i->is_ipv6 = false;
+ i->address = (__be32 *)
+ &((struct sockaddr_in *) addr)->sin_addr;
+ i->port = ((struct sockaddr_in *) addr)->sin_port;
+ break;
+ default:
+ goto skip;
+ }
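+ /* For SOCK_RAW, the protocol number supplied by the caller is checked in place of a port. */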
+ if (address->protocol == SOCK_RAW)
+ i->port = htons(port);
+ return tomoyo_inet_entry(address);
+skip:
+ return 0;
+}
+
+/**
+ * tomoyo_unix_entry - Check permission for UNIX network operation.
+ *
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_unix_entry(const struct tomoyo_addr_info *address)
+{
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_request_info r;
+ int error = 0;
+ const u8 type = tomoyo_unix2mac[address->protocol][address->operation];
+
+ if (type && tomoyo_init_request_info(&r, NULL, type)
+ != TOMOYO_CONFIG_DISABLED) {
+ char *buf = address->unix0.addr;
+ int len = address->unix0.addr_len - sizeof(sa_family_t);
+
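+ /* Treat an unnamed socket as "anonymous"; abstract sockets (leading '\0') keep their full length. */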
+ if (len <= 0) {
+ buf = "anonymous";
+ len = 9;
+ } else if (buf[0]) {
+ len = strnlen(buf, len);
+ }
+ buf = tomoyo_encode2(buf, len);
+ if (buf) {
+ struct tomoyo_path_info addr;
+
+ addr.name = buf;
+ tomoyo_fill_path_info(&addr);
+ r.param_type = TOMOYO_TYPE_UNIX_ACL;
+ r.param.unix_network.protocol = address->protocol;
+ r.param.unix_network.operation = address->operation;
+ r.param.unix_network.address = &addr;
+ do {
+ tomoyo_check_acl(&r, tomoyo_check_unix_acl);
+ error = tomoyo_audit_unix_log(&r);
+ } while (error == TOMOYO_RETRY_REQUEST);
+ kfree(buf);
+ } else
+ error = -ENOMEM;
+ }
+ tomoyo_read_unlock(idx);
+ return error;
+}
+
+/**
+ * tomoyo_check_unix_address - Check permission for a unix domain socket operation.
+ *
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ * @address: Pointer to "struct tomoyo_addr_info".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_check_unix_address(struct sockaddr *addr,
+ const unsigned int addr_len,
+ struct tomoyo_addr_info *address)
+{
+ struct tomoyo_unix_addr_info *u = &address->unix0;
+
+ if (addr->sa_family != AF_UNIX)
+ return 0;
+ u->addr = ((struct sockaddr_un *) addr)->sun_path;
+ u->addr_len = addr_len;
+ return tomoyo_unix_entry(address);
+}
+
+/**
+ * tomoyo_kernel_service - Check whether the current context is a kernel service.
+ *
+ * Returns true if the caller is a kernel service, false otherwise.
+ */
+static bool tomoyo_kernel_service(void)
+{
+ /* A context running with KERNEL_DS is treated as a kernel service, so its socket operations are not checked. */
+ return segment_eq(get_fs(), KERNEL_DS);
+}
+
+/**
+ * tomoyo_sock_family - Get socket's family.
+ *
+ * @sk: Pointer to "struct sock".
+ *
+ * Returns one of PF_INET, PF_INET6, PF_UNIX or 0.
+ */
+static u8 tomoyo_sock_family(struct sock *sk)
+{
+ u8 family;
+
+ if (tomoyo_kernel_service())
+ return 0;
+ family = sk->sk_family;
+ switch (family) {
+ case PF_INET:
+ case PF_INET6:
+ case PF_UNIX:
+ return family;
+ default:
+ return 0;
+ }
+}
+
+/**
+ * tomoyo_socket_listen_permission - Check permission for listening on a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_listen_permission(struct socket *sock)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+ struct sockaddr_storage addr;
+ int addr_len;
+
+ if (!family || (type != SOCK_STREAM && type != SOCK_SEQPACKET))
+ return 0;
+ {
+ const int error = sock->ops->getname(sock, (struct sockaddr *)
+ &addr, &addr_len, 0);
+
+ if (error)
+ return error;
+ }
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_LISTEN;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *) &addr,
+ addr_len, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) &addr, addr_len,
+ 0, &address);
+}
+
+/**
+ * tomoyo_socket_connect_permission - Check permission for setting the remote address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_connect_permission(struct socket *sock,
+ struct sockaddr *addr, int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ address.protocol = type;
+ switch (type) {
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ address.operation = TOMOYO_NETWORK_SEND;
+ break;
+ case SOCK_STREAM:
+ case SOCK_SEQPACKET:
+ address.operation = TOMOYO_NETWORK_CONNECT;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_bind_permission - Check permission for setting the local address of a socket.
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!family)
+ return 0;
+ switch (type) {
+ case SOCK_STREAM:
+ case SOCK_DGRAM:
+ case SOCK_RAW:
+ case SOCK_SEQPACKET:
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_BIND;
+ break;
+ default:
+ return 0;
+ }
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address(addr, addr_len, &address);
+ return tomoyo_check_inet_address(addr, addr_len, sock->sk->sk_protocol,
+ &address);
+}
+
+/**
+ * tomoyo_socket_sendmsg_permission - Check permission for sending a datagram.
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Unused.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+int tomoyo_socket_sendmsg_permission(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ struct tomoyo_addr_info address;
+ const u8 family = tomoyo_sock_family(sock->sk);
+ const unsigned int type = sock->type;
+
+ if (!msg->msg_name || !family ||
+ (type != SOCK_DGRAM && type != SOCK_RAW))
+ return 0;
+ address.protocol = type;
+ address.operation = TOMOYO_NETWORK_SEND;
+ if (family == PF_UNIX)
+ return tomoyo_check_unix_address((struct sockaddr *)
+ msg->msg_name,
+ msg->msg_namelen, &address);
+ return tomoyo_check_inet_address((struct sockaddr *) msg->msg_name,
+ msg->msg_namelen,
+ sock->sk->sk_protocol, &address);
+}
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 18369d497eb..a3386d11942 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -1,502 +1,328 @@
/*
* security/tomoyo/realpath.c
*
- * Get the canonicalized absolute pathnames. The basis for TOMOYO.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
-#include <linux/types.h>
-#include <linux/mount.h>
-#include <linux/mnt_namespace.h>
-#include <linux/fs_struct.h>
-#include <linux/hash.h>
-
#include "common.h"
-#include "realpath.h"
+#include <linux/magic.h>
/**
- * tomoyo_encode: Convert binary string to ascii string.
+ * tomoyo_encode2 - Encode binary string to ascii string.
*
- * @buffer: Buffer for ASCII string.
- * @buflen: Size of @buffer.
- * @str: Binary string.
+ * @str: String in binary format.
+ * @str_len: Size of @str in byte.
*
- * Returns 0 on success, -ENOMEM otherwise.
- */
-int tomoyo_encode(char *buffer, int buflen, const char *str)
-{
- while (1) {
- const unsigned char c = *(unsigned char *) str++;
-
- if (tomoyo_is_valid(c)) {
- if (--buflen <= 0)
- break;
- *buffer++ = (char) c;
- if (c != '\\')
- continue;
- if (--buflen <= 0)
- break;
- *buffer++ = (char) c;
- continue;
- }
- if (!c) {
- if (--buflen <= 0)
- break;
- *buffer = '\0';
- return 0;
- }
- buflen -= 4;
- if (buflen <= 0)
- break;
- *buffer++ = '\\';
- *buffer++ = (c >> 6) + '0';
- *buffer++ = ((c >> 3) & 7) + '0';
- *buffer++ = (c & 7) + '0';
- }
- return -ENOMEM;
-}
-
-/**
- * tomoyo_realpath_from_path2 - Returns realpath(3) of the given dentry but ignores chroot'ed root.
- *
- * @path: Pointer to "struct path".
- * @newname: Pointer to buffer to return value in.
- * @newname_len: Size of @newname.
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
*
- * Returns 0 on success, negative value otherwise.
- *
- * If dentry is a directory, trailing '/' is appended.
- * Characters out of 0x20 < c < 0x7F range are converted to
- * \ooo style octal string.
- * Character \ is converted to \\ string.
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
*/
-int tomoyo_realpath_from_path2(struct path *path, char *newname,
- int newname_len)
+char *tomoyo_encode2(const char *str, int str_len)
{
- int error = -ENOMEM;
- struct dentry *dentry = path->dentry;
- char *sp;
+ int i;
+ int len = 0;
+ const char *p = str;
+ char *cp;
+ char *cp0;
- if (!dentry || !path->mnt || !newname || newname_len <= 2048)
- return -EINVAL;
- if (dentry->d_op && dentry->d_op->d_dname) {
- /* For "socket:[\$]" and "pipe:[\$]". */
- static const int offset = 1536;
- sp = dentry->d_op->d_dname(dentry, newname + offset,
- newname_len - offset);
- } else {
- /* Taken from d_namespace_path(). */
- struct path root;
- struct path ns_root = { };
- struct path tmp;
+ if (!p)
+ return NULL;
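+ /* First pass: calculate the length needed for the encoded string. */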
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
- read_lock(&current->fs->lock);
- root = current->fs->root;
- path_get(&root);
- read_unlock(&current->fs->lock);
- spin_lock(&vfsmount_lock);
- if (root.mnt && root.mnt->mnt_ns)
- ns_root.mnt = mntget(root.mnt->mnt_ns->root);
- if (ns_root.mnt)
- ns_root.dentry = dget(ns_root.mnt->mnt_root);
- spin_unlock(&vfsmount_lock);
- spin_lock(&dcache_lock);
- tmp = ns_root;
- sp = __d_path(path, &tmp, newname, newname_len);
- spin_unlock(&dcache_lock);
- path_put(&root);
- path_put(&ns_root);
- /* Prepend "/proc" prefix if using internal proc vfs mount. */
- if (!IS_ERR(sp) && (path->mnt->mnt_parent == path->mnt) &&
- (strcmp(path->mnt->mnt_sb->s_type->name, "proc") == 0)) {
- sp -= 5;
- if (sp >= newname)
- memcpy(sp, "/proc", 5);
- else
- sp = ERR_PTR(-ENOMEM);
- }
+ if (c == '\\')
+ len += 2;
+ else if (c > ' ' && c < 127)
+ len++;
+ else
+ len += 4;
}
- if (IS_ERR(sp))
- error = PTR_ERR(sp);
- else
- error = tomoyo_encode(newname, sp - newname, sp);
- /* Append trailing '/' if dentry is a directory. */
- if (!error && dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)
- && *newname) {
- sp = newname + strlen(newname);
- if (*(sp - 1) != '/') {
- if (sp < newname + newname_len - 4) {
- *sp++ = '/';
- *sp = '\0';
- } else {
- error = -ENOMEM;
- }
+ len++;
+ /* Reserve space for appending "/". */
+ cp = kzalloc(len + 10, GFP_NOFS);
+ if (!cp)
+ return NULL;
+ cp0 = cp;
+ p = str;
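+ /* Second pass: copy @str while escaping '\\' and non-printable bytes as \ooo. */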
+ for (i = 0; i < str_len; i++) {
+ const unsigned char c = p[i];
+
+ if (c == '\\') {
+ *cp++ = '\\';
+ *cp++ = '\\';
+ } else if (c > ' ' && c < 127) {
+ *cp++ = c;
+ } else {
+ *cp++ = '\\';
+ *cp++ = (c >> 6) + '0';
+ *cp++ = ((c >> 3) & 7) + '0';
+ *cp++ = (c & 7) + '0';
}
}
- if (error)
- printk(KERN_WARNING "tomoyo_realpath: Pathname too long.\n");
- return error;
+ return cp0;
}
/**
- * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
+ * tomoyo_encode - Encode binary string to ascii string.
*
- * @path: Pointer to "struct path".
+ * @str: String in binary format.
*
- * Returns the realpath of the given @path on success, NULL otherwise.
+ * Returns pointer to @str in ascii format on success, NULL otherwise.
*
- * These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
- * if these functions didn't return NULL.
+ * This function uses kzalloc(), so caller must kfree() if this function
+ * didn't return NULL.
*/
-char *tomoyo_realpath_from_path(struct path *path)
+char *tomoyo_encode(const char *str)
{
- char *buf = tomoyo_alloc(sizeof(struct tomoyo_page_buffer));
-
- BUILD_BUG_ON(sizeof(struct tomoyo_page_buffer)
- <= TOMOYO_MAX_PATHNAME_LEN - 1);
- if (!buf)
- return NULL;
- if (tomoyo_realpath_from_path2(path, buf,
- TOMOYO_MAX_PATHNAME_LEN - 1) == 0)
- return buf;
- tomoyo_free(buf);
- return NULL;
+ return str ? tomoyo_encode2(str, strlen(str)) : NULL;
}
/**
- * tomoyo_realpath - Get realpath of a pathname.
+ * tomoyo_get_absolute_path - Get the path of a dentry but ignores chroot'ed root.
*
- * @pathname: The pathname to solve.
+ * @path: Pointer to "struct path".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
*
- * Returns the realpath of @pathname on success, NULL otherwise.
- */
-char *tomoyo_realpath(const char *pathname)
-{
- struct path path;
-
- if (pathname && kern_path(pathname, LOOKUP_FOLLOW, &path) == 0) {
- char *buf = tomoyo_realpath_from_path(&path);
- path_put(&path);
- return buf;
- }
- return NULL;
-}
-
-/**
- * tomoyo_realpath_nofollow - Get realpath of a pathname.
+ * Returns the buffer on success, an error code otherwise.
*
- * @pathname: The pathname to solve.
- *
- * Returns the realpath of @pathname on success, NULL otherwise.
+ * If dentry is a directory, trailing '/' is appended.
*/
-char *tomoyo_realpath_nofollow(const char *pathname)
+static char *tomoyo_get_absolute_path(struct path *path, char * const buffer,
+ const int buflen)
{
- struct path path;
-
- if (pathname && kern_path(pathname, 0, &path) == 0) {
- char *buf = tomoyo_realpath_from_path(&path);
- path_put(&path);
- return buf;
+ char *pos = ERR_PTR(-ENOMEM);
+ if (buflen >= 256) {
+ /* go to whatever namespace root we are under */
+ pos = d_absolute_path(path, buffer, buflen - 1);
+ if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
+ struct inode *inode = path->dentry->d_inode;
+ if (inode && S_ISDIR(inode->i_mode)) {
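+ /* d_absolute_path() built the name at the end of @buffer, so the trailing '/' can be stored in place. */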
+ buffer[buflen - 2] = '/';
+ buffer[buflen - 1] = '\0';
+ }
+ }
}
- return NULL;
+ return pos;
}
-/* Memory allocated for non-string data. */
-static unsigned int tomoyo_allocated_memory_for_elements;
-/* Quota for holding non-string data. */
-static unsigned int tomoyo_quota_for_elements;
-
/**
- * tomoyo_alloc_element - Allocate permanent memory for structures.
+ * tomoyo_get_dentry_path - Get the path of a dentry.
*
- * @size: Size in bytes.
+ * @dentry: Pointer to "struct dentry".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
*
- * Returns pointer to allocated memory on success, NULL otherwise.
+ * Returns the buffer on success, an error code otherwise.
*
- * Memory has to be zeroed.
- * The RAM is chunked, so NEVER try to kfree() the returned pointer.
+ * If dentry is a directory, trailing '/' is appended.
*/
-void *tomoyo_alloc_element(const unsigned int size)
+static char *tomoyo_get_dentry_path(struct dentry *dentry, char * const buffer,
+ const int buflen)
{
- static char *buf;
- static DEFINE_MUTEX(lock);
- static unsigned int buf_used_len = PATH_MAX;
- char *ptr = NULL;
- /*Assumes sizeof(void *) >= sizeof(long) is true. */
- const unsigned int word_aligned_size
- = roundup(size, max(sizeof(void *), sizeof(long)));
- if (word_aligned_size > PATH_MAX)
- return NULL;
- mutex_lock(&lock);
- if (buf_used_len + word_aligned_size > PATH_MAX) {
- if (!tomoyo_quota_for_elements ||
- tomoyo_allocated_memory_for_elements
- + PATH_MAX <= tomoyo_quota_for_elements)
- ptr = kzalloc(PATH_MAX, GFP_KERNEL);
- if (!ptr) {
- printk(KERN_WARNING "ERROR: Out of memory "
- "for tomoyo_alloc_element().\n");
- if (!tomoyo_policy_loaded)
- panic("MAC Initialization failed.\n");
- } else {
- buf = ptr;
- tomoyo_allocated_memory_for_elements += PATH_MAX;
- buf_used_len = word_aligned_size;
- ptr = buf;
- }
- } else if (word_aligned_size) {
- int i;
- ptr = buf + buf_used_len;
- buf_used_len += word_aligned_size;
- for (i = 0; i < word_aligned_size; i++) {
- if (!ptr[i])
- continue;
- printk(KERN_ERR "WARNING: Reserved memory was tainted! "
- "The system might go wrong.\n");
- ptr[i] = '\0';
+ char *pos = ERR_PTR(-ENOMEM);
+ if (buflen >= 256) {
+ pos = dentry_path_raw(dentry, buffer, buflen - 1);
+ if (!IS_ERR(pos) && *pos == '/' && pos[1]) {
+ struct inode *inode = dentry->d_inode;
+ if (inode && S_ISDIR(inode->i_mode)) {
+ buffer[buflen - 2] = '/';
+ buffer[buflen - 1] = '\0';
+ }
}
}
- mutex_unlock(&lock);
- return ptr;
+ return pos;
}
-/* Memory allocated for string data in bytes. */
-static unsigned int tomoyo_allocated_memory_for_savename;
-/* Quota for holding string data in bytes. */
-static unsigned int tomoyo_quota_for_savename;
-
-/*
- * TOMOYO uses this hash only when appending a string into the string
- * table. Frequency of appending strings is very low. So we don't need
- * large (e.g. 64k) hash size. 256 will be sufficient.
- */
-#define TOMOYO_HASH_BITS 8
-#define TOMOYO_MAX_HASH (1u<<TOMOYO_HASH_BITS)
-
-/*
- * tomoyo_name_entry is a structure which is used for linking
- * "struct tomoyo_path_info" into tomoyo_name_list .
- *
- * Since tomoyo_name_list manages a list of strings which are shared by
- * multiple processes (whereas "struct tomoyo_path_info" inside
- * "struct tomoyo_path_info_with_data" is not shared), a reference counter will
- * be added to "struct tomoyo_name_entry" rather than "struct tomoyo_path_info"
- * when TOMOYO starts supporting garbage collector.
- */
-struct tomoyo_name_entry {
- struct list_head list;
- struct tomoyo_path_info entry;
-};
-
-/* Structure for available memory region. */
-struct tomoyo_free_memory_block_list {
- struct list_head list;
- char *ptr; /* Pointer to a free area. */
- int len; /* Length of the area. */
-};
-
-/*
- * tomoyo_name_list is used for holding string data used by TOMOYO.
- * Since same string data is likely used for multiple times (e.g.
- * "/lib/libc-2.5.so"), TOMOYO shares string data in the form of
- * "const struct tomoyo_path_info *".
- */
-static struct list_head tomoyo_name_list[TOMOYO_MAX_HASH];
-
/**
- * tomoyo_save_name - Allocate permanent memory for string data.
- *
- * @name: The string to store into the permernent memory.
+ * tomoyo_get_local_path - Get the path of a dentry.
*
- * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise.
+ * @dentry: Pointer to "struct dentry".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
*
- * The RAM is shared, so NEVER try to modify or kfree() the returned name.
+ * Returns the buffer on success, an error code otherwise.
*/
-const struct tomoyo_path_info *tomoyo_save_name(const char *name)
+static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
+ const int buflen)
{
- static LIST_HEAD(fmb_list);
- static DEFINE_MUTEX(lock);
- struct tomoyo_name_entry *ptr;
- unsigned int hash;
- /* fmb contains available size in bytes.
- fmb is removed from the fmb_list when fmb->len becomes 0. */
- struct tomoyo_free_memory_block_list *fmb;
- int len;
- char *cp;
- struct list_head *head;
-
- if (!name)
- return NULL;
- len = strlen(name) + 1;
- if (len > TOMOYO_MAX_PATHNAME_LEN) {
- printk(KERN_WARNING "ERROR: Name too long "
- "for tomoyo_save_name().\n");
- return NULL;
- }
- hash = full_name_hash((const unsigned char *) name, len - 1);
- head = &tomoyo_name_list[hash_long(hash, TOMOYO_HASH_BITS)];
-
- mutex_lock(&lock);
- list_for_each_entry(ptr, head, list) {
- if (hash == ptr->entry.hash && !strcmp(name, ptr->entry.name))
- goto out;
+ struct super_block *sb = dentry->d_sb;
+ char *pos = tomoyo_get_dentry_path(dentry, buffer, buflen);
+ if (IS_ERR(pos))
+ return pos;
+ /* Convert from $PID to self if $PID is current thread. */
+ if (sb->s_magic == PROC_SUPER_MAGIC && *pos == '/') {
+ char *ep;
+ const pid_t pid = (pid_t) simple_strtoul(pos + 1, &ep, 10);
+ if (*ep == '/' && pid && pid ==
+ task_tgid_nr_ns(current, sb->s_fs_info)) {
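+ /* Rewrite the leading "/$PID" component as "/self" by moving the start of the string. */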
+ pos = ep - 5;
+ if (pos < buffer)
+ goto out;
+ memmove(pos, "/self", 5);
+ }
+ goto prepend_filesystem_name;
}
- list_for_each_entry(fmb, &fmb_list, list) {
- if (len <= fmb->len)
- goto ready;
+ /* Use filesystem name for unnamed devices. */
+ if (!MAJOR(sb->s_dev))
+ goto prepend_filesystem_name;
+ {
+ struct inode *inode = sb->s_root->d_inode;
+ /*
+ * Use filesystem name if filesystem does not support rename()
+ * operation.
+ */
+ if (!inode->i_op->rename)
+ goto prepend_filesystem_name;
}
- if (!tomoyo_quota_for_savename ||
- tomoyo_allocated_memory_for_savename + PATH_MAX
- <= tomoyo_quota_for_savename)
- cp = kzalloc(PATH_MAX, GFP_KERNEL);
- else
- cp = NULL;
- fmb = kzalloc(sizeof(*fmb), GFP_KERNEL);
- if (!cp || !fmb) {
- kfree(cp);
- kfree(fmb);
- printk(KERN_WARNING "ERROR: Out of memory "
- "for tomoyo_save_name().\n");
- if (!tomoyo_policy_loaded)
- panic("MAC Initialization failed.\n");
- ptr = NULL;
- goto out;
+ /* Prepend device name. */
+ {
+ char name[64];
+ int name_len;
+ const dev_t dev = sb->s_dev;
+ name[sizeof(name) - 1] = '\0';
+ snprintf(name, sizeof(name) - 1, "dev(%u,%u):", MAJOR(dev),
+ MINOR(dev));
+ name_len = strlen(name);
+ pos -= name_len;
+ if (pos < buffer)
+ goto out;
+ memmove(pos, name, name_len);
+ return pos;
}
- tomoyo_allocated_memory_for_savename += PATH_MAX;
- list_add(&fmb->list, &fmb_list);
- fmb->ptr = cp;
- fmb->len = PATH_MAX;
- ready:
- ptr = tomoyo_alloc_element(sizeof(*ptr));
- if (!ptr)
- goto out;
- ptr->entry.name = fmb->ptr;
- memmove(fmb->ptr, name, len);
- tomoyo_fill_path_info(&ptr->entry);
- fmb->ptr += len;
- fmb->len -= len;
- list_add_tail(&ptr->list, head);
- if (fmb->len == 0) {
- list_del(&fmb->list);
- kfree(fmb);
+ /* Prepend filesystem name. */
+prepend_filesystem_name:
+ {
+ const char *name = sb->s_type->name;
+ const int name_len = strlen(name);
+ pos -= name_len + 1;
+ if (pos < buffer)
+ goto out;
+ memmove(pos, name, name_len);
+ pos[name_len] = ':';
}
- out:
- mutex_unlock(&lock);
- return ptr ? &ptr->entry : NULL;
-}
-
-/**
- * tomoyo_realpath_init - Initialize realpath related code.
- */
-void __init tomoyo_realpath_init(void)
-{
- int i;
-
- BUILD_BUG_ON(TOMOYO_MAX_PATHNAME_LEN > PATH_MAX);
- for (i = 0; i < TOMOYO_MAX_HASH; i++)
- INIT_LIST_HEAD(&tomoyo_name_list[i]);
- INIT_LIST_HEAD(&tomoyo_kernel_domain.acl_info_list);
- tomoyo_kernel_domain.domainname = tomoyo_save_name(TOMOYO_ROOT_NAME);
- list_add_tail(&tomoyo_kernel_domain.list, &tomoyo_domain_list);
- down_read(&tomoyo_domain_list_lock);
- if (tomoyo_find_domain(TOMOYO_ROOT_NAME) != &tomoyo_kernel_domain)
- panic("Can't register tomoyo_kernel_domain");
- up_read(&tomoyo_domain_list_lock);
+ return pos;
+out:
+ return ERR_PTR(-ENOMEM);
}
-/* Memory allocated for temporary purpose. */
-static atomic_t tomoyo_dynamic_memory_size;
-
/**
- * tomoyo_alloc - Allocate memory for temporary purpose.
+ * tomoyo_get_socket_name - Get the name of a socket.
*
- * @size: Size in bytes.
+ * @path: Pointer to "struct path".
+ * @buffer: Pointer to buffer to return value in.
+ * @buflen: Size of @buffer.
*
- * Returns pointer to allocated memory on success, NULL otherwise.
+ * Returns the buffer.
*/
-void *tomoyo_alloc(const size_t size)
+static char *tomoyo_get_socket_name(struct path *path, char * const buffer,
+ const int buflen)
{
- void *p = kzalloc(size, GFP_KERNEL);
- if (p)
- atomic_add(ksize(p), &tomoyo_dynamic_memory_size);
- return p;
+ struct inode *inode = path->dentry->d_inode;
+ struct socket *sock = inode ? SOCKET_I(inode) : NULL;
+ struct sock *sk = sock ? sock->sk : NULL;
+ if (sk) {
+ snprintf(buffer, buflen, "socket:[family=%u:type=%u:"
+ "protocol=%u]", sk->sk_family, sk->sk_type,
+ sk->sk_protocol);
+ } else {
+ snprintf(buffer, buflen, "socket:[unknown]");
+ }
+ return buffer;
}
/**
- * tomoyo_free - Release memory allocated by tomoyo_alloc().
+ * tomoyo_realpath_from_path - Returns realpath(3) of the given pathname but ignores chroot'ed root.
*
- * @p: Pointer returned by tomoyo_alloc(). May be NULL.
+ * @path: Pointer to "struct path".
*
- * Returns nothing.
- */
-void tomoyo_free(const void *p)
-{
- if (p) {
- atomic_sub(ksize(p), &tomoyo_dynamic_memory_size);
- kfree(p);
- }
-}
-
-/**
- * tomoyo_read_memory_counter - Check for memory usage in bytes.
+ * Returns the realpath of the given @path on success, NULL otherwise.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * If dentry is a directory, trailing '/' is appended.
+ * Characters out of 0x20 < c < 0x7F range are converted to
+ * \ooo style octal string.
+ * Character \ is converted to \\ string.
*
- * Returns memory usage.
+ * These functions use kzalloc(), so the caller must call kfree()
+ * if these functions didn't return NULL.
*/
-int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head)
+char *tomoyo_realpath_from_path(struct path *path)
{
- if (!head->read_eof) {
- const unsigned int shared
- = tomoyo_allocated_memory_for_savename;
- const unsigned int private
- = tomoyo_allocated_memory_for_elements;
- const unsigned int dynamic
- = atomic_read(&tomoyo_dynamic_memory_size);
- char buffer[64];
-
- memset(buffer, 0, sizeof(buffer));
- if (tomoyo_quota_for_savename)
- snprintf(buffer, sizeof(buffer) - 1,
- " (Quota: %10u)",
- tomoyo_quota_for_savename);
- else
- buffer[0] = '\0';
- tomoyo_io_printf(head, "Shared: %10u%s\n", shared, buffer);
- if (tomoyo_quota_for_elements)
- snprintf(buffer, sizeof(buffer) - 1,
- " (Quota: %10u)",
- tomoyo_quota_for_elements);
- else
- buffer[0] = '\0';
- tomoyo_io_printf(head, "Private: %10u%s\n", private, buffer);
- tomoyo_io_printf(head, "Dynamic: %10u\n", dynamic);
- tomoyo_io_printf(head, "Total: %10u\n",
- shared + private + dynamic);
- head->read_eof = true;
+ char *buf = NULL;
+ char *name = NULL;
+ unsigned int buf_len = PAGE_SIZE / 2;
+ struct dentry *dentry = path->dentry;
+ struct super_block *sb;
+ if (!dentry)
+ return NULL;
+ sb = dentry->d_sb;
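+ /* Retry with a doubled buffer until the name fits or memory runs out. */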
+ while (1) {
+ char *pos;
+ struct inode *inode;
+ buf_len <<= 1;
+ kfree(buf);
+ buf = kmalloc(buf_len, GFP_NOFS);
+ if (!buf)
+ break;
+ /* To make sure that pos is '\0' terminated. */
+ buf[buf_len - 1] = '\0';
+ /* Get better name for socket. */
+ if (sb->s_magic == SOCKFS_MAGIC) {
+ pos = tomoyo_get_socket_name(path, buf, buf_len - 1);
+ goto encode;
+ }
+ /* For "pipe:[\$]". */
+ if (dentry->d_op && dentry->d_op->d_dname) {
+ pos = dentry->d_op->d_dname(dentry, buf, buf_len - 1);
+ goto encode;
+ }
+ inode = sb->s_root->d_inode;
+ /*
+ * Get local name for filesystems without rename() operation
+ * or dentry without vfsmount.
+ */
+ if (!path->mnt || !inode->i_op->rename)
+ pos = tomoyo_get_local_path(path->dentry, buf,
+ buf_len - 1);
+ /* Get absolute name for the rest. */
+ else {
+ pos = tomoyo_get_absolute_path(path, buf, buf_len - 1);
+ /*
+ * Fall back to local name if absolute name is not
+ * available.
+ */
+ if (pos == ERR_PTR(-EINVAL))
+ pos = tomoyo_get_local_path(path->dentry, buf,
+ buf_len - 1);
+ }
+encode:
+ if (IS_ERR(pos))
+ continue;
+ name = tomoyo_encode(pos);
+ break;
}
- return 0;
+ kfree(buf);
+ if (!name)
+ tomoyo_warn_oom(__func__);
+ return name;
}
/**
- * tomoyo_write_memory_quota - Set memory quota.
+ * tomoyo_realpath_nofollow - Get realpath of a pathname.
*
- * @head: Pointer to "struct tomoyo_io_buffer".
+ * @pathname: The pathname to solve.
*
- * Returns 0.
+ * Returns the realpath of @pathname on success, NULL otherwise.
*/
-int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head)
+char *tomoyo_realpath_nofollow(const char *pathname)
{
- char *data = head->write_buf;
- unsigned int size;
+ struct path path;
- if (sscanf(data, "Shared: %u", &size) == 1)
- tomoyo_quota_for_savename = size;
- else if (sscanf(data, "Private: %u", &size) == 1)
- tomoyo_quota_for_elements = size;
- return 0;
+ if (pathname && kern_path(pathname, 0, &path) == 0) {
+ char *buf = tomoyo_realpath_from_path(&path);
+ path_put(&path);
+ return buf;
+ }
+ return NULL;
}
diff --git a/security/tomoyo/realpath.h b/security/tomoyo/realpath.h
deleted file mode 100644
index 78217a37960..00000000000
--- a/security/tomoyo/realpath.h
+++ /dev/null
@@ -1,66 +0,0 @@
-/*
- * security/tomoyo/realpath.h
- *
- * Get the canonicalized absolute pathnames. The basis for TOMOYO.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
- */
-
-#ifndef _SECURITY_TOMOYO_REALPATH_H
-#define _SECURITY_TOMOYO_REALPATH_H
-
-struct path;
-struct tomoyo_path_info;
-struct tomoyo_io_buffer;
-
-/* Convert binary string to ascii string. */
-int tomoyo_encode(char *buffer, int buflen, const char *str);
-
-/* Returns realpath(3) of the given pathname but ignores chroot'ed root. */
-int tomoyo_realpath_from_path2(struct path *path, char *newname,
- int newname_len);
-
-/*
- * Returns realpath(3) of the given pathname but ignores chroot'ed root.
- * These functions use tomoyo_alloc(), so the caller must call tomoyo_free()
- * if these functions didn't return NULL.
- */
-char *tomoyo_realpath(const char *pathname);
-/*
- * Same with tomoyo_realpath() except that it doesn't follow the final symlink.
- */
-char *tomoyo_realpath_nofollow(const char *pathname);
-/* Same with tomoyo_realpath() except that the pathname is already solved. */
-char *tomoyo_realpath_from_path(struct path *path);
-
-/*
- * Allocate memory for ACL entry.
- * The RAM is chunked, so NEVER try to kfree() the returned pointer.
- */
-void *tomoyo_alloc_element(const unsigned int size);
-
-/*
- * Keep the given name on the RAM.
- * The RAM is shared, so NEVER try to modify or kfree() the returned name.
- */
-const struct tomoyo_path_info *tomoyo_save_name(const char *name);
-
-/* Allocate memory for temporary use (e.g. permission checks). */
-void *tomoyo_alloc(const size_t size);
-
-/* Free memory allocated by tomoyo_alloc(). */
-void tomoyo_free(const void *p);
-
-/* Check for memory usage. */
-int tomoyo_read_memory_counter(struct tomoyo_io_buffer *head);
-
-/* Set memory quota. */
-int tomoyo_write_memory_quota(struct tomoyo_io_buffer *head);
-
-/* Initialize realpath related code. */
-void __init tomoyo_realpath_init(void);
-
-#endif /* !defined(_SECURITY_TOMOYO_REALPATH_H) */
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
new file mode 100644
index 00000000000..179a955b319
--- /dev/null
+++ b/security/tomoyo/securityfs_if.c
@@ -0,0 +1,272 @@
+/*
+ * security/tomoyo/securityfs_if.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/security.h>
+#include "common.h"
+
+/**
+ * tomoyo_check_task_acl - Check permission for task operation.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ * @ptr: Pointer to "struct tomoyo_acl_info".
+ *
+ * Returns true if granted, false otherwise.
+ */
+static bool tomoyo_check_task_acl(struct tomoyo_request_info *r,
+ const struct tomoyo_acl_info *ptr)
+{
+ const struct tomoyo_task_acl *acl = container_of(ptr, typeof(*acl),
+ head);
+ return !tomoyo_pathcmp(r->param.task.domainname, acl->domainname);
+}
+
+/**
+ * tomoyo_write_self - write() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname to transit to.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ *
+ * If the domain transition was permitted but failed, this function returns an
+ * error rather than terminating the current thread with SIGKILL.
+ */
+static ssize_t tomoyo_write_self(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ int error;
+ if (!count || count >= TOMOYO_EXEC_TMPSIZE - 10)
+ return -ENOMEM;
+ data = kzalloc(count + 1, GFP_NOFS);
+ if (!data)
+ return -ENOMEM;
+ if (copy_from_user(data, buf, count)) {
+ error = -EFAULT;
+ goto out;
+ }
+ tomoyo_normalize_line(data);
+ if (tomoyo_correct_domain(data)) {
+ const int idx = tomoyo_read_lock();
+ struct tomoyo_path_info name;
+ struct tomoyo_request_info r;
+ name.name = data;
+ tomoyo_fill_path_info(&name);
+ /* Check "task manual_domain_transition" permission. */
+ tomoyo_init_request_info(&r, NULL, TOMOYO_MAC_FILE_EXECUTE);
+ r.param_type = TOMOYO_TYPE_MANUAL_TASK_ACL;
+ r.param.task.domainname = &name;
+ tomoyo_check_acl(&r, tomoyo_check_task_acl);
+ if (!r.granted)
+ error = -EPERM;
+ else {
+ struct tomoyo_domain_info *new_domain =
+ tomoyo_assign_domain(data, true);
+ if (!new_domain) {
+ error = -ENOENT;
+ } else {
+ struct cred *cred = prepare_creds();
+ if (!cred) {
+ error = -ENOMEM;
+ } else {
+ struct tomoyo_domain_info *old_domain =
+ cred->security;
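+ /* Move this thread to @new_domain: take a reference on it, drop the one held on the old domain, then commit. */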
+ cred->security = new_domain;
+ atomic_inc(&new_domain->users);
+ atomic_dec(&old_domain->users);
+ commit_creds(cred);
+ error = 0;
+ }
+ }
+ }
+ tomoyo_read_unlock(idx);
+ } else
+ error = -EINVAL;
+out:
+ kfree(data);
+ return error ? error : count;
+}
+
+/**
+ * tomoyo_read_self - read() for /sys/kernel/security/tomoyo/self_domain interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Domainname which current thread belongs to.
+ * @count: Size of @buf.
+ * @ppos: Bytes read by now.
+ *
+ * Returns read size on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read_self(struct file *file, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ const char *domain = tomoyo_domain()->domainname->name;
+ loff_t len = strlen(domain);
+ loff_t pos = *ppos;
+ if (pos >= len || !count)
+ return 0;
+ len -= pos;
+ if (count < len)
+ len = count;
+ if (copy_to_user(buf, domain + pos, len))
+ return -EFAULT;
+ *ppos += len;
+ return len;
+}
+
+/* Operations for /sys/kernel/security/tomoyo/self_domain interface. */
+static const struct file_operations tomoyo_self_operations = {
+ .write = tomoyo_write_self,
+ .read = tomoyo_read_self,
+};
+
+/**
+ * tomoyo_open - open() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @inode: Pointer to "struct inode".
+ * @file: Pointer to "struct file".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_open(struct inode *inode, struct file *file)
+{
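+ /* The interface key was stored in i_private as an offset from NULL by tomoyo_create_entry(). */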
+ const int key = ((u8 *) file_inode(file)->i_private)
+ - ((u8 *) NULL);
+ return tomoyo_open_control(key, file);
+}
+
+/**
+ * tomoyo_release - close() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @inode: Pointer to "struct inode".
+ * @file: Pointer to "struct file".
+ *
+ * Returns 0.
+ */
+static int tomoyo_release(struct inode *inode, struct file *file)
+{
+ tomoyo_close_control(file->private_data);
+ return 0;
+}
+
+/**
+ * tomoyo_poll - poll() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @wait: Pointer to "poll_table". Maybe NULL.
+ *
+ * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
+ * POLLOUT | POLLWRNORM otherwise.
+ */
+static unsigned int tomoyo_poll(struct file *file, poll_table *wait)
+{
+ return tomoyo_poll_control(file, wait);
+}
+
+/**
+ * tomoyo_read - read() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Pointer to buffer.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns bytes read on success, negative value otherwise.
+ */
+static ssize_t tomoyo_read(struct file *file, char __user *buf, size_t count,
+ loff_t *ppos)
+{
+ return tomoyo_read_control(file->private_data, buf, count);
+}
+
+/**
+ * tomoyo_write - write() for /sys/kernel/security/tomoyo/ interface.
+ *
+ * @file: Pointer to "struct file".
+ * @buf: Pointer to buffer.
+ * @count: Size of @buf.
+ * @ppos: Unused.
+ *
+ * Returns @count on success, negative value otherwise.
+ */
+static ssize_t tomoyo_write(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return tomoyo_write_control(file->private_data, buf, count);
+}
+
+/*
+ * tomoyo_operations is a "struct file_operations" which is used for handling
+ * /sys/kernel/security/tomoyo/ interface.
+ *
+ * Some files under /sys/kernel/security/tomoyo/ directory accept open(O_RDWR).
+ * See tomoyo_io_buffer for internals.
+ */
+static const struct file_operations tomoyo_operations = {
+ .open = tomoyo_open,
+ .release = tomoyo_release,
+ .poll = tomoyo_poll,
+ .read = tomoyo_read,
+ .write = tomoyo_write,
+ .llseek = noop_llseek,
+};
+
+/**
+ * tomoyo_create_entry - Create interface files under /sys/kernel/security/tomoyo/ directory.
+ *
+ * @name: The name of the interface file.
+ * @mode: The permission of the interface file.
+ * @parent: The parent directory.
+ * @key: Type of interface.
+ *
+ * Returns nothing.
+ */
+static void __init tomoyo_create_entry(const char *name, const umode_t mode,
+ struct dentry *parent, const u8 key)
+{
+ securityfs_create_file(name, mode, parent, ((u8 *) NULL) + key,
+ &tomoyo_operations);
+}
+
+/**
+ * tomoyo_initerface_init - Initialize /sys/kernel/security/tomoyo/ interface.
+ *
+ * Returns 0.
+ */
+static int __init tomoyo_initerface_init(void)
+{
+ struct dentry *tomoyo_dir;
+
+ /* Don't create securityfs entries unless registered. */
+ if (current_cred()->security != &tomoyo_kernel_domain)
+ return 0;
+
+ tomoyo_dir = securityfs_create_dir("tomoyo", NULL);
+ tomoyo_create_entry("query", 0600, tomoyo_dir,
+ TOMOYO_QUERY);
+ tomoyo_create_entry("domain_policy", 0600, tomoyo_dir,
+ TOMOYO_DOMAINPOLICY);
+ tomoyo_create_entry("exception_policy", 0600, tomoyo_dir,
+ TOMOYO_EXCEPTIONPOLICY);
+ tomoyo_create_entry("audit", 0400, tomoyo_dir,
+ TOMOYO_AUDIT);
+ tomoyo_create_entry(".process_status", 0600, tomoyo_dir,
+ TOMOYO_PROCESS_STATUS);
+ tomoyo_create_entry("stat", 0644, tomoyo_dir,
+ TOMOYO_STAT);
+ tomoyo_create_entry("profile", 0600, tomoyo_dir,
+ TOMOYO_PROFILE);
+ tomoyo_create_entry("manager", 0600, tomoyo_dir,
+ TOMOYO_MANAGER);
+ tomoyo_create_entry("version", 0400, tomoyo_dir,
+ TOMOYO_VERSION);
+ securityfs_create_file("self_domain", 0666, tomoyo_dir, NULL,
+ &tomoyo_self_operations);
+ tomoyo_load_builtin_policy();
+ return 0;
+}
+
+fs_initcall(tomoyo_initerface_init);
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 8a00ade8516..f0b756e27fe 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -1,45 +1,75 @@
/*
* security/tomoyo/tomoyo.c
*
- * LSM hooks for TOMOYO Linux.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
*/
#include <linux/security.h>
#include "common.h"
-#include "tomoyo.h"
-#include "realpath.h"
+/**
+ * tomoyo_cred_alloc_blank - Target for security_cred_alloc_blank().
+ *
+ * @new: Pointer to "struct cred".
+ * @gfp: Memory allocation flags.
+ *
+ * Returns 0.
+ */
static int tomoyo_cred_alloc_blank(struct cred *new, gfp_t gfp)
{
new->security = NULL;
return 0;
}
+/**
+ * tomoyo_cred_prepare - Target for security_prepare_creds().
+ *
+ * @new: Pointer to "struct cred".
+ * @old: Pointer to "struct cred".
+ * @gfp: Memory allocation flags.
+ *
+ * Returns 0.
+ */
static int tomoyo_cred_prepare(struct cred *new, const struct cred *old,
gfp_t gfp)
{
- /*
- * Since "struct tomoyo_domain_info *" is a sharable pointer,
- * we don't need to duplicate.
- */
- new->security = old->security;
+ struct tomoyo_domain_info *domain = old->security;
+ new->security = domain;
+ if (domain)
+ atomic_inc(&domain->users);
return 0;
}
+/**
+ * tomoyo_cred_transfer - Target for security_transfer_creds().
+ *
+ * @new: Pointer to "struct cred".
+ * @old: Pointer to "struct cred".
+ */
static void tomoyo_cred_transfer(struct cred *new, const struct cred *old)
{
- /*
- * Since "struct tomoyo_domain_info *" is a sharable pointer,
- * we don't need to duplicate.
- */
- new->security = old->security;
+ tomoyo_cred_prepare(new, old, 0);
}
+/**
+ * tomoyo_cred_free - Target for security_cred_free().
+ *
+ * @cred: Pointer to "struct cred".
+ */
+static void tomoyo_cred_free(struct cred *cred)
+{
+ struct tomoyo_domain_info *domain = cred->security;
+ if (domain)
+ atomic_dec(&domain->users);
+}
+
+/**
+ * tomoyo_bprm_set_creds - Target for security_bprm_set_creds().
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
{
int rc;
@@ -54,12 +84,22 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
*/
if (bprm->cred_prepared)
return 0;
+#ifndef CONFIG_SECURITY_TOMOYO_OMIT_USERSPACE_LOADER
/*
* Load policy if /sbin/tomoyo-init exists and /sbin/init is requested
* for the first time.
*/
if (!tomoyo_policy_loaded)
tomoyo_load_policy(bprm->filename);
+#endif
+ /*
+ * Release reference to "struct tomoyo_domain_info" stored inside
+ * "bprm->cred->security". New reference to "struct tomoyo_domain_info"
+ * stored inside "bprm->cred->security" will be acquired later inside
+ * tomoyo_find_next_domain().
+ */
+ atomic_dec(&((struct tomoyo_domain_info *)
+ bprm->cred->security)->users);
/*
* Tell tomoyo_bprm_check_security() is called for the first time of an
* execve operation.
@@ -68,6 +108,13 @@ static int tomoyo_bprm_set_creds(struct linux_binprm *bprm)
return 0;
}
+/**
+ * tomoyo_bprm_check_security - Target for security_bprm_check().
+ *
+ * @bprm: Pointer to "struct linux_binprm".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
{
struct tomoyo_domain_info *domain = bprm->cred->security;
@@ -76,91 +123,173 @@ static int tomoyo_bprm_check_security(struct linux_binprm *bprm)
* Execute permission is checked against pathname passed to do_execve()
* using current domain.
*/
- if (!domain)
- return tomoyo_find_next_domain(bprm);
+ if (!domain) {
+ const int idx = tomoyo_read_lock();
+ const int err = tomoyo_find_next_domain(bprm);
+ tomoyo_read_unlock(idx);
+ return err;
+ }
/*
* Read permission is checked against interpreters using next domain.
- * '1' is the result of open_to_namei_flags(O_RDONLY).
*/
- return tomoyo_check_open_permission(domain, &bprm->file->f_path, 1);
+ return tomoyo_check_open_permission(domain, &bprm->file->f_path,
+ O_RDONLY);
+}
+
+/**
+ * tomoyo_inode_getattr - Target for security_inode_getattr().
+ *
+ * @mnt: Pointer to "struct vfsmount".
+ * @dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
+{
+ struct path path = { mnt, dentry };
+ return tomoyo_path_perm(TOMOYO_TYPE_GETATTR, &path, NULL);
}
-static int tomoyo_path_truncate(struct path *path, loff_t length,
- unsigned int time_attrs)
+/**
+ * tomoyo_path_truncate - Target for security_path_truncate().
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_truncate(struct path *path)
{
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_TRUNCATE_ACL,
- path);
+ return tomoyo_path_perm(TOMOYO_TYPE_TRUNCATE, path, NULL);
}
+/**
+ * tomoyo_path_unlink - Target for security_path_unlink().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_path_unlink(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_UNLINK_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_UNLINK, &path, NULL);
}
+/**
+ * tomoyo_path_mkdir - Target for security_path_mkdir().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ * @mode: DAC permission mode.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_path_mkdir(struct path *parent, struct dentry *dentry,
- int mode)
+ umode_t mode)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_MKDIR_ACL,
- &path);
+ return tomoyo_path_number_perm(TOMOYO_TYPE_MKDIR, &path,
+ mode & S_IALLUGO);
}
+/**
+ * tomoyo_path_rmdir - Target for security_path_rmdir().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_path_rmdir(struct path *parent, struct dentry *dentry)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_RMDIR_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_RMDIR, &path, NULL);
}
+/**
+ * tomoyo_path_symlink - Target for security_path_symlink().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ * @old_name: Symlink's content.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_path_symlink(struct path *parent, struct dentry *dentry,
const char *old_name)
{
struct path path = { parent->mnt, dentry };
- return tomoyo_check_1path_perm(tomoyo_domain(),
- TOMOYO_TYPE_SYMLINK_ACL,
- &path);
+ return tomoyo_path_perm(TOMOYO_TYPE_SYMLINK, &path, old_name);
}
+/**
+ * tomoyo_path_mknod - Target for security_path_mknod().
+ *
+ * @parent: Pointer to "struct path".
+ * @dentry: Pointer to "struct dentry".
+ * @mode: DAC permission mode.
+ * @dev: Device attributes.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_path_mknod(struct path *parent, struct dentry *dentry,
- int mode, unsigned int dev)
+ umode_t mode, unsigned int dev)
{
struct path path = { parent->mnt, dentry };
- int type = TOMOYO_TYPE_CREATE_ACL;
+ int type = TOMOYO_TYPE_CREATE;
+ const unsigned int perm = mode & S_IALLUGO;
switch (mode & S_IFMT) {
case S_IFCHR:
- type = TOMOYO_TYPE_MKCHAR_ACL;
+ type = TOMOYO_TYPE_MKCHAR;
break;
case S_IFBLK:
- type = TOMOYO_TYPE_MKBLOCK_ACL;
+ type = TOMOYO_TYPE_MKBLOCK;
break;
+ default:
+ goto no_dev;
+ }
+ return tomoyo_mkdev_perm(type, &path, perm, dev);
+ no_dev:
+ switch (mode & S_IFMT) {
case S_IFIFO:
- type = TOMOYO_TYPE_MKFIFO_ACL;
+ type = TOMOYO_TYPE_MKFIFO;
break;
case S_IFSOCK:
- type = TOMOYO_TYPE_MKSOCK_ACL;
+ type = TOMOYO_TYPE_MKSOCK;
break;
}
- return tomoyo_check_1path_perm(tomoyo_domain(),
- type, &path);
+ return tomoyo_path_number_perm(type, &path, perm);
}
+/**
+ * tomoyo_path_link - Target for security_path_link().
+ *
+ * @old_dentry: Pointer to "struct dentry".
+ * @new_dir: Pointer to "struct path".
+ * @new_dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_path_link(struct dentry *old_dentry, struct path *new_dir,
struct dentry *new_dentry)
{
struct path path1 = { new_dir->mnt, old_dentry };
struct path path2 = { new_dir->mnt, new_dentry };
- return tomoyo_check_2path_perm(tomoyo_domain(),
- TOMOYO_TYPE_LINK_ACL,
- &path1, &path2);
+ return tomoyo_path2_perm(TOMOYO_TYPE_LINK, &path1, &path2);
}
+/**
+ * tomoyo_path_rename - Target for security_path_rename().
+ *
+ * @old_parent: Pointer to "struct path".
+ * @old_dentry: Pointer to "struct dentry".
+ * @new_parent: Pointer to "struct path".
+ * @new_dentry: Pointer to "struct dentry".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_path_rename(struct path *old_parent,
struct dentry *old_dentry,
struct path *new_parent,
@@ -168,32 +297,208 @@ static int tomoyo_path_rename(struct path *old_parent,
{
struct path path1 = { old_parent->mnt, old_dentry };
struct path path2 = { new_parent->mnt, new_dentry };
- return tomoyo_check_2path_perm(tomoyo_domain(),
- TOMOYO_TYPE_RENAME_ACL,
- &path1, &path2);
+ return tomoyo_path2_perm(TOMOYO_TYPE_RENAME, &path1, &path2);
}
+/**
+ * tomoyo_file_fcntl - Target for security_file_fcntl().
+ *
+ * @file: Pointer to "struct file".
+ * @cmd: Command for fcntl().
+ * @arg: Argument for @cmd.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- if (cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND))
- return tomoyo_check_rewrite_permission(tomoyo_domain(), file);
- return 0;
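+ /* Changing O_APPEND via F_SETFL is re-checked as an open for writing with the new flag. */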
+ if (!(cmd == F_SETFL && ((arg ^ file->f_flags) & O_APPEND)))
+ return 0;
+ return tomoyo_check_open_permission(tomoyo_domain(), &file->f_path,
+ O_WRONLY | (arg & O_APPEND));
}
-static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
+/**
+ * tomoyo_file_open - Target for security_file_open().
+ *
+ * @f: Pointer to "struct file".
+ * @cred: Pointer to "struct cred".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_file_open(struct file *f, const struct cred *cred)
{
int flags = f->f_flags;
-
- if ((flags + 1) & O_ACCMODE)
- flags++;
- flags |= f->f_flags & (O_APPEND | O_TRUNC);
/* Don't check read permission here if called from do_execve(). */
if (current->in_execve)
return 0;
return tomoyo_check_open_permission(tomoyo_domain(), &f->f_path, flags);
}
+/**
+ * tomoyo_file_ioctl - Target for security_file_ioctl().
+ *
+ * @file: Pointer to "struct file".
+ * @cmd: Command for ioctl().
+ * @arg: Argument for @cmd.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ return tomoyo_path_number_perm(TOMOYO_TYPE_IOCTL, &file->f_path, cmd);
+}
+
+/**
+ * tomoyo_path_chmod - Target for security_path_chmod().
+ *
+ * @path: Pointer to "struct path".
+ * @mode: DAC permission mode.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chmod(struct path *path, umode_t mode)
+{
+ return tomoyo_path_number_perm(TOMOYO_TYPE_CHMOD, path,
+ mode & S_IALLUGO);
+}
+
+/**
+ * tomoyo_path_chown - Target for security_path_chown().
+ *
+ * @path: Pointer to "struct path".
+ * @uid: Owner ID.
+ * @gid: Group ID.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid)
+{
+ int error = 0;
+ if (uid_valid(uid))
+ error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path,
+ from_kuid(&init_user_ns, uid));
+ if (!error && gid_valid(gid))
+ error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path,
+ from_kgid(&init_user_ns, gid));
+ return error;
+}
+
+/**
+ * tomoyo_path_chroot - Target for security_path_chroot().
+ *
+ * @path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_path_chroot(struct path *path)
+{
+ return tomoyo_path_perm(TOMOYO_TYPE_CHROOT, path, NULL);
+}
+
+/**
+ * tomoyo_sb_mount - Target for security_sb_mount().
+ *
+ * @dev_name: Name of device file. Maybe NULL.
+ * @path: Pointer to "struct path".
+ * @type: Name of filesystem type. Maybe NULL.
+ * @flags: Mount options.
+ * @data: Optional data. Maybe NULL.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
+{
+ return tomoyo_mount_permission(dev_name, path, type, flags, data);
+}
+
+/**
+ * tomoyo_sb_umount - Target for security_sb_umount().
+ *
+ * @mnt: Pointer to "struct vfsmount".
+ * @flags: Unmount options.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_umount(struct vfsmount *mnt, int flags)
+{
+ struct path path = { mnt, mnt->mnt_root };
+ return tomoyo_path_perm(TOMOYO_TYPE_UMOUNT, &path, NULL);
+}
+
+/**
+ * tomoyo_sb_pivotroot - Target for security_sb_pivotroot().
+ *
+ * @old_path: Pointer to "struct path".
+ * @new_path: Pointer to "struct path".
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_sb_pivotroot(struct path *old_path, struct path *new_path)
+{
+ return tomoyo_path2_perm(TOMOYO_TYPE_PIVOT_ROOT, new_path, old_path);
+}
+
+/**
+ * tomoyo_socket_listen - Check permission for listen().
+ *
+ * @sock: Pointer to "struct socket".
+ * @backlog: Backlog parameter.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_listen(struct socket *sock, int backlog)
+{
+ return tomoyo_socket_listen_permission(sock);
+}
+
+/**
+ * tomoyo_socket_connect - Check permission for connect().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_connect(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_connect_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_bind - Check permission for bind().
+ *
+ * @sock: Pointer to "struct socket".
+ * @addr: Pointer to "struct sockaddr".
+ * @addr_len: Size of @addr.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_bind(struct socket *sock, struct sockaddr *addr,
+ int addr_len)
+{
+ return tomoyo_socket_bind_permission(sock, addr, addr_len);
+}
+
+/**
+ * tomoyo_socket_sendmsg - Check permission for sendmsg().
+ *
+ * @sock: Pointer to "struct socket".
+ * @msg: Pointer to "struct msghdr".
+ * @size: Size of message.
+ *
+ * Returns 0 on success, negative value otherwise.
+ */
+static int tomoyo_socket_sendmsg(struct socket *sock, struct msghdr *msg,
+ int size)
+{
+ return tomoyo_socket_sendmsg_permission(sock, msg, size);
+}
+
/*
* tomoyo_security_ops is a "struct security_operations" which is used for
* registering TOMOYO.
@@ -203,10 +508,11 @@ static struct security_operations tomoyo_security_ops = {
.cred_alloc_blank = tomoyo_cred_alloc_blank,
.cred_prepare = tomoyo_cred_prepare,
.cred_transfer = tomoyo_cred_transfer,
+ .cred_free = tomoyo_cred_free,
.bprm_set_creds = tomoyo_bprm_set_creds,
.bprm_check_security = tomoyo_bprm_check_security,
.file_fcntl = tomoyo_file_fcntl,
- .dentry_open = tomoyo_dentry_open,
+ .file_open = tomoyo_file_open,
.path_truncate = tomoyo_path_truncate,
.path_unlink = tomoyo_path_unlink,
.path_mkdir = tomoyo_path_mkdir,
@@ -215,8 +521,28 @@ static struct security_operations tomoyo_security_ops = {
.path_mknod = tomoyo_path_mknod,
.path_link = tomoyo_path_link,
.path_rename = tomoyo_path_rename,
+ .inode_getattr = tomoyo_inode_getattr,
+ .file_ioctl = tomoyo_file_ioctl,
+ .path_chmod = tomoyo_path_chmod,
+ .path_chown = tomoyo_path_chown,
+ .path_chroot = tomoyo_path_chroot,
+ .sb_mount = tomoyo_sb_mount,
+ .sb_umount = tomoyo_sb_umount,
+ .sb_pivotroot = tomoyo_sb_pivotroot,
+ .socket_bind = tomoyo_socket_bind,
+ .socket_connect = tomoyo_socket_connect,
+ .socket_listen = tomoyo_socket_listen,
+ .socket_sendmsg = tomoyo_socket_sendmsg,
};
+/* Lock for GC. */
+DEFINE_SRCU(tomoyo_ss);
+
+/**
+ * tomoyo_init - Register TOMOYO Linux as a LSM module.
+ *
+ * Returns 0.
+ */
static int __init tomoyo_init(void)
{
struct cred *cred = (struct cred *) current_cred();
@@ -228,7 +554,7 @@ static int __init tomoyo_init(void)
panic("Failure registering TOMOYO Linux");
printk(KERN_INFO "TOMOYO Linux initialized\n");
cred->security = &tomoyo_kernel_domain;
- tomoyo_realpath_init();
+ tomoyo_mm_init();
return 0;
}
diff --git a/security/tomoyo/tomoyo.h b/security/tomoyo/tomoyo.h
deleted file mode 100644
index ed758325b1a..00000000000
--- a/security/tomoyo/tomoyo.h
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * security/tomoyo/tomoyo.h
- *
- * Implementation of the Domain-Based Mandatory Access Control.
- *
- * Copyright (C) 2005-2009 NTT DATA CORPORATION
- *
- * Version: 2.2.0 2009/04/01
- *
- */
-
-#ifndef _SECURITY_TOMOYO_TOMOYO_H
-#define _SECURITY_TOMOYO_TOMOYO_H
-
-struct tomoyo_path_info;
-struct path;
-struct inode;
-struct linux_binprm;
-struct pt_regs;
-
-int tomoyo_check_exec_perm(struct tomoyo_domain_info *domain,
- const struct tomoyo_path_info *filename);
-int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
- struct path *path, const int flag);
-int tomoyo_check_1path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path);
-int tomoyo_check_2path_perm(struct tomoyo_domain_info *domain,
- const u8 operation, struct path *path1,
- struct path *path2);
-int tomoyo_check_rewrite_permission(struct tomoyo_domain_info *domain,
- struct file *filp);
-int tomoyo_find_next_domain(struct linux_binprm *bprm);
-
-/* Index numbers for Access Controls. */
-
-#define TOMOYO_TYPE_SINGLE_PATH_ACL 0
-#define TOMOYO_TYPE_DOUBLE_PATH_ACL 1
-
-/* Index numbers for File Controls. */
-
-/*
- * TYPE_READ_WRITE_ACL is special. TYPE_READ_WRITE_ACL is automatically set
- * if both TYPE_READ_ACL and TYPE_WRITE_ACL are set. Both TYPE_READ_ACL and
- * TYPE_WRITE_ACL are automatically set if TYPE_READ_WRITE_ACL is set.
- * TYPE_READ_WRITE_ACL is automatically cleared if either TYPE_READ_ACL or
- * TYPE_WRITE_ACL is cleared. Both TYPE_READ_ACL and TYPE_WRITE_ACL are
- * automatically cleared if TYPE_READ_WRITE_ACL is cleared.
- */
-
-#define TOMOYO_TYPE_READ_WRITE_ACL 0
-#define TOMOYO_TYPE_EXECUTE_ACL 1
-#define TOMOYO_TYPE_READ_ACL 2
-#define TOMOYO_TYPE_WRITE_ACL 3
-#define TOMOYO_TYPE_CREATE_ACL 4
-#define TOMOYO_TYPE_UNLINK_ACL 5
-#define TOMOYO_TYPE_MKDIR_ACL 6
-#define TOMOYO_TYPE_RMDIR_ACL 7
-#define TOMOYO_TYPE_MKFIFO_ACL 8
-#define TOMOYO_TYPE_MKSOCK_ACL 9
-#define TOMOYO_TYPE_MKBLOCK_ACL 10
-#define TOMOYO_TYPE_MKCHAR_ACL 11
-#define TOMOYO_TYPE_TRUNCATE_ACL 12
-#define TOMOYO_TYPE_SYMLINK_ACL 13
-#define TOMOYO_TYPE_REWRITE_ACL 14
-#define TOMOYO_MAX_SINGLE_PATH_OPERATION 15
-
-#define TOMOYO_TYPE_LINK_ACL 0
-#define TOMOYO_TYPE_RENAME_ACL 1
-#define TOMOYO_MAX_DOUBLE_PATH_OPERATION 2
-
-#define TOMOYO_DOMAINPOLICY 0
-#define TOMOYO_EXCEPTIONPOLICY 1
-#define TOMOYO_DOMAIN_STATUS 2
-#define TOMOYO_PROCESS_STATUS 3
-#define TOMOYO_MEMINFO 4
-#define TOMOYO_SELFDOMAIN 5
-#define TOMOYO_VERSION 6
-#define TOMOYO_PROFILE 7
-#define TOMOYO_MANAGER 8
-
-extern struct tomoyo_domain_info tomoyo_kernel_domain;
-
-static inline struct tomoyo_domain_info *tomoyo_domain(void)
-{
- return current_cred()->security;
-}
-
-static inline struct tomoyo_domain_info *tomoyo_real_domain(struct task_struct
- *task)
-{
- return task_cred_xxx(task, security);
-}
-
-#endif /* !defined(_SECURITY_TOMOYO_TOMOYO_H) */
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
new file mode 100644
index 00000000000..2952ba576fb
--- /dev/null
+++ b/security/tomoyo/util.c
@@ -0,0 +1,1085 @@
+/*
+ * security/tomoyo/util.c
+ *
+ * Copyright (C) 2005-2011 NTT DATA CORPORATION
+ */
+
+#include <linux/slab.h>
+#include "common.h"
+
+/* Lock for protecting policy. */
+DEFINE_MUTEX(tomoyo_policy_lock);
+
+/* Has /sbin/init started? */
+bool tomoyo_policy_loaded;
+
+/*
+ * Mapping table from "enum tomoyo_mac_index" to
+ * "enum tomoyo_mac_category_index".
+ */
+const u8 tomoyo_index2category[TOMOYO_MAX_MAC_INDEX] = {
+ /* CONFIG::file group */
+ [TOMOYO_MAC_FILE_EXECUTE] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_OPEN] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CREATE] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_UNLINK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_GETATTR] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKDIR] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_RMDIR] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKFIFO] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKSOCK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_TRUNCATE] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_SYMLINK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKBLOCK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MKCHAR] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_LINK] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_RENAME] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CHMOD] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CHOWN] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CHGRP] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_IOCTL] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_CHROOT] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_MOUNT] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_UMOUNT] = TOMOYO_MAC_CATEGORY_FILE,
+ [TOMOYO_MAC_FILE_PIVOT_ROOT] = TOMOYO_MAC_CATEGORY_FILE,
+ /* CONFIG::network group */
+ [TOMOYO_MAC_NETWORK_INET_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_INET_RAW_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_STREAM_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_DGRAM_SEND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_BIND] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_LISTEN] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ [TOMOYO_MAC_NETWORK_UNIX_SEQPACKET_CONNECT] =
+ TOMOYO_MAC_CATEGORY_NETWORK,
+ /* CONFIG::misc group */
+ [TOMOYO_MAC_ENVIRON] = TOMOYO_MAC_CATEGORY_MISC,
+};
+
+/**
+ * tomoyo_convert_time - Convert time_t to YYYY/MM/DD hh/mm/ss.
+ *
+ * @time: Seconds since 1970/01/01 00:00:00.
+ * @stamp: Pointer to "struct tomoyo_time".
+ *
+ * Returns nothing.
+ *
+ * This function does not handle the Y2038 problem.
+ */
+void tomoyo_convert_time(time_t time, struct tomoyo_time *stamp)
+{
+ static const u16 tomoyo_eom[2][12] = {
+ { 31, 59, 90, 120, 151, 181, 212, 243, 273, 304, 334, 365 },
+ { 31, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335, 366 }
+ };
+ u16 y;
+ u8 m;
+ bool r;
+ stamp->sec = time % 60;
+ time /= 60;
+ stamp->min = time % 60;
+ time /= 60;
+ stamp->hour = time % 24;
+ time /= 24;
+ for (y = 1970; ; y++) {
+ const unsigned short days = (y & 3) ? 365 : 366;
+ if (time < days)
+ break;
+ time -= days;
+ }
+ r = (y & 3) == 0;
+ for (m = 0; m < 11 && time >= tomoyo_eom[r][m]; m++)
+ ;
+ if (m)
+ time -= tomoyo_eom[r][m - 1];
+ stamp->year = y;
+ stamp->month = ++m;
+ stamp->day = ++time;
+}
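+
+/*
+ * Worked example: time = 86399 yields 1970/01/01 23:59:59.  The leap year
+ * test above is the simplified (year % 4 == 0) rule, which agrees with the
+ * Gregorian calendar until 2100.
+ */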
+
+/**
+ * tomoyo_permstr - Find permission keywords.
+ *
+ * @string: String representation for permissions in foo/bar/buz format.
+ * @keyword: Keyword to find in @string.
+ *
+ * Returns true if @keyword was found in @string, false otherwise.
+ *
+ * This function assumes that strncmp(w1, w2, strlen(w1)) != 0 if w1 != w2.
+ */
+bool tomoyo_permstr(const char *string, const char *keyword)
+{
+ const char *cp = strstr(string, keyword);
+ if (cp)
+ return cp == string || *(cp - 1) == '/';
+ return false;
+}
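+
+/*
+ * Example: tomoyo_permstr("read/write/unlink", "write") is true because
+ * "write" is preceded by '/', while tomoyo_permstr("readwrite", "write")
+ * is false.
+ */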
+
+/**
+ * tomoyo_read_token - Read a word from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a word on success, "" otherwise.
+ *
+ * To allow the caller to skip a NULL check, this function returns "" rather
+ * than NULL if there are no more words to read.
+ */
+char *tomoyo_read_token(struct tomoyo_acl_param *param)
+{
+ char *pos = param->data;
+ char *del = strchr(pos, ' ');
+ if (del)
+ *del++ = '\0';
+ else
+ del = pos + strlen(pos);
+ param->data = del;
+ return pos;
+}
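+
+/*
+ * Example: with param->data pointing at "file read /etc/fstab", successive
+ * calls return "file", "read" and "/etc/fstab", and then "" once the line
+ * is exhausted.
+ */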
+
+/**
+ * tomoyo_get_domainname - Read a domainname from a line.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ *
+ * Returns a domainname on success, NULL otherwise.
+ */
+const struct tomoyo_path_info *tomoyo_get_domainname
+(struct tomoyo_acl_param *param)
+{
+ char *start = param->data;
+ char *pos = start;
+ while (*pos) {
+ if (*pos++ != ' ' || *pos++ == '/')
+ continue;
+ pos -= 2;
+ *pos++ = '\0';
+ break;
+ }
+ param->data = pos;
+ if (tomoyo_correct_domain(start))
+ return tomoyo_get_name(start);
+ return NULL;
+}
+
+/**
+ * tomoyo_parse_ulong - Parse an "unsigned long" value.
+ *
+ * @result: Pointer to "unsigned long".
+ * @str: Pointer to string to parse.
+ *
+ * Returns one of the values in "enum tomoyo_value_type".
+ *
+ * On success, @str is updated to point to the first character after the
+ * parsed value.
+ */
+u8 tomoyo_parse_ulong(unsigned long *result, char **str)
+{
+ const char *cp = *str;
+ char *ep;
+ int base = 10;
+ if (*cp == '0') {
+ char c = *(cp + 1);
+ if (c == 'x' || c == 'X') {
+ base = 16;
+ cp += 2;
+ } else if (c >= '0' && c <= '7') {
+ base = 8;
+ cp++;
+ }
+ }
+ *result = simple_strtoul(cp, &ep, base);
+ if (cp == ep)
+ return TOMOYO_VALUE_TYPE_INVALID;
+ *str = ep;
+ switch (base) {
+ case 16:
+ return TOMOYO_VALUE_TYPE_HEXADECIMAL;
+ case 8:
+ return TOMOYO_VALUE_TYPE_OCTAL;
+ default:
+ return TOMOYO_VALUE_TYPE_DECIMAL;
+ }
+}
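+
+/*
+ * Examples: "0x10" parses as 16 (TOMOYO_VALUE_TYPE_HEXADECIMAL), "010" as 8
+ * (TOMOYO_VALUE_TYPE_OCTAL) and "10" as 10 (TOMOYO_VALUE_TYPE_DECIMAL);
+ * "abc" returns TOMOYO_VALUE_TYPE_INVALID and leaves *str untouched.
+ */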
+
+/**
+ * tomoyo_print_ulong - Print an "unsigned long" value.
+ *
+ * @buffer: Pointer to buffer.
+ * @buffer_len: Size of @buffer.
+ * @value: An "unsigned long" value.
+ * @type: Type of @value.
+ *
+ * Returns nothing.
+ */
+void tomoyo_print_ulong(char *buffer, const int buffer_len,
+ const unsigned long value, const u8 type)
+{
+ if (type == TOMOYO_VALUE_TYPE_DECIMAL)
+ snprintf(buffer, buffer_len, "%lu", value);
+ else if (type == TOMOYO_VALUE_TYPE_OCTAL)
+ snprintf(buffer, buffer_len, "0%lo", value);
+ else if (type == TOMOYO_VALUE_TYPE_HEXADECIMAL)
+ snprintf(buffer, buffer_len, "0x%lX", value);
+ else
+ snprintf(buffer, buffer_len, "type(%u)", type);
+}
+
+/**
+ * tomoyo_parse_name_union - Parse a tomoyo_name_union.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_name_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_name_union(struct tomoyo_acl_param *param,
+ struct tomoyo_name_union *ptr)
+{
+ char *filename;
+ if (param->data[0] == '@') {
+ param->data++;
+ ptr->group = tomoyo_get_group(param, TOMOYO_PATH_GROUP);
+ return ptr->group != NULL;
+ }
+ filename = tomoyo_read_token(param);
+ if (!tomoyo_correct_word(filename))
+ return false;
+ ptr->filename = tomoyo_get_name(filename);
+ return ptr->filename != NULL;
+}
+
+/**
+ * tomoyo_parse_number_union - Parse a tomoyo_number_union.
+ *
+ * @param: Pointer to "struct tomoyo_acl_param".
+ * @ptr: Pointer to "struct tomoyo_number_union".
+ *
+ * Returns true on success, false otherwise.
+ */
+bool tomoyo_parse_number_union(struct tomoyo_acl_param *param,
+ struct tomoyo_number_union *ptr)
+{
+ char *data;
+ u8 type;
+ unsigned long v;
+ memset(ptr, 0, sizeof(*ptr));
+ if (param->data[0] == '@') {
+ param->data++;
+ ptr->group = tomoyo_get_group(param, TOMOYO_NUMBER_GROUP);
+ return ptr->group != NULL;
+ }
+ data = tomoyo_read_token(param);
+ type = tomoyo_parse_ulong(&v, &data);
+ if (type == TOMOYO_VALUE_TYPE_INVALID)
+ return false;
+ ptr->values[0] = v;
+ ptr->value_type[0] = type;
+ if (!*data) {
+ ptr->values[1] = v;
+ ptr->value_type[1] = type;
+ return true;
+ }
+ if (*data++ != '-')
+ return false;
+ type = tomoyo_parse_ulong(&v, &data);
+ if (type == TOMOYO_VALUE_TYPE_INVALID || *data || ptr->values[0] > v)
+ return false;
+ ptr->values[1] = v;
+ ptr->value_type[1] = type;
+ return true;
+}
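+
+/*
+ * Examples: "@PORTS" (group name illustrative) resolves a number_group
+ * reference, "1024-65535" stores that range, and a single token such as
+ * "0644" is parsed as octal and stored with both bounds equal to 420.
+ */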
+
+/**
+ * tomoyo_byte_range - Check whether the string is a \ooo style octal value.
+ *
+ * @str: Pointer to the string.
+ *
+ * Returns true if @str is a \ooo style octal value, false otherwise.
+ *
+ * TOMOYO uses \ooo style representation for 0x01 - 0x20 and 0x7F - 0xFF.
+ * This function verifies that \ooo is within the valid range.
+ */
+static inline bool tomoyo_byte_range(const char *str)
+{
+ return *str >= '0' && *str++ <= '3' &&
+ *str >= '0' && *str++ <= '7' &&
+ *str >= '0' && *str <= '7';
+}
+
+/**
+ * tomoyo_alphabet_char - Check whether the character is an alphabet character.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is an alphabet character, false otherwise.
+ */
+static inline bool tomoyo_alphabet_char(const char c)
+{
+ return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+}
+
+/**
+ * tomoyo_make_byte - Make byte value from three octal characters.
+ *
+ * @c1: The first character.
+ * @c2: The second character.
+ * @c3: The third character.
+ *
+ * Returns byte value.
+ */
+static inline u8 tomoyo_make_byte(const u8 c1, const u8 c2, const u8 c3)
+{
+ return ((c1 - '0') << 6) + ((c2 - '0') << 3) + (c3 - '0');
+}
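+
+/*
+ * Example: the escape "\040" decodes as tomoyo_make_byte('0', '4', '0'),
+ * i.e. 0x20 (space), which is why a space inside a pathname is always
+ * written in \ooo form.
+ */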
+
+/**
+ * tomoyo_valid - Check whether the character is a valid char.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is a valid character, false otherwise.
+ */
+static inline bool tomoyo_valid(const unsigned char c)
+{
+ return c > ' ' && c < 127;
+}
+
+/**
+ * tomoyo_invalid - Check whether the character is an invalid char.
+ *
+ * @c: The character to check.
+ *
+ * Returns true if @c is an invalid character, false otherwise.
+ */
+static inline bool tomoyo_invalid(const unsigned char c)
+{
+ return c && (c <= ' ' || c >= 127);
+}
+
+/**
+ * tomoyo_str_starts - Check whether the given string starts with the given keyword.
+ *
+ * @src: Pointer to pointer to the string.
+ * @find: Pointer to the keyword.
+ *
+ * Returns true if @src starts with @find, false otherwise.
+ *
+ * @src is updated to point to the first character after @find
+ * if @src starts with @find.
+ */
+bool tomoyo_str_starts(char **src, const char *find)
+{
+ const int len = strlen(find);
+ char *tmp = *src;
+
+ if (strncmp(tmp, find, len))
+ return false;
+ tmp += len;
+ *src = tmp;
+ return true;
+}
+
+/**
+ * tomoyo_normalize_line - Format string.
+ *
+ * @buffer: The line to normalize.
+ *
+ * Leading and trailing whitespace is removed.
+ * Runs of whitespace are packed into a single space.
+ *
+ * Returns nothing.
+ */
+void tomoyo_normalize_line(unsigned char *buffer)
+{
+ unsigned char *sp = buffer;
+ unsigned char *dp = buffer;
+ bool first = true;
+
+ while (tomoyo_invalid(*sp))
+ sp++;
+ while (*sp) {
+ if (!first)
+ *dp++ = ' ';
+ first = false;
+ while (tomoyo_valid(*sp))
+ *dp++ = *sp++;
+ while (tomoyo_invalid(*sp))
+ sp++;
+ }
+ *dp = '\0';
+}
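+
+/*
+ * Example: "  file   read   /etc/fstab  " becomes "file read /etc/fstab";
+ * tabs and any other bytes outside the printable 0x21-0x7E range act as
+ * separators.
+ */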
+
+/**
+ * tomoyo_correct_word2 - Validate a string.
+ *
+ * @string: The string to check. May be non-'\0'-terminated.
+ * @len: Length of @string.
+ *
+ * Check whether the given string follows the naming rules.
+ * Returns true if @string follows the naming rules, false otherwise.
+ */
+static bool tomoyo_correct_word2(const char *string, size_t len)
+{
+ const char *const start = string;
+ bool in_repetition = false;
+ unsigned char c;
+ unsigned char d;
+ unsigned char e;
+ if (!len)
+ goto out;
+ while (len--) {
+ c = *string++;
+ if (c == '\\') {
+ if (!len--)
+ goto out;
+ c = *string++;
+ switch (c) {
+ case '\\': /* "\\" */
+ continue;
+ case '$': /* "\$" */
+ case '+': /* "\+" */
+ case '?': /* "\?" */
+ case '*': /* "\*" */
+ case '@': /* "\@" */
+ case 'x': /* "\x" */
+ case 'X': /* "\X" */
+ case 'a': /* "\a" */
+ case 'A': /* "\A" */
+ case '-': /* "\-" */
+ continue;
+ case '{': /* "/\{" */
+ if (string - 3 < start || *(string - 3) != '/')
+ break;
+ in_repetition = true;
+ continue;
+ case '}': /* "\}/" */
+ if (*string != '/')
+ break;
+ if (!in_repetition)
+ break;
+ in_repetition = false;
+ continue;
+ case '0': /* "\ooo" */
+ case '1':
+ case '2':
+ case '3':
+ if (!len-- || !len--)
+ break;
+ d = *string++;
+ e = *string++;
+ if (d < '0' || d > '7' || e < '0' || e > '7')
+ break;
+ c = tomoyo_make_byte(c, d, e);
+ if (c <= ' ' || c >= 127)
+ continue;
+ }
+ goto out;
+ } else if (in_repetition && c == '/') {
+ goto out;
+ } else if (c <= ' ' || c >= 127) {
+ goto out;
+ }
+ }
+ if (in_repetition)
+ goto out;
+ return true;
+ out:
+ return false;
+}
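+
+/*
+ * Examples: "/etc/fstab" and "/tmp/file\040name" are accepted, while
+ * "/tmp/file name" (raw space) and "/tmp/file\101name" are rejected:
+ * \ooo escapes are only valid for 0x01-0x20 and 0x7F-0xFF, so printable
+ * characters such as 'A' (\101) must be written literally.
+ */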
+
+/**
+ * tomoyo_correct_word - Validate a string.
+ *
+ * @string: The string to check.
+ *
+ * Check whether the given string follows the naming rules.
+ * Returns true if @string follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_word(const char *string)
+{
+ return tomoyo_correct_word2(string, strlen(string));
+}
+
+/**
+ * tomoyo_correct_path - Validate a pathname.
+ *
+ * @filename: The pathname to check.
+ *
+ * Check whether the given pathname follows the naming rules.
+ * Returns true if @filename follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_path(const char *filename)
+{
+ return *filename == '/' && tomoyo_correct_word(filename);
+}
+
+/**
+ * tomoyo_correct_domain - Check whether the given domainname follows the naming rules.
+ *
+ * @domainname: The domainname to check.
+ *
+ * Returns true if @domainname follows the naming rules, false otherwise.
+ */
+bool tomoyo_correct_domain(const unsigned char *domainname)
+{
+ if (!domainname || !tomoyo_domain_def(domainname))
+ return false;
+ domainname = strchr(domainname, ' ');
+ if (!domainname++)
+ return true;
+ while (1) {
+ const unsigned char *cp = strchr(domainname, ' ');
+ if (!cp)
+ break;
+ if (*domainname != '/' ||
+ !tomoyo_correct_word2(domainname, cp - domainname))
+ return false;
+ domainname = cp + 1;
+ }
+ return tomoyo_correct_path(domainname);
+}
+
+/**
+ * tomoyo_domain_def - Check whether the given token can be a domainname.
+ *
+ * @buffer: The token to check.
+ *
+ * Returns true if @buffer could be a domainname, false otherwise.
+ */
+bool tomoyo_domain_def(const unsigned char *buffer)
+{
+ const unsigned char *cp;
+ int len;
+ if (*buffer != '<')
+ return false;
+ cp = strchr(buffer, ' ');
+ if (!cp)
+ len = strlen(buffer);
+ else
+ len = cp - buffer;
+ if (buffer[len - 1] != '>' ||
+ !tomoyo_correct_word2(buffer + 1, len - 2))
+ return false;
+ return true;
+}
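+
+/*
+ * Examples: "<kernel>" and "<kernel> /usr/sbin/sshd" both return true
+ * (only the token before the first space is examined), while "kernel"
+ * and "<>" return false.
+ */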
+
+/**
+ * tomoyo_find_domain - Find a domain by the given name.
+ *
+ * @domainname: The domainname to find.
+ *
+ * Returns pointer to "struct tomoyo_domain_info" if found, NULL otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+struct tomoyo_domain_info *tomoyo_find_domain(const char *domainname)
+{
+ struct tomoyo_domain_info *domain;
+ struct tomoyo_path_info name;
+
+ name.name = domainname;
+ tomoyo_fill_path_info(&name);
+ list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
+ if (!domain->is_deleted &&
+ !tomoyo_pathcmp(&name, domain->domainname))
+ return domain;
+ }
+ return NULL;
+}
+
+/**
+ * tomoyo_const_part_length - Evaluate the initial length without a pattern in a token.
+ *
+ * @filename: The string to evaluate.
+ *
+ * Returns the initial length without a pattern in @filename.
+ */
+static int tomoyo_const_part_length(const char *filename)
+{
+ char c;
+ int len = 0;
+
+ if (!filename)
+ return 0;
+ while ((c = *filename++) != '\0') {
+ if (c != '\\') {
+ len++;
+ continue;
+ }
+ c = *filename++;
+ switch (c) {
+ case '\\': /* "\\" */
+ len += 2;
+ continue;
+ case '0': /* "\ooo" */
+ case '1':
+ case '2':
+ case '3':
+ c = *filename++;
+ if (c < '0' || c > '7')
+ break;
+ c = *filename++;
+ if (c < '0' || c > '7')
+ break;
+ len += 4;
+ continue;
+ }
+ break;
+ }
+ return len;
+}
+
+/**
+ * tomoyo_fill_path_info - Fill in "struct tomoyo_path_info" members.
+ *
+ * @ptr: Pointer to "struct tomoyo_path_info" to fill in.
+ *
+ * The caller sets "struct tomoyo_path_info"->name.
+ */
+void tomoyo_fill_path_info(struct tomoyo_path_info *ptr)
+{
+ const char *name = ptr->name;
+ const int len = strlen(name);
+
+ ptr->const_len = tomoyo_const_part_length(name);
+ ptr->is_dir = len && (name[len - 1] == '/');
+ ptr->is_patterned = (ptr->const_len < len);
+ ptr->hash = full_name_hash(name, len);
+}
+
+/**
+ * tomoyo_file_matches_pattern2 - Pattern matching without '/' character and "\-" pattern.
+ *
+ * @filename: The start of string to check.
+ * @filename_end: The end of string to check.
+ * @pattern: The start of pattern to compare.
+ * @pattern_end: The end of pattern to compare.
+ *
+ * Returns true if @filename matches @pattern, false otherwise.
+ */
+static bool tomoyo_file_matches_pattern2(const char *filename,
+ const char *filename_end,
+ const char *pattern,
+ const char *pattern_end)
+{
+ while (filename < filename_end && pattern < pattern_end) {
+ char c;
+ if (*pattern != '\\') {
+ if (*filename++ != *pattern++)
+ return false;
+ continue;
+ }
+ c = *filename;
+ pattern++;
+ switch (*pattern) {
+ int i;
+ int j;
+ case '?':
+ if (c == '/') {
+ return false;
+ } else if (c == '\\') {
+ if (filename[1] == '\\')
+ filename++;
+ else if (tomoyo_byte_range(filename + 1))
+ filename += 3;
+ else
+ return false;
+ }
+ break;
+ case '\\':
+ if (c != '\\')
+ return false;
+ if (*++filename != '\\')
+ return false;
+ break;
+ case '+':
+ if (!isdigit(c))
+ return false;
+ break;
+ case 'x':
+ if (!isxdigit(c))
+ return false;
+ break;
+ case 'a':
+ if (!tomoyo_alphabet_char(c))
+ return false;
+ break;
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ if (c == '\\' && tomoyo_byte_range(filename + 1)
+ && strncmp(filename + 1, pattern, 3) == 0) {
+ filename += 3;
+ pattern += 2;
+ break;
+ }
+ return false; /* Not matched. */
+ case '*':
+ case '@':
+ for (i = 0; i <= filename_end - filename; i++) {
+ if (tomoyo_file_matches_pattern2(
+ filename + i, filename_end,
+ pattern + 1, pattern_end))
+ return true;
+ c = filename[i];
+ if (c == '.' && *pattern == '@')
+ break;
+ if (c != '\\')
+ continue;
+ if (filename[i + 1] == '\\')
+ i++;
+ else if (tomoyo_byte_range(filename + i + 1))
+ i += 3;
+ else
+ break; /* Bad pattern. */
+ }
+ return false; /* Not matched. */
+ default:
+ j = 0;
+ c = *pattern;
+ if (c == '$') {
+ while (isdigit(filename[j]))
+ j++;
+ } else if (c == 'X') {
+ while (isxdigit(filename[j]))
+ j++;
+ } else if (c == 'A') {
+ while (tomoyo_alphabet_char(filename[j]))
+ j++;
+ }
+ for (i = 1; i <= j; i++) {
+ if (tomoyo_file_matches_pattern2(
+ filename + i, filename_end,
+ pattern + 1, pattern_end))
+ return true;
+ }
+ return false; /* Not matched or bad pattern. */
+ }
+ filename++;
+ pattern++;
+ }
+ while (*pattern == '\\' &&
+ (*(pattern + 1) == '*' || *(pattern + 1) == '@'))
+ pattern += 2;
+ return filename == filename_end && pattern == pattern_end;
+}
+
+/**
+ * tomoyo_file_matches_pattern - Pattern matching without '/' character.
+ *
+ * @filename: The start of string to check.
+ * @filename_end: The end of string to check.
+ * @pattern: The start of pattern to compare.
+ * @pattern_end: The end of pattern to compare.
+ *
+ * Returns true if @filename matches @pattern, false otherwise.
+ */
+static bool tomoyo_file_matches_pattern(const char *filename,
+ const char *filename_end,
+ const char *pattern,
+ const char *pattern_end)
+{
+ const char *pattern_start = pattern;
+ bool first = true;
+ bool result;
+
+ while (pattern < pattern_end - 1) {
+ /* Split at "\-" pattern. */
+ if (*pattern++ != '\\' || *pattern++ != '-')
+ continue;
+ result = tomoyo_file_matches_pattern2(filename,
+ filename_end,
+ pattern_start,
+ pattern - 2);
+ if (first)
+ result = !result;
+ if (result)
+ return false;
+ first = false;
+ pattern_start = pattern;
+ }
+ result = tomoyo_file_matches_pattern2(filename, filename_end,
+ pattern_start, pattern_end);
+ return first ? result : !result;
+}
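+
+/*
+ * Example of the "\-" subtraction operator handled here: the component
+ * pattern "\*\-bin\-sbin" matches any single path component that matches
+ * the first alternative ("\*") and none of the following ones, i.e.
+ * anything except "bin" and "sbin".
+ */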
+
+/**
+ * tomoyo_path_matches_pattern2 - Do pathname pattern matching.
+ *
+ * @f: The start of string to check.
+ * @p: The start of pattern to compare.
+ *
+ * Returns true if @f matches @p, false otherwise.
+ */
+static bool tomoyo_path_matches_pattern2(const char *f, const char *p)
+{
+ const char *f_delimiter;
+ const char *p_delimiter;
+
+ while (*f && *p) {
+ f_delimiter = strchr(f, '/');
+ if (!f_delimiter)
+ f_delimiter = f + strlen(f);
+ p_delimiter = strchr(p, '/');
+ if (!p_delimiter)
+ p_delimiter = p + strlen(p);
+ if (*p == '\\' && *(p + 1) == '{')
+ goto recursive;
+ if (!tomoyo_file_matches_pattern(f, f_delimiter, p,
+ p_delimiter))
+ return false;
+ f = f_delimiter;
+ if (*f)
+ f++;
+ p = p_delimiter;
+ if (*p)
+ p++;
+ }
+ /* Ignore trailing "\*" and "\@" in @pattern. */
+ while (*p == '\\' &&
+ (*(p + 1) == '*' || *(p + 1) == '@'))
+ p += 2;
+ return !*f && !*p;
+ recursive:
+ /*
+ * The "\{" pattern is permitted only after '/' character.
+ * This guarantees that below "*(p - 1)" is safe.
+ * Also, the "\}" pattern is permitted only before '/' character
+ * so that "\{" + "\}" pair will not break the "\-" operator.
+ */
+ if (*(p - 1) != '/' || p_delimiter <= p + 3 || *p_delimiter != '/' ||
+ *(p_delimiter - 1) != '}' || *(p_delimiter - 2) != '\\')
+ return false; /* Bad pattern. */
+ do {
+ /* Compare current component with pattern. */
+ if (!tomoyo_file_matches_pattern(f, f_delimiter, p + 2,
+ p_delimiter - 2))
+ break;
+ /* Proceed to next component. */
+ f = f_delimiter;
+ if (!*f)
+ break;
+ f++;
+ /* Continue comparison. */
+ if (tomoyo_path_matches_pattern2(f, p_delimiter + 1))
+ return true;
+ f_delimiter = strchr(f, '/');
+ } while (f_delimiter);
+ return false; /* Not matched. */
+}
+
+/**
+ * tomoyo_path_matches_pattern - Check whether the given filename matches the given pattern.
+ *
+ * @filename: The filename to check.
+ * @pattern: The pattern to compare.
+ *
+ * Returns true if matches, false otherwise.
+ *
+ * The following patterns are available.
+ * \\ \ itself.
+ * \ooo Octal representation of a byte.
+ * \* Zero or more repetitions of characters other than '/'.
+ * \@ Zero or more repetitions of characters other than '/' or '.'.
+ * \? 1 byte character other than '/'.
+ * \$ One or more repetitions of decimal digits.
+ * \+ 1 decimal digit.
+ * \X One or more repetitions of hexadecimal digits.
+ * \x 1 hexadecimal digit.
+ * \A One or more repetitions of alphabet characters.
+ * \a 1 alphabet character.
+ *
+ * \- Subtraction operator.
+ *
+ * /\{dir\}/ '/' + 'One or more repetitions of dir/' (e.g. /dir/ /dir/dir/
+ * /dir/dir/dir/ ).
+ */
+bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
+ const struct tomoyo_path_info *pattern)
+{
+ const char *f = filename->name;
+ const char *p = pattern->name;
+ const int len = pattern->const_len;
+
+	/* If @pattern contains no pattern, we can use tomoyo_pathcmp(). */
+ if (!pattern->is_patterned)
+ return !tomoyo_pathcmp(filename, pattern);
+ /* Don't compare directory and non-directory. */
+ if (filename->is_dir != pattern->is_dir)
+ return false;
+ /* Compare the initial length without patterns. */
+ if (strncmp(f, p, len))
+ return false;
+ f += len;
+ p += len;
+ return tomoyo_path_matches_pattern2(f, p);
+}
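+
+/*
+ * Examples using the table above: "/tmp/file\$" matches "/tmp/file123"
+ * but not "/tmp/fileabc", and "/var/log/\*" matches "/var/log/messages"
+ * but not "/var/log/apt/term.log", since "\*" never crosses a '/'.
+ */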
+
+/**
+ * tomoyo_get_exe - Get tomoyo_realpath() of current process.
+ *
+ * Returns the tomoyo_realpath() of current process on success, NULL otherwise.
+ *
+ * This function uses kzalloc(), so the caller must call kfree()
+ * if this function didn't return NULL.
+ */
+const char *tomoyo_get_exe(void)
+{
+ struct mm_struct *mm = current->mm;
+ const char *cp = NULL;
+
+ if (!mm)
+ return NULL;
+ down_read(&mm->mmap_sem);
+ if (mm->exe_file)
+ cp = tomoyo_realpath_from_path(&mm->exe_file->f_path);
+ up_read(&mm->mmap_sem);
+ return cp;
+}
+
+/**
+ * tomoyo_get_mode - Get MAC mode.
+ *
+ * @ns: Pointer to "struct tomoyo_policy_namespace".
+ * @profile: Profile number.
+ * @index: Index number of functionality.
+ *
+ * Returns mode.
+ */
+int tomoyo_get_mode(const struct tomoyo_policy_namespace *ns, const u8 profile,
+ const u8 index)
+{
+ u8 mode;
+ struct tomoyo_profile *p;
+
+ if (!tomoyo_policy_loaded)
+ return TOMOYO_CONFIG_DISABLED;
+ p = tomoyo_profile(ns, profile);
+ mode = p->config[index];
+ if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+ mode = p->config[tomoyo_index2category[index]
+ + TOMOYO_MAX_MAC_INDEX];
+ if (mode == TOMOYO_CONFIG_USE_DEFAULT)
+ mode = p->default_config;
+ return mode & 3;
+}
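+
+/*
+ * Resolution order illustrated: the per-functionality entry
+ * (p->config[index]) wins if set, otherwise the per-category entry
+ * (p->config[category + TOMOYO_MAX_MAC_INDEX]), otherwise the profile's
+ * default_config.  The low two bits select the effective mode.
+ */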
+
+/**
+ * tomoyo_init_request_info - Initialize "struct tomoyo_request_info" members.
+ *
+ * @r: Pointer to "struct tomoyo_request_info" to initialize.
+ * @domain: Pointer to "struct tomoyo_domain_info". NULL for tomoyo_domain().
+ * @index: Index number of functionality.
+ *
+ * Returns mode.
+ */
+int tomoyo_init_request_info(struct tomoyo_request_info *r,
+ struct tomoyo_domain_info *domain, const u8 index)
+{
+ u8 profile;
+ memset(r, 0, sizeof(*r));
+ if (!domain)
+ domain = tomoyo_domain();
+ r->domain = domain;
+ profile = domain->profile;
+ r->profile = profile;
+ r->type = index;
+ r->mode = tomoyo_get_mode(domain->ns, profile, index);
+ return r->mode;
+}
+
+/**
+ * tomoyo_domain_quota_is_ok - Check for domain's quota.
+ *
+ * @r: Pointer to "struct tomoyo_request_info".
+ *
+ * Returns true if the domain has not exceeded its quota, false otherwise.
+ *
+ * Caller holds tomoyo_read_lock().
+ */
+bool tomoyo_domain_quota_is_ok(struct tomoyo_request_info *r)
+{
+ unsigned int count = 0;
+ struct tomoyo_domain_info *domain = r->domain;
+ struct tomoyo_acl_info *ptr;
+
+ if (r->mode != TOMOYO_CONFIG_LEARNING)
+ return false;
+ if (!domain)
+ return true;
+ list_for_each_entry_rcu(ptr, &domain->acl_info_list, list) {
+ u16 perm;
+ u8 i;
+ if (ptr->is_deleted)
+ continue;
+ switch (ptr->type) {
+ case TOMOYO_TYPE_PATH_ACL:
+ perm = container_of(ptr, struct tomoyo_path_acl, head)
+ ->perm;
+ break;
+ case TOMOYO_TYPE_PATH2_ACL:
+ perm = container_of(ptr, struct tomoyo_path2_acl, head)
+ ->perm;
+ break;
+ case TOMOYO_TYPE_PATH_NUMBER_ACL:
+ perm = container_of(ptr, struct tomoyo_path_number_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_MKDEV_ACL:
+ perm = container_of(ptr, struct tomoyo_mkdev_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_INET_ACL:
+ perm = container_of(ptr, struct tomoyo_inet_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_UNIX_ACL:
+ perm = container_of(ptr, struct tomoyo_unix_acl,
+ head)->perm;
+ break;
+ case TOMOYO_TYPE_MANUAL_TASK_ACL:
+ perm = 0;
+ break;
+ default:
+ perm = 1;
+ }
+ for (i = 0; i < 16; i++)
+ if (perm & (1 << i))
+ count++;
+ }
+ if (count < tomoyo_profile(domain->ns, domain->profile)->
+ pref[TOMOYO_PREF_MAX_LEARNING_ENTRY])
+ return true;
+ if (!domain->flags[TOMOYO_DIF_QUOTA_WARNED]) {
+ domain->flags[TOMOYO_DIF_QUOTA_WARNED] = true;
+ /* r->granted = false; */
+ tomoyo_write_log(r, "%s", tomoyo_dif[TOMOYO_DIF_QUOTA_WARNED]);
+ printk(KERN_WARNING "WARNING: "
+ "Domain '%s' has too many ACLs to hold. "
+ "Stopped learning mode.\n", domain->domainname->name);
+ }
+ return false;
+}
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
new file mode 100644
index 00000000000..20ef5143c0c
--- /dev/null
+++ b/security/yama/Kconfig
@@ -0,0 +1,21 @@
+config SECURITY_YAMA
+ bool "Yama support"
+ depends on SECURITY
+ select SECURITYFS
+ select SECURITY_PATH
+ default n
+ help
+ This selects Yama, which extends DAC support with additional
+ system-wide security settings beyond regular Linux discretionary
+ access controls. Currently available is ptrace scope restriction.
+ Further information can be found in Documentation/security/Yama.txt.
+
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_YAMA_STACKED
+ bool "Yama stacked with other LSMs"
+ depends on SECURITY_YAMA
+ default n
+ help
+ When Yama is built into the kernel, force it to stack with the
+ selected primary LSM.
diff --git a/security/yama/Makefile b/security/yama/Makefile
new file mode 100644
index 00000000000..8b5e0658845
--- /dev/null
+++ b/security/yama/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SECURITY_YAMA) := yama.o
+
+yama-y := yama_lsm.o
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
new file mode 100644
index 00000000000..13c88fbcf03
--- /dev/null
+++ b/security/yama/yama_lsm.c
@@ -0,0 +1,443 @@
+/*
+ * Yama Linux Security Module
+ *
+ * Author: Kees Cook <keescook@chromium.org>
+ *
+ * Copyright (C) 2010 Canonical, Ltd.
+ * Copyright (C) 2011 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/security.h>
+#include <linux/sysctl.h>
+#include <linux/ptrace.h>
+#include <linux/prctl.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+
+#define YAMA_SCOPE_DISABLED 0
+#define YAMA_SCOPE_RELATIONAL 1
+#define YAMA_SCOPE_CAPABILITY 2
+#define YAMA_SCOPE_NO_ATTACH 3
+
+static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
+
+/* describe a ptrace relationship for potential exception */
+struct ptrace_relation {
+ struct task_struct *tracer;
+ struct task_struct *tracee;
+ bool invalid;
+ struct list_head node;
+ struct rcu_head rcu;
+};
+
+static LIST_HEAD(ptracer_relations);
+static DEFINE_SPINLOCK(ptracer_relations_lock);
+
+static void yama_relation_cleanup(struct work_struct *work);
+static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
+
+/**
+ * yama_relation_cleanup - remove invalid entries from the relation list
+ *
+ */
+static void yama_relation_cleanup(struct work_struct *work)
+{
+ struct ptrace_relation *relation;
+
+ spin_lock(&ptracer_relations_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid) {
+ list_del_rcu(&relation->node);
+ kfree_rcu(relation, rcu);
+ }
+ }
+ rcu_read_unlock();
+ spin_unlock(&ptracer_relations_lock);
+}
+
+/**
+ * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
+ * @tracer: the task_struct of the process doing the ptrace
+ * @tracee: the task_struct of the process to be ptraced
+ *
+ * Each tracee can have, at most, one tracer registered. Each time this
+ * is called, the previously registered tracer is replaced for the tracee.
+ *
+ * Returns 0 if relationship was added, -ve on error.
+ */
+static int yama_ptracer_add(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ struct ptrace_relation *relation, *added;
+
+ added = kmalloc(sizeof(*added), GFP_KERNEL);
+ if (!added)
+ return -ENOMEM;
+
+ added->tracee = tracee;
+ added->tracer = tracer;
+ added->invalid = false;
+
+ spin_lock(&ptracer_relations_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee) {
+ list_replace_rcu(&relation->node, &added->node);
+ kfree_rcu(relation, rcu);
+ goto out;
+ }
+ }
+
+ list_add_rcu(&added->node, &ptracer_relations);
+
+out:
+ rcu_read_unlock();
+ spin_unlock(&ptracer_relations_lock);
+ return 0;
+}
+
+/**
+ * yama_ptracer_del - remove exceptions related to the given tasks
+ * @tracer: remove any relation where tracer task matches
+ * @tracee: remove any relation where tracee task matches
+ */
+static void yama_ptracer_del(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ struct ptrace_relation *relation;
+ bool marked = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee ||
+ (tracer && relation->tracer == tracer)) {
+ relation->invalid = true;
+ marked = true;
+ }
+ }
+ rcu_read_unlock();
+
+ if (marked)
+ schedule_work(&yama_relation_work);
+}
+
+/**
+ * yama_task_free - check for task to remove from the ptracer exception list
+ * @task: task being removed
+ */
+void yama_task_free(struct task_struct *task)
+{
+ yama_ptracer_del(task, task);
+}
+
+/**
+ * yama_task_prctl - check for Yama-specific prctl operations
+ * @option: operation
+ * @arg2: argument
+ * @arg3: argument
+ * @arg4: argument
+ * @arg5: argument
+ *
+ * Return 0 on success, -ve on error. -ENOSYS is returned when Yama
+ * does not handle the given option.
+ */
+int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5)
+{
+ int rc;
+ struct task_struct *myself = current;
+
+ rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
+ if (rc != -ENOSYS)
+ return rc;
+
+ switch (option) {
+ case PR_SET_PTRACER:
+ /* Since a thread can call prctl(), find the group leader
+ * before calling _add() or _del() on it, since we want
+ * process-level granularity of control. The tracer group
+ * leader checking is handled later when walking the ancestry
+ * at the time of PTRACE_ATTACH check.
+ */
+ rcu_read_lock();
+ if (!thread_group_leader(myself))
+ myself = rcu_dereference(myself->group_leader);
+ get_task_struct(myself);
+ rcu_read_unlock();
+
+ if (arg2 == 0) {
+ yama_ptracer_del(NULL, myself);
+ rc = 0;
+ } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
+ rc = yama_ptracer_add(NULL, myself);
+ } else {
+ struct task_struct *tracer;
+
+ rcu_read_lock();
+ tracer = find_task_by_vpid(arg2);
+ if (tracer)
+ get_task_struct(tracer);
+ else
+ rc = -EINVAL;
+ rcu_read_unlock();
+
+ if (tracer) {
+ rc = yama_ptracer_add(tracer, myself);
+ put_task_struct(tracer);
+ }
+ }
+
+ put_task_struct(myself);
+ break;
+ }
+
+ return rc;
+}
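+
+/*
+ * Example usage from a tracee (dbg_pid is illustrative):
+ *   prctl(PR_SET_PTRACER, dbg_pid, 0, 0, 0);            allow dbg_pid
+ *   prctl(PR_SET_PTRACER, PR_SET_PTRACER_ANY, 0, 0, 0); allow any tracer
+ *   prctl(PR_SET_PTRACER, 0, 0, 0, 0);                  clear the exception
+ */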
+
+/**
+ * task_is_descendant - walk up a process family tree looking for a match
+ * @parent: the process to compare against while walking up from child
+ * @child: the process to start from while looking upwards for parent
+ *
+ * Returns 1 if child is a descendant of parent, 0 if not.
+ */
+static int task_is_descendant(struct task_struct *parent,
+ struct task_struct *child)
+{
+ int rc = 0;
+ struct task_struct *walker = child;
+
+ if (!parent || !child)
+ return 0;
+
+ rcu_read_lock();
+ if (!thread_group_leader(parent))
+ parent = rcu_dereference(parent->group_leader);
+ while (walker->pid > 0) {
+ if (!thread_group_leader(walker))
+ walker = rcu_dereference(walker->group_leader);
+ if (walker == parent) {
+ rc = 1;
+ break;
+ }
+ walker = rcu_dereference(walker->real_parent);
+ }
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/**
+ * ptracer_exception_found - tracer registered as exception for this tracee
+ * @tracer: the task_struct of the process attempting ptrace
+ * @tracee: the task_struct of the process to be ptraced
+ *
+ * Returns 1 if tracer is the ptracer exception registered for tracee
+ * (or a descendant of that exception), 0 otherwise.
+ */
+static int ptracer_exception_found(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ int rc = 0;
+ struct ptrace_relation *relation;
+ struct task_struct *parent = NULL;
+ bool found = false;
+
+ rcu_read_lock();
+ if (!thread_group_leader(tracee))
+ tracee = rcu_dereference(tracee->group_leader);
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee) {
+ parent = relation->tracer;
+ found = true;
+ break;
+ }
+ }
+
+ if (found && (parent == NULL || task_is_descendant(parent, tracer)))
+ rc = 1;
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/**
+ * yama_ptrace_access_check - validate PTRACE_ATTACH calls
+ * @child: task that current task is attempting to ptrace
+ * @mode: ptrace attach mode
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+int yama_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ int rc;
+
+ /* If standard caps disallows it, so does Yama. We should
+ * only tighten restrictions further.
+ */
+ rc = cap_ptrace_access_check(child, mode);
+ if (rc)
+ return rc;
+
+ /* require ptrace target be a child of ptracer on attach */
+ if (mode == PTRACE_MODE_ATTACH) {
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_DISABLED:
+ /* No additional restrictions. */
+ break;
+ case YAMA_SCOPE_RELATIONAL:
+ rcu_read_lock();
+ if (!task_is_descendant(current, child) &&
+ !ptracer_exception_found(current, child) &&
+ !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ rc = -EPERM;
+ rcu_read_unlock();
+ break;
+ case YAMA_SCOPE_CAPABILITY:
+ rcu_read_lock();
+ if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ rc = -EPERM;
+ rcu_read_unlock();
+ break;
+ case YAMA_SCOPE_NO_ATTACH:
+ default:
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ if (rc) {
+ printk_ratelimited(KERN_NOTICE
+ "ptrace of pid %d was attempted by: %s (pid %d)\n",
+ child->pid, current->comm, current->pid);
+ }
+
+ return rc;
+}
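+
+/*
+ * Scope summary for the PTRACE_MODE_ATTACH check above:
+ *   0: classic ptrace permissions only.
+ *   1: the tracee must be a descendant of the tracer, or have registered
+ *      an exception, or the tracer needs CAP_SYS_PTRACE.
+ *   2: the tracer needs CAP_SYS_PTRACE in the tracee's user namespace.
+ *   3: PTRACE_ATTACH is denied for everyone.
+ */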
+
+/**
+ * yama_ptrace_traceme - validate PTRACE_TRACEME calls
+ * @parent: task that will become the ptracer of the current task
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+int yama_ptrace_traceme(struct task_struct *parent)
+{
+ int rc;
+
+ /* If standard caps disallows it, so does Yama. We should
+ * only tighten restrictions further.
+ */
+ rc = cap_ptrace_traceme(parent);
+ if (rc)
+ return rc;
+
+ /* Only disallow PTRACE_TRACEME on more aggressive settings. */
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_CAPABILITY:
+ if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
+ rc = -EPERM;
+ break;
+ case YAMA_SCOPE_NO_ATTACH:
+ rc = -EPERM;
+ break;
+ }
+
+ if (rc) {
+ printk_ratelimited(KERN_NOTICE
+ "ptraceme of pid %d was attempted by: %s (pid %d)\n",
+ current->pid, parent->comm, parent->pid);
+ }
+
+ return rc;
+}
+
+#ifndef CONFIG_SECURITY_YAMA_STACKED
+static struct security_operations yama_ops = {
+ .name = "yama",
+
+ .ptrace_access_check = yama_ptrace_access_check,
+ .ptrace_traceme = yama_ptrace_traceme,
+ .task_prctl = yama_task_prctl,
+ .task_free = yama_task_free,
+};
+#endif
+
+#ifdef CONFIG_SYSCTL
+static int yama_dointvec_minmax(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int rc;
+
+ if (write && !capable(CAP_SYS_PTRACE))
+ return -EPERM;
+
+ rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (rc)
+ return rc;
+
+ /* Lock the max value if it ever gets set. */
+ if (write && *(int *)table->data == *(int *)table->extra2)
+ table->extra1 = table->extra2;
+
+ return rc;
+}
+
+static int zero;
+static int max_scope = YAMA_SCOPE_NO_ATTACH;
+
+struct ctl_path yama_sysctl_path[] = {
+ { .procname = "kernel", },
+ { .procname = "yama", },
+ { }
+};
+
+static struct ctl_table yama_sysctl_table[] = {
+ {
+ .procname = "ptrace_scope",
+ .data = &ptrace_scope,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = yama_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &max_scope,
+ },
+ { }
+};
+#endif /* CONFIG_SYSCTL */
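+
+/*
+ * The scope is exposed as /proc/sys/kernel/yama/ptrace_scope (0-3);
+ * writes require CAP_SYS_PTRACE, and once the value reaches the maximum
+ * (YAMA_SCOPE_NO_ATTACH) the handler above pins extra1 to extra2 so it
+ * can no longer be lowered.
+ */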
+
+static __init int yama_init(void)
+{
+#ifndef CONFIG_SECURITY_YAMA_STACKED
+ if (!security_module_enable(&yama_ops))
+ return 0;
+#endif
+
+ printk(KERN_INFO "Yama: becoming mindful.\n");
+
+#ifndef CONFIG_SECURITY_YAMA_STACKED
+ if (register_security(&yama_ops))
+ panic("Yama: kernel registration failed.\n");
+#endif
+
+#ifdef CONFIG_SYSCTL
+ if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
+ panic("Yama: sysctl registration failed.\n");
+#endif
+
+ return 0;
+}
+
+security_initcall(yama_init);