Diffstat (limited to 'security')
-rw-r--r--  security/Kconfig | 76
-rw-r--r--  security/Makefile | 13
-rw-r--r--  security/apparmor/.gitignore | 1
-rw-r--r--  security/apparmor/Kconfig | 12
-rw-r--r--  security/apparmor/Makefile | 34
-rw-r--r--  security/apparmor/apparmorfs.c | 805
-rw-r--r--  security/apparmor/audit.c | 60
-rw-r--r--  security/apparmor/capability.c | 28
-rw-r--r--  security/apparmor/context.c | 56
-rw-r--r--  security/apparmor/crypto.c | 95
-rw-r--r--  security/apparmor/domain.c | 117
-rw-r--r--  security/apparmor/file.c | 93
-rw-r--r--  security/apparmor/include/apparmor.h | 34
-rw-r--r--  security/apparmor/include/apparmorfs.h | 84
-rw-r--r--  security/apparmor/include/audit.h | 38
-rw-r--r--  security/apparmor/include/capability.h | 9
-rw-r--r--  security/apparmor/include/context.h | 76
-rw-r--r--  security/apparmor/include/crypto.h | 36
-rw-r--r--  security/apparmor/include/file.h | 20
-rw-r--r--  security/apparmor/include/ipc.h | 4
-rw-r--r--  security/apparmor/include/match.h | 24
-rw-r--r--  security/apparmor/include/path.h | 3
-rw-r--r--  security/apparmor/include/policy.h | 249
-rw-r--r--  security/apparmor/include/policy_unpack.h | 21
-rw-r--r--  security/apparmor/include/procattr.h | 1
-rw-r--r--  security/apparmor/include/resource.h | 4
-rw-r--r--  security/apparmor/include/sid.h | 4
-rw-r--r--  security/apparmor/ipc.c | 34
-rw-r--r--  security/apparmor/lib.c | 63
-rw-r--r--  security/apparmor/lsm.c | 122
-rw-r--r--  security/apparmor/match.c | 95
-rw-r--r--  security/apparmor/path.c | 58
-rw-r--r--  security/apparmor/policy.c | 711
-rw-r--r--  security/apparmor/policy_unpack.c | 190
-rw-r--r--  security/apparmor/procattr.c | 8
-rw-r--r--  security/apparmor/resource.c | 34
-rw-r--r--  security/capability.c | 100
-rw-r--r--  security/commoncap.c | 121
-rw-r--r--  security/device_cgroup.c | 793
-rw-r--r--  security/integrity/Kconfig | 27
-rw-r--r--  security/integrity/Makefile | 6
-rw-r--r--  security/integrity/digsig.c | 14
-rw-r--r--  security/integrity/digsig_asymmetric.c | 104
-rw-r--r--  security/integrity/evm/Kconfig | 45
-rw-r--r--  security/integrity/evm/evm.h | 33
-rw-r--r--  security/integrity/evm/evm_crypto.c | 21
-rw-r--r--  security/integrity/evm/evm_main.c | 69
-rw-r--r--  security/integrity/evm/evm_posix_acl.c | 3
-rw-r--r--  security/integrity/evm/evm_secfs.c | 12
-rw-r--r--  security/integrity/iint.c | 74
-rw-r--r--  security/integrity/ima/Kconfig | 95
-rw-r--r--  security/integrity/ima/Makefile | 3
-rw-r--r--  security/integrity/ima/ima.h | 156
-rw-r--r--  security/integrity/ima/ima_api.c | 269
-rw-r--r--  security/integrity/ima/ima_appraise.c | 387
-rw-r--r--  security/integrity/ima/ima_crypto.c | 253
-rw-r--r--  security/integrity/ima/ima_fs.c | 96
-rw-r--r--  security/integrity/ima/ima_init.c | 57
-rw-r--r--  security/integrity/ima/ima_main.c | 264
-rw-r--r--  security/integrity/ima/ima_policy.c | 395
-rw-r--r--  security/integrity/ima/ima_queue.c | 25
-rw-r--r--  security/integrity/ima/ima_template.c | 190
-rw-r--r--  security/integrity/ima/ima_template_lib.c | 342
-rw-r--r--  security/integrity/ima/ima_template_lib.h | 49
-rw-r--r--  security/integrity/integrity.h | 109
-rw-r--r--  security/integrity/integrity_audit.c (renamed from security/integrity/ima/ima_audit.c) | 29
-rw-r--r--  security/keys/Kconfig | 100
-rw-r--r--  security/keys/Makefile | 14
-rw-r--r--  security/keys/big_key.c | 207
-rw-r--r--  security/keys/compat.c | 20
-rw-r--r--  security/keys/encrypted-keys/encrypted.c | 18
-rw-r--r--  security/keys/gc.c | 139
-rw-r--r--  security/keys/internal.h | 110
-rw-r--r--  security/keys/key.c | 282
-rw-r--r--  security/keys/keyctl.c | 257
-rw-r--r--  security/keys/keyring.c | 1524
-rw-r--r--  security/keys/permission.c | 58
-rw-r--r--  security/keys/persistent.c | 167
-rw-r--r--  security/keys/proc.c | 64
-rw-r--r--  security/keys/process_keys.c | 258
-rw-r--r--  security/keys/request_key.c | 95
-rw-r--r--  security/keys/request_key_auth.c | 39
-rw-r--r--  security/keys/sysctl.c | 13
-rw-r--r--  security/keys/trusted.c | 76
-rw-r--r--  security/keys/user_defined.c | 32
-rw-r--r--  security/lsm_audit.c | 122
-rw-r--r--  security/security.c | 232
-rw-r--r--  security/selinux/avc.c | 216
-rw-r--r--  security/selinux/hooks.c | 1038
-rw-r--r--  security/selinux/include/avc.h | 114
-rw-r--r--  security/selinux/include/classmap.h | 6
-rw-r--r--  security/selinux/include/objsec.h | 13
-rw-r--r--  security/selinux/include/security.h | 24
-rw-r--r--  security/selinux/include/xfrm.h | 61
-rw-r--r--  security/selinux/netif.c | 8
-rw-r--r--  security/selinux/netlabel.c | 37
-rw-r--r--  security/selinux/netlink.c | 23
-rw-r--r--  security/selinux/netnode.c | 11
-rw-r--r--  security/selinux/netport.c | 6
-rw-r--r--  security/selinux/nlmsgtab.c | 22
-rw-r--r--  security/selinux/selinuxfs.c | 225
-rw-r--r--  security/selinux/ss/constraint.h | 1
-rw-r--r--  security/selinux/ss/context.h | 20
-rw-r--r--  security/selinux/ss/ebitmap.c | 20
-rw-r--r--  security/selinux/ss/ebitmap.h | 10
-rw-r--r--  security/selinux/ss/hashtab.c | 3
-rw-r--r--  security/selinux/ss/mls.c | 48
-rw-r--r--  security/selinux/ss/mls_types.h | 2
-rw-r--r--  security/selinux/ss/policydb.c | 170
-rw-r--r--  security/selinux/ss/policydb.h | 25
-rw-r--r--  security/selinux/ss/services.c | 158
-rw-r--r--  security/selinux/xfrm.c | 491
-rw-r--r--  security/smack/Kconfig | 6
-rw-r--r--  security/smack/smack.h | 229
-rw-r--r--  security/smack/smack_access.c | 372
-rw-r--r--  security/smack/smack_lsm.c | 1448
-rw-r--r--  security/smack/smackfs.c | 1479
-rw-r--r--  security/tomoyo/audit.c | 27
-rw-r--r--  security/tomoyo/common.c | 98
-rw-r--r--  security/tomoyo/common.h | 15
-rw-r--r--  security/tomoyo/condition.c | 20
-rw-r--r--  security/tomoyo/domain.c | 4
-rw-r--r--  security/tomoyo/load_policy.c | 2
-rw-r--r--  security/tomoyo/mount.c | 43
-rw-r--r--  security/tomoyo/realpath.c | 4
-rw-r--r--  security/tomoyo/securityfs_if.c | 12
-rw-r--r--  security/tomoyo/tomoyo.c | 27
-rw-r--r--  security/tomoyo/util.c | 9
-rw-r--r--  security/yama/Kconfig | 21
-rw-r--r--  security/yama/Makefile | 3
-rw-r--r--  security/yama/yama_lsm.c | 443
131 files changed, 12769 insertions, 5525 deletions
diff --git a/security/Kconfig b/security/Kconfig
index 51bd5a0b69a..beb86b500ad 100644
--- a/security/Kconfig
+++ b/security/Kconfig
@@ -4,73 +4,7 @@
menu "Security options"
-config KEYS
- bool "Enable access key retention support"
- help
- This option provides support for retaining authentication tokens and
- access keys in the kernel.
-
- It also includes provision of methods by which such keys might be
- associated with a process so that network filesystems, encryption
- support and the like can find them.
-
- Furthermore, a special type of key is available that acts as keyring:
- a searchable sequence of keys. Each process is equipped with access
- to five standard keyrings: UID-specific, GID-specific, session,
- process and thread.
-
- If you are unsure as to whether this is required, answer N.
-
-config TRUSTED_KEYS
- tristate "TRUSTED KEYS"
- depends on KEYS && TCG_TPM
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_SHA1
- help
- This option provides support for creating, sealing, and unsealing
- keys in the kernel. Trusted keys are random number symmetric keys,
- generated and RSA-sealed by the TPM. The TPM only unseals the keys,
- if the boot PCRs and other criteria match. Userspace will only ever
- see encrypted blobs.
-
- If you are unsure as to whether this is required, answer N.
-
-config ENCRYPTED_KEYS
- tristate "ENCRYPTED KEYS"
- depends on KEYS
- select CRYPTO
- select CRYPTO_HMAC
- select CRYPTO_AES
- select CRYPTO_CBC
- select CRYPTO_SHA256
- select CRYPTO_RNG
- help
- This option provides support for create/encrypting/decrypting keys
- in the kernel. Encrypted keys are kernel generated random numbers,
- which are encrypted/decrypted with a 'master' symmetric key. The
- 'master' key can be either a trusted-key or user-key type.
- Userspace only ever sees/stores encrypted blobs.
-
- If you are unsure as to whether this is required, answer N.
-
-config KEYS_DEBUG_PROC_KEYS
- bool "Enable the /proc/keys file by which keys may be viewed"
- depends on KEYS
- help
- This option turns on support for the /proc/keys file - through which
- can be listed all the keys on the system that are viewable by the
- reading process.
-
- The only keys included in the list are those that grant View
- permission to the reading process whether or not it possesses them.
- Note that LSM security checks are still performed, and may further
- filter out keys that the current process is not authorised to view.
-
- Only key attributes are listed here; key payloads are not included in
- the resulting table.
-
- If you are unsure as to whether this is required, answer N.
+source security/keys/Kconfig
config SECURITY_DMESG_RESTRICT
bool "Restrict unprivileged access to the kernel syslog"
@@ -169,7 +103,7 @@ config INTEL_TXT
config LSM_MMAP_MIN_ADDR
int "Low address space for LSM to protect from user allocation"
depends on SECURITY && SECURITY_SELINUX
- default 32768 if ARM
+ default 32768 if ARM || (ARM64 && COMPAT)
default 65536
help
This is the portion of low virtual memory which should be protected
@@ -187,6 +121,7 @@ source security/selinux/Kconfig
source security/smack/Kconfig
source security/tomoyo/Kconfig
source security/apparmor/Kconfig
+source security/yama/Kconfig
source security/integrity/Kconfig
@@ -196,6 +131,7 @@ choice
default DEFAULT_SECURITY_SMACK if SECURITY_SMACK
default DEFAULT_SECURITY_TOMOYO if SECURITY_TOMOYO
default DEFAULT_SECURITY_APPARMOR if SECURITY_APPARMOR
+ default DEFAULT_SECURITY_YAMA if SECURITY_YAMA
default DEFAULT_SECURITY_DAC
help
@@ -214,6 +150,9 @@ choice
config DEFAULT_SECURITY_APPARMOR
bool "AppArmor" if SECURITY_APPARMOR=y
+ config DEFAULT_SECURITY_YAMA
+ bool "Yama" if SECURITY_YAMA=y
+
config DEFAULT_SECURITY_DAC
bool "Unix Discretionary Access Controls"
@@ -225,6 +164,7 @@ config DEFAULT_SECURITY
default "smack" if DEFAULT_SECURITY_SMACK
default "tomoyo" if DEFAULT_SECURITY_TOMOYO
default "apparmor" if DEFAULT_SECURITY_APPARMOR
+ default "yama" if DEFAULT_SECURITY_YAMA
default "" if DEFAULT_SECURITY_DAC
endmenu
diff --git a/security/Makefile b/security/Makefile
index a5e502f8a05..05f1c934d74 100644
--- a/security/Makefile
+++ b/security/Makefile
@@ -7,6 +7,7 @@ subdir-$(CONFIG_SECURITY_SELINUX) += selinux
subdir-$(CONFIG_SECURITY_SMACK) += smack
subdir-$(CONFIG_SECURITY_TOMOYO) += tomoyo
subdir-$(CONFIG_SECURITY_APPARMOR) += apparmor
+subdir-$(CONFIG_SECURITY_YAMA) += yama
# always enable default capabilities
obj-y += commoncap.o
@@ -15,14 +16,14 @@ obj-$(CONFIG_MMU) += min_addr.o
# Object file lists
obj-$(CONFIG_SECURITY) += security.o capability.o
obj-$(CONFIG_SECURITYFS) += inode.o
-# Must precede capability.o in order to stack properly.
-obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
-obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
+obj-$(CONFIG_SECURITY_SELINUX) += selinux/
+obj-$(CONFIG_SECURITY_SMACK) += smack/
obj-$(CONFIG_AUDIT) += lsm_audit.o
-obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/built-in.o
-obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/built-in.o
+obj-$(CONFIG_SECURITY_TOMOYO) += tomoyo/
+obj-$(CONFIG_SECURITY_APPARMOR) += apparmor/
+obj-$(CONFIG_SECURITY_YAMA) += yama/
obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
# Object integrity file lists
subdir-$(CONFIG_INTEGRITY) += integrity
-obj-$(CONFIG_INTEGRITY) += integrity/built-in.o
+obj-$(CONFIG_INTEGRITY) += integrity/
diff --git a/security/apparmor/.gitignore b/security/apparmor/.gitignore
index 4d995aeaebc..9cdec70d72b 100644
--- a/security/apparmor/.gitignore
+++ b/security/apparmor/.gitignore
@@ -1,6 +1,5 @@
#
# Generated include files
#
-af_names.h
capability_names.h
rlim_names.h
diff --git a/security/apparmor/Kconfig b/security/apparmor/Kconfig
index 9b9013b2e32..d49c53960b6 100644
--- a/security/apparmor/Kconfig
+++ b/security/apparmor/Kconfig
@@ -29,3 +29,15 @@ config SECURITY_APPARMOR_BOOTPARAM_VALUE
boot.
If you are unsure how to answer this question, answer 1.
+
+config SECURITY_APPARMOR_HASH
+ bool "SHA1 hash of loaded profiles"
+ depends on SECURITY_APPARMOR
+ depends on CRYPTO
+ select CRYPTO_SHA1
+ default y
+
+ help
+ This option selects whether sha1 hashing is done against loaded
+ profiles and exported for inspection to user space via the apparmor
+ filesystem.
diff --git a/security/apparmor/Makefile b/security/apparmor/Makefile
index 2dafe50a2e2..d693df87481 100644
--- a/security/apparmor/Makefile
+++ b/security/apparmor/Makefile
@@ -5,6 +5,7 @@ obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
apparmor-y := apparmorfs.o audit.o capability.o context.o ipc.o lib.o match.o \
path.o domain.o policy.o policy_unpack.o procattr.o lsm.o \
resource.o sid.o file.o
+apparmor-$(CONFIG_SECURITY_APPARMOR_HASH) += crypto.o
clean-files := capability_names.h rlim_names.h
@@ -15,10 +16,14 @@ clean-files := capability_names.h rlim_names.h
# to
# [1] = "dac_override",
quiet_cmd_make-caps = GEN $@
-cmd_make-caps = echo "static const char *capability_names[] = {" > $@ ;\
+cmd_make-caps = echo "static const char *const capability_names[] = {" > $@ ;\
sed $< >>$@ -r -n -e '/CAP_FS_MASK/d' \
-e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/[\2] = "\L\1",/p';\
- echo "};" >> $@
+ echo "};" >> $@ ;\
+ echo -n '\#define AA_FS_CAPS_MASK "' >> $@ ;\
+ sed $< -r -n -e '/CAP_FS_MASK/d' \
+ -e 's/^\#define[ \t]+CAP_([A-Z0-9_]+)[ \t]+([0-9]+)/\L\1/p' | \
+ tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
# Build a lower case string table of rlimit names.
@@ -28,25 +33,38 @@ cmd_make-caps = echo "static const char *capability_names[] = {" > $@ ;\
# [RLIMIT_STACK] = "stack",
#
# and build a second integer table (with the second sed cmd), that maps
-# RLIMIT defines to the order defined in asm-generic/resource.h Thi is
+# RLIMIT defines to the order defined in asm-generic/resource.h This is
# required by policy load to map policy ordering of RLIMITs to internal
# ordering for architectures that redefine an RLIMIT.
# Transforms lines from
# #define RLIMIT_STACK 3 /* max stack size */
# to
# RLIMIT_STACK,
+#
+# and build the securityfs entries for the mapping.
+# Transforms lines from
+# #define RLIMIT_FSIZE 1 /* Maximum filesize */
+# #define RLIMIT_STACK 3 /* max stack size */
+# to
+# #define AA_FS_RLIMIT_MASK "fsize stack"
quiet_cmd_make-rlim = GEN $@
-cmd_make-rlim = echo "static const char *rlim_names[] = {" > $@ ;\
+cmd_make-rlim = echo "static const char *const rlim_names[RLIM_NLIMITS] = {" \
+ > $@ ;\
sed $< >> $@ -r -n \
-e 's/^\# ?define[ \t]+(RLIMIT_([A-Z0-9_]+)).*/[\1] = "\L\2",/p';\
echo "};" >> $@ ;\
- echo "static const int rlim_map[] = {" >> $@ ;\
+ echo "static const int rlim_map[RLIM_NLIMITS] = {" >> $@ ;\
sed -r -n "s/^\# ?define[ \t]+(RLIMIT_[A-Z0-9_]+).*/\1,/p" $< >> $@ ;\
- echo "};" >> $@
+ echo "};" >> $@ ; \
+ echo -n '\#define AA_FS_RLIMIT_MASK "' >> $@ ;\
+ sed -r -n 's/^\# ?define[ \t]+RLIMIT_([A-Z0-9_]+).*/\L\1/p' $< | \
+ tr '\n' ' ' | sed -e 's/ $$/"\n/' >> $@
$(obj)/capability.o : $(obj)/capability_names.h
$(obj)/resource.o : $(obj)/rlim_names.h
-$(obj)/capability_names.h : $(srctree)/include/linux/capability.h
+$(obj)/capability_names.h : $(srctree)/include/uapi/linux/capability.h \
+ $(src)/Makefile
$(call cmd,make-caps)
-$(obj)/rlim_names.h : $(srctree)/include/asm-generic/resource.h
+$(obj)/rlim_names.h : $(srctree)/include/uapi/asm-generic/resource.h \
+ $(src)/Makefile
$(call cmd,make-rlim)
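
For orientation, the sed/tr pipelines above boil the UAPI capability list down to a small generated header. The following is a rough, abbreviated sketch of what capability_names.h ends up containing (index values taken from include/uapi/linux/capability.h, CAP_FS_MASK excluded by the sed rule; the full mask string is elided here), not the verbatim generated file:

/* Sketch of the generated capability_names.h (abbreviated, illustrative). */
static const char *const capability_names[] = {
	[0] = "chown",
	[1] = "dac_override",
	[2] = "dac_read_search",
	/* ... one lower-case entry per CAP_* define ... */
};

/* Space-separated list exported through the features/caps/mask file. */
#define AA_FS_CAPS_MASK "chown dac_override dac_read_search ..."
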
diff --git a/security/apparmor/apparmorfs.c b/security/apparmor/apparmorfs.c
index e39df6d4377..7db9954f1af 100644
--- a/security/apparmor/apparmorfs.c
+++ b/security/apparmor/apparmorfs.c
@@ -12,18 +12,62 @@
* License.
*/
+#include <linux/ctype.h>
#include <linux/security.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>
#include <linux/namei.h>
+#include <linux/capability.h>
+#include <linux/rcupdate.h>
#include "include/apparmor.h"
#include "include/apparmorfs.h"
#include "include/audit.h"
#include "include/context.h"
+#include "include/crypto.h"
#include "include/policy.h"
+#include "include/resource.h"
+
+/**
+ * aa_mangle_name - mangle a profile name to std profile layout form
+ * @name: profile name to mangle (NOT NULL)
+ * @target: buffer to store mangled name, same length as @name (MAYBE NULL)
+ *
+ * Returns: length of mangled name
+ */
+static int mangle_name(char *name, char *target)
+{
+ char *t = target;
+
+ while (*name == '/' || *name == '.')
+ name++;
+
+ if (target) {
+ for (; *name; name++) {
+ if (*name == '/')
+ *(t)++ = '.';
+ else if (isspace(*name))
+ *(t)++ = '_';
+ else if (isalnum(*name) || strchr("._-", *name))
+ *(t)++ = *name;
+ }
+
+ *t = 0;
+ } else {
+ int len = 0;
+ for (; *name; name++) {
+ if (isalnum(*name) || isspace(*name) ||
+ strchr("/._-", *name))
+ len++;
+ }
+
+ return len;
+ }
+
+ return t - target;
+}
/**
* aa_simple_write_to_buffer - common routine for getting policy from user
@@ -142,38 +186,733 @@ static const struct file_operations aa_fs_profile_remove = {
.llseek = default_llseek,
};
-/** Base file system setup **/
+static int aa_fs_seq_show(struct seq_file *seq, void *v)
+{
+ struct aa_fs_entry *fs_file = seq->private;
+
+ if (!fs_file)
+ return 0;
+
+ switch (fs_file->v_type) {
+ case AA_FS_TYPE_BOOLEAN:
+ seq_printf(seq, "%s\n", fs_file->v.boolean ? "yes" : "no");
+ break;
+ case AA_FS_TYPE_STRING:
+ seq_printf(seq, "%s\n", fs_file->v.string);
+ break;
+ case AA_FS_TYPE_U64:
+ seq_printf(seq, "%#08lx\n", fs_file->v.u64);
+ break;
+ default:
+ /* Ignore unprintable entry types. */
+ break;
+ }
+
+ return 0;
+}
+
+static int aa_fs_seq_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aa_fs_seq_show, inode->i_private);
+}
+
+const struct file_operations aa_fs_seq_file_ops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+static int aa_fs_seq_profile_open(struct inode *inode, struct file *file,
+ int (*show)(struct seq_file *, void *))
+{
+ struct aa_replacedby *r = aa_get_replacedby(inode->i_private);
+ int error = single_open(file, show, r);
+
+ if (error) {
+ file->private_data = NULL;
+ aa_put_replacedby(r);
+ }
+
+ return error;
+}
+
+static int aa_fs_seq_profile_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = (struct seq_file *) file->private_data;
+ if (seq)
+ aa_put_replacedby(seq->private);
+ return single_release(inode, file);
+}
+
+static int aa_fs_seq_profname_show(struct seq_file *seq, void *v)
+{
+ struct aa_replacedby *r = seq->private;
+ struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+ seq_printf(seq, "%s\n", profile->base.name);
+ aa_put_profile(profile);
+
+ return 0;
+}
+
+static int aa_fs_seq_profname_open(struct inode *inode, struct file *file)
+{
+ return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profname_show);
+}
+
+static const struct file_operations aa_fs_profname_fops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_profname_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_profmode_show(struct seq_file *seq, void *v)
+{
+ struct aa_replacedby *r = seq->private;
+ struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+ seq_printf(seq, "%s\n", aa_profile_mode_names[profile->mode]);
+ aa_put_profile(profile);
+
+ return 0;
+}
+
+static int aa_fs_seq_profmode_open(struct inode *inode, struct file *file)
+{
+ return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profmode_show);
+}
+
+static const struct file_operations aa_fs_profmode_fops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_profmode_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_profattach_show(struct seq_file *seq, void *v)
+{
+ struct aa_replacedby *r = seq->private;
+ struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+ if (profile->attach)
+ seq_printf(seq, "%s\n", profile->attach);
+ else if (profile->xmatch)
+ seq_puts(seq, "<unknown>\n");
+ else
+ seq_printf(seq, "%s\n", profile->base.name);
+ aa_put_profile(profile);
+
+ return 0;
+}
+
+static int aa_fs_seq_profattach_open(struct inode *inode, struct file *file)
+{
+ return aa_fs_seq_profile_open(inode, file, aa_fs_seq_profattach_show);
+}
+
+static const struct file_operations aa_fs_profattach_fops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_profattach_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = aa_fs_seq_profile_release,
+};
+
+static int aa_fs_seq_hash_show(struct seq_file *seq, void *v)
+{
+ struct aa_replacedby *r = seq->private;
+ struct aa_profile *profile = aa_get_profile_rcu(&r->profile);
+ unsigned int i, size = aa_hash_size();
+
+ if (profile->hash) {
+ for (i = 0; i < size; i++)
+ seq_printf(seq, "%.2x", profile->hash[i]);
+ seq_puts(seq, "\n");
+ }
+
+ return 0;
+}
+
+static int aa_fs_seq_hash_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, aa_fs_seq_hash_show, inode->i_private);
+}
+
+static const struct file_operations aa_fs_seq_hash_fops = {
+ .owner = THIS_MODULE,
+ .open = aa_fs_seq_hash_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+/** fns to setup dynamic per profile/namespace files **/
+void __aa_fs_profile_rmdir(struct aa_profile *profile)
+{
+ struct aa_profile *child;
+ int i;
+
+ if (!profile)
+ return;
+
+ list_for_each_entry(child, &profile->base.profiles, base.list)
+ __aa_fs_profile_rmdir(child);
+
+ for (i = AAFS_PROF_SIZEOF - 1; i >= 0; --i) {
+ struct aa_replacedby *r;
+ if (!profile->dents[i])
+ continue;
+
+ r = profile->dents[i]->d_inode->i_private;
+ securityfs_remove(profile->dents[i]);
+ aa_put_replacedby(r);
+ profile->dents[i] = NULL;
+ }
+}
+
+void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+ struct aa_profile *new)
+{
+ int i;
+
+ for (i = 0; i < AAFS_PROF_SIZEOF; i++) {
+ new->dents[i] = old->dents[i];
+ old->dents[i] = NULL;
+ }
+}
+
+static struct dentry *create_profile_file(struct dentry *dir, const char *name,
+ struct aa_profile *profile,
+ const struct file_operations *fops)
+{
+ struct aa_replacedby *r = aa_get_replacedby(profile->replacedby);
+ struct dentry *dent;
+
+ dent = securityfs_create_file(name, S_IFREG | 0444, dir, r, fops);
+ if (IS_ERR(dent))
+ aa_put_replacedby(r);
+
+ return dent;
+}
+
+/* requires lock be held */
+int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent)
+{
+ struct aa_profile *child;
+ struct dentry *dent = NULL, *dir;
+ int error;
+
+ if (!parent) {
+ struct aa_profile *p;
+ p = aa_deref_parent(profile);
+ dent = prof_dir(p);
+ /* adding to parent that previously didn't have children */
+ dent = securityfs_create_dir("profiles", dent);
+ if (IS_ERR(dent))
+ goto fail;
+ prof_child_dir(p) = parent = dent;
+ }
+
+ if (!profile->dirname) {
+ int len, id_len;
+ len = mangle_name(profile->base.name, NULL);
+ id_len = snprintf(NULL, 0, ".%ld", profile->ns->uniq_id);
+
+ profile->dirname = kmalloc(len + id_len + 1, GFP_KERNEL);
+ if (!profile->dirname)
+ goto fail;
+
+ mangle_name(profile->base.name, profile->dirname);
+ sprintf(profile->dirname + len, ".%ld", profile->ns->uniq_id++);
+ }
+
+ dent = securityfs_create_dir(profile->dirname, parent);
+ if (IS_ERR(dent))
+ goto fail;
+ prof_dir(profile) = dir = dent;
+
+ dent = create_profile_file(dir, "name", profile, &aa_fs_profname_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_NAME] = dent;
+
+ dent = create_profile_file(dir, "mode", profile, &aa_fs_profmode_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_MODE] = dent;
+
+ dent = create_profile_file(dir, "attach", profile,
+ &aa_fs_profattach_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_ATTACH] = dent;
+
+ if (profile->hash) {
+ dent = create_profile_file(dir, "sha1", profile,
+ &aa_fs_seq_hash_fops);
+ if (IS_ERR(dent))
+ goto fail;
+ profile->dents[AAFS_PROF_HASH] = dent;
+ }
+
+ list_for_each_entry(child, &profile->base.profiles, base.list) {
+ error = __aa_fs_profile_mkdir(child, prof_child_dir(profile));
+ if (error)
+ goto fail2;
+ }
+
+ return 0;
+
+fail:
+ error = PTR_ERR(dent);
+
+fail2:
+ __aa_fs_profile_rmdir(profile);
+
+ return error;
+}
+
+void __aa_fs_namespace_rmdir(struct aa_namespace *ns)
+{
+ struct aa_namespace *sub;
+ struct aa_profile *child;
+ int i;
+
+ if (!ns)
+ return;
+
+ list_for_each_entry(child, &ns->base.profiles, base.list)
+ __aa_fs_profile_rmdir(child);
+
+ list_for_each_entry(sub, &ns->sub_ns, base.list) {
+ mutex_lock(&sub->lock);
+ __aa_fs_namespace_rmdir(sub);
+ mutex_unlock(&sub->lock);
+ }
+
+ for (i = AAFS_NS_SIZEOF - 1; i >= 0; --i) {
+ securityfs_remove(ns->dents[i]);
+ ns->dents[i] = NULL;
+ }
+}
+
+int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent,
+ const char *name)
+{
+ struct aa_namespace *sub;
+ struct aa_profile *child;
+ struct dentry *dent, *dir;
+ int error;
+
+ if (!name)
+ name = ns->base.name;
+
+ dent = securityfs_create_dir(name, parent);
+ if (IS_ERR(dent))
+ goto fail;
+ ns_dir(ns) = dir = dent;
+
+ dent = securityfs_create_dir("profiles", dir);
+ if (IS_ERR(dent))
+ goto fail;
+ ns_subprofs_dir(ns) = dent;
+
+ dent = securityfs_create_dir("namespaces", dir);
+ if (IS_ERR(dent))
+ goto fail;
+ ns_subns_dir(ns) = dent;
+
+ list_for_each_entry(child, &ns->base.profiles, base.list) {
+ error = __aa_fs_profile_mkdir(child, ns_subprofs_dir(ns));
+ if (error)
+ goto fail2;
+ }
+
+ list_for_each_entry(sub, &ns->sub_ns, base.list) {
+ mutex_lock(&sub->lock);
+ error = __aa_fs_namespace_mkdir(sub, ns_subns_dir(ns), NULL);
+ mutex_unlock(&sub->lock);
+ if (error)
+ goto fail2;
+ }
+
+ return 0;
-static struct dentry *aa_fs_dentry __initdata;
+fail:
+ error = PTR_ERR(dent);
-static void __init aafs_remove(const char *name)
+fail2:
+ __aa_fs_namespace_rmdir(ns);
+
+ return error;
+}
+
+
+#define list_entry_next(pos, member) \
+ list_entry(pos->member.next, typeof(*pos), member)
+#define list_entry_is_head(pos, head, member) (&pos->member == (head))
+
+/**
+ * __next_namespace - find the next namespace to list
+ * @root: root namespace to stop search at (NOT NULL)
+ * @ns: current ns position (NOT NULL)
+ *
+ * Find the next namespace from @ns under @root and handle all locking needed
+ * while switching current namespace.
+ *
+ * Returns: next namespace or NULL if at last namespace under @root
+ * Requires: ns->parent->lock to be held
+ * NOTE: will not unlock root->lock
+ */
+static struct aa_namespace *__next_namespace(struct aa_namespace *root,
+ struct aa_namespace *ns)
{
- struct dentry *dentry;
+ struct aa_namespace *parent, *next;
+
+ /* is next namespace a child */
+ if (!list_empty(&ns->sub_ns)) {
+ next = list_first_entry(&ns->sub_ns, typeof(*ns), base.list);
+ mutex_lock(&next->lock);
+ return next;
+ }
+
+ /* check if the next ns is a sibling, parent, gp, .. */
+ parent = ns->parent;
+ while (ns != root) {
+ mutex_unlock(&ns->lock);
+ next = list_entry_next(ns, base.list);
+ if (!list_entry_is_head(next, &parent->sub_ns, base.list)) {
+ mutex_lock(&next->lock);
+ return next;
+ }
+ ns = parent;
+ parent = parent->parent;
+ }
+
+ return NULL;
+}
- dentry = lookup_one_len(name, aa_fs_dentry, strlen(name));
- if (!IS_ERR(dentry)) {
- securityfs_remove(dentry);
- dput(dentry);
+/**
+ * __first_profile - find the first profile in a namespace
+ * @root: namespace that is root of profiles being displayed (NOT NULL)
+ * @ns: namespace to start in (NOT NULL)
+ *
+ * Returns: unrefcounted profile or NULL if no profile
+ * Requires: profile->ns.lock to be held
+ */
+static struct aa_profile *__first_profile(struct aa_namespace *root,
+ struct aa_namespace *ns)
+{
+ for (; ns; ns = __next_namespace(root, ns)) {
+ if (!list_empty(&ns->base.profiles))
+ return list_first_entry(&ns->base.profiles,
+ struct aa_profile, base.list);
}
+ return NULL;
}
/**
- * aafs_create - create an entry in the apparmor filesystem
- * @name: name of the entry (NOT NULL)
- * @mask: file permission mask of the file
- * @fops: file operations for the file (NOT NULL)
+ * __next_profile - step to the next profile in a profile tree
+ * @profile: current profile in tree (NOT NULL)
*
- * Used aafs_remove to remove entries created with this fn.
+ * Perform a depth first traversal on the profile tree in a namespace
+ *
+ * Returns: next profile or NULL if done
+ * Requires: profile->ns.lock to be held
*/
-static int __init aafs_create(const char *name, umode_t mask,
- const struct file_operations *fops)
+static struct aa_profile *__next_profile(struct aa_profile *p)
{
- struct dentry *dentry;
+ struct aa_profile *parent;
+ struct aa_namespace *ns = p->ns;
+
+ /* is next profile a child */
+ if (!list_empty(&p->base.profiles))
+ return list_first_entry(&p->base.profiles, typeof(*p),
+ base.list);
+
+ /* is next profile a sibling, parent sibling, gp, sibling, .. */
+ parent = rcu_dereference_protected(p->parent,
+ mutex_is_locked(&p->ns->lock));
+ while (parent) {
+ p = list_entry_next(p, base.list);
+ if (!list_entry_is_head(p, &parent->base.profiles, base.list))
+ return p;
+ p = parent;
+ parent = rcu_dereference_protected(parent->parent,
+ mutex_is_locked(&parent->ns->lock));
+ }
- dentry = securityfs_create_file(name, S_IFREG | mask, aa_fs_dentry,
- NULL, fops);
+ /* is next another profile in the namespace */
+ p = list_entry_next(p, base.list);
+ if (!list_entry_is_head(p, &ns->base.profiles, base.list))
+ return p;
- return IS_ERR(dentry) ? PTR_ERR(dentry) : 0;
+ return NULL;
+}
+
+/**
+ * next_profile - step to the next profile in where ever it may be
+ * @root: root namespace (NOT NULL)
+ * @profile: current profile (NOT NULL)
+ *
+ * Returns: next profile or NULL if there isn't one
+ */
+static struct aa_profile *next_profile(struct aa_namespace *root,
+ struct aa_profile *profile)
+{
+ struct aa_profile *next = __next_profile(profile);
+ if (next)
+ return next;
+
+ /* finished all profiles in namespace move to next namespace */
+ return __first_profile(root, __next_namespace(root, profile->ns));
+}
+
+/**
+ * p_start - start a depth first traversal of profile tree
+ * @f: seq_file to fill
+ * @pos: current position
+ *
+ * Returns: first profile under current namespace or NULL if none found
+ *
+ * acquires first ns->lock
+ */
+static void *p_start(struct seq_file *f, loff_t *pos)
+{
+ struct aa_profile *profile = NULL;
+ struct aa_namespace *root = aa_current_profile()->ns;
+ loff_t l = *pos;
+ f->private = aa_get_namespace(root);
+
+
+ /* find the first profile */
+ mutex_lock(&root->lock);
+ profile = __first_profile(root, root);
+
+ /* skip to position */
+ for (; profile && l > 0; l--)
+ profile = next_profile(root, profile);
+
+ return profile;
+}
+
+/**
+ * p_next - read the next profile entry
+ * @f: seq_file to fill
+ * @p: profile previously returned
+ * @pos: current position
+ *
+ * Returns: next profile after @p or NULL if none
+ *
+ * may acquire/release locks in namespace tree as necessary
+ */
+static void *p_next(struct seq_file *f, void *p, loff_t *pos)
+{
+ struct aa_profile *profile = p;
+ struct aa_namespace *ns = f->private;
+ (*pos)++;
+
+ return next_profile(ns, profile);
+}
+
+/**
+ * p_stop - stop depth first traversal
+ * @f: seq_file we are filling
+ * @p: the last profile written
+ *
+ * Release all locking done by p_start/p_next on namespace tree
+ */
+static void p_stop(struct seq_file *f, void *p)
+{
+ struct aa_profile *profile = p;
+ struct aa_namespace *root = f->private, *ns;
+
+ if (profile) {
+ for (ns = profile->ns; ns && ns != root; ns = ns->parent)
+ mutex_unlock(&ns->lock);
+ }
+ mutex_unlock(&root->lock);
+ aa_put_namespace(root);
+}
+
+/**
+ * seq_show_profile - show a profile entry
+ * @f: seq_file to file
+ * @p: current position (profile) (NOT NULL)
+ *
+ * Returns: error on failure
+ */
+static int seq_show_profile(struct seq_file *f, void *p)
+{
+ struct aa_profile *profile = (struct aa_profile *)p;
+ struct aa_namespace *root = f->private;
+
+ if (profile->ns != root)
+ seq_printf(f, ":%s://", aa_ns_name(root, profile->ns));
+ seq_printf(f, "%s (%s)\n", profile->base.hname,
+ aa_profile_mode_names[profile->mode]);
+
+ return 0;
+}
+
+static const struct seq_operations aa_fs_profiles_op = {
+ .start = p_start,
+ .next = p_next,
+ .stop = p_stop,
+ .show = seq_show_profile,
+};
+
+static int profiles_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &aa_fs_profiles_op);
+}
+
+static int profiles_release(struct inode *inode, struct file *file)
+{
+ return seq_release(inode, file);
+}
+
+static const struct file_operations aa_fs_profiles_fops = {
+ .open = profiles_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = profiles_release,
+};
+
+
+/** Base file system setup **/
+static struct aa_fs_entry aa_fs_entry_file[] = {
+ AA_FS_FILE_STRING("mask", "create read write exec append mmap_exec " \
+ "link lock"),
+ { }
+};
+
+static struct aa_fs_entry aa_fs_entry_domain[] = {
+ AA_FS_FILE_BOOLEAN("change_hat", 1),
+ AA_FS_FILE_BOOLEAN("change_hatv", 1),
+ AA_FS_FILE_BOOLEAN("change_onexec", 1),
+ AA_FS_FILE_BOOLEAN("change_profile", 1),
+ { }
+};
+
+static struct aa_fs_entry aa_fs_entry_policy[] = {
+ AA_FS_FILE_BOOLEAN("set_load", 1),
+ {}
+};
+
+static struct aa_fs_entry aa_fs_entry_features[] = {
+ AA_FS_DIR("policy", aa_fs_entry_policy),
+ AA_FS_DIR("domain", aa_fs_entry_domain),
+ AA_FS_DIR("file", aa_fs_entry_file),
+ AA_FS_FILE_U64("capability", VFS_CAP_FLAGS_MASK),
+ AA_FS_DIR("rlimit", aa_fs_entry_rlimit),
+ AA_FS_DIR("caps", aa_fs_entry_caps),
+ { }
+};
+
+static struct aa_fs_entry aa_fs_entry_apparmor[] = {
+ AA_FS_FILE_FOPS(".load", 0640, &aa_fs_profile_load),
+ AA_FS_FILE_FOPS(".replace", 0640, &aa_fs_profile_replace),
+ AA_FS_FILE_FOPS(".remove", 0640, &aa_fs_profile_remove),
+ AA_FS_FILE_FOPS("profiles", 0640, &aa_fs_profiles_fops),
+ AA_FS_DIR("features", aa_fs_entry_features),
+ { }
+};
+
+static struct aa_fs_entry aa_fs_entry =
+ AA_FS_DIR("apparmor", aa_fs_entry_apparmor);
+
+/**
+ * aafs_create_file - create a file entry in the apparmor securityfs
+ * @fs_file: aa_fs_entry to build an entry for (NOT NULL)
+ * @parent: the parent dentry in the securityfs
+ *
+ * Use aafs_remove_file to remove entries created with this fn.
+ */
+static int __init aafs_create_file(struct aa_fs_entry *fs_file,
+ struct dentry *parent)
+{
+ int error = 0;
+
+ fs_file->dentry = securityfs_create_file(fs_file->name,
+ S_IFREG | fs_file->mode,
+ parent, fs_file,
+ fs_file->file_ops);
+ if (IS_ERR(fs_file->dentry)) {
+ error = PTR_ERR(fs_file->dentry);
+ fs_file->dentry = NULL;
+ }
+ return error;
+}
+
+static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir);
+/**
+ * aafs_create_dir - recursively create a directory entry in the securityfs
+ * @fs_dir: aa_fs_entry (and all child entries) to build (NOT NULL)
+ * @parent: the parent dentry in the securityfs
+ *
+ * Use aafs_remove_dir to remove entries created with this fn.
+ */
+static int __init aafs_create_dir(struct aa_fs_entry *fs_dir,
+ struct dentry *parent)
+{
+ struct aa_fs_entry *fs_file;
+ struct dentry *dir;
+ int error;
+
+ dir = securityfs_create_dir(fs_dir->name, parent);
+ if (IS_ERR(dir))
+ return PTR_ERR(dir);
+ fs_dir->dentry = dir;
+
+ for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
+ if (fs_file->v_type == AA_FS_TYPE_DIR)
+ error = aafs_create_dir(fs_file, fs_dir->dentry);
+ else
+ error = aafs_create_file(fs_file, fs_dir->dentry);
+ if (error)
+ goto failed;
+ }
+
+ return 0;
+
+failed:
+ aafs_remove_dir(fs_dir);
+
+ return error;
+}
+
+/**
+ * aafs_remove_file - drop a single file entry in the apparmor securityfs
+ * @fs_file: aa_fs_entry to detach from the securityfs (NOT NULL)
+ */
+static void __init aafs_remove_file(struct aa_fs_entry *fs_file)
+{
+ if (!fs_file->dentry)
+ return;
+
+ securityfs_remove(fs_file->dentry);
+ fs_file->dentry = NULL;
+}
+
+/**
+ * aafs_remove_dir - recursively drop a directory entry from the securityfs
+ * @fs_dir: aa_fs_entry (and all child entries) to detach (NOT NULL)
+ */
+static void __init aafs_remove_dir(struct aa_fs_entry *fs_dir)
+{
+ struct aa_fs_entry *fs_file;
+
+ for (fs_file = fs_dir->v.files; fs_file && fs_file->name; ++fs_file) {
+ if (fs_file->v_type == AA_FS_TYPE_DIR)
+ aafs_remove_dir(fs_file);
+ else
+ aafs_remove_file(fs_file);
+ }
+
+ aafs_remove_file(fs_dir);
}
/**
@@ -183,14 +922,7 @@ static int __init aafs_create(const char *name, umode_t mask,
*/
void __init aa_destroy_aafs(void)
{
- if (aa_fs_dentry) {
- aafs_remove(".remove");
- aafs_remove(".replace");
- aafs_remove(".load");
-
- securityfs_remove(aa_fs_dentry);
- aa_fs_dentry = NULL;
- }
+ aafs_remove_dir(&aa_fs_entry);
}
/**
@@ -207,25 +939,18 @@ static int __init aa_create_aafs(void)
if (!apparmor_initialized)
return 0;
- if (aa_fs_dentry) {
+ if (aa_fs_entry.dentry) {
AA_ERROR("%s: AppArmor securityfs already exists\n", __func__);
return -EEXIST;
}
- aa_fs_dentry = securityfs_create_dir("apparmor", NULL);
- if (IS_ERR(aa_fs_dentry)) {
- error = PTR_ERR(aa_fs_dentry);
- aa_fs_dentry = NULL;
- goto error;
- }
-
- error = aafs_create(".load", 0640, &aa_fs_profile_load);
- if (error)
- goto error;
- error = aafs_create(".replace", 0640, &aa_fs_profile_replace);
+ /* Populate fs tree. */
+ error = aafs_create_dir(&aa_fs_entry, NULL);
if (error)
goto error;
- error = aafs_create(".remove", 0640, &aa_fs_profile_remove);
+
+ error = __aa_fs_namespace_mkdir(root_ns, aa_fs_entry.dentry,
+ "policy");
if (error)
goto error;
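
To make the directory naming done by __aa_fs_profile_mkdir() above concrete, here is a small userspace sketch of the same mangling rules (leading '/' and '.' stripped, '/' mapped to '.', whitespace to '_', anything outside [alnum._-] dropped); the kernel additionally appends a ".<uniq_id>" suffix. This is illustrative only, not the in-kernel function:

/* Userspace sketch of the profile-name mangling used for securityfs
 * directory names above; illustrative, not the kernel code itself. */
#include <ctype.h>
#include <stdio.h>
#include <string.h>

static void mangle(const char *name, char *out)
{
	while (*name == '/' || *name == '.')
		name++;
	for (; *name; name++) {
		if (*name == '/')
			*out++ = '.';
		else if (isspace((unsigned char)*name))
			*out++ = '_';
		else if (isalnum((unsigned char)*name) || strchr("._-", *name))
			*out++ = *name;
	}
	*out = '\0';
}

int main(void)
{
	char buf[256];

	mangle("/usr/sbin/ntpd", buf);	/* -> "usr.sbin.ntpd" */
	printf("%s\n", buf);
	return 0;
}
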
diff --git a/security/apparmor/audit.c b/security/apparmor/audit.c
index f3fafedd798..89c78658031 100644
--- a/security/apparmor/audit.c
+++ b/security/apparmor/audit.c
@@ -19,7 +19,7 @@
#include "include/audit.h"
#include "include/policy.h"
-const char *op_table[] = {
+const char *const op_table[] = {
"null",
"sysctl",
@@ -73,7 +73,7 @@ const char *op_table[] = {
"profile_remove"
};
-const char *audit_mode_names[] = {
+const char *const audit_mode_names[] = {
"normal",
"quiet_denied",
"quiet",
@@ -81,14 +81,15 @@ const char *audit_mode_names[] = {
"all"
};
-static char *aa_audit_type[] = {
+static const char *const aa_audit_type[] = {
"AUDIT",
"ALLOWED",
"DENIED",
"HINT",
"STATUS",
"ERROR",
- "KILLED"
+ "KILLED",
+ "AUTO"
};
/*
@@ -110,32 +111,26 @@ static char *aa_audit_type[] = {
static void audit_pre(struct audit_buffer *ab, void *ca)
{
struct common_audit_data *sa = ca;
- struct task_struct *tsk = sa->tsk ? sa->tsk : current;
if (aa_g_audit_header) {
audit_log_format(ab, "apparmor=");
- audit_log_string(ab, aa_audit_type[sa->aad.type]);
+ audit_log_string(ab, aa_audit_type[sa->aad->type]);
}
- if (sa->aad.op) {
+ if (sa->aad->op) {
audit_log_format(ab, " operation=");
- audit_log_string(ab, op_table[sa->aad.op]);
+ audit_log_string(ab, op_table[sa->aad->op]);
}
- if (sa->aad.info) {
+ if (sa->aad->info) {
audit_log_format(ab, " info=");
- audit_log_string(ab, sa->aad.info);
- if (sa->aad.error)
- audit_log_format(ab, " error=%d", sa->aad.error);
+ audit_log_string(ab, sa->aad->info);
+ if (sa->aad->error)
+ audit_log_format(ab, " error=%d", sa->aad->error);
}
- if (sa->aad.profile) {
- struct aa_profile *profile = sa->aad.profile;
- pid_t pid;
- rcu_read_lock();
- pid = rcu_dereference(tsk->real_parent)->pid;
- rcu_read_unlock();
- audit_log_format(ab, " parent=%d", pid);
+ if (sa->aad->profile) {
+ struct aa_profile *profile = sa->aad->profile;
if (profile->ns != root_ns) {
audit_log_format(ab, " namespace=");
audit_log_untrustedstring(ab, profile->ns->base.hname);
@@ -144,9 +139,9 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
audit_log_untrustedstring(ab, profile->base.hname);
}
- if (sa->aad.name) {
+ if (sa->aad->name) {
audit_log_format(ab, " name=");
- audit_log_untrustedstring(ab, sa->aad.name);
+ audit_log_untrustedstring(ab, sa->aad->name);
}
}
@@ -158,10 +153,8 @@ static void audit_pre(struct audit_buffer *ab, void *ca)
void aa_audit_msg(int type, struct common_audit_data *sa,
void (*cb) (struct audit_buffer *, void *))
{
- sa->aad.type = type;
- sa->lsm_pre_audit = audit_pre;
- sa->lsm_post_audit = cb;
- common_lsm_audit(sa);
+ sa->aad->type = type;
+ common_lsm_audit(sa, audit_pre, cb);
}
/**
@@ -183,7 +176,7 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
BUG_ON(!profile);
if (type == AUDIT_APPARMOR_AUTO) {
- if (likely(!sa->aad.error)) {
+ if (likely(!sa->aad->error)) {
if (AUDIT_MODE(profile) != AUDIT_ALL)
return 0;
type = AUDIT_APPARMOR_AUDIT;
@@ -195,21 +188,22 @@ int aa_audit(int type, struct aa_profile *profile, gfp_t gfp,
if (AUDIT_MODE(profile) == AUDIT_QUIET ||
(type == AUDIT_APPARMOR_DENIED &&
AUDIT_MODE(profile) == AUDIT_QUIET))
- return sa->aad.error;
+ return sa->aad->error;
if (KILL_MODE(profile) && type == AUDIT_APPARMOR_DENIED)
type = AUDIT_APPARMOR_KILL;
if (!unconfined(profile))
- sa->aad.profile = profile;
+ sa->aad->profile = profile;
aa_audit_msg(type, sa, cb);
- if (sa->aad.type == AUDIT_APPARMOR_KILL)
- (void)send_sig_info(SIGKILL, NULL, sa->tsk ? sa->tsk : current);
+ if (sa->aad->type == AUDIT_APPARMOR_KILL)
+ (void)send_sig_info(SIGKILL, NULL,
+ sa->u.tsk ? sa->u.tsk : current);
- if (sa->aad.type == AUDIT_APPARMOR_ALLOWED)
- return complain_error(sa->aad.error);
+ if (sa->aad->type == AUDIT_APPARMOR_ALLOWED)
+ return complain_error(sa->aad->error);
- return sa->aad.error;
+ return sa->aad->error;
}
diff --git a/security/apparmor/capability.c b/security/apparmor/capability.c
index 9982c48def4..1101c6f64bb 100644
--- a/security/apparmor/capability.c
+++ b/security/apparmor/capability.c
@@ -27,6 +27,11 @@
*/
#include "capability_names.h"
+struct aa_fs_entry aa_fs_entry_caps[] = {
+ AA_FS_FILE_STRING("mask", AA_FS_CAPS_MASK),
+ { }
+};
+
struct audit_cache {
struct aa_profile *profile;
kernel_cap_t caps;
@@ -48,8 +53,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
/**
* audit_caps - audit a capability
- * @profile: profile confining task (NOT NULL)
- * @task: task capability test was performed against (NOT NULL)
+ * @profile: profile being tested for confinement (NOT NULL)
* @cap: capability tested
* @error: error code returned by test
*
@@ -58,17 +62,17 @@ static void audit_cb(struct audit_buffer *ab, void *va)
*
* Returns: 0 or sa->error on success, error code on failure
*/
-static int audit_caps(struct aa_profile *profile, struct task_struct *task,
- int cap, int error)
+static int audit_caps(struct aa_profile *profile, int cap, int error)
{
struct audit_cache *ent;
int type = AUDIT_APPARMOR_AUTO;
struct common_audit_data sa;
- COMMON_AUDIT_DATA_INIT(&sa, CAP);
- sa.tsk = task;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_CAP;
+ sa.aad = &aad;
sa.u.cap = cap;
- sa.aad.op = OP_CAPABLE;
- sa.aad.error = error;
+ sa.aad->op = OP_CAPABLE;
+ sa.aad->error = error;
if (likely(!error)) {
/* test if auditing is being forced */
@@ -117,8 +121,7 @@ static int profile_capable(struct aa_profile *profile, int cap)
/**
* aa_capable - test permission to use capability
- * @task: task doing capability test against (NOT NULL)
- * @profile: profile confining @task (NOT NULL)
+ * @profile: profile being tested against (NOT NULL)
* @cap: capability to be tested
* @audit: whether an audit record should be generated
*
@@ -126,8 +129,7 @@ static int profile_capable(struct aa_profile *profile, int cap)
*
* Returns: 0 on success, or else an error code.
*/
-int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
- int audit)
+int aa_capable(struct aa_profile *profile, int cap, int audit)
{
int error = profile_capable(profile, cap);
@@ -137,5 +139,5 @@ int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
return error;
}
- return audit_caps(profile, task, cap, error);
+ return audit_caps(profile, cap, error);
}
diff --git a/security/apparmor/context.c b/security/apparmor/context.c
index 8a9b5027c81..3064c6ced87 100644
--- a/security/apparmor/context.c
+++ b/security/apparmor/context.c
@@ -69,6 +69,23 @@ void aa_dup_task_context(struct aa_task_cxt *new, const struct aa_task_cxt *old)
}
/**
+ * aa_get_task_profile - Get another task's profile
+ * @task: task to query (NOT NULL)
+ *
+ * Returns: counted reference to @task's profile
+ */
+struct aa_profile *aa_get_task_profile(struct task_struct *task)
+{
+ struct aa_profile *p;
+
+ rcu_read_lock();
+ p = aa_get_profile(__aa_task_profile(task));
+ rcu_read_unlock();
+
+ return p;
+}
+
+/**
* aa_replace_current_profile - replace the current tasks profiles
* @profile: new profile (NOT NULL)
*
@@ -76,7 +93,7 @@ void aa_dup_task_context(struct aa_task_cxt *new, const struct aa_task_cxt *old)
*/
int aa_replace_current_profile(struct aa_profile *profile)
{
- struct aa_task_cxt *cxt = current_cred()->security;
+ struct aa_task_cxt *cxt = current_cxt();
struct cred *new;
BUG_ON(!profile);
@@ -87,21 +104,17 @@ int aa_replace_current_profile(struct aa_profile *profile)
if (!new)
return -ENOMEM;
- cxt = new->security;
- if (unconfined(profile) || (cxt->profile->ns != profile->ns)) {
+ cxt = cred_cxt(new);
+ if (unconfined(profile) || (cxt->profile->ns != profile->ns))
/* if switching to unconfined or a different profile namespace
* clear out context state
*/
- aa_put_profile(cxt->previous);
- aa_put_profile(cxt->onexec);
- cxt->previous = NULL;
- cxt->onexec = NULL;
- cxt->token = 0;
- }
+ aa_clear_task_cxt_trans(cxt);
+
/* be careful switching cxt->profile, when racing replacement it
- * is possible that cxt->profile->replacedby is the reference keeping
- * @profile valid, so make sure to get its reference before dropping
- * the reference on cxt->profile */
+ * is possible that cxt->profile->replacedby->profile is the reference
+ * keeping @profile valid, so make sure to get its reference before
+ * dropping the reference on cxt->profile */
aa_get_profile(profile);
aa_put_profile(cxt->profile);
cxt->profile = profile;
@@ -123,7 +136,7 @@ int aa_set_current_onexec(struct aa_profile *profile)
if (!new)
return -ENOMEM;
- cxt = new->security;
+ cxt = cred_cxt(new);
aa_get_profile(profile);
aa_put_profile(cxt->onexec);
cxt->onexec = profile;
@@ -150,7 +163,7 @@ int aa_set_current_hat(struct aa_profile *profile, u64 token)
return -ENOMEM;
BUG_ON(!profile);
- cxt = new->security;
+ cxt = cred_cxt(new);
if (!cxt->previous) {
/* transfer refcount */
cxt->previous = cxt->profile;
@@ -162,7 +175,7 @@ int aa_set_current_hat(struct aa_profile *profile, u64 token)
abort_creds(new);
return -EACCES;
}
- cxt->profile = aa_get_profile(aa_newest_version(profile));
+ cxt->profile = aa_get_newest_profile(profile);
/* clear exec on switching context */
aa_put_profile(cxt->onexec);
cxt->onexec = NULL;
@@ -187,7 +200,7 @@ int aa_restore_previous_profile(u64 token)
if (!new)
return -ENOMEM;
- cxt = new->security;
+ cxt = cred_cxt(new);
if (cxt->token != token) {
abort_creds(new);
return -EACCES;
@@ -199,17 +212,10 @@ int aa_restore_previous_profile(u64 token)
}
aa_put_profile(cxt->profile);
- cxt->profile = aa_newest_version(cxt->previous);
+ cxt->profile = aa_get_newest_profile(cxt->previous);
BUG_ON(!cxt->profile);
- if (unlikely(cxt->profile != cxt->previous)) {
- aa_get_profile(cxt->profile);
- aa_put_profile(cxt->previous);
- }
/* clear exec && prev information when restoring to previous context */
- cxt->previous = NULL;
- cxt->token = 0;
- aa_put_profile(cxt->onexec);
- cxt->onexec = NULL;
+ aa_clear_task_cxt_trans(cxt);
commit_creds(new);
return 0;
diff --git a/security/apparmor/crypto.c b/security/apparmor/crypto.c
new file mode 100644
index 00000000000..532471d0b3a
--- /dev/null
+++ b/security/apparmor/crypto.c
@@ -0,0 +1,95 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * Fns to provide a checksum of policy that has been loaded; this can be
+ * compared to userspace policy compiles to check that loaded policy is what
+ * it should be.
+ */
+
+#include <crypto/hash.h>
+
+#include "include/apparmor.h"
+#include "include/crypto.h"
+
+static unsigned int apparmor_hash_size;
+
+static struct crypto_shash *apparmor_tfm;
+
+unsigned int aa_hash_size(void)
+{
+ return apparmor_hash_size;
+}
+
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+ size_t len)
+{
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(apparmor_tfm)];
+ } desc;
+ int error = -ENOMEM;
+ u32 le32_version = cpu_to_le32(version);
+
+ if (!apparmor_tfm)
+ return 0;
+
+ profile->hash = kzalloc(apparmor_hash_size, GFP_KERNEL);
+ if (!profile->hash)
+ goto fail;
+
+ desc.shash.tfm = apparmor_tfm;
+ desc.shash.flags = 0;
+
+ error = crypto_shash_init(&desc.shash);
+ if (error)
+ goto fail;
+ error = crypto_shash_update(&desc.shash, (u8 *) &le32_version, 4);
+ if (error)
+ goto fail;
+ error = crypto_shash_update(&desc.shash, (u8 *) start, len);
+ if (error)
+ goto fail;
+ error = crypto_shash_final(&desc.shash, profile->hash);
+ if (error)
+ goto fail;
+
+ return 0;
+
+fail:
+ kfree(profile->hash);
+ profile->hash = NULL;
+
+ return error;
+}
+
+static int __init init_profile_hash(void)
+{
+ struct crypto_shash *tfm;
+
+ if (!apparmor_initialized)
+ return 0;
+
+ tfm = crypto_alloc_shash("sha1", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm)) {
+ int error = PTR_ERR(tfm);
+ AA_ERROR("failed to setup profile sha1 hashing: %d\n", error);
+ return error;
+ }
+ apparmor_tfm = tfm;
+ apparmor_hash_size = crypto_shash_digestsize(apparmor_tfm);
+
+ aa_info_message("AppArmor sha1 policy hashing enabled");
+
+ return 0;
+}
+
+late_initcall(init_profile_hash);
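
As a rough cross-check of the per-profile sha1 file, the digest computed by aa_calc_profile_hash() above is SHA-1 over a 4-byte little-endian version word followed by the raw policy blob. The sketch below mirrors that in userspace with OpenSSL; exactly which version value and byte range the kernel passes in comes from the policy unpack path, which is not part of this hunk, so treat those inputs as assumptions:

/* Userspace sketch mirroring aa_calc_profile_hash(): SHA-1 over
 * le32(version) followed by the policy blob.  The inputs used in
 * main() are placeholders, not the kernel's actual values. */
#include <openssl/sha.h>
#include <stdint.h>
#include <stdio.h>

static void policy_hash(uint32_t version, const void *blob, size_t len,
			unsigned char out[SHA_DIGEST_LENGTH])
{
	unsigned char le[4] = {
		version & 0xff, (version >> 8) & 0xff,
		(version >> 16) & 0xff, (version >> 24) & 0xff,
	};
	SHA_CTX ctx;

	SHA1_Init(&ctx);
	SHA1_Update(&ctx, le, sizeof(le));	/* cpu_to_le32(version) */
	SHA1_Update(&ctx, blob, len);		/* raw profile data */
	SHA1_Final(out, &ctx);
}

int main(void)
{
	unsigned char md[SHA_DIGEST_LENGTH];
	const char blob[] = "example policy bytes";	/* placeholder data */
	int i;

	policy_hash(5, blob, sizeof(blob) - 1, md);	/* 5: hypothetical version */
	for (i = 0; i < SHA_DIGEST_LENGTH; i++)
		printf("%.2x", md[i]);	/* same hex form as the sha1 file */
	printf("\n");
	return 0;
}
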
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index c1e18ba5bdc..452567d3a08 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -50,40 +50,34 @@ void aa_free_domain_entries(struct aa_domain *domain)
/**
* may_change_ptraced_domain - check if can change profile on ptraced task
- * @task: task we want to change profile of (NOT NULL)
* @to_profile: profile to change to (NOT NULL)
*
- * Check if the task is ptraced and if so if the tracing task is allowed
+ * Check if current is ptraced and if so if the tracing task is allowed
* to trace the new domain
*
* Returns: %0 or error if change not allowed
*/
-static int may_change_ptraced_domain(struct task_struct *task,
- struct aa_profile *to_profile)
+static int may_change_ptraced_domain(struct aa_profile *to_profile)
{
struct task_struct *tracer;
- const struct cred *cred = NULL;
struct aa_profile *tracerp = NULL;
int error = 0;
rcu_read_lock();
- tracer = ptrace_parent(task);
- if (tracer) {
+ tracer = ptrace_parent(current);
+ if (tracer)
/* released below */
- cred = get_task_cred(tracer);
- tracerp = aa_cred_profile(cred);
- }
+ tracerp = aa_get_task_profile(tracer);
/* not ptraced */
if (!tracer || unconfined(tracerp))
goto out;
- error = aa_may_ptrace(tracer, tracerp, to_profile, PTRACE_MODE_ATTACH);
+ error = aa_may_ptrace(tracerp, to_profile, PTRACE_MODE_ATTACH);
out:
rcu_read_unlock();
- if (cred)
- put_cred(cred);
+ aa_put_profile(tracerp);
return error;
}
@@ -148,7 +142,7 @@ static struct aa_profile *__attach_match(const char *name,
int len = 0;
struct aa_profile *profile, *candidate = NULL;
- list_for_each_entry(profile, head, base.list) {
+ list_for_each_entry_rcu(profile, head, base.list) {
if (profile->flags & PFLAG_NULL)
continue;
if (profile->xmatch && profile->xmatch_len > len) {
@@ -181,9 +175,9 @@ static struct aa_profile *find_attach(struct aa_namespace *ns,
{
struct aa_profile *profile;
- read_lock(&ns->lock);
+ rcu_read_lock();
profile = aa_get_profile(__attach_match(name, list));
- read_unlock(&ns->lock);
+ rcu_read_unlock();
return profile;
}
@@ -349,8 +343,8 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
unsigned int state;
struct file_perms perms = {};
struct path_cond cond = {
- bprm->file->f_path.dentry->d_inode->i_uid,
- bprm->file->f_path.dentry->d_inode->i_mode
+ file_inode(bprm->file)->i_uid,
+ file_inode(bprm->file)->i_mode
};
const char *name = NULL, *target = NULL, *info = NULL;
int error = cap_bprm_set_creds(bprm);
@@ -360,10 +354,10 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
if (bprm->cred_prepared)
return 0;
- cxt = bprm->cred->security;
+ cxt = cred_cxt(bprm->cred);
BUG_ON(!cxt);
- profile = aa_get_profile(aa_newest_version(cxt->profile));
+ profile = aa_get_newest_profile(cxt->profile);
/*
* get the namespace from the replacement profile as replacement
* can change the namespace
@@ -372,13 +366,12 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
state = profile->file.start;
/* buffer freed below, name is pointer into buffer */
- error = aa_get_name(&bprm->file->f_path, profile->path_flags, &buffer,
- &name);
+ error = aa_path_name(&bprm->file->f_path, profile->path_flags, &buffer,
+ &name, &info);
if (error) {
- if (profile->flags &
- (PFLAG_IX_ON_NAME_ERROR | PFLAG_UNCONFINED))
+ if (unconfined(profile) ||
+ (profile->flags & PFLAG_IX_ON_NAME_ERROR))
error = 0;
- info = "Exec failed name resolution";
name = bprm->filename;
goto audit;
}
@@ -395,6 +388,11 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
new_profile = find_attach(ns, &ns->base.profiles, name);
if (!new_profile)
goto cleanup;
+ /*
+ * NOTE: Domain transitions from unconfined are allowed
+ * even when no_new_privs is set because this always results
+ * in a further reduction of permissions.
+ */
goto apply;
}
@@ -411,12 +409,13 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
* exec\0change_profile
*/
state = aa_dfa_null_transition(profile->file.dfa, state);
- cp = change_profile_perms(profile, cxt->onexec->ns, name,
+ cp = change_profile_perms(profile, cxt->onexec->ns,
+ cxt->onexec->base.name,
AA_MAY_ONEXEC, state);
if (!(cp.allow & AA_MAY_ONEXEC))
goto audit;
- new_profile = aa_get_profile(aa_newest_version(cxt->onexec));
+ new_profile = aa_get_newest_profile(cxt->onexec);
goto apply;
}
@@ -433,11 +432,13 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
new_profile = aa_get_profile(profile);
goto x_clear;
} else if (perms.xindex & AA_X_UNCONFINED) {
- new_profile = aa_get_profile(ns->unconfined);
+ new_profile = aa_get_newest_profile(ns->unconfined);
info = "ux fallback";
} else {
error = -ENOENT;
info = "profile not found";
+ /* remove MAY_EXEC to audit as failure */
+ perms.allow &= ~MAY_EXEC;
}
}
} else if (COMPLAIN_MODE(profile)) {
@@ -455,6 +456,16 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
/* fail exec */
error = -EACCES;
+ /*
+ * Policy has specified a domain transition, if no_new_privs then
+ * fail the exec.
+ */
+ if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS) {
+ aa_put_profile(new_profile);
+ error = -EPERM;
+ goto cleanup;
+ }
+
if (!new_profile)
goto audit;
@@ -464,7 +475,7 @@ int apparmor_bprm_set_creds(struct linux_binprm *bprm)
}
if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
- error = may_change_ptraced_domain(current, new_profile);
+ error = may_change_ptraced_domain(new_profile);
if (error) {
aa_put_profile(new_profile);
goto audit;
@@ -499,11 +510,7 @@ x_clear:
cxt->profile = new_profile;
/* clear out all temporary/transitional state from the context */
- aa_put_profile(cxt->previous);
- aa_put_profile(cxt->onexec);
- cxt->previous = NULL;
- cxt->onexec = NULL;
- cxt->token = 0;
+ aa_clear_task_cxt_trans(cxt);
audit:
error = aa_audit_file(profile, &perms, GFP_KERNEL, OP_EXEC, MAY_EXEC,
@@ -542,7 +549,7 @@ int apparmor_bprm_secureexec(struct linux_binprm *bprm)
void apparmor_bprm_committing_creds(struct linux_binprm *bprm)
{
struct aa_profile *profile = __aa_current_profile();
- struct aa_task_cxt *new_cxt = bprm->cred->security;
+ struct aa_task_cxt *new_cxt = cred_cxt(bprm->cred);
/* bail out if unconfined or not changing profile */
if ((new_cxt->profile == profile) ||
@@ -609,9 +616,17 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
const char *target = NULL, *info = NULL;
int error = 0;
+ /*
+ * Fail explicitly requested domain transitions if no_new_privs.
+ * There is no exception for unconfined as change_hat is not
+ * available.
+ */
+ if (current->no_new_privs)
+ return -EPERM;
+
/* released below */
cred = get_current_cred();
- cxt = cred->security;
+ cxt = cred_cxt(cred);
profile = aa_cred_profile(cred);
previous_profile = cxt->previous;
@@ -624,7 +639,10 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
if (count) {
/* attempting to change into a new hat or switch to a sibling */
struct aa_profile *root;
- root = PROFILE_IS_HAT(profile) ? profile->parent : profile;
+ if (PROFILE_IS_HAT(profile))
+ root = aa_get_profile_rcu(&profile->parent);
+ else
+ root = aa_get_profile(profile);
/* find first matching hat */
for (i = 0; i < count && !hat; i++)
@@ -636,6 +654,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
error = -ECHILD;
else
error = -ENOENT;
+ aa_put_profile(root);
goto out;
}
@@ -650,6 +669,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
/* freed below */
name = new_compound_name(root->base.hname, hats[0]);
+ aa_put_profile(root);
target = name;
/* released below */
hat = aa_new_null_profile(profile, 1);
@@ -659,6 +679,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
goto audit;
}
} else {
+ aa_put_profile(root);
target = hat->base.hname;
if (!PROFILE_IS_HAT(hat)) {
info = "target not hat";
@@ -667,7 +688,7 @@ int aa_change_hat(const char *hats[], int count, u64 token, bool permtest)
}
}
- error = may_change_ptraced_domain(current, hat);
+ error = may_change_ptraced_domain(hat);
if (error) {
info = "ptraced";
error = -EPERM;
@@ -698,7 +719,7 @@ audit:
if (!permtest)
error = aa_audit_file(profile, &perms, GFP_KERNEL,
OP_CHANGE_HAT, AA_MAY_CHANGEHAT, NULL,
- target, 0, info, error);
+ target, GLOBAL_ROOT_UID, info, error);
out:
aa_put_profile(hat);
@@ -727,7 +748,6 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
bool permtest)
{
const struct cred *cred;
- struct aa_task_cxt *cxt;
struct aa_profile *profile, *target = NULL;
struct aa_namespace *ns = NULL;
struct file_perms perms = {};
@@ -747,9 +767,20 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
}
cred = get_current_cred();
- cxt = cred->security;
profile = aa_cred_profile(cred);
+ /*
+ * Fail explicitly requested domain transitions if no_new_privs
+ * and not unconfined.
+ * Domain transitions from unconfined are allowed even when
+ * no_new_privs is set because this always results in a reduction
+ * of permissions.
+ */
+ if (current->no_new_privs && !unconfined(profile)) {
+ put_cred(cred);
+ return -EPERM;
+ }
+
if (ns_name) {
/* released below */
ns = aa_find_namespace(profile->ns, ns_name);
@@ -796,7 +827,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
}
/* check if tracing task is allowed to trace target domain */
- error = may_change_ptraced_domain(current, target);
+ error = may_change_ptraced_domain(target);
if (error) {
info = "ptrace prevents transition";
goto audit;
@@ -813,7 +844,7 @@ int aa_change_profile(const char *ns_name, const char *hname, bool onexec,
audit:
if (!permtest)
error = aa_audit_file(profile, &perms, GFP_KERNEL, op, request,
- name, hname, 0, info, error);
+ name, hname, GLOBAL_ROOT_UID, info, error);
aa_put_namespace(ns);
aa_put_profile(target);
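As a quick illustration (not part of the patch), the no_new_privs gate added to aa_change_hat() and aa_change_profile() above boils down to the following check; the helper name is hypothetical and it relies only on the unconfined() test from the policy.h changes later in this diff.

/* Hypothetical helper: refuse explicitly requested transitions under
 * no_new_privs unless the task is unconfined, since leaving unconfined
 * can only reduce permissions. */
static int nnp_refuses_transition(struct aa_profile *profile)
{
	if (current->no_new_privs && !unconfined(profile))
		return -EPERM;
	return 0;
}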
diff --git a/security/apparmor/file.c b/security/apparmor/file.c
index 7312db74121..fdaa50cb187 100644
--- a/security/apparmor/file.c
+++ b/security/apparmor/file.c
@@ -65,24 +65,26 @@ static void audit_file_mask(struct audit_buffer *ab, u32 mask)
static void file_audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
- uid_t fsuid = current_fsuid();
+ kuid_t fsuid = current_fsuid();
- if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) {
+ if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " requested_mask=");
- audit_file_mask(ab, sa->aad.fs.request);
+ audit_file_mask(ab, sa->aad->fs.request);
}
- if (sa->aad.fs.denied & AA_AUDIT_FILE_MASK) {
+ if (sa->aad->fs.denied & AA_AUDIT_FILE_MASK) {
audit_log_format(ab, " denied_mask=");
- audit_file_mask(ab, sa->aad.fs.denied);
+ audit_file_mask(ab, sa->aad->fs.denied);
}
- if (sa->aad.fs.request & AA_AUDIT_FILE_MASK) {
- audit_log_format(ab, " fsuid=%d", fsuid);
- audit_log_format(ab, " ouid=%d", sa->aad.fs.ouid);
+ if (sa->aad->fs.request & AA_AUDIT_FILE_MASK) {
+ audit_log_format(ab, " fsuid=%d",
+ from_kuid(&init_user_ns, fsuid));
+ audit_log_format(ab, " ouid=%d",
+ from_kuid(&init_user_ns, sa->aad->fs.ouid));
}
- if (sa->aad.fs.target) {
+ if (sa->aad->fs.target) {
audit_log_format(ab, " target=");
- audit_log_untrustedstring(ab, sa->aad.fs.target);
+ audit_log_untrustedstring(ab, sa->aad->fs.target);
}
}
@@ -103,49 +105,51 @@ static void file_audit_cb(struct audit_buffer *ab, void *va)
*/
int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
gfp_t gfp, int op, u32 request, const char *name,
- const char *target, uid_t ouid, const char *info, int error)
+ const char *target, kuid_t ouid, const char *info, int error)
{
int type = AUDIT_APPARMOR_AUTO;
struct common_audit_data sa;
- COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = op,
- sa.aad.fs.request = request;
- sa.aad.name = name;
- sa.aad.fs.target = target;
- sa.aad.fs.ouid = ouid;
- sa.aad.info = info;
- sa.aad.error = error;
-
- if (likely(!sa.aad.error)) {
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.op = op,
+ aad.fs.request = request;
+ aad.name = name;
+ aad.fs.target = target;
+ aad.fs.ouid = ouid;
+ aad.info = info;
+ aad.error = error;
+
+ if (likely(!sa.aad->error)) {
u32 mask = perms->audit;
if (unlikely(AUDIT_MODE(profile) == AUDIT_ALL))
mask = 0xffff;
/* mask off perms that are not being force audited */
- sa.aad.fs.request &= mask;
+ sa.aad->fs.request &= mask;
- if (likely(!sa.aad.fs.request))
+ if (likely(!sa.aad->fs.request))
return 0;
type = AUDIT_APPARMOR_AUDIT;
} else {
/* only report permissions that were denied */
- sa.aad.fs.request = sa.aad.fs.request & ~perms->allow;
+ sa.aad->fs.request = sa.aad->fs.request & ~perms->allow;
- if (sa.aad.fs.request & perms->kill)
+ if (sa.aad->fs.request & perms->kill)
type = AUDIT_APPARMOR_KILL;
/* quiet known rejects, assumes quiet and kill do not overlap */
- if ((sa.aad.fs.request & perms->quiet) &&
+ if ((sa.aad->fs.request & perms->quiet) &&
AUDIT_MODE(profile) != AUDIT_NOQUIET &&
AUDIT_MODE(profile) != AUDIT_ALL)
- sa.aad.fs.request &= ~perms->quiet;
+ sa.aad->fs.request &= ~perms->quiet;
- if (!sa.aad.fs.request)
- return COMPLAIN_MODE(profile) ? 0 : sa.aad.error;
+ if (!sa.aad->fs.request)
+ return COMPLAIN_MODE(profile) ? 0 : sa.aad->error;
}
- sa.aad.fs.denied = sa.aad.fs.request & ~perms->allow;
+ sa.aad->fs.denied = sa.aad->fs.request & ~perms->allow;
return aa_audit(type, profile, gfp, &sa, file_audit_cb);
}
@@ -173,8 +177,6 @@ static u32 map_old_perms(u32 old)
if (old & 0x40) /* AA_EXEC_MMAP */
new |= AA_EXEC_MMAP;
- new |= AA_MAY_META_READ;
-
return new;
}
@@ -201,7 +203,7 @@ static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
*/
perms.kill = 0;
- if (current_fsuid() == cond->uid) {
+ if (uid_eq(current_fsuid(), cond->uid)) {
perms.allow = map_old_perms(dfa_user_allow(dfa, state));
perms.audit = map_old_perms(dfa_user_audit(dfa, state));
perms.quiet = map_old_perms(dfa_user_quiet(dfa, state));
@@ -212,10 +214,13 @@ static struct file_perms compute_perms(struct aa_dfa *dfa, unsigned int state,
perms.quiet = map_old_perms(dfa_other_quiet(dfa, state));
perms.xindex = dfa_other_xindex(dfa, state);
}
+ perms.allow |= AA_MAY_META_READ;
/* change_profile wasn't determined by ownership in old mapping */
if (ACCEPT_TABLE(dfa)[state] & 0x80000000)
perms.allow |= AA_MAY_CHANGE_PROFILE;
+ if (ACCEPT_TABLE(dfa)[state] & 0x40000000)
+ perms.allow |= AA_MAY_ONEXEC;
return perms;
}
@@ -279,22 +284,16 @@ int aa_path_perm(int op, struct aa_profile *profile, struct path *path,
int error;
flags |= profile->path_flags | (S_ISDIR(cond->mode) ? PATH_IS_DIR : 0);
- error = aa_get_name(path, flags, &buffer, &name);
+ error = aa_path_name(path, flags, &buffer, &name, &info);
if (error) {
if (error == -ENOENT && is_deleted(path->dentry)) {
/* Access to open files that are deleted is
* given a pass (implicit delegation)
*/
error = 0;
+ info = NULL;
perms.allow = request;
- } else if (error == -ENOENT)
- info = "Failed name lookup - deleted entry";
- else if (error == -ESTALE)
- info = "Failed name lookup - disconnected path";
- else if (error == -ENAMETOOLONG)
- info = "Failed name lookup - name too long";
- else
- info = "Failed name lookup";
+ }
} else {
aa_str_perms(profile->file.dfa, profile->file.start, name, cond,
&perms);
@@ -365,12 +364,14 @@ int aa_path_link(struct aa_profile *profile, struct dentry *old_dentry,
lperms = nullperms;
/* buffer freed below, lname is pointer in buffer */
- error = aa_get_name(&link, profile->path_flags, &buffer, &lname);
+ error = aa_path_name(&link, profile->path_flags, &buffer, &lname,
+ &info);
if (error)
goto audit;
/* buffer2 freed below, tname is pointer in buffer2 */
- error = aa_get_name(&target, profile->path_flags, &buffer2, &tname);
+ error = aa_path_name(&target, profile->path_flags, &buffer2, &tname,
+ &info);
if (error)
goto audit;
@@ -448,8 +449,8 @@ int aa_file_perm(int op, struct aa_profile *profile, struct file *file,
u32 request)
{
struct path_cond cond = {
- .uid = file->f_path.dentry->d_inode->i_uid,
- .mode = file->f_path.dentry->d_inode->i_mode
+ .uid = file_inode(file)->i_uid,
+ .mode = file_inode(file)->i_mode
};
return aa_path_perm(op, profile, &file->f_path, PATH_DELEGATE_DELETED,
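A short, illustrative sketch of the kuid_t conventions these file.c hunks adopt: kernel uids are compared with uid_eq() and only converted via from_kuid() at the audit boundary. The helper below is hypothetical and assumes the init_user_ns mapping used above.

/* Illustration only: compare kuid_t values directly, convert for logging. */
static void log_fsuid_if_owner(struct audit_buffer *ab, kuid_t fsuid,
			       kuid_t ouid)
{
	if (uid_eq(fsuid, ouid))
		audit_log_format(ab, " fsuid=%d",
				 from_kuid(&init_user_ns, fsuid));
}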
diff --git a/security/apparmor/include/apparmor.h b/security/apparmor/include/apparmor.h
index df364956081..97130f88838 100644
--- a/security/apparmor/include/apparmor.h
+++ b/security/apparmor/include/apparmor.h
@@ -15,10 +15,24 @@
#ifndef __APPARMOR_H
#define __APPARMOR_H
+#include <linux/slab.h>
#include <linux/fs.h>
#include "match.h"
+/*
+ * Class of mediation types in the AppArmor policy db
+ */
+#define AA_CLASS_ENTRY 0
+#define AA_CLASS_UNKNOWN 1
+#define AA_CLASS_FILE 2
+#define AA_CLASS_CAP 3
+#define AA_CLASS_NET 4
+#define AA_CLASS_RLIMITS 5
+#define AA_CLASS_DOMAIN 6
+
+#define AA_CLASS_LAST AA_CLASS_DOMAIN
+
/* Control parameters settable through module/boot flags */
extern enum audit_mode aa_g_audit;
extern bool aa_g_audit_header;
@@ -51,9 +65,23 @@ extern int apparmor_initialized __initdata;
/* fn's in lib */
char *aa_split_fqname(char *args, char **ns_name);
void aa_info_message(const char *str);
-void *kvmalloc(size_t size);
-void kvfree(void *buffer);
+void *__aa_kvmalloc(size_t size, gfp_t flags);
+
+static inline void *kvmalloc(size_t size)
+{
+ return __aa_kvmalloc(size, 0);
+}
+
+static inline void *kvzalloc(size_t size)
+{
+ return __aa_kvmalloc(size, __GFP_ZERO);
+}
+/* returns 0 if kref not incremented */
+static inline int kref_get_not0(struct kref *kref)
+{
+ return atomic_inc_not_zero(&kref->refcount);
+}
/**
* aa_strneq - compare null terminated @str to a non null terminated substring
@@ -81,7 +109,7 @@ static inline unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
unsigned int start)
{
/* the null transition only needs the string's null terminator byte */
- return aa_dfa_match_len(dfa, start, "", 1);
+ return aa_dfa_next(dfa, start, 0);
}
static inline bool mediated_filesystem(struct inode *inode)
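A rough usage sketch of the new allocation wrappers (the copy helper below is hypothetical): kvzalloc() returns a zeroed buffer that may be vmalloc-backed for large sizes, so it must be released with the kv-aware kvfree() rather than plain kfree().

/* Hypothetical caller: load a user-supplied policy blob with kvzalloc()
 * and release it with kvfree() on failure. */
static void *copy_policy_blob(const void __user *ubuf, size_t size)
{
	void *blob = kvzalloc(size);

	if (blob && copy_from_user(blob, ubuf, size)) {
		kvfree(blob);
		blob = NULL;
	}
	return blob;
}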
diff --git a/security/apparmor/include/apparmorfs.h b/security/apparmor/include/apparmorfs.h
index cb1e93a114d..414e56878dd 100644
--- a/security/apparmor/include/apparmorfs.h
+++ b/security/apparmor/include/apparmorfs.h
@@ -15,6 +15,90 @@
#ifndef __AA_APPARMORFS_H
#define __AA_APPARMORFS_H
+enum aa_fs_type {
+ AA_FS_TYPE_BOOLEAN,
+ AA_FS_TYPE_STRING,
+ AA_FS_TYPE_U64,
+ AA_FS_TYPE_FOPS,
+ AA_FS_TYPE_DIR,
+};
+
+struct aa_fs_entry;
+
+struct aa_fs_entry {
+ const char *name;
+ struct dentry *dentry;
+ umode_t mode;
+ enum aa_fs_type v_type;
+ union {
+ bool boolean;
+ char *string;
+ unsigned long u64;
+ struct aa_fs_entry *files;
+ } v;
+ const struct file_operations *file_ops;
+};
+
+extern const struct file_operations aa_fs_seq_file_ops;
+
+#define AA_FS_FILE_BOOLEAN(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_FS_TYPE_BOOLEAN, .v.boolean = (_value), \
+ .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_STRING(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_FS_TYPE_STRING, .v.string = (_value), \
+ .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_U64(_name, _value) \
+ { .name = (_name), .mode = 0444, \
+ .v_type = AA_FS_TYPE_U64, .v.u64 = (_value), \
+ .file_ops = &aa_fs_seq_file_ops }
+#define AA_FS_FILE_FOPS(_name, _mode, _fops) \
+ { .name = (_name), .v_type = AA_FS_TYPE_FOPS, \
+ .mode = (_mode), .file_ops = (_fops) }
+#define AA_FS_DIR(_name, _value) \
+ { .name = (_name), .v_type = AA_FS_TYPE_DIR, .v.files = (_value) }
+
extern void __init aa_destroy_aafs(void);
+struct aa_profile;
+struct aa_namespace;
+
+enum aafs_ns_type {
+ AAFS_NS_DIR,
+ AAFS_NS_PROFS,
+ AAFS_NS_NS,
+ AAFS_NS_COUNT,
+ AAFS_NS_MAX_COUNT,
+ AAFS_NS_SIZE,
+ AAFS_NS_MAX_SIZE,
+ AAFS_NS_OWNER,
+ AAFS_NS_SIZEOF,
+};
+
+enum aafs_prof_type {
+ AAFS_PROF_DIR,
+ AAFS_PROF_PROFS,
+ AAFS_PROF_NAME,
+ AAFS_PROF_MODE,
+ AAFS_PROF_ATTACH,
+ AAFS_PROF_HASH,
+ AAFS_PROF_SIZEOF,
+};
+
+#define ns_dir(X) ((X)->dents[AAFS_NS_DIR])
+#define ns_subns_dir(X) ((X)->dents[AAFS_NS_NS])
+#define ns_subprofs_dir(X) ((X)->dents[AAFS_NS_PROFS])
+
+#define prof_dir(X) ((X)->dents[AAFS_PROF_DIR])
+#define prof_child_dir(X) ((X)->dents[AAFS_PROF_PROFS])
+
+void __aa_fs_profile_rmdir(struct aa_profile *profile);
+void __aa_fs_profile_migrate_dents(struct aa_profile *old,
+ struct aa_profile *new);
+int __aa_fs_profile_mkdir(struct aa_profile *profile, struct dentry *parent);
+void __aa_fs_namespace_rmdir(struct aa_namespace *ns);
+int __aa_fs_namespace_mkdir(struct aa_namespace *ns, struct dentry *parent,
+ const char *name);
+
#endif /* __AA_APPARMORFS_H */
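For illustration only, a securityfs subtree can now be described declaratively; the entry names below are made up, while the real tables (e.g. aa_fs_entry_caps[], aa_fs_entry_rlimit[]) are declared elsewhere in this series.

/* Hypothetical subtree built from the AA_FS_* macros above; each array
 * is terminated by an empty entry. */
static struct aa_fs_entry example_file_entries[] = {
	AA_FS_FILE_BOOLEAN("link", 1),
	AA_FS_FILE_U64("mask", 0xff),
	{ }
};

static struct aa_fs_entry example_features[] = {
	AA_FS_DIR("file", example_file_entries),
	AA_FS_FILE_STRING("version", "1.0"),
	{ }
};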
diff --git a/security/apparmor/include/audit.h b/security/apparmor/include/audit.h
index 1951786d32e..ba3dfd17f23 100644
--- a/security/apparmor/include/audit.h
+++ b/security/apparmor/include/audit.h
@@ -25,11 +25,8 @@
struct aa_profile;
-extern const char *audit_mode_names[];
+extern const char *const audit_mode_names[];
#define AUDIT_MAX_INDEX 5
-
-#define AUDIT_APPARMOR_AUTO 0 /* auto choose audit message type */
-
enum audit_mode {
AUDIT_NORMAL, /* follow normal auditing of accesses */
AUDIT_QUIET_DENIED, /* quiet all denied access messages */
@@ -45,10 +42,11 @@ enum audit_type {
AUDIT_APPARMOR_HINT,
AUDIT_APPARMOR_STATUS,
AUDIT_APPARMOR_ERROR,
- AUDIT_APPARMOR_KILL
+ AUDIT_APPARMOR_KILL,
+ AUDIT_APPARMOR_AUTO
};
-extern const char *op_table[];
+extern const char *const op_table[];
enum aa_ops {
OP_NULL,
@@ -104,7 +102,33 @@ enum aa_ops {
};
-/* define a short hand for apparmor_audit_data portion of common_audit_data */
+struct apparmor_audit_data {
+ int error;
+ int op;
+ int type;
+ void *profile;
+ const char *name;
+ const char *info;
+ union {
+ void *target;
+ struct {
+ long pos;
+ void *target;
+ } iface;
+ struct {
+ int rlim;
+ unsigned long max;
+ } rlim;
+ struct {
+ const char *target;
+ u32 request;
+ u32 denied;
+ kuid_t ouid;
+ } fs;
+ };
+};
+
+/* define a short hand for apparmor_audit_data structure */
#define aad apparmor_audit_data
void aa_audit_msg(int type, struct common_audit_data *sa,
diff --git a/security/apparmor/include/capability.h b/security/apparmor/include/capability.h
index c24d2959ea0..fc3fa381d85 100644
--- a/security/apparmor/include/capability.h
+++ b/security/apparmor/include/capability.h
@@ -4,7 +4,7 @@
* This file contains AppArmor capability mediation definitions.
*
* Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2013 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -17,6 +17,8 @@
#include <linux/sched.h>
+#include "apparmorfs.h"
+
struct aa_profile;
/* aa_caps - confinement data for capabilities
@@ -34,8 +36,9 @@ struct aa_caps {
kernel_cap_t extended;
};
-int aa_capable(struct task_struct *task, struct aa_profile *profile, int cap,
- int audit);
+extern struct aa_fs_entry aa_fs_entry_caps[];
+
+int aa_capable(struct aa_profile *profile, int cap, int audit);
static inline void aa_free_cap_rules(struct aa_caps *caps)
{
diff --git a/security/apparmor/include/context.h b/security/apparmor/include/context.h
index a9cbee4d9e4..6bf65798e5d 100644
--- a/security/apparmor/include/context.h
+++ b/security/apparmor/include/context.h
@@ -21,6 +21,9 @@
#include "policy.h"
+#define cred_cxt(X) (X)->security
+#define current_cxt() cred_cxt(current_cred())
+
/* struct aa_file_cxt - the AppArmor context the file was opened in
* @perms: the permission the file was opened with
*
@@ -80,23 +83,8 @@ int aa_replace_current_profile(struct aa_profile *profile);
int aa_set_current_onexec(struct aa_profile *profile);
int aa_set_current_hat(struct aa_profile *profile, u64 token);
int aa_restore_previous_profile(u64 cookie);
+struct aa_profile *aa_get_task_profile(struct task_struct *task);
-/**
- * __aa_task_is_confined - determine if @task has any confinement
- * @task: task to check confinement of (NOT NULL)
- *
- * If @task != current needs to be called in RCU safe critical section
- */
-static inline bool __aa_task_is_confined(struct task_struct *task)
-{
- struct aa_task_cxt *cxt = __task_cred(task)->security;
-
- BUG_ON(!cxt || !cxt->profile);
- if (unconfined(aa_newest_version(cxt->profile)))
- return 0;
-
- return 1;
-}
/**
* aa_cred_profile - obtain cred's profiles
@@ -108,9 +96,33 @@ static inline bool __aa_task_is_confined(struct task_struct *task)
*/
static inline struct aa_profile *aa_cred_profile(const struct cred *cred)
{
- struct aa_task_cxt *cxt = cred->security;
+ struct aa_task_cxt *cxt = cred_cxt(cred);
BUG_ON(!cxt || !cxt->profile);
- return aa_newest_version(cxt->profile);
+ return cxt->profile;
+}
+
+/**
+ * __aa_task_profile - retrieve another task's profile
+ * @task: task to query (NOT NULL)
+ *
+ * Returns: @task's profile without incrementing its ref count
+ *
+ * If @task != current needs to be called in RCU safe critical section
+ */
+static inline struct aa_profile *__aa_task_profile(struct task_struct *task)
+{
+ return aa_cred_profile(__task_cred(task));
+}
+
+/**
+ * __aa_task_is_confined - determine if @task has any confinement
+ * @task: task to check confinement of (NOT NULL)
+ *
+ * If @task != current needs to be called in RCU safe critical section
+ */
+static inline bool __aa_task_is_confined(struct task_struct *task)
+{
+ return !unconfined(__aa_task_profile(task));
}
/**
@@ -136,19 +148,31 @@ static inline struct aa_profile *__aa_current_profile(void)
*/
static inline struct aa_profile *aa_current_profile(void)
{
- const struct aa_task_cxt *cxt = current_cred()->security;
+ const struct aa_task_cxt *cxt = current_cxt();
struct aa_profile *profile;
BUG_ON(!cxt || !cxt->profile);
- profile = aa_newest_version(cxt->profile);
- /*
- * Whether or not replacement succeeds, use newest profile so
- * there is no need to update it after replacement.
- */
- if (unlikely((cxt->profile != profile)))
+ if (PROFILE_INVALID(cxt->profile)) {
+ profile = aa_get_newest_profile(cxt->profile);
aa_replace_current_profile(profile);
+ aa_put_profile(profile);
+ cxt = current_cxt();
+ }
- return profile;
+ return cxt->profile;
+}
+
+/**
+ * aa_clear_task_cxt_trans - clear transition tracking info from the cxt
+ * @cxt: task context to clear (NOT NULL)
+ */
+static inline void aa_clear_task_cxt_trans(struct aa_task_cxt *cxt)
+{
+ aa_put_profile(cxt->previous);
+ aa_put_profile(cxt->onexec);
+ cxt->previous = NULL;
+ cxt->onexec = NULL;
+ cxt->token = 0;
}
#endif /* __AA_CONTEXT_H */
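A minimal sketch of how hook code is expected to use the reworked context helpers; the function is hypothetical and only combines __aa_current_profile() with the unconfined() test from policy.h.

/* Hypothetical check: is the current task confined at all? For other
 * tasks, hold rcu_read_lock() and use __aa_task_profile() instead. */
static bool current_task_confined(void)
{
	return !unconfined(__aa_current_profile());
}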
diff --git a/security/apparmor/include/crypto.h b/security/apparmor/include/crypto.h
new file mode 100644
index 00000000000..dc418e5024d
--- /dev/null
+++ b/security/apparmor/include/crypto.h
@@ -0,0 +1,36 @@
+/*
+ * AppArmor security module
+ *
+ * This file contains AppArmor policy loading interface function definitions.
+ *
+ * Copyright 2013 Canonical Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ */
+
+#ifndef __APPARMOR_CRYPTO_H
+#define __APPARMOR_CRYPTO_H
+
+#include "policy.h"
+
+#ifdef CONFIG_SECURITY_APPARMOR_HASH
+unsigned int aa_hash_size(void);
+int aa_calc_profile_hash(struct aa_profile *profile, u32 version, void *start,
+ size_t len);
+#else
+static inline int aa_calc_profile_hash(struct aa_profile *profile, u32 version,
+ void *start, size_t len)
+{
+ return 0;
+}
+
+static inline unsigned int aa_hash_size(void)
+{
+ return 0;
+}
+#endif
+
+#endif /* __APPARMOR_CRYPTO_H */
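Since the header supplies no-op stubs when CONFIG_SECURITY_APPARMOR_HASH is disabled, callers can stay unconditional; a hypothetical sketch:

/* Sketch: hash a freshly unpacked profile blob; with hashing compiled
 * out, aa_hash_size() is 0 and aa_calc_profile_hash() returns 0. */
static int hash_unpacked_profile(struct aa_profile *profile, u32 version,
				 void *blob, size_t len)
{
	if (!aa_hash_size())
		return 0;
	return aa_calc_profile_hash(profile, version, blob, len);
}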
diff --git a/security/apparmor/include/file.h b/security/apparmor/include/file.h
index ab8c6d87f75..2c922b86bd4 100644
--- a/security/apparmor/include/file.h
+++ b/security/apparmor/include/file.h
@@ -71,7 +71,7 @@ struct path;
/* need to make conditional which ones are being set */
struct path_cond {
- uid_t uid;
+ kuid_t uid;
umode_t mode;
};
@@ -117,7 +117,7 @@ static inline u16 dfa_map_xindex(u16 mask)
index |= AA_X_NAME;
} else if (old_index == 3) {
index |= AA_X_NAME | AA_X_CHILD;
- } else {
+ } else if (old_index) {
index |= AA_X_TABLE;
index |= old_index - 4;
}
@@ -146,7 +146,7 @@ static inline u16 dfa_map_xindex(u16 mask)
int aa_audit_file(struct aa_profile *profile, struct file_perms *perms,
gfp_t gfp, int op, u32 request, const char *name,
- const char *target, uid_t ouid, const char *info, int error);
+ const char *target, kuid_t ouid, const char *info, int error);
/**
* struct aa_file_rules - components used for file rule permissions
@@ -186,11 +186,6 @@ static inline void aa_free_file_rules(struct aa_file_rules *rules)
aa_free_domain_entries(&rules->trans);
}
-#define ACC_FMODE(x) (("\000\004\002\006"[(x)&O_ACCMODE]) | (((x) << 1) & 0x40))
-
-/* from namei.c */
-#define MAP_OPEN_FLAGS(x) ((((x) + 1) & O_ACCMODE) ? (x) + 1 : (x))
-
/**
* aa_map_file_perms - map file flags to AppArmor permissions
* @file: open file to map flags to AppArmor permissions
@@ -199,8 +194,13 @@ static inline void aa_free_file_rules(struct aa_file_rules *rules)
*/
static inline u32 aa_map_file_to_perms(struct file *file)
{
- int flags = MAP_OPEN_FLAGS(file->f_flags);
- u32 perms = ACC_FMODE(file->f_mode);
+ int flags = file->f_flags;
+ u32 perms = 0;
+
+ if (file->f_mode & FMODE_WRITE)
+ perms |= MAY_WRITE;
+ if (file->f_mode & FMODE_READ)
+ perms |= MAY_READ;
if ((flags & O_APPEND) && (perms & MAY_WRITE))
perms = (perms & ~MAY_WRITE) | MAY_APPEND;
diff --git a/security/apparmor/include/ipc.h b/security/apparmor/include/ipc.h
index aeda0fbc8b2..288ca76e2fb 100644
--- a/security/apparmor/include/ipc.h
+++ b/security/apparmor/include/ipc.h
@@ -19,8 +19,8 @@
struct aa_profile;
-int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
- struct aa_profile *tracee, unsigned int mode);
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+ unsigned int mode);
int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
unsigned int mode);
diff --git a/security/apparmor/include/match.h b/security/apparmor/include/match.h
index a4a863997bd..001c43aa040 100644
--- a/security/apparmor/include/match.h
+++ b/security/apparmor/include/match.h
@@ -4,7 +4,7 @@
* This file contains AppArmor policy dfa matching engine definitions.
*
* Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2012 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -16,25 +16,30 @@
#define __AA_MATCH_H
#include <linux/kref.h>
-#include <linux/workqueue.h>
#define DFA_NOMATCH 0
#define DFA_START 1
-#define DFA_VALID_PERM_MASK 0xffffffff
-#define DFA_VALID_PERM2_MASK 0xffffffff
/**
* The format used for transition tables is based on the GNU flex table
* file format (--tables-file option; see Table File Format in the flex
* info pages and the flex sources for documentation). The magic number
* used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
- * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used
- * slightly differently (see the apparmor-parser package).
+ * new tables have been defined, and the YY_ID_CHK (check) and YY_ID_DEF
+ * (default) tables are used slightly differently (see the apparmor-parser
+ * package).
+ *
+ *
+ * The data in the packed dfa is stored in network byte order, and the tables
+ * are arranged for flexibility. We convert the table data to host native
+ * byte order.
+ *
+ * The dfa begins with a table set header, and is followed by the actual
+ * tables.
*/
#define YYTH_MAGIC 0x1B5E783D
-#define YYTH_DEF_RECURSE 0x1 /* DEF Table is recursive */
struct table_set_header {
u32 th_magic; /* YYTH_MAGIC */
@@ -63,7 +68,7 @@ struct table_set_header {
#define YYTD_DATA32 4
#define YYTD_DATA64 8
-/* Each ACCEPT2 table gets 6 dedicated flags, YYTD_DATAX define the
+/* ACCEPT & ACCEPT2 tables get 6 dedicated flags, YYTD_DATAX define the
* first flags
*/
#define ACCEPT1_FLAGS(X) ((X) & 0x3f)
@@ -116,6 +121,9 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
const char *str, int len);
unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
const char *str);
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+ const char c);
+
void aa_dfa_free_kref(struct kref *kref);
/**
diff --git a/security/apparmor/include/path.h b/security/apparmor/include/path.h
index 27b327a7fae..286ac75dc88 100644
--- a/security/apparmor/include/path.h
+++ b/security/apparmor/include/path.h
@@ -26,6 +26,7 @@ enum path_flags {
PATH_MEDIATE_DELETED = 0x10000, /* mediate deleted paths */
};
-int aa_get_name(struct path *path, int flags, char **buffer, const char **name);
+int aa_path_name(struct path *path, int flags, char **buffer,
+ const char **name, const char **info);
#endif /* __AA_PATH_H */
diff --git a/security/apparmor/include/policy.h b/security/apparmor/include/policy.h
index aeda5cf5690..c28b0f20ab5 100644
--- a/security/apparmor/include/policy.h
+++ b/security/apparmor/include/policy.h
@@ -29,19 +29,23 @@
#include "file.h"
#include "resource.h"
-extern const char *profile_mode_names[];
-#define APPARMOR_NAMES_MAX_INDEX 3
+extern const char *const aa_profile_mode_names[];
+#define APPARMOR_MODE_NAMES_MAX_INDEX 4
-#define COMPLAIN_MODE(_profile) \
- ((aa_g_profile_mode == APPARMOR_COMPLAIN) || \
- ((_profile)->mode == APPARMOR_COMPLAIN))
+#define PROFILE_MODE(_profile, _mode) \
+ ((aa_g_profile_mode == (_mode)) || \
+ ((_profile)->mode == (_mode)))
-#define KILL_MODE(_profile) \
- ((aa_g_profile_mode == APPARMOR_KILL) || \
- ((_profile)->mode == APPARMOR_KILL))
+#define COMPLAIN_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_COMPLAIN)
+
+#define KILL_MODE(_profile) PROFILE_MODE((_profile), APPARMOR_KILL)
#define PROFILE_IS_HAT(_profile) ((_profile)->flags & PFLAG_HAT)
+#define PROFILE_INVALID(_profile) ((_profile)->flags & PFLAG_INVALID)
+
+#define on_list_rcu(X) (!list_empty(X) && (X)->prev != LIST_POISON2)
+
/*
* FIXME: currently need a clean way to replace and remove profiles as a
* set. It should be done at the namespace level.
@@ -52,17 +56,19 @@ enum profile_mode {
APPARMOR_ENFORCE, /* enforce access rules */
APPARMOR_COMPLAIN, /* allow and log access violations */
APPARMOR_KILL, /* kill task on access violation */
+ APPARMOR_UNCONFINED, /* profile set to unconfined */
};
enum profile_flags {
PFLAG_HAT = 1, /* profile is a hat */
- PFLAG_UNCONFINED = 2, /* profile is an unconfined profile */
PFLAG_NULL = 4, /* profile is null learning profile */
PFLAG_IX_ON_NAME_ERROR = 8, /* fallback to ix on name lookup fail */
PFLAG_IMMUTABLE = 0x10, /* don't allow changes/replacement */
PFLAG_USER_DEFINED = 0x20, /* user based profile - lower privs */
PFLAG_NO_LIST_REF = 0x40, /* list doesn't keep profile ref */
PFLAG_OLD_NULL_TRANS = 0x100, /* use // as the null transition */
+ PFLAG_INVALID = 0x200, /* profile replaced/removed */
+ PFLAG_NS_COUNT = 0x400, /* carries NS ref count */
/* These flags must correspond with PATH_flags */
PFLAG_MEDIATE_DELETED = 0x10000, /* mediate instead delegate deleted */
@@ -73,14 +79,12 @@ struct aa_profile;
/* struct aa_policy - common part of both namespaces and profiles
* @name: name of the object
* @hname - The hierarchical name
- * @count: reference count of the obj
* @list: list policy object is on
* @profiles: head of the profiles list contained in the object
*/
struct aa_policy {
char *name;
char *hname;
- struct kref count;
struct list_head list;
struct list_head profiles;
};
@@ -105,6 +109,9 @@ struct aa_ns_acct {
* @acct: accounting for the namespace
* @unconfined: special unconfined profile for the namespace
* @sub_ns: list of namespaces under the current namespace.
+ * @uniq_null: uniq value used for null learning profiles
+ * @uniq_id: a unique id count for the profiles in the namespace
+ * @dents: dentries for the namespaces file entries in apparmorfs
*
* An aa_namespace defines the set of profiles that are searched to determine
* which profile to attach to a task. Profiles can not be shared between
@@ -123,37 +130,63 @@ struct aa_ns_acct {
struct aa_namespace {
struct aa_policy base;
struct aa_namespace *parent;
- rwlock_t lock;
+ struct mutex lock;
struct aa_ns_acct acct;
struct aa_profile *unconfined;
struct list_head sub_ns;
+ atomic_t uniq_null;
+ long uniq_id;
+
+ struct dentry *dents[AAFS_NS_SIZEOF];
};
+/* struct aa_policydb - match engine for a policy
+ * dfa: dfa pattern match
+ * start: set of start states for the different classes of data
+ */
+struct aa_policydb {
+ /* Generic policy DFA; specific rule types will be subsections of it */
+ struct aa_dfa *dfa;
+ unsigned int start[AA_CLASS_LAST + 1];
+
+};
+
+struct aa_replacedby {
+ struct kref count;
+ struct aa_profile __rcu *profile;
+};
+
+
/* struct aa_profile - basic confinement data
* @base - base components of the profile (name, refcount, lists, lock ...)
+ * @count: reference count of the obj
+ * @rcu: rcu head used when removing from @list
* @parent: parent of profile
* @ns: namespace the profile is in
* @replacedby: is set to the profile that replaced this profile
* @rename: optional profile name that this profile renamed
+ * @attach: human readable attachment string
* @xmatch: optional extended matching for unconfined executables names
* @xmatch_len: xmatch prefix len, used to determine xmatch priority
- * @sid: the unique security id number of this profile
* @audit: the auditing mode of the profile
* @mode: the enforcement mode of the profile
* @flags: flags controlling profile behavior
* @path_flags: flags controlling path generation behavior
* @size: the memory consumed by this profiles rules
+ * @policy: general match rules governing policy
* @file: The set of rules governing basic file access and domain transitions
* @caps: capabilities for the profile
* @rlimits: rlimits for the profile
*
+ * @dents: dentries for the profiles file entries in apparmorfs
+ * @dirname: name of the profile dir in apparmorfs
+ *
* The AppArmor profile contains the basic confinement data. Each profile
* has a name, and exists in a namespace. The @name and @exec_match are
* used to determine profile attachment against unconfined tasks. All other
* attachments are determined by profile X transition rules.
*
- * The @replacedby field is write protected by the profile lock. Reads
- * are assumed to be atomic, and are done without locking.
+ * The @replacedby struct is write protected by the profile lock.
*
* Profiles have a hierarchy where hats and children profiles keep
* a reference to their parent.
@@ -164,24 +197,31 @@ struct aa_namespace {
*/
struct aa_profile {
struct aa_policy base;
- struct aa_profile *parent;
+ struct kref count;
+ struct rcu_head rcu;
+ struct aa_profile __rcu *parent;
struct aa_namespace *ns;
- struct aa_profile *replacedby;
+ struct aa_replacedby *replacedby;
const char *rename;
+ const char *attach;
struct aa_dfa *xmatch;
int xmatch_len;
- u32 sid;
enum audit_mode audit;
- enum profile_mode mode;
- u32 flags;
+ long mode;
+ long flags;
u32 path_flags;
int size;
+ struct aa_policydb policy;
struct aa_file_rules file;
struct aa_caps caps;
struct aa_rlimit rlimits;
+
+ unsigned char *hash;
+ char *dirname;
+ struct dentry *dents[AAFS_PROF_SIZEOF];
};
extern struct aa_namespace *root_ns;
@@ -198,43 +238,11 @@ void aa_free_namespace_kref(struct kref *kref);
struct aa_namespace *aa_find_namespace(struct aa_namespace *root,
const char *name);
-static inline struct aa_policy *aa_get_common(struct aa_policy *c)
-{
- if (c)
- kref_get(&c->count);
-
- return c;
-}
-
-/**
- * aa_get_namespace - increment references count on @ns
- * @ns: namespace to increment reference count of (MAYBE NULL)
- *
- * Returns: pointer to @ns, if @ns is NULL returns NULL
- * Requires: @ns must be held with valid refcount when called
- */
-static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns)
-{
- if (ns)
- kref_get(&(ns->base.count));
-
- return ns;
-}
-
-/**
- * aa_put_namespace - decrement refcount on @ns
- * @ns: namespace to put reference of
- *
- * Decrement reference count of @ns and if no longer in use free it
- */
-static inline void aa_put_namespace(struct aa_namespace *ns)
-{
- if (ns)
- kref_put(&ns->base.count, aa_free_namespace_kref);
-}
+void aa_free_replacedby_kref(struct kref *kref);
struct aa_profile *aa_alloc_profile(const char *name);
struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat);
+void aa_free_profile(struct aa_profile *profile);
void aa_free_profile_kref(struct kref *kref);
struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name);
struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *name);
@@ -246,25 +254,13 @@ ssize_t aa_remove_profiles(char *name, size_t size);
#define PROF_ADD 1
#define PROF_REPLACE 0
-#define unconfined(X) ((X)->flags & PFLAG_UNCONFINED)
+#define unconfined(X) ((X)->mode == APPARMOR_UNCONFINED)
-/**
- * aa_newest_version - find the newest version of @profile
- * @profile: the profile to check for newer versions of (NOT NULL)
- *
- * Returns: newest version of @profile, if @profile is the newest version
- * return @profile.
- *
- * NOTE: the profile returned is not refcounted, The refcount on @profile
- * must be held until the caller decides what to do with the returned newest
- * version.
- */
-static inline struct aa_profile *aa_newest_version(struct aa_profile *profile)
-{
- while (profile->replacedby)
- profile = profile->replacedby;
- return profile;
+static inline struct aa_profile *aa_deref_parent(struct aa_profile *p)
+{
+ return rcu_dereference_protected(p->parent,
+ mutex_is_locked(&p->ns->lock));
}
/**
@@ -277,19 +273,126 @@ static inline struct aa_profile *aa_newest_version(struct aa_profile *profile)
static inline struct aa_profile *aa_get_profile(struct aa_profile *p)
{
if (p)
- kref_get(&(p->base.count));
+ kref_get(&(p->count));
return p;
}
/**
+ * aa_get_profile_not0 - increment refcount on profile @p found via lookup
+ * @p: profile (MAYBE NULL)
+ *
+ * Returns: pointer to @p, or NULL if @p is NULL
+ * Requires: @p must be held with valid refcount when called
+ */
+static inline struct aa_profile *aa_get_profile_not0(struct aa_profile *p)
+{
+ if (p && kref_get_not0(&p->count))
+ return p;
+
+ return NULL;
+}
+
+/**
+ * aa_get_profile_rcu - increment refcount on a profile that can be replaced
+ * @p: pointer to profile that can be replaced (NOT NULL)
+ *
+ * Returns: pointer to a refcounted profile, else NULL if no profile
+ */
+static inline struct aa_profile *aa_get_profile_rcu(struct aa_profile __rcu **p)
+{
+ struct aa_profile *c;
+
+ rcu_read_lock();
+ do {
+ c = rcu_dereference(*p);
+ } while (c && !kref_get_not0(&c->count));
+ rcu_read_unlock();
+
+ return c;
+}
+
+/**
+ * aa_get_newest_profile - find the newest version of @profile
+ * @profile: the profile to check for newer versions of
+ *
+ * Returns: refcounted newest version of @profile, taking into account
+ * replacement, renames and removals; if @profile is already the newest
+ * version, a new reference to @profile itself is returned.
+ */
+static inline struct aa_profile *aa_get_newest_profile(struct aa_profile *p)
+{
+ if (!p)
+ return NULL;
+
+ if (PROFILE_INVALID(p))
+ return aa_get_profile_rcu(&p->replacedby->profile);
+
+ return aa_get_profile(p);
+}
+
+/**
* aa_put_profile - decrement refcount on profile @p
* @p: profile (MAYBE NULL)
*/
static inline void aa_put_profile(struct aa_profile *p)
{
if (p)
- kref_put(&p->base.count, aa_free_profile_kref);
+ kref_put(&p->count, aa_free_profile_kref);
+}
+
+static inline struct aa_replacedby *aa_get_replacedby(struct aa_replacedby *p)
+{
+ if (p)
+ kref_get(&(p->count));
+
+ return p;
+}
+
+static inline void aa_put_replacedby(struct aa_replacedby *p)
+{
+ if (p)
+ kref_put(&p->count, aa_free_replacedby_kref);
+}
+
+/* requires profile list write lock held */
+static inline void __aa_update_replacedby(struct aa_profile *orig,
+ struct aa_profile *new)
+{
+ struct aa_profile *tmp;
+ tmp = rcu_dereference_protected(orig->replacedby->profile,
+ mutex_is_locked(&orig->ns->lock));
+ rcu_assign_pointer(orig->replacedby->profile, aa_get_profile(new));
+ orig->flags |= PFLAG_INVALID;
+ aa_put_profile(tmp);
+}
+
+/**
+ * aa_get_namespace - increment references count on @ns
+ * @ns: namespace to increment reference count of (MAYBE NULL)
+ *
+ * Returns: pointer to @ns, if @ns is NULL returns NULL
+ * Requires: @ns must be held with valid refcount when called
+ */
+static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns)
+{
+ if (ns)
+ aa_get_profile(ns->unconfined);
+
+ return ns;
+}
+
+/**
+ * aa_put_namespace - decrement refcount on @ns
+ * @ns: namespace to put reference of
+ *
+ * Decrement reference count of @ns and if no longer in use free it
+ */
+static inline void aa_put_namespace(struct aa_namespace *ns)
+{
+ if (ns)
+ aa_put_profile(ns->unconfined);
}
static inline int AUDIT_MODE(struct aa_profile *profile)
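To make the new lifetime rules concrete, a brief illustrative caller (not from the patch): take a counted reference against the live, possibly-replaced profile, use it, and always drop it; namespace references now simply piggyback on the namespace's unconfined profile.

/* Sketch: mediate against the newest version of a profile that may have
 * been replaced, then release the reference. */
static void mediate_with_live_profile(struct aa_profile *p)
{
	struct aa_profile *live = aa_get_newest_profile(p);

	if (!live)
		return;
	/* ... perform access checks against @live ... */
	aa_put_profile(live);
}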
diff --git a/security/apparmor/include/policy_unpack.h b/security/apparmor/include/policy_unpack.h
index a2dcccac45a..c214fb88b1b 100644
--- a/security/apparmor/include/policy_unpack.h
+++ b/security/apparmor/include/policy_unpack.h
@@ -15,6 +15,25 @@
#ifndef __POLICY_INTERFACE_H
#define __POLICY_INTERFACE_H
-struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns);
+#include <linux/list.h>
+
+struct aa_load_ent {
+ struct list_head list;
+ struct aa_profile *new;
+ struct aa_profile *old;
+ struct aa_profile *rename;
+};
+
+void aa_load_ent_free(struct aa_load_ent *ent);
+struct aa_load_ent *aa_load_ent_alloc(void);
+
+#define PACKED_FLAG_HAT 1
+
+#define PACKED_MODE_ENFORCE 0
+#define PACKED_MODE_COMPLAIN 1
+#define PACKED_MODE_KILL 2
+#define PACKED_MODE_UNCONFINED 3
+
+int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns);
#endif /* __POLICY_INTERFACE_H */
diff --git a/security/apparmor/include/procattr.h b/security/apparmor/include/procattr.h
index 544aa6b766a..6bd5f33d953 100644
--- a/security/apparmor/include/procattr.h
+++ b/security/apparmor/include/procattr.h
@@ -21,6 +21,5 @@
int aa_getprocattr(struct aa_profile *profile, char **string);
int aa_setprocattr_changehat(char *args, size_t size, int test);
int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test);
-int aa_setprocattr_permipc(char *fqname);
#endif /* __AA_PROCATTR_H */
diff --git a/security/apparmor/include/resource.h b/security/apparmor/include/resource.h
index 02baec732bb..d3f4cf02795 100644
--- a/security/apparmor/include/resource.h
+++ b/security/apparmor/include/resource.h
@@ -18,6 +18,8 @@
#include <linux/resource.h>
#include <linux/sched.h>
+#include "apparmorfs.h"
+
struct aa_profile;
/* struct aa_rlimit - rlimit settings for the profile
@@ -32,6 +34,8 @@ struct aa_rlimit {
struct rlimit limits[RLIM_NLIMITS];
};
+extern struct aa_fs_entry aa_fs_entry_rlimit[];
+
int aa_map_resource(int resource);
int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *,
unsigned int resource, struct rlimit *new_rlim);
diff --git a/security/apparmor/include/sid.h b/security/apparmor/include/sid.h
index 020db35c301..513ca0e4896 100644
--- a/security/apparmor/include/sid.h
+++ b/security/apparmor/include/sid.h
@@ -16,7 +16,9 @@
#include <linux/types.h>
-struct aa_profile;
+/* sid value that will not be allocated */
+#define AA_SID_INVALID 0
+#define AA_SID_ALLOC AA_SID_INVALID
u32 aa_alloc_sid(void);
void aa_free_sid(u32 sid);
diff --git a/security/apparmor/ipc.c b/security/apparmor/ipc.c
index 7ee05c6f3c6..777ac1c4725 100644
--- a/security/apparmor/ipc.c
+++ b/security/apparmor/ipc.c
@@ -26,7 +26,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
audit_log_format(ab, " target=");
- audit_log_untrustedstring(ab, sa->aad.target);
+ audit_log_untrustedstring(ab, sa->aad->target);
}
/**
@@ -41,10 +41,12 @@ static int aa_audit_ptrace(struct aa_profile *profile,
struct aa_profile *target, int error)
{
struct common_audit_data sa;
- COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = OP_PTRACE;
- sa.aad.target = target;
- sa.aad.error = error;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.op = OP_PTRACE;
+ aad.target = target;
+ aad.error = error;
return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_ATOMIC, &sa,
audit_cb);
@@ -52,15 +54,14 @@ static int aa_audit_ptrace(struct aa_profile *profile,
/**
* aa_may_ptrace - test if tracer task can trace the tracee
- * @tracer_task: task who will do the tracing (NOT NULL)
* @tracer: profile of the task doing the tracing (NOT NULL)
* @tracee: task to be traced
* @mode: whether PTRACE_MODE_READ || PTRACE_MODE_ATTACH
*
* Returns: %0 else error code if permission denied or error
*/
-int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
- struct aa_profile *tracee, unsigned int mode)
+int aa_may_ptrace(struct aa_profile *tracer, struct aa_profile *tracee,
+ unsigned int mode)
{
/* TODO: currently only based on capability, not extended ptrace
* rules,
@@ -70,7 +71,7 @@ int aa_may_ptrace(struct task_struct *tracer_task, struct aa_profile *tracer,
if (unconfined(tracer) || tracer == tracee)
return 0;
/* log this capability request */
- return aa_capable(tracer_task, tracer, CAP_SYS_PTRACE, 1);
+ return aa_capable(tracer, CAP_SYS_PTRACE, 1);
}
/**
@@ -93,23 +94,18 @@ int aa_ptrace(struct task_struct *tracer, struct task_struct *tracee,
* - tracer profile has CAP_SYS_PTRACE
*/
- struct aa_profile *tracer_p;
- /* cred released below */
- const struct cred *cred = get_task_cred(tracer);
+ struct aa_profile *tracer_p = aa_get_task_profile(tracer);
int error = 0;
- tracer_p = aa_cred_profile(cred);
if (!unconfined(tracer_p)) {
- /* lcred released below */
- const struct cred *lcred = get_task_cred(tracee);
- struct aa_profile *tracee_p = aa_cred_profile(lcred);
+ struct aa_profile *tracee_p = aa_get_task_profile(tracee);
- error = aa_may_ptrace(tracer, tracer_p, tracee_p, mode);
+ error = aa_may_ptrace(tracer_p, tracee_p, mode);
error = aa_audit_ptrace(tracer_p, tracee_p, error);
- put_cred(lcred);
+ aa_put_profile(tracee_p);
}
- put_cred(cred);
+ aa_put_profile(tracer_p);
return error;
}
diff --git a/security/apparmor/lib.c b/security/apparmor/lib.c
index 9516948041a..c1827e06845 100644
--- a/security/apparmor/lib.c
+++ b/security/apparmor/lib.c
@@ -45,8 +45,10 @@ char *aa_split_fqname(char *fqname, char **ns_name)
*ns_name = skip_spaces(&name[1]);
if (split) {
/* overwrite ':' with \0 */
- *split = 0;
- name = skip_spaces(split + 1);
+ *split++ = 0;
+ if (strncmp(split, "//", 2) == 0)
+ split += 2;
+ name = skip_spaces(split);
} else
/* a ns name without a following profile is allowed */
name = NULL;
@@ -65,23 +67,26 @@ void aa_info_message(const char *str)
{
if (audit_enabled) {
struct common_audit_data sa;
- COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.info = str;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.info = str;
aa_audit_msg(AUDIT_APPARMOR_STATUS, &sa, NULL);
}
printk(KERN_INFO "AppArmor: %s\n", str);
}
/**
- * kvmalloc - do allocation preferring kmalloc but falling back to vmalloc
- * @size: size of allocation
+ * __aa_kvmalloc - do allocation preferring kmalloc but falling back to vmalloc
+ * @size: how many bytes of memory are required
+ * @flags: the type of memory to allocate (see kmalloc).
*
* Return: allocated buffer or NULL if failed
*
* It is possible that policy being loaded from the user is larger than
* what can be allocated by kmalloc, in those cases fall back to vmalloc.
*/
-void *kvmalloc(size_t size)
+void *__aa_kvmalloc(size_t size, gfp_t flags)
{
void *buffer = NULL;
@@ -90,46 +95,12 @@ void *kvmalloc(size_t size)
/* do not attempt kmalloc if we need more than 16 pages at once */
if (size <= (16*PAGE_SIZE))
- buffer = kmalloc(size, GFP_NOIO | __GFP_NOWARN);
+ buffer = kmalloc(size, flags | GFP_NOIO | __GFP_NOWARN);
if (!buffer) {
- /* see kvfree for why size must be at least work_struct size
- * when allocated via vmalloc
- */
- if (size < sizeof(struct work_struct))
- size = sizeof(struct work_struct);
- buffer = vmalloc(size);
+ if (flags & __GFP_ZERO)
+ buffer = vzalloc(size);
+ else
+ buffer = vmalloc(size);
}
return buffer;
}
-
-/**
- * do_vfree - workqueue routine for freeing vmalloced memory
- * @work: data to be freed
- *
- * The work_struct is overlaid to the data being freed, as at the point
- * the work is scheduled the data is no longer valid, be its freeing
- * needs to be delayed until safe.
- */
-static void do_vfree(struct work_struct *work)
-{
- vfree(work);
-}
-
-/**
- * kvfree - free an allocation do by kvmalloc
- * @buffer: buffer to free (MAYBE_NULL)
- *
- * Free a buffer allocated by kvmalloc
- */
-void kvfree(void *buffer)
-{
- if (is_vmalloc_addr(buffer)) {
- /* Data is no longer valid so just use the allocated space
- * as the work_struct
- */
- struct work_struct *work = (struct work_struct *) buffer;
- INIT_WORK(work, do_vfree);
- schedule_work(work);
- } else
- kfree(buffer);
-}
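For reference, a sketch of what the revised aa_split_fqname() accepts (a hypothetical example with a writable buffer, since the function modifies its argument in place):

/* Illustration: ":ns://profile" splits into namespace "ns" and profile
 * name "profile"; the optional "//" after the namespace is now skipped. */
static void split_fqname_example(void)
{
	char fqname[] = ":ns://profile";
	char *ns_name, *name;

	name = aa_split_fqname(fqname, &ns_name);
	/* ns_name == "ns", name == "profile"; a bare ":ns" yields name == NULL */
}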
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 97ce8fae49b..99810009333 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -48,8 +48,8 @@ int apparmor_initialized __initdata;
*/
static void apparmor_cred_free(struct cred *cred)
{
- aa_free_task_context(cred->security);
- cred->security = NULL;
+ aa_free_task_context(cred_cxt(cred));
+ cred_cxt(cred) = NULL;
}
/*
@@ -62,7 +62,7 @@ static int apparmor_cred_alloc_blank(struct cred *cred, gfp_t gfp)
if (!cxt)
return -ENOMEM;
- cred->security = cxt;
+ cred_cxt(cred) = cxt;
return 0;
}
@@ -77,8 +77,8 @@ static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
if (!cxt)
return -ENOMEM;
- aa_dup_task_context(cxt, old->security);
- new->security = cxt;
+ aa_dup_task_context(cxt, cred_cxt(old));
+ cred_cxt(new) = cxt;
return 0;
}
@@ -87,8 +87,8 @@ static int apparmor_cred_prepare(struct cred *new, const struct cred *old,
*/
static void apparmor_cred_transfer(struct cred *new, const struct cred *old)
{
- const struct aa_task_cxt *old_cxt = old->security;
- struct aa_task_cxt *new_cxt = new->security;
+ const struct aa_task_cxt *old_cxt = cred_cxt(old);
+ struct aa_task_cxt *new_cxt = cred_cxt(new);
aa_dup_task_context(new_cxt, old_cxt);
}
@@ -145,7 +145,7 @@ static int apparmor_capable(const struct cred *cred, struct user_namespace *ns,
if (!error) {
profile = aa_cred_profile(cred);
if (!unconfined(profile))
- error = aa_capable(current, profile, cap, audit);
+ error = aa_capable(profile, cap, audit);
}
return error;
}
@@ -352,7 +352,7 @@ static int apparmor_path_chmod(struct path *path, umode_t mode)
return common_perm_mnt_dentry(OP_CHMOD, path->mnt, path->dentry, AA_MAY_CHMOD);
}
-static int apparmor_path_chown(struct path *path, uid_t uid, gid_t gid)
+static int apparmor_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
struct path_cond cond = { path->dentry->d_inode->i_uid,
path->dentry->d_inode->i_mode
@@ -373,13 +373,13 @@ static int apparmor_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
AA_MAY_META_READ);
}
-static int apparmor_dentry_open(struct file *file, const struct cred *cred)
+static int apparmor_file_open(struct file *file, const struct cred *cred)
{
struct aa_file_cxt *fcxt = file->f_security;
struct aa_profile *profile;
int error = 0;
- if (!mediated_filesystem(file->f_path.dentry->d_inode))
+ if (!mediated_filesystem(file_inode(file)))
return 0;
/* If in exec, permission is handled by bprm hooks.
@@ -394,7 +394,7 @@ static int apparmor_dentry_open(struct file *file, const struct cred *cred)
profile = aa_cred_profile(cred);
if (!unconfined(profile)) {
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct path_cond cond = { inode->i_uid, inode->i_mode };
error = aa_path_perm(OP_OPEN, profile, &file->f_path, 0,
@@ -432,7 +432,7 @@ static int common_file_perm(int op, struct file *file, u32 mask)
BUG_ON(!fprofile);
if (!file->f_path.mnt ||
- !mediated_filesystem(file->f_path.dentry->d_inode))
+ !mediated_filesystem(file_inode(file)))
return 0;
profile = __aa_current_profile();
@@ -469,7 +469,6 @@ static int apparmor_file_lock(struct file *file, unsigned int cmd)
static int common_mmap(int op, struct file *file, unsigned long prot,
unsigned long flags)
{
- struct dentry *dentry;
int mask = 0;
if (!file || !file->f_security)
@@ -486,21 +485,12 @@ static int common_mmap(int op, struct file *file, unsigned long prot,
if (prot & PROT_EXEC)
mask |= AA_EXEC_MMAP;
- dentry = file->f_path.dentry;
return common_file_perm(op, file, mask);
}
-static int apparmor_file_mmap(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags,
- unsigned long addr, unsigned long addr_only)
+static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
{
- int rc = 0;
-
- /* do DAC check */
- rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
- if (rc || addr_only)
- return rc;
-
return common_mmap(OP_FMMAP, file, prot, flags);
}
@@ -515,24 +505,24 @@ static int apparmor_getprocattr(struct task_struct *task, char *name,
char **value)
{
int error = -ENOENT;
- struct aa_profile *profile;
/* released below */
const struct cred *cred = get_task_cred(task);
- struct aa_task_cxt *cxt = cred->security;
- profile = aa_cred_profile(cred);
+ struct aa_task_cxt *cxt = cred_cxt(cred);
+ struct aa_profile *profile = NULL;
if (strcmp(name, "current") == 0)
- error = aa_getprocattr(aa_newest_version(cxt->profile),
- value);
+ profile = aa_get_newest_profile(cxt->profile);
else if (strcmp(name, "prev") == 0 && cxt->previous)
- error = aa_getprocattr(aa_newest_version(cxt->previous),
- value);
+ profile = aa_get_newest_profile(cxt->previous);
else if (strcmp(name, "exec") == 0 && cxt->onexec)
- error = aa_getprocattr(aa_newest_version(cxt->onexec),
- value);
+ profile = aa_get_newest_profile(cxt->onexec);
else
error = -EINVAL;
+ if (profile)
+ error = aa_getprocattr(profile, value);
+
+ aa_put_profile(profile);
put_cred(cred);
return error;
@@ -541,6 +531,8 @@ static int apparmor_getprocattr(struct task_struct *task, char *name,
static int apparmor_setprocattr(struct task_struct *task, char *name,
void *value, size_t size)
{
+ struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
char *command, *args = value;
size_t arg_size;
int error;
@@ -584,28 +576,31 @@ static int apparmor_setprocattr(struct task_struct *task, char *name,
} else if (strcmp(command, "permprofile") == 0) {
error = aa_setprocattr_changeprofile(args, !AA_ONEXEC,
AA_DO_TEST);
- } else if (strcmp(command, "permipc") == 0) {
- error = aa_setprocattr_permipc(args);
- } else {
- struct common_audit_data sa;
- COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = OP_SETPROCATTR;
- sa.aad.info = name;
- sa.aad.error = -EINVAL;
- return aa_audit(AUDIT_APPARMOR_DENIED,
- __aa_current_profile(), GFP_KERNEL,
- &sa, NULL);
- }
+ } else
+ goto fail;
} else if (strcmp(name, "exec") == 0) {
- error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
- !AA_DO_TEST);
- } else {
+ if (strcmp(command, "exec") == 0)
+ error = aa_setprocattr_changeprofile(args, AA_ONEXEC,
+ !AA_DO_TEST);
+ else
+ goto fail;
+ } else
/* only support the "current" and "exec" process attributes */
return -EINVAL;
- }
+
if (!error)
error = size;
return error;
+
+fail:
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.profile = aa_current_profile();
+ aad.op = OP_SETPROCATTR;
+ aad.info = name;
+ aad.error = -EINVAL;
+ aa_audit_msg(AUDIT_APPARMOR_DENIED, &sa, NULL);
+ return -EINVAL;
}
static int apparmor_task_setrlimit(struct task_struct *task,
@@ -638,13 +633,14 @@ static struct security_operations apparmor_ops = {
.path_chmod = apparmor_path_chmod,
.path_chown = apparmor_path_chown,
.path_truncate = apparmor_path_truncate,
- .dentry_open = apparmor_dentry_open,
.inode_getattr = apparmor_inode_getattr,
+ .file_open = apparmor_file_open,
.file_permission = apparmor_file_permission,
.file_alloc_security = apparmor_file_alloc_security,
.file_free_security = apparmor_file_free_security,
- .file_mmap = apparmor_file_mmap,
+ .mmap_file = apparmor_mmap_file,
+ .mmap_addr = cap_mmap_addr,
.file_mprotect = apparmor_file_mprotect,
.file_lock = apparmor_file_lock,
@@ -672,6 +668,7 @@ static int param_set_aabool(const char *val, const struct kernel_param *kp);
static int param_get_aabool(char *buffer, const struct kernel_param *kp);
#define param_check_aabool param_check_bool
static struct kernel_param_ops param_ops_aabool = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_aabool,
.get = param_get_aabool
};
@@ -688,6 +685,7 @@ static int param_set_aalockpolicy(const char *val, const struct kernel_param *kp
static int param_get_aalockpolicy(char *buffer, const struct kernel_param *kp);
#define param_check_aalockpolicy param_check_bool
static struct kernel_param_ops param_ops_aalockpolicy = {
+ .flags = KERNEL_PARAM_FL_NOARG,
.set = param_set_aalockpolicy,
.get = param_get_aalockpolicy
};
@@ -748,12 +746,12 @@ module_param_named(paranoid_load, aa_g_paranoid_load, aabool,
/* Boot time disable flag */
static bool apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE;
-module_param_named(enabled, apparmor_enabled, aabool, S_IRUSR);
+module_param_named(enabled, apparmor_enabled, bool, S_IRUGO);
static int __init apparmor_enabled_setup(char *str)
{
unsigned long enabled;
- int error = strict_strtoul(str, 0, &enabled);
+ int error = kstrtoul(str, 0, &enabled);
if (!error)
apparmor_enabled = enabled ? 1 : 0;
return 1;
@@ -847,7 +845,7 @@ static int param_get_mode(char *buffer, struct kernel_param *kp)
if (!apparmor_enabled)
return -EINVAL;
- return sprintf(buffer, "%s", profile_mode_names[aa_g_profile_mode]);
+ return sprintf(buffer, "%s", aa_profile_mode_names[aa_g_profile_mode]);
}
static int param_set_mode(const char *val, struct kernel_param *kp)
@@ -862,8 +860,8 @@ static int param_set_mode(const char *val, struct kernel_param *kp)
if (!val)
return -EINVAL;
- for (i = 0; i < APPARMOR_NAMES_MAX_INDEX; i++) {
- if (strcmp(val, profile_mode_names[i]) == 0) {
+ for (i = 0; i < APPARMOR_MODE_NAMES_MAX_INDEX; i++) {
+ if (strcmp(val, aa_profile_mode_names[i]) == 0) {
aa_g_profile_mode = i;
return 0;
}
@@ -891,7 +889,7 @@ static int __init set_init_cxt(void)
return -ENOMEM;
cxt->profile = aa_get_profile(root_ns->unconfined);
- cred->security = cxt;
+ cred_cxt(cred) = cxt;
return 0;
}
@@ -920,8 +918,11 @@ static int __init apparmor_init(void)
error = register_security(&apparmor_ops);
if (error) {
+ struct cred *cred = (struct cred *)current->real_cred;
+ aa_free_task_context(cred_cxt(cred));
+ cred_cxt(cred) = NULL;
AA_ERROR("Unable to register AppArmor\n");
- goto set_init_cxt_out;
+ goto register_security_out;
}
/* Report that AppArmor successfully initialized */
@@ -935,9 +936,6 @@ static int __init apparmor_init(void)
return error;
-set_init_cxt_out:
- aa_free_task_context(current->real_cred->security);
-
register_security_out:
aa_free_root_ns();
diff --git a/security/apparmor/match.c b/security/apparmor/match.c
index 94de6b4907c..727eb4200d5 100644
--- a/security/apparmor/match.c
+++ b/security/apparmor/match.c
@@ -4,7 +4,7 @@
* This file contains AppArmor dfa based regular expression matching engine
*
* Copyright (C) 1998-2008 Novell/SUSE
- * Copyright 2009-2010 Canonical Ltd.
+ * Copyright 2009-2012 Canonical Ltd.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
@@ -23,6 +23,8 @@
#include "include/apparmor.h"
#include "include/match.h"
+#define base_idx(X) ((X) & 0xffffff)
+
/**
* unpack_table - unpack a dfa table (one of accept, default, base, next check)
* @blob: data to unpack (NOT NULL)
@@ -30,7 +32,7 @@
*
* Returns: pointer to table else NULL on failure
*
- * NOTE: must be freed by kvfree (not kmalloc)
+ * NOTE: must be freed by kvfree (not kfree)
*/
static struct table_header *unpack_table(char *blob, size_t bsize)
{
@@ -57,7 +59,7 @@ static struct table_header *unpack_table(char *blob, size_t bsize)
if (bsize < tsize)
goto out;
- table = kvmalloc(tsize);
+ table = kvzalloc(tsize);
if (table) {
*table = th;
if (th.td_flags == YYTD_DATA8)
@@ -137,8 +139,7 @@ static int verify_dfa(struct aa_dfa *dfa, int flags)
for (i = 0; i < state_count; i++) {
if (DEFAULT_TABLE(dfa)[i] >= state_count)
goto out;
- /* TODO: do check that DEF state recursion terminates */
- if (BASE_TABLE(dfa)[i] + 255 >= trans_count) {
+ if (base_idx(BASE_TABLE(dfa)[i]) + 255 >= trans_count) {
printk(KERN_ERR "AppArmor DFA next/check upper "
"bounds error\n");
goto out;
@@ -314,7 +315,7 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
u8 *equiv = EQUIV_TABLE(dfa);
/* default is direct to next state */
for (; len; len--) {
- pos = base[state] + equiv[(u8) *str++];
+ pos = base_idx(base[state]) + equiv[(u8) *str++];
if (check[pos] == state)
state = next[pos];
else
@@ -323,7 +324,7 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
} else {
/* default is direct to next state */
for (; len; len--) {
- pos = base[state] + (u8) *str++;
+ pos = base_idx(base[state]) + (u8) *str++;
if (check[pos] == state)
state = next[pos];
else
@@ -335,12 +336,12 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
}
/**
- * aa_dfa_next_state - traverse @dfa to find state @str stops at
+ * aa_dfa_match - traverse @dfa to find state @str stops at
* @dfa: the dfa to match @str against (NOT NULL)
* @start: the state of the dfa to start matching in
* @str: the null terminated string of bytes to match against the dfa (NOT NULL)
*
- * aa_dfa_next_state will match @str against the dfa and return the state it
+ * aa_dfa_match will match @str against the dfa and return the state it
* finished matching in. The final state can be used to look up the accepting
* label, or as the start state of a continuing match.
*
@@ -349,5 +350,79 @@ unsigned int aa_dfa_match_len(struct aa_dfa *dfa, unsigned int start,
unsigned int aa_dfa_match(struct aa_dfa *dfa, unsigned int start,
const char *str)
{
- return aa_dfa_match_len(dfa, start, str, strlen(str));
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int state = start, pos;
+
+ if (state == 0)
+ return 0;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+ while (*str) {
+ pos = base_idx(base[state]) + equiv[(u8) *str++];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+ } else {
+ /* default is direct to next state */
+ while (*str) {
+ pos = base_idx(base[state]) + (u8) *str++;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+ }
+
+ return state;
+}
+
+/**
+ * aa_dfa_next - step one character to the next state in the dfa
+ * @dfa: the dfa to traverse (NOT NULL)
+ * @state: the state to start in
+ * @c: the input character to transition on
+ *
+ * aa_dfa_next will step through the dfa by one input character @c
+ *
+ * Returns: the state reached after input @c
+ */
+unsigned int aa_dfa_next(struct aa_dfa *dfa, unsigned int state,
+ const char c)
+{
+ u16 *def = DEFAULT_TABLE(dfa);
+ u32 *base = BASE_TABLE(dfa);
+ u16 *next = NEXT_TABLE(dfa);
+ u16 *check = CHECK_TABLE(dfa);
+ unsigned int pos;
+
+ /* current state is <state>, matching character *str */
+ if (dfa->tables[YYTD_ID_EC]) {
+ /* Equivalence class table defined */
+ u8 *equiv = EQUIV_TABLE(dfa);
+ /* default is direct to next state */
+
+ pos = base_idx(base[state]) + equiv[(u8) c];
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ } else {
+ /* default is direct to next state */
+ pos = base_idx(base[state]) + (u8) c;
+ if (check[pos] == state)
+ state = next[pos];
+ else
+ state = def[state];
+ }
+
+ return state;
}
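The hunks above open-code the string walk in aa_dfa_match() and add a single-step aa_dfa_next(), with every base[] entry passed through base_idx() because only its low 24 bits index the next/check arrays. As a rough, standalone illustration (a userspace toy, not kernel code), the next/check/default transition scheme those routines rely on looks like this:

/* sketch: comb-compressed DFA walk in the style of aa_dfa_match/aa_dfa_next */
#include <stdint.h>
#include <stdio.h>

#define BASE_IDX(x) ((x) & 0xffffff)    /* low 24 bits index next/check */

struct dfa {
        const uint32_t *base;   /* per-state offset into next/check */
        const uint16_t *def;    /* per-state default transition */
        const uint16_t *next;   /* packed next-state entries */
        const uint16_t *check;  /* owning state of each next[] slot */
};

/* one transition: use next[pos] only if check[pos] owns it, else default */
static unsigned int dfa_step(const struct dfa *d, unsigned int state,
                             unsigned char c)
{
        unsigned int pos = BASE_IDX(d->base[state]) + c;

        return (d->check[pos] == state) ? d->next[pos] : d->def[state];
}

static unsigned int dfa_match(const struct dfa *d, unsigned int state,
                              const char *str)
{
        while (*str)
                state = dfa_step(d, state, (unsigned char)*str++);
        return state;
}

int main(void)
{
        /* toy machine: state 1 loops on 'a', anything else drops to 0 */
        uint32_t base[2] = { 0, 0 };
        uint16_t def[2] = { 0, 0 };
        uint16_t next[256] = { 0 };
        uint16_t check[256];
        struct dfa d = { base, def, next, check };
        int i;

        for (i = 0; i < 256; i++)
                check[i] = 0xffff;      /* slot owned by no state */
        check['a'] = 1;
        next['a'] = 1;

        printf("\"aaa\" ends in state %u\n", dfa_match(&d, 1, "aaa"));
        printf("\"ab\" ends in state %u\n", dfa_match(&d, 1, "ab"));
        return 0;
}

Unlike the kernel routines, the sketch skips the state == 0 early return and the equivalence-class table; it is only meant to make the base/next/check layout concrete.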
diff --git a/security/apparmor/path.c b/security/apparmor/path.c
index 9d070a7c3ff..35b394a75d7 100644
--- a/security/apparmor/path.c
+++ b/security/apparmor/path.c
@@ -83,31 +83,31 @@ static int d_namespace_path(struct path *path, char *buf, int buflen,
struct path root;
get_fs_root(current->fs, &root);
res = __d_path(path, &root, buf, buflen);
- if (res && !IS_ERR(res)) {
- /* everything's fine */
- *name = res;
- path_put(&root);
- goto ok;
- }
path_put(&root);
- connected = 0;
+ } else {
+ res = d_absolute_path(path, buf, buflen);
+ if (!our_mnt(path->mnt))
+ connected = 0;
}
- res = d_absolute_path(path, buf, buflen);
-
- *name = res;
/* handle error conditions - and still allow a partial path to
* be returned.
*/
- if (IS_ERR(res)) {
- error = PTR_ERR(res);
- *name = buf;
- goto out;
- }
- if (!our_mnt(path->mnt))
+ if (!res || IS_ERR(res)) {
+ if (PTR_ERR(res) == -ENAMETOOLONG)
+ return -ENAMETOOLONG;
+ connected = 0;
+ res = dentry_path_raw(path->dentry, buf, buflen);
+ if (IS_ERR(res)) {
+ error = PTR_ERR(res);
+ *name = buf;
+ goto out;
+ };
+ } else if (!our_mnt(path->mnt))
connected = 0;
-ok:
+ *name = res;
+
/* Handle two cases:
* 1. A deleted dentry && profile is not allowing mediation of deleted
* 2. On some filesystems, newly allocated dentries appear to the
@@ -138,7 +138,7 @@ ok:
/* disconnected path, don't return pathname starting
* with '/'
*/
- error = -ESTALE;
+ error = -EACCES;
if (*res == '/')
*name = res + 1;
}
@@ -159,7 +159,7 @@ out:
* Returns: %0 else error on failure
*/
static int get_name_to_buffer(struct path *path, int flags, char *buffer,
- int size, char **name)
+ int size, char **name, const char **info)
{
int adjust = (flags & PATH_IS_DIR) ? 1 : 0;
int error = d_namespace_path(path, buffer, size - adjust, name, flags);
@@ -171,15 +171,27 @@ static int get_name_to_buffer(struct path *path, int flags, char *buffer,
*/
strcpy(&buffer[size - 2], "/");
+ if (info && error) {
+ if (error == -ENOENT)
+ *info = "Failed name lookup - deleted entry";
+ else if (error == -EACCES)
+ *info = "Failed name lookup - disconnected path";
+ else if (error == -ENAMETOOLONG)
+ *info = "Failed name lookup - name too long";
+ else
+ *info = "Failed name lookup";
+ }
+
return error;
}
/**
- * aa_get_name - compute the pathname of a file
+ * aa_path_name - compute the pathname of a file
* @path: path the file (NOT NULL)
* @flags: flags controlling path name generation
* @buffer: buffer that aa_get_name() allocated (NOT NULL)
* @name: Returns - the generated path name if !error (NOT NULL)
+ * @info: Returns - information on why the path lookup failed (MAYBE NULL)
*
* @name is a pointer to the beginning of the pathname (which usually differs
* from the beginning of the buffer), or NULL. If there is an error @name
@@ -192,7 +204,8 @@ static int get_name_to_buffer(struct path *path, int flags, char *buffer,
*
* Returns: %0 else error code if could retrieve name
*/
-int aa_get_name(struct path *path, int flags, char **buffer, const char **name)
+int aa_path_name(struct path *path, int flags, char **buffer, const char **name,
+ const char **info)
{
char *buf, *str = NULL;
int size = 256;
@@ -206,7 +219,7 @@ int aa_get_name(struct path *path, int flags, char **buffer, const char **name)
if (!buf)
return -ENOMEM;
- error = get_name_to_buffer(path, flags, buf, size, &str);
+ error = get_name_to_buffer(path, flags, buf, size, &str, info);
if (error != -ENAMETOOLONG)
break;
@@ -214,6 +227,7 @@ int aa_get_name(struct path *path, int flags, char **buffer, const char **name)
size <<= 1;
if (size > aa_g_path_max)
return -ENAMETOOLONG;
+ *info = NULL;
}
*buffer = buf;
*name = str;
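aa_path_name() above keeps its grow-and-retry shape: start with a 256-byte buffer and, whenever get_name_to_buffer() reports -ENAMETOOLONG, double the size until the name fits or aa_g_path_max is exceeded. A minimal userspace sketch of that pattern (the helper and the size cap are stand-ins, not the AppArmor interfaces):

/* sketch: grow-and-retry buffer sizing as used by aa_path_name() */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PATH_CAP 4096   /* stand-in for aa_g_path_max */

static const char *example = "/an/example/path/long/enough/to/force/a/retry";

/* stand-in for get_name_to_buffer(): fail if the name does not fit */
static int build_name(char *buf, size_t size, char **name)
{
        size_t len = strlen(example) + 1;

        if (len > size)
                return -ENAMETOOLONG;
        memcpy(buf, example, len);
        *name = buf;
        return 0;
}

static int get_path_name(char **buffer, char **name)
{
        size_t size = 32;       /* deliberately small to force one retry */
        int error;

        for (;;) {
                char *buf = malloc(size);

                if (!buf)
                        return -ENOMEM;
                error = build_name(buf, size, name);
                if (error != -ENAMETOOLONG) {
                        *buffer = buf;  /* caller owns and frees the buffer */
                        return error;
                }
                free(buf);
                size <<= 1;
                if (size > PATH_CAP)
                        return -ENAMETOOLONG;
        }
}

int main(void)
{
        char *buf, *name;

        if (!get_path_name(&buf, &name)) {
                printf("resolved: %s\n", name);
                free(buf);
        }
        return 0;
}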
diff --git a/security/apparmor/policy.c b/security/apparmor/policy.c
index 4f0eadee78b..705c2879d3a 100644
--- a/security/apparmor/policy.c
+++ b/security/apparmor/policy.c
@@ -87,16 +87,16 @@
#include "include/policy.h"
#include "include/policy_unpack.h"
#include "include/resource.h"
-#include "include/sid.h"
/* root profile namespace */
struct aa_namespace *root_ns;
-const char *profile_mode_names[] = {
+const char *const aa_profile_mode_names[] = {
"enforce",
"complain",
"kill",
+ "unconfined",
};
/**
@@ -142,7 +142,6 @@ static bool policy_init(struct aa_policy *policy, const char *prefix,
policy->name = (char *)hname_tail(policy->hname);
INIT_LIST_HEAD(&policy->list);
INIT_LIST_HEAD(&policy->profiles);
- kref_init(&policy->count);
return 1;
}
@@ -154,13 +153,13 @@ static bool policy_init(struct aa_policy *policy, const char *prefix,
static void policy_destroy(struct aa_policy *policy)
{
/* still contains profiles -- invalid */
- if (!list_empty(&policy->profiles)) {
+ if (on_list_rcu(&policy->profiles)) {
AA_ERROR("%s: internal error, "
"policy '%s' still contains profiles\n",
__func__, policy->name);
BUG();
}
- if (!list_empty(&policy->list)) {
+ if (on_list_rcu(&policy->list)) {
AA_ERROR("%s: internal error, policy '%s' still on list\n",
__func__, policy->name);
BUG();
@@ -175,7 +174,7 @@ static void policy_destroy(struct aa_policy *policy)
* @head: list to search (NOT NULL)
* @name: name to search for (NOT NULL)
*
- * Requires: correct locks for the @head list be held
+ * Requires: rcu_read_lock be held
*
* Returns: unrefcounted policy that match @name or NULL if not found
*/
@@ -183,7 +182,7 @@ static struct aa_policy *__policy_find(struct list_head *head, const char *name)
{
struct aa_policy *policy;
- list_for_each_entry(policy, head, list) {
+ list_for_each_entry_rcu(policy, head, list) {
if (!strcmp(policy->name, name))
return policy;
}
@@ -196,7 +195,7 @@ static struct aa_policy *__policy_find(struct list_head *head, const char *name)
* @str: string to search for (NOT NULL)
* @len: length of match required
*
- * Requires: correct locks for the @head list be held
+ * Requires: rcu_read_lock be held
*
* Returns: unrefcounted policy that match @str or NULL if not found
*
@@ -208,7 +207,7 @@ static struct aa_policy *__policy_strn_find(struct list_head *head,
{
struct aa_policy *policy;
- list_for_each_entry(policy, head, list) {
+ list_for_each_entry_rcu(policy, head, list) {
if (aa_strneq(policy->name, str, len))
return policy;
}
@@ -285,23 +284,21 @@ static struct aa_namespace *alloc_namespace(const char *prefix,
goto fail_ns;
INIT_LIST_HEAD(&ns->sub_ns);
- rwlock_init(&ns->lock);
+ mutex_init(&ns->lock);
/* released by free_namespace */
ns->unconfined = aa_alloc_profile("unconfined");
if (!ns->unconfined)
goto fail_unconfined;
- ns->unconfined->sid = aa_alloc_sid();
- ns->unconfined->flags = PFLAG_UNCONFINED | PFLAG_IX_ON_NAME_ERROR |
- PFLAG_IMMUTABLE;
+ ns->unconfined->flags = PFLAG_IX_ON_NAME_ERROR |
+ PFLAG_IMMUTABLE | PFLAG_NS_COUNT;
+ ns->unconfined->mode = APPARMOR_UNCONFINED;
- /*
- * released by free_namespace, however __remove_namespace breaks
- * the cyclic references (ns->unconfined, and unconfined->ns) and
- * replaces with refs to parent namespace unconfined
- */
- ns->unconfined->ns = aa_get_namespace(ns);
+ /* ns and ns->unconfined share ns->unconfined refcount */
+ ns->unconfined->ns = ns;
+
+ atomic_set(&ns->uniq_null, 0);
return ns;
@@ -327,30 +324,19 @@ static void free_namespace(struct aa_namespace *ns)
policy_destroy(&ns->base);
aa_put_namespace(ns->parent);
- if (ns->unconfined && ns->unconfined->ns == ns)
- ns->unconfined->ns = NULL;
-
- aa_put_profile(ns->unconfined);
+ ns->unconfined->ns = NULL;
+ aa_free_profile(ns->unconfined);
kzfree(ns);
}
/**
- * aa_free_namespace_kref - free aa_namespace by kref (see aa_put_namespace)
- * @kr: kref callback for freeing of a namespace (NOT NULL)
- */
-void aa_free_namespace_kref(struct kref *kref)
-{
- free_namespace(container_of(kref, struct aa_namespace, base.count));
-}
-
-/**
* __aa_find_namespace - find a namespace on a list by @name
* @head: list to search for namespace on (NOT NULL)
* @name: name of namespace to look for (NOT NULL)
*
* Returns: unrefcounted namespace
*
- * Requires: ns lock be held
+ * Requires: rcu_read_lock be held
*/
static struct aa_namespace *__aa_find_namespace(struct list_head *head,
const char *name)
@@ -373,9 +359,9 @@ struct aa_namespace *aa_find_namespace(struct aa_namespace *root,
{
struct aa_namespace *ns = NULL;
- read_lock(&root->lock);
+ rcu_read_lock();
ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name));
- read_unlock(&root->lock);
+ rcu_read_unlock();
return ns;
}
@@ -392,7 +378,7 @@ static struct aa_namespace *aa_prepare_namespace(const char *name)
root = aa_current_profile()->ns;
- write_lock(&root->lock);
+ mutex_lock(&root->lock);
/* if name isn't specified the profile is loaded to the current ns */
if (!name) {
@@ -405,31 +391,23 @@ static struct aa_namespace *aa_prepare_namespace(const char *name)
/* released by caller */
ns = aa_get_namespace(__aa_find_namespace(&root->sub_ns, name));
if (!ns) {
- /* namespace not found */
- struct aa_namespace *new_ns;
- write_unlock(&root->lock);
- new_ns = alloc_namespace(root->base.hname, name);
- if (!new_ns)
- return NULL;
- write_lock(&root->lock);
- /* test for race when new_ns was allocated */
- ns = __aa_find_namespace(&root->sub_ns, name);
- if (!ns) {
- /* add parent ref */
- new_ns->parent = aa_get_namespace(root);
-
- list_add(&new_ns->base.list, &root->sub_ns);
- /* add list ref */
- ns = aa_get_namespace(new_ns);
- } else {
- /* raced so free the new one */
- free_namespace(new_ns);
- /* get reference on namespace */
- aa_get_namespace(ns);
+ ns = alloc_namespace(root->base.hname, name);
+ if (!ns)
+ goto out;
+ if (__aa_fs_namespace_mkdir(ns, ns_subns_dir(root), name)) {
+ AA_ERROR("Failed to create interface for ns %s\n",
+ ns->base.name);
+ free_namespace(ns);
+ ns = NULL;
+ goto out;
}
+ ns->parent = aa_get_namespace(root);
+ list_add_rcu(&ns->base.list, &root->sub_ns);
+ /* add list ref */
+ aa_get_namespace(ns);
}
out:
- write_unlock(&root->lock);
+ mutex_unlock(&root->lock);
/* return ref */
return ns;
@@ -447,7 +425,7 @@ out:
static void __list_add_profile(struct list_head *list,
struct aa_profile *profile)
{
- list_add(&profile->base.list, list);
+ list_add_rcu(&profile->base.list, list);
/* get list reference */
aa_get_profile(profile);
}
@@ -466,50 +444,8 @@ static void __list_add_profile(struct list_head *list,
*/
static void __list_remove_profile(struct aa_profile *profile)
{
- list_del_init(&profile->base.list);
- if (!(profile->flags & PFLAG_NO_LIST_REF))
- /* release list reference */
- aa_put_profile(profile);
-}
-
-/**
- * __replace_profile - replace @old with @new on a list
- * @old: profile to be replaced (NOT NULL)
- * @new: profile to replace @old with (NOT NULL)
- *
- * Will duplicate and refcount elements that @new inherits from @old
- * and will inherit @old children.
- *
- * refcount @new for list, put @old list refcount
- *
- * Requires: namespace list lock be held, or list not be shared
- */
-static void __replace_profile(struct aa_profile *old, struct aa_profile *new)
-{
- struct aa_policy *policy;
- struct aa_profile *child, *tmp;
-
- if (old->parent)
- policy = &old->parent->base;
- else
- policy = &old->ns->base;
-
- /* released when @new is freed */
- new->parent = aa_get_profile(old->parent);
- new->ns = aa_get_namespace(old->ns);
- new->sid = old->sid;
- __list_add_profile(&policy->profiles, new);
- /* inherit children */
- list_for_each_entry_safe(child, tmp, &old->base.profiles, base.list) {
- aa_put_profile(child->parent);
- child->parent = aa_get_profile(new);
- /* list refcount transferred to @new*/
- list_move(&child->base.list, &new->base.profiles);
- }
-
- /* released by free_profile */
- old->replacedby = aa_get_profile(new);
- __list_remove_profile(old);
+ list_del_rcu(&profile->base.list);
+ aa_put_profile(profile);
}
static void __profile_list_release(struct list_head *head);
@@ -525,7 +461,8 @@ static void __remove_profile(struct aa_profile *profile)
/* release any children lists first */
__profile_list_release(&profile->base.profiles);
/* released by free_profile */
- profile->replacedby = aa_get_profile(profile->ns->unconfined);
+ __aa_update_replacedby(profile, profile->ns->unconfined);
+ __aa_fs_profile_rmdir(profile);
__list_remove_profile(profile);
}
@@ -553,14 +490,17 @@ static void destroy_namespace(struct aa_namespace *ns)
if (!ns)
return;
- write_lock(&ns->lock);
+ mutex_lock(&ns->lock);
/* release all profiles in this namespace */
__profile_list_release(&ns->base.profiles);
/* release all sub namespaces */
__ns_list_release(&ns->sub_ns);
- write_unlock(&ns->lock);
+ if (ns->parent)
+ __aa_update_replacedby(ns->unconfined, ns->parent->unconfined);
+ __aa_fs_namespace_rmdir(ns);
+ mutex_unlock(&ns->lock);
}
/**
@@ -571,25 +511,9 @@ static void destroy_namespace(struct aa_namespace *ns)
*/
static void __remove_namespace(struct aa_namespace *ns)
{
- struct aa_profile *unconfined = ns->unconfined;
-
/* remove ns from namespace list */
- list_del_init(&ns->base.list);
-
- /*
- * break the ns, unconfined profile cyclic reference and forward
- * all new unconfined profiles requests to the parent namespace
- * This will result in all confined tasks that have a profile
- * being removed, inheriting the parent->unconfined profile.
- */
- if (ns->parent)
- ns->unconfined = aa_get_profile(ns->parent->unconfined);
-
+ list_del_rcu(&ns->base.list);
destroy_namespace(ns);
-
- /* release original ns->unconfined ref */
- aa_put_profile(unconfined);
- /* release ns->base.list ref, from removal above */
aa_put_namespace(ns);
}
@@ -635,6 +559,84 @@ void __init aa_free_root_ns(void)
aa_put_namespace(ns);
}
+
+static void free_replacedby(struct aa_replacedby *r)
+{
+ if (r) {
+ /* r->profile will not be updated any more as r is dead */
+ aa_put_profile(rcu_dereference_protected(r->profile, true));
+ kzfree(r);
+ }
+}
+
+
+void aa_free_replacedby_kref(struct kref *kref)
+{
+ struct aa_replacedby *r = container_of(kref, struct aa_replacedby,
+ count);
+ free_replacedby(r);
+}
+
+/**
+ * aa_free_profile - free a profile
+ * @profile: the profile to free (MAYBE NULL)
+ *
+ * Free a profile, its hats and null_profile. All references to the profile,
+ * its hats and null_profile must have been put.
+ *
+ * If the profile was referenced from a task context, aa_free_profile() will
+ * be called from an rcu callback routine, so we must not sleep here.
+ */
+void aa_free_profile(struct aa_profile *profile)
+{
+ AA_DEBUG("%s(%p)\n", __func__, profile);
+
+ if (!profile)
+ return;
+
+ /* free children profiles */
+ policy_destroy(&profile->base);
+ aa_put_profile(rcu_access_pointer(profile->parent));
+
+ aa_put_namespace(profile->ns);
+ kzfree(profile->rename);
+
+ aa_free_file_rules(&profile->file);
+ aa_free_cap_rules(&profile->caps);
+ aa_free_rlimit_rules(&profile->rlimits);
+
+ kzfree(profile->dirname);
+ aa_put_dfa(profile->xmatch);
+ aa_put_dfa(profile->policy.dfa);
+ aa_put_replacedby(profile->replacedby);
+
+ kzfree(profile->hash);
+ kzfree(profile);
+}
+
+/**
+ * aa_free_profile_rcu - free aa_profile by rcu (called by aa_free_profile_kref)
+ * @head: rcu_head callback for freeing of a profile (NOT NULL)
+ */
+static void aa_free_profile_rcu(struct rcu_head *head)
+{
+ struct aa_profile *p = container_of(head, struct aa_profile, rcu);
+ if (p->flags & PFLAG_NS_COUNT)
+ free_namespace(p->ns);
+ else
+ aa_free_profile(p);
+}
+
+/**
+ * aa_free_profile_kref - free aa_profile by kref (called by aa_put_profile)
+ * @kref: kref callback for freeing of a profile (NOT NULL)
+ */
+void aa_free_profile_kref(struct kref *kref)
+{
+ struct aa_profile *p = container_of(kref, struct aa_profile, count);
+ call_rcu(&p->rcu, aa_free_profile_rcu);
+}
+
/**
* aa_alloc_profile - allocate, initialize and return a new profile
* @hname: name of the profile (NOT NULL)
@@ -650,13 +652,23 @@ struct aa_profile *aa_alloc_profile(const char *hname)
if (!profile)
return NULL;
- if (!policy_init(&profile->base, NULL, hname)) {
- kzfree(profile);
- return NULL;
- }
+ profile->replacedby = kzalloc(sizeof(struct aa_replacedby), GFP_KERNEL);
+ if (!profile->replacedby)
+ goto fail;
+ kref_init(&profile->replacedby->count);
+
+ if (!policy_init(&profile->base, NULL, hname))
+ goto fail;
+ kref_init(&profile->count);
/* refcount released by caller */
return profile;
+
+fail:
+ kzfree(profile->replacedby);
+ kzfree(profile);
+
+ return NULL;
}
/**
@@ -665,7 +677,7 @@ struct aa_profile *aa_alloc_profile(const char *hname)
* @hat: true if the null- learning profile is a hat
*
* Create a null- complain mode profile used in learning mode. The name of
- * the profile is unique and follows the format of parent//null-sid.
+ * the profile is unique and follows the format of parent//null-<uniq>.
*
* null profiles are added to the profile list but the list does not
* hold a count on them so that they are automatically released when
@@ -677,96 +689,39 @@ struct aa_profile *aa_new_null_profile(struct aa_profile *parent, int hat)
{
struct aa_profile *profile = NULL;
char *name;
- u32 sid = aa_alloc_sid();
+ int uniq = atomic_inc_return(&parent->ns->uniq_null);
/* freed below */
name = kmalloc(strlen(parent->base.hname) + 2 + 7 + 8, GFP_KERNEL);
if (!name)
goto fail;
- sprintf(name, "%s//null-%x", parent->base.hname, sid);
+ sprintf(name, "%s//null-%x", parent->base.hname, uniq);
profile = aa_alloc_profile(name);
kfree(name);
if (!profile)
goto fail;
- profile->sid = sid;
profile->mode = APPARMOR_COMPLAIN;
profile->flags = PFLAG_NULL;
if (hat)
profile->flags |= PFLAG_HAT;
/* released on free_profile */
- profile->parent = aa_get_profile(parent);
+ rcu_assign_pointer(profile->parent, aa_get_profile(parent));
profile->ns = aa_get_namespace(parent->ns);
- write_lock(&profile->ns->lock);
+ mutex_lock(&profile->ns->lock);
__list_add_profile(&parent->base.profiles, profile);
- write_unlock(&profile->ns->lock);
+ mutex_unlock(&profile->ns->lock);
/* refcount released by caller */
return profile;
fail:
- aa_free_sid(sid);
return NULL;
}
-/**
- * free_profile - free a profile
- * @profile: the profile to free (MAYBE NULL)
- *
- * Free a profile, its hats and null_profile. All references to the profile,
- * its hats and null_profile must have been put.
- *
- * If the profile was referenced from a task context, free_profile() will
- * be called from an rcu callback routine, so we must not sleep here.
- */
-static void free_profile(struct aa_profile *profile)
-{
- AA_DEBUG("%s(%p)\n", __func__, profile);
-
- if (!profile)
- return;
-
- if (!list_empty(&profile->base.list)) {
- AA_ERROR("%s: internal error, "
- "profile '%s' still on ns list\n",
- __func__, profile->base.name);
- BUG();
- }
-
- /* free children profiles */
- policy_destroy(&profile->base);
- aa_put_profile(profile->parent);
-
- aa_put_namespace(profile->ns);
- kzfree(profile->rename);
-
- aa_free_file_rules(&profile->file);
- aa_free_cap_rules(&profile->caps);
- aa_free_rlimit_rules(&profile->rlimits);
-
- aa_free_sid(profile->sid);
- aa_put_dfa(profile->xmatch);
-
- aa_put_profile(profile->replacedby);
-
- kzfree(profile);
-}
-
-/**
- * aa_free_profile_kref - free aa_profile by kref (called by aa_put_profile)
- * @kr: kref callback for freeing of a profile (NOT NULL)
- */
-void aa_free_profile_kref(struct kref *kref)
-{
- struct aa_profile *p = container_of(kref, struct aa_profile,
- base.count);
-
- free_profile(p);
-}
-
/* TODO: profile accounting - setup in remove */
/**
@@ -774,7 +729,7 @@ void aa_free_profile_kref(struct kref *kref)
* @head: list to search (NOT NULL)
* @name: name of profile (NOT NULL)
*
- * Requires: ns lock protecting list be held
+ * Requires: rcu_read_lock be held
*
* Returns: unrefcounted profile ptr, or NULL if not found
*/
@@ -789,7 +744,7 @@ static struct aa_profile *__find_child(struct list_head *head, const char *name)
* @name: name of profile (NOT NULL)
* @len: length of @name substring to match
*
- * Requires: ns lock protecting list be held
+ * Requires: rcu_read_lock be held
*
* Returns: unrefcounted profile ptr, or NULL if not found
*/
@@ -810,9 +765,9 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
{
struct aa_profile *profile;
- read_lock(&parent->ns->lock);
+ rcu_read_lock();
profile = aa_get_profile(__find_child(&parent->base.profiles, name));
- read_unlock(&parent->ns->lock);
+ rcu_read_unlock();
/* refcount released by caller */
return profile;
@@ -827,7 +782,7 @@ struct aa_profile *aa_find_child(struct aa_profile *parent, const char *name)
* that matches hname does not need to exist, in general this
* is used to load a new profile.
*
- * Requires: ns->lock be held
+ * Requires: rcu_read_lock be held
*
* Returns: unrefcounted policy or NULL if not found
*/
@@ -859,7 +814,7 @@ static struct aa_policy *__lookup_parent(struct aa_namespace *ns,
* @base: base list to start looking up profile name from (NOT NULL)
* @hname: hierarchical profile name (NOT NULL)
*
- * Requires: ns->lock be held
+ * Requires: rcu_read_lock be held
*
* Returns: unrefcounted profile pointer or NULL if not found
*
@@ -898,9 +853,15 @@ struct aa_profile *aa_lookup_profile(struct aa_namespace *ns, const char *hname)
{
struct aa_profile *profile;
- read_lock(&ns->lock);
- profile = aa_get_profile(__lookup_profile(&ns->base, hname));
- read_unlock(&ns->lock);
+ rcu_read_lock();
+ do {
+ profile = __lookup_profile(&ns->base, hname);
+ } while (profile && !aa_get_profile_not0(profile));
+ rcu_read_unlock();
+
+ /* the unconfined profile is not in the regular profile list */
+ if (!profile && strcmp(hname, "unconfined") == 0)
+ profile = aa_get_newest_profile(ns->unconfined);
/* refcount released by caller */
return profile;
@@ -930,26 +891,6 @@ static int replacement_allowed(struct aa_profile *profile, int noreplace,
}
/**
- * __add_new_profile - simple wrapper around __list_add_profile
- * @ns: namespace that profile is being added to (NOT NULL)
- * @policy: the policy container to add the profile to (NOT NULL)
- * @profile: profile to add (NOT NULL)
- *
- * add a profile to a list and do other required basic allocations
- */
-static void __add_new_profile(struct aa_namespace *ns, struct aa_policy *policy,
- struct aa_profile *profile)
-{
- if (policy != &ns->base)
- /* released on profile replacement or free_profile */
- profile->parent = aa_get_profile((struct aa_profile *) policy);
- __list_add_profile(&policy->profiles, profile);
- /* released on free_profile */
- profile->sid = aa_alloc_sid();
- profile->ns = aa_get_namespace(ns);
-}
-
-/**
* aa_audit_policy - Do auditing of policy changes
* @op: policy operation being performed
* @gfp: memory allocation flags
@@ -963,11 +904,13 @@ static int audit_policy(int op, gfp_t gfp, const char *name, const char *info,
int error)
{
struct common_audit_data sa;
- COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = op;
- sa.aad.name = name;
- sa.aad.info = info;
- sa.aad.error = error;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.op = op;
+ aad.name = name;
+ aad.info = info;
+ aad.error = error;
return aa_audit(AUDIT_APPARMOR_STATUS, __aa_current_profile(), gfp,
&sa, NULL);
@@ -995,6 +938,121 @@ bool aa_may_manage_policy(int op)
return 1;
}
+static struct aa_profile *__list_lookup_parent(struct list_head *lh,
+ struct aa_profile *profile)
+{
+ const char *base = hname_tail(profile->base.hname);
+ long len = base - profile->base.hname;
+ struct aa_load_ent *ent;
+
+ /* parent won't have trailing // so remove from len */
+ if (len <= 2)
+ return NULL;
+ len -= 2;
+
+ list_for_each_entry(ent, lh, list) {
+ if (ent->new == profile)
+ continue;
+ if (strncmp(ent->new->base.hname, profile->base.hname, len) ==
+ 0 && ent->new->base.hname[len] == 0)
+ return ent->new;
+ }
+
+ return NULL;
+}
+
+/**
+ * __replace_profile - replace @old with @new on a list
+ * @old: profile to be replaced (NOT NULL)
+ * @new: profile to replace @old with (NOT NULL)
+ * @share_replacedby: transfer @old->replacedby to @new
+ *
+ * Will duplicate and refcount elements that @new inherits from @old
+ * and will inherit @old children.
+ *
+ * refcount @new for list, put @old list refcount
+ *
+ * Requires: namespace list lock be held, or list not be shared
+ */
+static void __replace_profile(struct aa_profile *old, struct aa_profile *new,
+ bool share_replacedby)
+{
+ struct aa_profile *child, *tmp;
+
+ if (!list_empty(&old->base.profiles)) {
+ LIST_HEAD(lh);
+ list_splice_init_rcu(&old->base.profiles, &lh, synchronize_rcu);
+
+ list_for_each_entry_safe(child, tmp, &lh, base.list) {
+ struct aa_profile *p;
+
+ list_del_init(&child->base.list);
+ p = __find_child(&new->base.profiles, child->base.name);
+ if (p) {
+ /* @p replaces @child */
+ __replace_profile(child, p, share_replacedby);
+ continue;
+ }
+
+ /* inherit @child and its children */
+ /* TODO: update hname of inherited children */
+ /* list refcount transferred to @new */
+ p = aa_deref_parent(child);
+ rcu_assign_pointer(child->parent, aa_get_profile(new));
+ list_add_rcu(&child->base.list, &new->base.profiles);
+ aa_put_profile(p);
+ }
+ }
+
+ if (!rcu_access_pointer(new->parent)) {
+ struct aa_profile *parent = aa_deref_parent(old);
+ rcu_assign_pointer(new->parent, aa_get_profile(parent));
+ }
+ __aa_update_replacedby(old, new);
+ if (share_replacedby) {
+ aa_put_replacedby(new->replacedby);
+ new->replacedby = aa_get_replacedby(old->replacedby);
+ } else if (!rcu_access_pointer(new->replacedby->profile))
+ /* aafs interface uses replacedby */
+ rcu_assign_pointer(new->replacedby->profile,
+ aa_get_profile(new));
+ __aa_fs_profile_migrate_dents(old, new);
+
+ if (list_empty(&new->base.list)) {
+ /* new is not on a list already */
+ list_replace_rcu(&old->base.list, &new->base.list);
+ aa_get_profile(new);
+ aa_put_profile(old);
+ } else
+ __list_remove_profile(old);
+}
+
+/**
+ * __lookup_replace - lookup replacement information for a profile
+ * @ns: namespace the lookup occurs in
+ * @hname: name of profile to lookup
+ * @noreplace: true if not replacing an existing profile
+ * @p: Returns - refcounted profile to be replaced, if one exists
+ * @info: Returns - info string on why lookup failed
+ *
+ * Returns: %0 on success (with a refcounted profile in @p if found) else error
+ */
+static int __lookup_replace(struct aa_namespace *ns, const char *hname,
+ bool noreplace, struct aa_profile **p,
+ const char **info)
+{
+ *p = aa_get_profile(__lookup_profile(&ns->base, hname));
+ if (*p) {
+ int error = replacement_allowed(*p, noreplace, info);
+ if (error) {
+ *info = "profile can not be replaced";
+ return error;
+ }
+ }
+
+ return 0;
+}
+
/**
* aa_replace_profiles - replace profile(s) on the profile list
* @udata: serialized data stream (NOT NULL)
@@ -1009,21 +1067,17 @@ bool aa_may_manage_policy(int op)
*/
ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
{
- struct aa_policy *policy;
- struct aa_profile *old_profile = NULL, *new_profile = NULL;
- struct aa_profile *rename_profile = NULL;
- struct aa_namespace *ns = NULL;
const char *ns_name, *name = NULL, *info = NULL;
+ struct aa_namespace *ns = NULL;
+ struct aa_load_ent *ent, *tmp;
int op = OP_PROF_REPL;
ssize_t error;
+ LIST_HEAD(lh);
/* released below */
- new_profile = aa_unpack(udata, size, &ns_name);
- if (IS_ERR(new_profile)) {
- error = PTR_ERR(new_profile);
- new_profile = NULL;
- goto fail;
- }
+ error = aa_unpack(udata, size, &lh, &ns_name);
+ if (error)
+ goto out;
/* released below */
ns = aa_prepare_namespace(ns_name);
@@ -1034,77 +1088,140 @@ ssize_t aa_replace_profiles(void *udata, size_t size, bool noreplace)
goto fail;
}
- name = new_profile->base.hname;
+ mutex_lock(&ns->lock);
+ /* setup parent and ns info */
+ list_for_each_entry(ent, &lh, list) {
+ struct aa_policy *policy;
+
+ name = ent->new->base.hname;
+ error = __lookup_replace(ns, ent->new->base.hname, noreplace,
+ &ent->old, &info);
+ if (error)
+ goto fail_lock;
+
+ if (ent->new->rename) {
+ error = __lookup_replace(ns, ent->new->rename,
+ noreplace, &ent->rename,
+ &info);
+ if (error)
+ goto fail_lock;
+ }
- write_lock(&ns->lock);
- /* no ref on policy only use inside lock */
- policy = __lookup_parent(ns, new_profile->base.hname);
+ /* released when @new is freed */
+ ent->new->ns = aa_get_namespace(ns);
- if (!policy) {
- info = "parent does not exist";
- error = -ENOENT;
- goto audit;
+ if (ent->old || ent->rename)
+ continue;
+
+ /* no ref on policy only use inside lock */
+ policy = __lookup_parent(ns, ent->new->base.hname);
+ if (!policy) {
+ struct aa_profile *p;
+ p = __list_lookup_parent(&lh, ent->new);
+ if (!p) {
+ error = -ENOENT;
+ info = "parent does not exist";
+ name = ent->new->base.hname;
+ goto fail_lock;
+ }
+ rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+ } else if (policy != &ns->base) {
+ /* released on profile replacement or free_profile */
+ struct aa_profile *p = (struct aa_profile *) policy;
+ rcu_assign_pointer(ent->new->parent, aa_get_profile(p));
+ }
}
- old_profile = __find_child(&policy->profiles, new_profile->base.name);
- /* released below */
- aa_get_profile(old_profile);
+ /* create new fs entries for introspection if needed */
+ list_for_each_entry(ent, &lh, list) {
+ if (ent->old) {
+ /* inherit old interface files */
- if (new_profile->rename) {
- rename_profile = __lookup_profile(&ns->base,
- new_profile->rename);
- /* released below */
- aa_get_profile(rename_profile);
+ /* if (ent->rename)
+ TODO: support rename */
+ /* } else if (ent->rename) {
+ TODO: support rename */
+ } else {
+ struct dentry *parent;
+ if (rcu_access_pointer(ent->new->parent)) {
+ struct aa_profile *p;
+ p = aa_deref_parent(ent->new);
+ parent = prof_child_dir(p);
+ } else
+ parent = ns_subprofs_dir(ent->new->ns);
+ error = __aa_fs_profile_mkdir(ent->new, parent);
+ }
- if (!rename_profile) {
- info = "profile to rename does not exist";
- name = new_profile->rename;
- error = -ENOENT;
- goto audit;
+ if (error) {
+ info = "failed to create ";
+ goto fail_lock;
}
}
- error = replacement_allowed(old_profile, noreplace, &info);
- if (error)
- goto audit;
-
- error = replacement_allowed(rename_profile, noreplace, &info);
- if (error)
- goto audit;
-
-audit:
- if (!old_profile && !rename_profile)
- op = OP_PROF_LOAD;
-
- error = audit_policy(op, GFP_ATOMIC, name, info, error);
-
- if (!error) {
- if (rename_profile)
- __replace_profile(rename_profile, new_profile);
- if (old_profile) {
- /* when there are both rename and old profiles
- * inherit old profiles sid
- */
- if (rename_profile)
- aa_free_sid(new_profile->sid);
- __replace_profile(old_profile, new_profile);
+ /* Done with checks that may fail - do actual replacement */
+ list_for_each_entry_safe(ent, tmp, &lh, list) {
+ list_del_init(&ent->list);
+ op = (!ent->old && !ent->rename) ? OP_PROF_LOAD : OP_PROF_REPL;
+
+ audit_policy(op, GFP_ATOMIC, ent->new->base.name, NULL, error);
+
+ if (ent->old) {
+ __replace_profile(ent->old, ent->new, 1);
+ if (ent->rename) {
+ /* aafs interface uses replacedby */
+ struct aa_replacedby *r = ent->new->replacedby;
+ rcu_assign_pointer(r->profile,
+ aa_get_profile(ent->new));
+ __replace_profile(ent->rename, ent->new, 0);
+ }
+ } else if (ent->rename) {
+ /* aafs interface uses replacedby */
+ rcu_assign_pointer(ent->new->replacedby->profile,
+ aa_get_profile(ent->new));
+ __replace_profile(ent->rename, ent->new, 0);
+ } else if (ent->new->parent) {
+ struct aa_profile *parent, *newest;
+ parent = aa_deref_parent(ent->new);
+ newest = aa_get_newest_profile(parent);
+
+ /* parent replaced in this atomic set? */
+ if (newest != parent) {
+ aa_get_profile(newest);
+ aa_put_profile(parent);
+ rcu_assign_pointer(ent->new->parent, newest);
+ } else
+ aa_put_profile(newest);
+ /* aafs interface uses replacedby */
+ rcu_assign_pointer(ent->new->replacedby->profile,
+ aa_get_profile(ent->new));
+ __list_add_profile(&parent->base.profiles, ent->new);
+ } else {
+ /* aafs interface uses replacedby */
+ rcu_assign_pointer(ent->new->replacedby->profile,
+ aa_get_profile(ent->new));
+ __list_add_profile(&ns->base.profiles, ent->new);
}
- if (!(old_profile || rename_profile))
- __add_new_profile(ns, policy, new_profile);
+ aa_load_ent_free(ent);
}
- write_unlock(&ns->lock);
+ mutex_unlock(&ns->lock);
out:
aa_put_namespace(ns);
- aa_put_profile(rename_profile);
- aa_put_profile(old_profile);
- aa_put_profile(new_profile);
+
if (error)
return error;
return size;
+fail_lock:
+ mutex_unlock(&ns->lock);
fail:
error = audit_policy(op, GFP_KERNEL, name, info, error);
+
+ list_for_each_entry_safe(ent, tmp, &lh, list) {
+ list_del_init(&ent->list);
+ aa_load_ent_free(ent);
+ }
+
goto out;
}
@@ -1138,14 +1255,12 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
if (fqname[0] == ':') {
char *ns_name;
name = aa_split_fqname(fqname, &ns_name);
- if (ns_name) {
- /* released below */
- ns = aa_find_namespace(root, ns_name);
- if (!ns) {
- info = "namespace does not exist";
- error = -ENOENT;
- goto fail;
- }
+ /* released below */
+ ns = aa_find_namespace(root, ns_name);
+ if (!ns) {
+ info = "namespace does not exist";
+ error = -ENOENT;
+ goto fail;
}
} else
/* released below */
@@ -1153,12 +1268,12 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
if (!name) {
/* remove namespace - can only happen if fqname[0] == ':' */
- write_lock(&ns->parent->lock);
+ mutex_lock(&ns->parent->lock);
__remove_namespace(ns);
- write_unlock(&ns->parent->lock);
+ mutex_unlock(&ns->parent->lock);
} else {
/* remove profile */
- write_lock(&ns->lock);
+ mutex_lock(&ns->lock);
profile = aa_get_profile(__lookup_profile(&ns->base, name));
if (!profile) {
error = -ENOENT;
@@ -1167,7 +1282,7 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
}
name = profile->base.hname;
__remove_profile(profile);
- write_unlock(&ns->lock);
+ mutex_unlock(&ns->lock);
}
/* don't fail removal if audit fails */
@@ -1177,7 +1292,7 @@ ssize_t aa_remove_profiles(char *fqname, size_t size)
return size;
fail_ns_lock:
- write_unlock(&ns->lock);
+ mutex_unlock(&ns->lock);
aa_put_namespace(ns);
fail:
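Several lookups above (aa_find_namespace(), aa_find_child(), aa_lookup_profile()) now run under rcu_read_lock() instead of the namespace rwlock, so aa_lookup_profile() retries until aa_get_profile_not0() succeeds: an object found on an RCU-protected list may already have dropped its last reference. A small sketch of that conditional-get idiom using C11 atomics (illustrative names, not the kernel kref API):

/* sketch: "take a reference only if the object is still live" lookups */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

struct obj {
        atomic_int refcount;    /* stand-in for the profile's kref */
};

/* succeed only if the count has not already reached zero */
static bool get_not0(struct obj *o)
{
        int old = atomic_load(&o->refcount);

        while (old > 0) {
                if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
                        return true;
        }
        return false;   /* object is being torn down; treat as not found */
}

/* retry while the object found is in its dying window */
struct obj *lookup_get(struct obj *(*find)(const char *), const char *name)
{
        struct obj *o;

        do {
                o = find(name); /* e.g. an RCU-protected list walk */
        } while (o && !get_not0(o));

        return o;       /* NULL, or an object with a reference held */
}

The same reasoning explains the special case at the end of aa_lookup_profile(): the unconfined profile is not on the regular profile list, so "unconfined" is resolved through aa_get_newest_profile() instead.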
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c
index 741dd13e089..a689f10930b 100644
--- a/security/apparmor/policy_unpack.c
+++ b/security/apparmor/policy_unpack.c
@@ -24,10 +24,10 @@
#include "include/apparmor.h"
#include "include/audit.h"
#include "include/context.h"
+#include "include/crypto.h"
#include "include/match.h"
#include "include/policy.h"
#include "include/policy_unpack.h"
-#include "include/sid.h"
/*
* The AppArmor interface treats data as a type byte followed by the
@@ -70,13 +70,13 @@ struct aa_ext {
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
- if (sa->aad.iface.target) {
- struct aa_profile *name = sa->aad.iface.target;
+ if (sa->aad->iface.target) {
+ struct aa_profile *name = sa->aad->iface.target;
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, name->base.hname);
}
- if (sa->aad.iface.pos)
- audit_log_format(ab, " offset=%ld", sa->aad.iface.pos);
+ if (sa->aad->iface.pos)
+ audit_log_format(ab, " offset=%ld", sa->aad->iface.pos);
}
/**
@@ -84,7 +84,7 @@ static void audit_cb(struct audit_buffer *ab, void *va)
* @new: profile if it has been allocated (MAYBE NULL)
* @name: name of the profile being manipulated (MAYBE NULL)
* @info: any extra info about the failure (MAYBE NULL)
- * @e: buffer position info (NOT NULL)
+ * @e: buffer position info (MAYBE NULL)
* @error: error code
*
* Returns: %0 or error
@@ -94,12 +94,15 @@ static int audit_iface(struct aa_profile *new, const char *name,
{
struct aa_profile *profile = __aa_current_profile();
struct common_audit_data sa;
- COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.iface.pos = e->pos - e->start;
- sa.aad.iface.target = new;
- sa.aad.name = name;
- sa.aad.info = info;
- sa.aad.error = error;
+ struct apparmor_audit_data aad = {0,};
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ if (e)
+ aad.iface.pos = e->pos - e->start;
+ aad.iface.target = new;
+ aad.name = name;
+ aad.info = info;
+ aad.error = error;
return aa_audit(AUDIT_APPARMOR_STATUS, profile, GFP_KERNEL, &sa,
audit_cb);
@@ -287,6 +290,9 @@ static int unpack_strdup(struct aa_ext *e, char **string, const char *name)
return res;
}
+#define DFA_VALID_PERM_MASK 0xffffffff
+#define DFA_VALID_PERM2_MASK 0xffffffff
+
/**
* verify_accept - verify the accept tables of a dfa
* @dfa: dfa to verify accept tables of (NOT NULL)
@@ -328,8 +334,10 @@ static struct aa_dfa *unpack_dfa(struct aa_ext *e)
/*
* The dfa is aligned with in the blob to 8 bytes
* from the beginning of the stream.
+ * alignment adjust needed by dfa unpack
*/
- size_t sz = blob - (char *)e->start;
+ size_t sz = blob - (char *) e->start -
+ ((e->pos - e->start) & 7);
size_t pad = ALIGN(sz, 8) - sz;
int flags = TO_ACCEPT1_FLAG(YYTD_DATA32) |
TO_ACCEPT2_FLAG(YYTD_DATA32);
@@ -468,7 +476,7 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
{
struct aa_profile *profile = NULL;
const char *name = NULL;
- int error = -EPROTO;
+ int i, error = -EPROTO;
kernel_cap_t tmpcap;
u32 tmp;
@@ -485,6 +493,9 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
/* profile renaming is optional */
(void) unpack_str(e, &profile->rename, "rename");
+ /* attachment string is optional */
+ (void) unpack_str(e, &profile->attach, "attach");
+
/* xmatch is optional and may be NULL */
profile->xmatch = unpack_dfa(e);
if (IS_ERR(profile->xmatch)) {
@@ -504,12 +515,16 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
goto fail;
if (!unpack_u32(e, &tmp, NULL))
goto fail;
- if (tmp)
+ if (tmp & PACKED_FLAG_HAT)
profile->flags |= PFLAG_HAT;
if (!unpack_u32(e, &tmp, NULL))
goto fail;
- if (tmp)
+ if (tmp == PACKED_MODE_COMPLAIN)
profile->mode = APPARMOR_COMPLAIN;
+ else if (tmp == PACKED_MODE_KILL)
+ profile->mode = APPARMOR_KILL;
+ else if (tmp == PACKED_MODE_UNCONFINED)
+ profile->mode = APPARMOR_UNCONFINED;
if (!unpack_u32(e, &tmp, NULL))
goto fail;
if (tmp)
@@ -554,11 +569,35 @@ static struct aa_profile *unpack_profile(struct aa_ext *e)
goto fail;
if (!unpack_u32(e, &(profile->caps.extended.cap[1]), NULL))
goto fail;
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
}
if (!unpack_rlimits(e, profile))
goto fail;
+ if (unpack_nameX(e, AA_STRUCT, "policydb")) {
+ /* generic policy dfa - optional and may be NULL */
+ profile->policy.dfa = unpack_dfa(e);
+ if (IS_ERR(profile->policy.dfa)) {
+ error = PTR_ERR(profile->policy.dfa);
+ profile->policy.dfa = NULL;
+ goto fail;
+ }
+ if (!unpack_u32(e, &profile->policy.start[0], "start"))
+ /* default start state */
+ profile->policy.start[0] = DFA_START;
+ /* setup class index */
+ for (i = AA_CLASS_FILE; i <= AA_CLASS_LAST; i++) {
+ profile->policy.start[i] =
+ aa_dfa_next(profile->policy.dfa,
+ profile->policy.start[0],
+ i);
+ }
+ if (!unpack_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+ }
+
/* get file rules */
profile->file.dfa = unpack_dfa(e);
if (IS_ERR(profile->file.dfa)) {
@@ -585,7 +624,7 @@ fail:
else if (!name)
name = "unknown";
audit_iface(profile, name, "failed to unpack profile", e, error);
- aa_put_profile(profile);
+ aa_free_profile(profile);
return ERR_PTR(error);
}
@@ -593,29 +632,41 @@ fail:
/**
* verify_header - unpack serialized stream header
* @e: serialized data read head (NOT NULL)
+ * @required: whether the header is required or optional
* @ns: Returns - namespace if one is specified else NULL (NOT NULL)
*
* Returns: error or 0 if header is good
*/
-static int verify_header(struct aa_ext *e, const char **ns)
+static int verify_header(struct aa_ext *e, int required, const char **ns)
{
int error = -EPROTONOSUPPORT;
+ const char *name = NULL;
+ *ns = NULL;
+
/* get the interface version */
if (!unpack_u32(e, &e->version, "version")) {
- audit_iface(NULL, NULL, "invalid profile format", e, error);
- return error;
- }
+ if (required) {
+ audit_iface(NULL, NULL, "invalid profile format", e,
+ error);
+ return error;
+ }
- /* check that the interface version is currently supported */
- if (e->version != 5) {
- audit_iface(NULL, NULL, "unsupported interface version", e,
- error);
- return error;
+ /* check that the interface version is currently supported */
+ if (e->version != 5) {
+ audit_iface(NULL, NULL, "unsupported interface version",
+ e, error);
+ return error;
+ }
}
+
/* read the namespace if present */
- if (!unpack_str(e, ns, "namespace"))
- *ns = NULL;
+ if (unpack_str(e, &name, "namespace")) {
+ if (*ns && strcmp(*ns, name))
+ audit_iface(NULL, NULL, "invalid ns change", e, error);
+ else if (!*ns)
+ *ns = name;
+ }
return 0;
}
@@ -664,18 +715,40 @@ static int verify_profile(struct aa_profile *profile)
return 0;
}
+void aa_load_ent_free(struct aa_load_ent *ent)
+{
+ if (ent) {
+ aa_put_profile(ent->rename);
+ aa_put_profile(ent->old);
+ aa_put_profile(ent->new);
+ kzfree(ent);
+ }
+}
+
+struct aa_load_ent *aa_load_ent_alloc(void)
+{
+ struct aa_load_ent *ent = kzalloc(sizeof(*ent), GFP_KERNEL);
+ if (ent)
+ INIT_LIST_HEAD(&ent->list);
+ return ent;
+}
+
/**
- * aa_unpack - unpack packed binary profile data loaded from user space
+ * aa_unpack - unpack packed binary profile(s) data loaded from user space
* @udata: user data copied to kmem (NOT NULL)
* @size: the size of the user data
+ * @lh: list to place unpacked profiles in an aa_repl_ws
* @ns: Returns namespace profile is in if specified else NULL (NOT NULL)
*
- * Unpack user data and return refcounted allocated profile or ERR_PTR
+ * Unpack user data and place the refcounted, allocated profile(s) on @lh
+ * in order of discovery, with the list chain stored in base.list.
+ *
- * Returns: profile else error pointer if fails to unpack
+ * Returns: %0 with profile(s) on @lh, else error code if unpacking fails
*/
-struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns)
+int aa_unpack(void *udata, size_t size, struct list_head *lh, const char **ns)
{
+ struct aa_load_ent *tmp, *ent;
struct aa_profile *profile = NULL;
int error;
struct aa_ext e = {
@@ -684,20 +757,49 @@ struct aa_profile *aa_unpack(void *udata, size_t size, const char **ns)
.pos = udata,
};
- error = verify_header(&e, ns);
- if (error)
- return ERR_PTR(error);
+ *ns = NULL;
+ while (e.pos < e.end) {
+ void *start;
+ error = verify_header(&e, e.pos == e.start, ns);
+ if (error)
+ goto fail;
+
+ start = e.pos;
+ profile = unpack_profile(&e);
+ if (IS_ERR(profile)) {
+ error = PTR_ERR(profile);
+ goto fail;
+ }
+
+ error = verify_profile(profile);
+ if (error)
+ goto fail_profile;
- profile = unpack_profile(&e);
- if (IS_ERR(profile))
- return profile;
+ error = aa_calc_profile_hash(profile, e.version, start,
+ e.pos - start);
+ if (error)
+ goto fail_profile;
- error = verify_profile(profile);
- if (error) {
- aa_put_profile(profile);
- profile = ERR_PTR(error);
+ ent = aa_load_ent_alloc();
+ if (!ent) {
+ error = -ENOMEM;
+ goto fail_profile;
+ }
+
+ ent->new = profile;
+ list_add_tail(&ent->list, lh);
}
- /* return refcount */
- return profile;
+ return 0;
+
+fail_profile:
+ aa_put_profile(profile);
+
+fail:
+ list_for_each_entry_safe(ent, tmp, lh, list) {
+ list_del_init(&ent->list);
+ aa_load_ent_free(ent);
+ }
+
+ return error;
}
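aa_unpack() above now loops over the whole blob: each iteration reads a header (required only for the first profile), unpacks one profile, verifies it, computes its hash, and queues it on @lh in discovery order; any failure tears down everything unpacked so far. The sketch below reproduces that control flow over an invented length-prefixed record format (it is not the AppArmor wire format):

/* sketch: unpack a list of records from one blob, unwinding on failure */
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

struct ent {
        struct ent *next;
        size_t len;
        unsigned char data[];
};

int unpack_all(const unsigned char *blob, size_t size, struct ent **out)
{
        const unsigned char *pos = blob, *end = blob + size;
        struct ent *head = NULL, **tail = &head;
        int error = 0;

        while (pos < end) {
                uint32_t len;
                struct ent *e;

                if ((size_t)(end - pos) < sizeof(len)) {
                        error = -EPROTO;        /* truncated header */
                        goto fail;
                }
                memcpy(&len, pos, sizeof(len));
                pos += sizeof(len);

                if (len > (size_t)(end - pos)) {
                        error = -EPROTO;        /* record overruns the blob */
                        goto fail;
                }

                e = malloc(sizeof(*e) + len);
                if (!e) {
                        error = -ENOMEM;
                        goto fail;
                }
                e->next = NULL;
                e->len = len;
                memcpy(e->data, pos, len);
                pos += len;

                *tail = e;      /* keep discovery order, as with @lh */
                tail = &e->next;
        }

        *out = head;
        return 0;

fail:
        while (head) {          /* mirror the fail: cleanup in aa_unpack() */
                struct ent *e = head;

                head = e->next;
                free(e);
        }
        return error;
}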
diff --git a/security/apparmor/procattr.c b/security/apparmor/procattr.c
index 1b41c542d37..b125acc9aa2 100644
--- a/security/apparmor/procattr.c
+++ b/security/apparmor/procattr.c
@@ -37,7 +37,7 @@ int aa_getprocattr(struct aa_profile *profile, char **string)
{
char *str;
int len = 0, mode_len = 0, ns_len = 0, name_len;
- const char *mode_str = profile_mode_names[profile->mode];
+ const char *mode_str = aa_profile_mode_names[profile->mode];
const char *ns_name = NULL;
struct aa_namespace *ns = profile->ns;
struct aa_namespace *current_ns = __aa_current_profile()->ns;
@@ -163,9 +163,3 @@ int aa_setprocattr_changeprofile(char *fqname, bool onexec, int test)
name = aa_split_fqname(fqname, &ns_name);
return aa_change_profile(ns_name, name, onexec, test);
}
-
-int aa_setprocattr_permipc(char *fqname)
-{
- /* TODO: add ipc permission querying */
- return -ENOTSUPP;
-}
diff --git a/security/apparmor/resource.c b/security/apparmor/resource.c
index a4136c10b1c..748bf0ca6c9 100644
--- a/security/apparmor/resource.c
+++ b/security/apparmor/resource.c
@@ -15,6 +15,7 @@
#include <linux/audit.h>
#include "include/audit.h"
+#include "include/context.h"
#include "include/resource.h"
#include "include/policy.h"
@@ -23,13 +24,18 @@
*/
#include "rlim_names.h"
+struct aa_fs_entry aa_fs_entry_rlimit[] = {
+ AA_FS_FILE_STRING("mask", AA_FS_RLIMIT_MASK),
+ { }
+};
+
/* audit callback for resource specific fields */
static void audit_cb(struct audit_buffer *ab, void *va)
{
struct common_audit_data *sa = va;
audit_log_format(ab, " rlimit=%s value=%lu",
- rlim_names[sa->aad.rlim.rlim], sa->aad.rlim.max);
+ rlim_names[sa->aad->rlim.rlim], sa->aad->rlim.max);
}
/**
@@ -45,12 +51,14 @@ static int audit_resource(struct aa_profile *profile, unsigned int resource,
unsigned long value, int error)
{
struct common_audit_data sa;
+ struct apparmor_audit_data aad = {0,};
- COMMON_AUDIT_DATA_INIT(&sa, NONE);
- sa.aad.op = OP_SETRLIMIT,
- sa.aad.rlim.rlim = resource;
- sa.aad.rlim.max = value;
- sa.aad.error = error;
+ sa.type = LSM_AUDIT_DATA_NONE;
+ sa.aad = &aad;
+ aad.op = OP_SETRLIMIT,
+ aad.rlim.rlim = resource;
+ aad.rlim.max = value;
+ aad.error = error;
return aa_audit(AUDIT_APPARMOR_AUTO, profile, GFP_KERNEL, &sa,
audit_cb);
}
@@ -83,17 +91,25 @@ int aa_map_resource(int resource)
int aa_task_setrlimit(struct aa_profile *profile, struct task_struct *task,
unsigned int resource, struct rlimit *new_rlim)
{
+ struct aa_profile *task_profile;
int error = 0;
+ rcu_read_lock();
+ task_profile = aa_get_profile(aa_cred_profile(__task_cred(task)));
+ rcu_read_unlock();
+
/* TODO: extend resource control to handle other (non current)
- * processes. AppArmor rules currently have the implicit assumption
- * that the task is setting the resource of the current process
+ * profiles. AppArmor rules currently have the implicit assumption
+ * that the task is setting the resource of a task confined with
+ * the same profile.
*/
- if ((task != current->group_leader) ||
+ if (profile != task_profile ||
(profile->rlimits.mask & (1 << resource) &&
new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max))
error = -EACCES;
+ aa_put_profile(task_profile);
+
return audit_resource(profile, resource, new_rlim->rlim_max, error);
}
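The reworked aa_task_setrlimit() above first checks that the target task is confined by the same profile as the caller, and only then applies the per-resource cap selected by the profile's rlimit mask. Reduced to that decision (simplified types, not the kernel structures):

/* sketch: the setrlimit policy decision, reduced to its two checks */
#include <errno.h>
#include <stdbool.h>

#define RLIM_COUNT 16

struct rlimit_policy {
        unsigned int mask;              /* bit set => resource is mediated */
        unsigned long max[RLIM_COUNT];  /* allowed hard limits per resource */
};

int check_setrlimit(const struct rlimit_policy *p, bool same_profile,
                    unsigned int resource, unsigned long new_max)
{
        if (!same_profile)
                return -EACCES; /* may only act on same-profile tasks */

        if ((p->mask & (1u << resource)) && new_max > p->max[resource])
                return -EACCES; /* mediated resource above the profile cap */

        return 0;
}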
diff --git a/security/capability.c b/security/capability.c
index 2f680eb02b5..e76373de312 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -74,8 +74,8 @@ static int cap_sb_statfs(struct dentry *dentry)
return 0;
}
-static int cap_sb_mount(char *dev_name, struct path *path, char *type,
- unsigned long flags, void *data)
+static int cap_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
return 0;
}
@@ -91,16 +91,20 @@ static int cap_sb_pivotroot(struct path *old_path, struct path *new_path)
}
static int cap_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts)
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
+
{
if (unlikely(opts->num_mnt_opts))
return -EOPNOTSUPP;
return 0;
}
-static void cap_sb_clone_mnt_opts(const struct super_block *oldsb,
+static int cap_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{
+ return 0;
}
static int cap_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
@@ -108,6 +112,13 @@ static int cap_sb_parse_opts_str(char *options, struct security_mnt_opts *opts)
return 0;
}
+static int cap_dentry_init_security(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen)
+{
+ return -EOPNOTSUPP;
+}
+
static int cap_inode_alloc_security(struct inode *inode)
{
return 0;
@@ -118,7 +129,7 @@ static void cap_inode_free_security(struct inode *inode)
}
static int cap_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
+ const struct qstr *qstr, const char **name,
void **value, size_t *len)
{
return -EOPNOTSUPP;
@@ -284,7 +295,7 @@ static int cap_path_chmod(struct path *path, umode_t mode)
return 0;
}
-static int cap_path_chown(struct path *path, uid_t uid, gid_t gid)
+static int cap_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
return 0;
}
@@ -348,7 +359,7 @@ static int cap_file_receive(struct file *file)
return 0;
}
-static int cap_dentry_open(struct file *file, const struct cred *cred)
+static int cap_file_open(struct file *file, const struct cred *cred)
{
return 0;
}
@@ -358,6 +369,10 @@ static int cap_task_create(unsigned long clone_flags)
return 0;
}
+static void cap_task_free(struct task_struct *task)
+{
+}
+
static int cap_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
return 0;
@@ -391,6 +406,11 @@ static int cap_kernel_module_request(char *kmod_name)
return 0;
}
+static int cap_kernel_module_from_file(struct file *file)
+{
+ return 0;
+}
+
static int cap_task_setpgid(struct task_struct *p, pid_t pgid)
{
return 0;
@@ -700,24 +720,45 @@ static void cap_req_classify_flow(const struct request_sock *req,
{
}
+static int cap_tun_dev_alloc_security(void **security)
+{
+ return 0;
+}
+
+static void cap_tun_dev_free_security(void *security)
+{
+}
+
static int cap_tun_dev_create(void)
{
return 0;
}
-static void cap_tun_dev_post_create(struct sock *sk)
+static int cap_tun_dev_attach_queue(void *security)
{
+ return 0;
+}
+
+static int cap_tun_dev_attach(struct sock *sk, void *security)
+{
+ return 0;
}
-static int cap_tun_dev_attach(struct sock *sk)
+static int cap_tun_dev_open(void *security)
{
return 0;
}
+
+static void cap_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+}
+
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
static int cap_xfrm_policy_alloc_security(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx)
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
return 0;
}
@@ -737,9 +778,15 @@ static int cap_xfrm_policy_delete_security(struct xfrm_sec_ctx *ctx)
return 0;
}
-static int cap_xfrm_state_alloc_security(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx,
- u32 secid)
+static int cap_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
+{
+ return 0;
+}
+
+static int cap_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec,
+ u32 secid)
{
return 0;
}
@@ -786,6 +833,11 @@ static int cap_setprocattr(struct task_struct *p, char *name, void *value,
return -EINVAL;
}
+static int cap_ismaclabel(const char *name)
+{
+ return 0;
+}
+
static int cap_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return -EOPNOTSUPP;
@@ -813,7 +865,7 @@ static int cap_inode_setsecctx(struct dentry *dentry, void *ctx, u32 ctxlen)
static int cap_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen)
{
- return 0;
+ return -EOPNOTSUPP;
}
#ifdef CONFIG_KEYS
static int cap_key_alloc(struct key *key, const struct cred *cred,
@@ -827,7 +879,7 @@ static void cap_key_free(struct key *key)
}
static int cap_key_permission(key_ref_t key_ref, const struct cred *cred,
- key_perm_t perm)
+ unsigned perm)
{
return 0;
}
@@ -901,6 +953,7 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, sb_set_mnt_opts);
set_to_cap_if_null(ops, sb_clone_mnt_opts);
set_to_cap_if_null(ops, sb_parse_opts_str);
+ set_to_cap_if_null(ops, dentry_init_security);
set_to_cap_if_null(ops, inode_alloc_security);
set_to_cap_if_null(ops, inode_free_security);
set_to_cap_if_null(ops, inode_init_security);
@@ -945,15 +998,17 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, file_alloc_security);
set_to_cap_if_null(ops, file_free_security);
set_to_cap_if_null(ops, file_ioctl);
- set_to_cap_if_null(ops, file_mmap);
+ set_to_cap_if_null(ops, mmap_addr);
+ set_to_cap_if_null(ops, mmap_file);
set_to_cap_if_null(ops, file_mprotect);
set_to_cap_if_null(ops, file_lock);
set_to_cap_if_null(ops, file_fcntl);
set_to_cap_if_null(ops, file_set_fowner);
set_to_cap_if_null(ops, file_send_sigiotask);
set_to_cap_if_null(ops, file_receive);
- set_to_cap_if_null(ops, dentry_open);
+ set_to_cap_if_null(ops, file_open);
set_to_cap_if_null(ops, task_create);
+ set_to_cap_if_null(ops, task_free);
set_to_cap_if_null(ops, cred_alloc_blank);
set_to_cap_if_null(ops, cred_free);
set_to_cap_if_null(ops, cred_prepare);
@@ -961,6 +1016,7 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, kernel_act_as);
set_to_cap_if_null(ops, kernel_create_files_as);
set_to_cap_if_null(ops, kernel_module_request);
+ set_to_cap_if_null(ops, kernel_module_from_file);
set_to_cap_if_null(ops, task_fix_setuid);
set_to_cap_if_null(ops, task_setpgid);
set_to_cap_if_null(ops, task_getpgid);
@@ -1001,6 +1057,7 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, d_instantiate);
set_to_cap_if_null(ops, getprocattr);
set_to_cap_if_null(ops, setprocattr);
+ set_to_cap_if_null(ops, ismaclabel);
set_to_cap_if_null(ops, secid_to_secctx);
set_to_cap_if_null(ops, secctx_to_secid);
set_to_cap_if_null(ops, release_secctx);
@@ -1038,16 +1095,21 @@ void __init security_fixup_ops(struct security_operations *ops)
set_to_cap_if_null(ops, secmark_refcount_inc);
set_to_cap_if_null(ops, secmark_refcount_dec);
set_to_cap_if_null(ops, req_classify_flow);
+ set_to_cap_if_null(ops, tun_dev_alloc_security);
+ set_to_cap_if_null(ops, tun_dev_free_security);
set_to_cap_if_null(ops, tun_dev_create);
- set_to_cap_if_null(ops, tun_dev_post_create);
+ set_to_cap_if_null(ops, tun_dev_open);
+ set_to_cap_if_null(ops, tun_dev_attach_queue);
set_to_cap_if_null(ops, tun_dev_attach);
+ set_to_cap_if_null(ops, skb_owned_by);
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
set_to_cap_if_null(ops, xfrm_policy_alloc_security);
set_to_cap_if_null(ops, xfrm_policy_clone_security);
set_to_cap_if_null(ops, xfrm_policy_free_security);
set_to_cap_if_null(ops, xfrm_policy_delete_security);
- set_to_cap_if_null(ops, xfrm_state_alloc_security);
+ set_to_cap_if_null(ops, xfrm_state_alloc);
+ set_to_cap_if_null(ops, xfrm_state_alloc_acquire);
set_to_cap_if_null(ops, xfrm_state_free_security);
set_to_cap_if_null(ops, xfrm_state_delete_security);
set_to_cap_if_null(ops, xfrm_policy_lookup);
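
The hunks above extend security_fixup_ops(), whose job is to point every hook an LSM leaves NULL at the default capability implementation. A stand-alone sketch of that fill-in-the-defaults pattern follows; the struct, macro and hook names are invented for illustration and only the shape mirrors the kernel's set_to_cap_if_null() macro.

#include <stdio.h>

struct demo_ops {
	int (*hook_a)(void);
	int (*hook_b)(void);
};

static int default_hook(void)
{
	return 0;
}

/* mirrors set_to_cap_if_null(): only NULL slots get the default */
#define set_to_default_if_null(ops, function)          \
	do {                                            \
		if (!(ops)->function)                   \
			(ops)->function = default_hook; \
	} while (0)

static void demo_fixup_ops(struct demo_ops *ops)
{
	set_to_default_if_null(ops, hook_a);
	set_to_default_if_null(ops, hook_b);
}

int main(void)
{
	struct demo_ops ops = { .hook_b = default_hook };

	demo_fixup_ops(&ops);
	printf("hook_a %s\n", ops.hook_a ? "filled in" : "still NULL");
	return 0;
}
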
diff --git a/security/commoncap.c b/security/commoncap.c
index 7ce191ea29a..b9d613e0ef1 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -28,6 +28,8 @@
#include <linux/prctl.h>
#include <linux/securebits.h>
#include <linux/user_namespace.h>
+#include <linux/binfmts.h>
+#include <linux/personality.h>
/*
* If a non-root user executes a setuid-root binary in
@@ -74,24 +76,33 @@ int cap_netlink_send(struct sock *sk, struct sk_buff *skb)
int cap_capable(const struct cred *cred, struct user_namespace *targ_ns,
int cap, int audit)
{
- for (;;) {
- /* The creator of the user namespace has all caps. */
- if (targ_ns != &init_user_ns && targ_ns->creator == cred->user)
- return 0;
+ struct user_namespace *ns = targ_ns;
+ /* See if cred has the capability in the target user namespace
+ * by examining the target user namespace and all of the target
+ * user namespace's parents.
+ */
+ for (;;) {
/* Do we have the necessary capabilities? */
- if (targ_ns == cred->user->user_ns)
+ if (ns == cred->user_ns)
return cap_raised(cred->cap_effective, cap) ? 0 : -EPERM;
/* Have we tried all of the parent namespaces? */
- if (targ_ns == &init_user_ns)
+ if (ns == &init_user_ns)
return -EPERM;
+ /*
+	 * The owner of the user namespace (who is a user in the
+	 * parent user namespace) has all caps over it.
+ */
+ if ((ns->parent == cred->user_ns) && uid_eq(ns->owner, cred->euid))
+ return 0;
+
/*
- *If you have a capability in a parent user ns, then you have
+ * If you have a capability in a parent user ns, then you have
* it over all children user namespaces as well.
*/
- targ_ns = targ_ns->creator->user_ns;
+ ns = ns->parent;
}
/* We never get here */
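
The rewritten loop above is easier to follow with a concrete model: start at the target namespace and climb the ->parent chain; membership in the cred's own namespace decides by capability bit, reaching the root namespace denies, and owning a direct child of the cred's namespace grants. The user-space sketch below only models that control flow; the structs and helper are simplified stand-ins, not kernel types.

#include <stdio.h>

struct user_ns {
	struct user_ns *parent;
	unsigned owner_uid;
};

struct cred {
	struct user_ns *user_ns;
	unsigned euid;
	unsigned long cap_effective;
};

static int model_capable(const struct cred *cred, struct user_ns *targ, int cap)
{
	struct user_ns *ns = targ;

	for (;;) {
		if (ns == cred->user_ns)
			return (cred->cap_effective >> cap) & 1 ? 0 : -1;
		if (!ns->parent)
			return -1;	/* reached the root namespace without a match */
		if (ns->parent == cred->user_ns && ns->owner_uid == cred->euid)
			return 0;	/* cred's euid created this child namespace */
		ns = ns->parent;
	}
}

int main(void)
{
	struct user_ns root = { .parent = NULL, .owner_uid = 0 };
	struct user_ns child = { .parent = &root, .owner_uid = 1000 };
	struct cred c = { .user_ns = &root, .euid = 1000, .cap_effective = 0 };

	/* no capability bits set, but c's euid owns the child namespace */
	printf("%d\n", model_capable(&c, &child, 21));	/* prints 0 */
	return 0;
}
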
@@ -135,10 +146,10 @@ int cap_ptrace_access_check(struct task_struct *child, unsigned int mode)
rcu_read_lock();
cred = current_cred();
child_cred = __task_cred(child);
- if (cred->user->user_ns == child_cred->user->user_ns &&
+ if (cred->user_ns == child_cred->user_ns &&
cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
goto out;
- if (ns_capable(child_cred->user->user_ns, CAP_SYS_PTRACE))
+ if (ns_capable(child_cred->user_ns, CAP_SYS_PTRACE))
goto out;
ret = -EPERM;
out:
@@ -167,10 +178,10 @@ int cap_ptrace_traceme(struct task_struct *parent)
rcu_read_lock();
cred = __task_cred(parent);
child_cred = current_cred();
- if (cred->user->user_ns == child_cred->user->user_ns &&
+ if (cred->user_ns == child_cred->user_ns &&
cap_issubset(child_cred->cap_permitted, cred->cap_permitted))
goto out;
- if (has_ns_capability(parent, child_cred->user->user_ns, CAP_SYS_PTRACE))
+ if (has_ns_capability(parent, child_cred->user_ns, CAP_SYS_PTRACE))
goto out;
ret = -EPERM;
out:
@@ -213,7 +224,7 @@ static inline int cap_inh_is_capped(void)
/* they are so limited unless the current task has the CAP_SETPCAP
* capability
*/
- if (cap_capable(current_cred(), current_cred()->user->user_ns,
+ if (cap_capable(current_cred(), current_cred()->user_ns,
CAP_SETPCAP, SECURITY_CAP_AUDIT) == 0)
return 0;
return 1;
@@ -429,7 +440,7 @@ static int get_file_caps(struct linux_binprm *bprm, bool *effective, bool *has_c
if (!file_caps_enabled)
return 0;
- if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)
+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
return 0;
dentry = dget(bprm->file->f_dentry);
@@ -471,19 +482,22 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
struct cred *new = bprm->cred;
bool effective, has_cap = false;
int ret;
+ kuid_t root_uid;
effective = false;
ret = get_file_caps(bprm, &effective, &has_cap);
if (ret < 0)
return ret;
+ root_uid = make_kuid(new->user_ns, 0);
+
if (!issecure(SECURE_NOROOT)) {
/*
* If the legacy file capability is set, then don't set privs
* for a setuid root binary run by a non-root user. Do set it
* for a root user just to cause least surprise to an admin.
*/
- if (has_cap && new->uid != 0 && new->euid == 0) {
+ if (has_cap && !uid_eq(new->uid, root_uid) && uid_eq(new->euid, root_uid)) {
warn_setuid_and_fcaps_mixed(bprm->filename);
goto skip;
}
@@ -494,25 +508,33 @@ int cap_bprm_set_creds(struct linux_binprm *bprm)
*
* If only the real uid is 0, we do not set the effective bit.
*/
- if (new->euid == 0 || new->uid == 0) {
+ if (uid_eq(new->euid, root_uid) || uid_eq(new->uid, root_uid)) {
/* pP' = (cap_bset & ~0) | (pI & ~0) */
new->cap_permitted = cap_combine(old->cap_bset,
old->cap_inheritable);
}
- if (new->euid == 0)
+ if (uid_eq(new->euid, root_uid))
effective = true;
}
skip:
+ /* if we have fs caps, clear dangerous personality flags */
+ if (!cap_issubset(new->cap_permitted, old->cap_permitted))
+ bprm->per_clear |= PER_CLEAR_ON_SETID;
+
+
/* Don't let someone trace a set[ug]id/setpcap binary with the revised
- * credentials unless they have the appropriate permit
+ * credentials unless they have the appropriate permit.
+ *
+ * In addition, if NO_NEW_PRIVS, then ensure we get no new privs.
*/
- if ((new->euid != old->uid ||
- new->egid != old->gid ||
+ if ((!uid_eq(new->euid, old->uid) ||
+ !gid_eq(new->egid, old->gid) ||
!cap_issubset(new->cap_permitted, old->cap_permitted)) &&
bprm->unsafe & ~LSM_UNSAFE_PTRACE_CAP) {
/* downgrade; they get no more than they had, and maybe less */
- if (!capable(CAP_SETUID)) {
+ if (!capable(CAP_SETUID) ||
+ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)) {
new->euid = new->uid;
new->egid = new->gid;
}
@@ -543,7 +565,7 @@ skip:
*/
if (!cap_isclear(new->cap_effective)) {
if (!cap_issubset(CAP_FULL_SET, new->cap_effective) ||
- new->euid != 0 || new->uid != 0 ||
+ !uid_eq(new->euid, root_uid) || !uid_eq(new->uid, root_uid) ||
issecure(SECURE_NOROOT)) {
ret = audit_log_bprm_fcaps(bprm, new, old);
if (ret < 0)
@@ -568,16 +590,17 @@ skip:
int cap_bprm_secureexec(struct linux_binprm *bprm)
{
const struct cred *cred = current_cred();
+ kuid_t root_uid = make_kuid(cred->user_ns, 0);
- if (cred->uid != 0) {
+ if (!uid_eq(cred->uid, root_uid)) {
if (bprm->cap_effective)
return 1;
if (!cap_isclear(cred->cap_permitted))
return 1;
}
- return (cred->euid != cred->uid ||
- cred->egid != cred->gid);
+ return (!uid_eq(cred->euid, cred->uid) ||
+ !gid_eq(cred->egid, cred->gid));
}
/**
@@ -667,15 +690,21 @@ int cap_inode_removexattr(struct dentry *dentry, const char *name)
*/
static inline void cap_emulate_setxuid(struct cred *new, const struct cred *old)
{
- if ((old->uid == 0 || old->euid == 0 || old->suid == 0) &&
- (new->uid != 0 && new->euid != 0 && new->suid != 0) &&
+ kuid_t root_uid = make_kuid(old->user_ns, 0);
+
+ if ((uid_eq(old->uid, root_uid) ||
+ uid_eq(old->euid, root_uid) ||
+ uid_eq(old->suid, root_uid)) &&
+ (!uid_eq(new->uid, root_uid) &&
+ !uid_eq(new->euid, root_uid) &&
+ !uid_eq(new->suid, root_uid)) &&
!issecure(SECURE_KEEP_CAPS)) {
cap_clear(new->cap_permitted);
cap_clear(new->cap_effective);
}
- if (old->euid == 0 && new->euid != 0)
+ if (uid_eq(old->euid, root_uid) && !uid_eq(new->euid, root_uid))
cap_clear(new->cap_effective);
- if (old->euid != 0 && new->euid == 0)
+ if (!uid_eq(old->euid, root_uid) && uid_eq(new->euid, root_uid))
new->cap_effective = new->cap_permitted;
}
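
The repeated uid_eq(..., root_uid) pattern in this file exists because uids are now opaque kuid_t values produced by make_kuid() for a given namespace and compared with helpers rather than '=='. A small user-space illustration of why the wrapper type helps; the types and the uid_base mapping are invented for the sketch and are not the kernel definitions.

#include <stdbool.h>
#include <stdio.h>

typedef struct { unsigned val; } kuid_model_t;

struct userns_model { unsigned uid_base; };	/* crude stand-in for a uid map */

static kuid_model_t make_kuid_model(const struct userns_model *ns, unsigned uid)
{
	kuid_model_t k = { .val = ns->uid_base + uid };
	return k;
}

static bool uid_eq_model(kuid_model_t a, kuid_model_t b)
{
	return a.val == b.val;	/* '==' on the structs would not compile */
}

int main(void)
{
	struct userns_model container = { .uid_base = 100000 };
	kuid_model_t container_root = make_kuid_model(&container, 0);
	kuid_model_t global_root = { .val = 0 };

	/* uid 0 inside the container is not global root */
	printf("%d\n", uid_eq_model(container_root, global_root));	/* prints 0 */
	return 0;
}
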
@@ -708,11 +737,12 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
* if not, we might be a bit too harsh here.
*/
if (!issecure(SECURE_NO_SETUID_FIXUP)) {
- if (old->fsuid == 0 && new->fsuid != 0)
+ kuid_t root_uid = make_kuid(old->user_ns, 0);
+ if (uid_eq(old->fsuid, root_uid) && !uid_eq(new->fsuid, root_uid))
new->cap_effective =
cap_drop_fs_set(new->cap_effective);
- if (old->fsuid != 0 && new->fsuid == 0)
+ if (!uid_eq(old->fsuid, root_uid) && uid_eq(new->fsuid, root_uid))
new->cap_effective =
cap_raise_fs_set(new->cap_effective,
new->cap_permitted);
@@ -738,16 +768,16 @@ int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags)
*/
static int cap_safe_nice(struct task_struct *p)
{
- int is_subset;
+ int is_subset, ret = 0;
rcu_read_lock();
is_subset = cap_issubset(__task_cred(p)->cap_permitted,
current_cred()->cap_permitted);
+ if (!is_subset && !ns_capable(__task_cred(p)->user_ns, CAP_SYS_NICE))
+ ret = -EPERM;
rcu_read_unlock();
- if (!is_subset && !capable(CAP_SYS_NICE))
- return -EPERM;
- return 0;
+ return ret;
}
/**
@@ -794,7 +824,7 @@ int cap_task_setnice(struct task_struct *p, int nice)
*/
static long cap_prctl_drop(struct cred *new, unsigned long cap)
{
- if (!capable(CAP_SETPCAP))
+ if (!ns_capable(current_user_ns(), CAP_SETPCAP))
return -EPERM;
if (!cap_valid(cap))
return -EINVAL;
@@ -865,7 +895,7 @@ int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
|| ((new->securebits & SECURE_ALL_LOCKS & ~arg2)) /*[2]*/
|| (arg2 & ~(SECURE_ALL_LOCKS | SECURE_ALL_BITS)) /*[3]*/
|| (cap_capable(current_cred(),
- current_cred()->user->user_ns, CAP_SETPCAP,
+ current_cred()->user_ns, CAP_SETPCAP,
SECURITY_CAP_AUDIT) != 0) /*[4]*/
/*
* [1] no changing of bits that are locked
@@ -937,22 +967,15 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages)
}
/*
- * cap_file_mmap - check if able to map given addr
- * @file: unused
- * @reqprot: unused
- * @prot: unused
- * @flags: unused
+ * cap_mmap_addr - check if able to map given addr
* @addr: address attempting to be mapped
- * @addr_only: unused
*
* If the process is attempting to map memory below dac_mmap_min_addr they need
* CAP_SYS_RAWIO. The other parameters to this function are unused by the
* capability security module. Returns 0 if this mapping should be allowed
* -EPERM if not.
*/
-int cap_file_mmap(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags,
- unsigned long addr, unsigned long addr_only)
+int cap_mmap_addr(unsigned long addr)
{
int ret = 0;
@@ -965,3 +988,9 @@ int cap_file_mmap(struct file *file, unsigned long reqprot,
}
return ret;
}
+
+int cap_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
+ return 0;
+}
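
cap_file_mmap() is split so that the address check stands alone as cap_mmap_addr() and the file-based hook (cap_mmap_file) becomes a no-op. The rule the address check enforces can be modelled in a few lines; MODEL_MMAP_MIN_ADDR and the fake capability flag are stand-ins for dac_mmap_min_addr and capable(CAP_SYS_RAWIO), used only to make the policy concrete.

#include <stdbool.h>
#include <stdio.h>

#define MODEL_MMAP_MIN_ADDR 65536UL

static bool model_has_rawio;	/* pretend CAP_SYS_RAWIO state */

static int model_mmap_addr_check(unsigned long addr)
{
	if (addr >= MODEL_MMAP_MIN_ADDR)
		return 0;			/* high mappings are always fine */
	return model_has_rawio ? 0 : -1;	/* low mappings need CAP_SYS_RAWIO */
}

int main(void)
{
	printf("%d\n", model_mmap_addr_check(0x1000));		/* -1: below the floor */
	printf("%d\n", model_mmap_addr_check(0x200000));	/* 0 */
	return 0;
}
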
diff --git a/security/device_cgroup.c b/security/device_cgroup.c
index 8b5b5d8612c..d9d69e6930e 100644
--- a/security/device_cgroup.c
+++ b/security/device_cgroup.c
@@ -25,13 +25,19 @@
static DEFINE_MUTEX(devcgroup_mutex);
+enum devcg_behavior {
+ DEVCG_DEFAULT_NONE,
+ DEVCG_DEFAULT_ALLOW,
+ DEVCG_DEFAULT_DENY,
+};
+
/*
- * whitelist locking rules:
+ * exception list locking rules:
* hold devcgroup_mutex for update/read.
* hold rcu_read_lock() for read.
*/
-struct dev_whitelist_item {
+struct dev_exception_item {
u32 major, minor;
short type;
short access;
@@ -41,45 +47,31 @@ struct dev_whitelist_item {
struct dev_cgroup {
struct cgroup_subsys_state css;
- struct list_head whitelist;
+ struct list_head exceptions;
+ enum devcg_behavior behavior;
};
static inline struct dev_cgroup *css_to_devcgroup(struct cgroup_subsys_state *s)
{
- return container_of(s, struct dev_cgroup, css);
-}
-
-static inline struct dev_cgroup *cgroup_to_devcgroup(struct cgroup *cgroup)
-{
- return css_to_devcgroup(cgroup_subsys_state(cgroup, devices_subsys_id));
+ return s ? container_of(s, struct dev_cgroup, css) : NULL;
}
static inline struct dev_cgroup *task_devcgroup(struct task_struct *task)
{
- return css_to_devcgroup(task_subsys_state(task, devices_subsys_id));
-}
-
-struct cgroup_subsys devices_subsys;
-
-static int devcgroup_can_attach(struct cgroup_subsys *ss,
- struct cgroup *new_cgrp, struct cgroup_taskset *set)
-{
- struct task_struct *task = cgroup_taskset_first(set);
-
- if (current != task && !capable(CAP_SYS_ADMIN))
- return -EPERM;
- return 0;
+ return css_to_devcgroup(task_css(task, devices_cgrp_id));
}
/*
* called under devcgroup_mutex
*/
-static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
+static int dev_exceptions_copy(struct list_head *dest, struct list_head *orig)
{
- struct dev_whitelist_item *wh, *tmp, *new;
+ struct dev_exception_item *ex, *tmp, *new;
- list_for_each_entry(wh, orig, list) {
- new = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
+ lockdep_assert_held(&devcgroup_mutex);
+
+ list_for_each_entry(ex, orig, list) {
+ new = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
if (!new)
goto free_and_exit;
list_add_tail(&new->list, dest);
@@ -88,64 +80,64 @@ static int dev_whitelist_copy(struct list_head *dest, struct list_head *orig)
return 0;
free_and_exit:
- list_for_each_entry_safe(wh, tmp, dest, list) {
- list_del(&wh->list);
- kfree(wh);
+ list_for_each_entry_safe(ex, tmp, dest, list) {
+ list_del(&ex->list);
+ kfree(ex);
}
return -ENOMEM;
}
-/* Stupid prototype - don't bother combining existing entries */
/*
* called under devcgroup_mutex
*/
-static int dev_whitelist_add(struct dev_cgroup *dev_cgroup,
- struct dev_whitelist_item *wh)
+static int dev_exception_add(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *ex)
{
- struct dev_whitelist_item *whcopy, *walk;
+ struct dev_exception_item *excopy, *walk;
+
+ lockdep_assert_held(&devcgroup_mutex);
- whcopy = kmemdup(wh, sizeof(*wh), GFP_KERNEL);
- if (!whcopy)
+ excopy = kmemdup(ex, sizeof(*ex), GFP_KERNEL);
+ if (!excopy)
return -ENOMEM;
- list_for_each_entry(walk, &dev_cgroup->whitelist, list) {
- if (walk->type != wh->type)
+ list_for_each_entry(walk, &dev_cgroup->exceptions, list) {
+ if (walk->type != ex->type)
continue;
- if (walk->major != wh->major)
+ if (walk->major != ex->major)
continue;
- if (walk->minor != wh->minor)
+ if (walk->minor != ex->minor)
continue;
- walk->access |= wh->access;
- kfree(whcopy);
- whcopy = NULL;
+ walk->access |= ex->access;
+ kfree(excopy);
+ excopy = NULL;
}
- if (whcopy != NULL)
- list_add_tail_rcu(&whcopy->list, &dev_cgroup->whitelist);
+ if (excopy != NULL)
+ list_add_tail_rcu(&excopy->list, &dev_cgroup->exceptions);
return 0;
}
/*
* called under devcgroup_mutex
*/
-static void dev_whitelist_rm(struct dev_cgroup *dev_cgroup,
- struct dev_whitelist_item *wh)
+static void dev_exception_rm(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *ex)
{
- struct dev_whitelist_item *walk, *tmp;
+ struct dev_exception_item *walk, *tmp;
+
+ lockdep_assert_held(&devcgroup_mutex);
- list_for_each_entry_safe(walk, tmp, &dev_cgroup->whitelist, list) {
- if (walk->type == DEV_ALL)
- goto remove;
- if (walk->type != wh->type)
+ list_for_each_entry_safe(walk, tmp, &dev_cgroup->exceptions, list) {
+ if (walk->type != ex->type)
continue;
- if (walk->major != ~0 && walk->major != wh->major)
+ if (walk->major != ex->major)
continue;
- if (walk->minor != ~0 && walk->minor != wh->minor)
+ if (walk->minor != ex->minor)
continue;
-remove:
- walk->access &= ~wh->access;
+ walk->access &= ~ex->access;
if (!walk->access) {
list_del_rcu(&walk->list);
kfree_rcu(walk, rcu);
@@ -153,59 +145,92 @@ remove:
}
}
+static void __dev_exception_clean(struct dev_cgroup *dev_cgroup)
+{
+ struct dev_exception_item *ex, *tmp;
+
+ list_for_each_entry_safe(ex, tmp, &dev_cgroup->exceptions, list) {
+ list_del_rcu(&ex->list);
+ kfree_rcu(ex, rcu);
+ }
+}
+
+/**
+ * dev_exception_clean - frees all entries of the exception list
+ * @dev_cgroup: dev_cgroup with the exception list to be cleaned
+ *
+ * called under devcgroup_mutex
+ */
+static void dev_exception_clean(struct dev_cgroup *dev_cgroup)
+{
+ lockdep_assert_held(&devcgroup_mutex);
+
+ __dev_exception_clean(dev_cgroup);
+}
+
+static inline bool is_devcg_online(const struct dev_cgroup *devcg)
+{
+ return (devcg->behavior != DEVCG_DEFAULT_NONE);
+}
+
+/**
+ * devcgroup_online - initializes devcgroup's behavior and exceptions based on
+ * parent's
+ * @css: css getting online
+ * returns 0 in case of success, error code otherwise
+ */
+static int devcgroup_online(struct cgroup_subsys_state *css)
+{
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+ struct dev_cgroup *parent_dev_cgroup = css_to_devcgroup(css->parent);
+ int ret = 0;
+
+ mutex_lock(&devcgroup_mutex);
+
+ if (parent_dev_cgroup == NULL)
+ dev_cgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ else {
+ ret = dev_exceptions_copy(&dev_cgroup->exceptions,
+ &parent_dev_cgroup->exceptions);
+ if (!ret)
+ dev_cgroup->behavior = parent_dev_cgroup->behavior;
+ }
+ mutex_unlock(&devcgroup_mutex);
+
+ return ret;
+}
+
+static void devcgroup_offline(struct cgroup_subsys_state *css)
+{
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
+
+ mutex_lock(&devcgroup_mutex);
+ dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
+ mutex_unlock(&devcgroup_mutex);
+}
+
/*
* called from kernel/cgroup.c with cgroup_lock() held.
*/
-static struct cgroup_subsys_state *devcgroup_create(struct cgroup_subsys *ss,
- struct cgroup *cgroup)
+static struct cgroup_subsys_state *
+devcgroup_css_alloc(struct cgroup_subsys_state *parent_css)
{
- struct dev_cgroup *dev_cgroup, *parent_dev_cgroup;
- struct cgroup *parent_cgroup;
- int ret;
+ struct dev_cgroup *dev_cgroup;
dev_cgroup = kzalloc(sizeof(*dev_cgroup), GFP_KERNEL);
if (!dev_cgroup)
return ERR_PTR(-ENOMEM);
- INIT_LIST_HEAD(&dev_cgroup->whitelist);
- parent_cgroup = cgroup->parent;
-
- if (parent_cgroup == NULL) {
- struct dev_whitelist_item *wh;
- wh = kmalloc(sizeof(*wh), GFP_KERNEL);
- if (!wh) {
- kfree(dev_cgroup);
- return ERR_PTR(-ENOMEM);
- }
- wh->minor = wh->major = ~0;
- wh->type = DEV_ALL;
- wh->access = ACC_MASK;
- list_add(&wh->list, &dev_cgroup->whitelist);
- } else {
- parent_dev_cgroup = cgroup_to_devcgroup(parent_cgroup);
- mutex_lock(&devcgroup_mutex);
- ret = dev_whitelist_copy(&dev_cgroup->whitelist,
- &parent_dev_cgroup->whitelist);
- mutex_unlock(&devcgroup_mutex);
- if (ret) {
- kfree(dev_cgroup);
- return ERR_PTR(ret);
- }
- }
+ INIT_LIST_HEAD(&dev_cgroup->exceptions);
+ dev_cgroup->behavior = DEVCG_DEFAULT_NONE;
return &dev_cgroup->css;
}
-static void devcgroup_destroy(struct cgroup_subsys *ss,
- struct cgroup *cgroup)
+static void devcgroup_css_free(struct cgroup_subsys_state *css)
{
- struct dev_cgroup *dev_cgroup;
- struct dev_whitelist_item *wh, *tmp;
+ struct dev_cgroup *dev_cgroup = css_to_devcgroup(css);
- dev_cgroup = cgroup_to_devcgroup(cgroup);
- list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) {
- list_del(&wh->list);
- kfree(wh);
- }
+ __dev_exception_clean(dev_cgroup);
kfree(dev_cgroup);
}
@@ -247,112 +272,384 @@ static void set_majmin(char *str, unsigned m)
sprintf(str, "%u", m);
}
-static int devcgroup_seq_read(struct cgroup *cgroup, struct cftype *cft,
- struct seq_file *m)
+static int devcgroup_seq_show(struct seq_file *m, void *v)
{
- struct dev_cgroup *devcgroup = cgroup_to_devcgroup(cgroup);
- struct dev_whitelist_item *wh;
+ struct dev_cgroup *devcgroup = css_to_devcgroup(seq_css(m));
+ struct dev_exception_item *ex;
char maj[MAJMINLEN], min[MAJMINLEN], acc[ACCLEN];
rcu_read_lock();
- list_for_each_entry_rcu(wh, &devcgroup->whitelist, list) {
- set_access(acc, wh->access);
- set_majmin(maj, wh->major);
- set_majmin(min, wh->minor);
- seq_printf(m, "%c %s:%s %s\n", type_to_char(wh->type),
+ /*
+	 * To preserve compatibility:
+ * - Only show the "all devices" when the default policy is to allow
+ * - List the exceptions in case the default policy is to deny
+ * This way, the file remains as a "whitelist of devices"
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ set_access(acc, ACC_MASK);
+ set_majmin(maj, ~0);
+ set_majmin(min, ~0);
+ seq_printf(m, "%c %s:%s %s\n", type_to_char(DEV_ALL),
maj, min, acc);
+ } else {
+ list_for_each_entry_rcu(ex, &devcgroup->exceptions, list) {
+ set_access(acc, ex->access);
+ set_majmin(maj, ex->major);
+ set_majmin(min, ex->minor);
+ seq_printf(m, "%c %s:%s %s\n", type_to_char(ex->type),
+ maj, min, acc);
+ }
}
rcu_read_unlock();
return 0;
}
-/*
- * may_access_whitelist:
- * does the access granted to dev_cgroup c contain the access
- * requested in whitelist item refwh.
- * return 1 if yes, 0 if no.
- * call with devcgroup_mutex held
+/**
+ * match_exception - iterates the exception list trying to find a complete match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a complete match if an exception is found that will
+ * contain the entire range of provided parameters.
+ *
+ * Return: true in case it matches an exception completely
*/
-static int may_access_whitelist(struct dev_cgroup *c,
- struct dev_whitelist_item *refwh)
+static bool match_exception(struct list_head *exceptions, short type,
+ u32 major, u32 minor, short access)
{
- struct dev_whitelist_item *whitem;
+ struct dev_exception_item *ex;
- list_for_each_entry(whitem, &c->whitelist, list) {
- if (whitem->type & DEV_ALL)
- return 1;
- if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK))
+ list_for_each_entry_rcu(ex, exceptions, list) {
+ if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
continue;
- if ((refwh->type & DEV_CHAR) && !(whitem->type & DEV_CHAR))
+ if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
continue;
- if (whitem->major != ~0 && whitem->major != refwh->major)
+ if (ex->major != ~0 && ex->major != major)
continue;
- if (whitem->minor != ~0 && whitem->minor != refwh->minor)
+ if (ex->minor != ~0 && ex->minor != minor)
continue;
- if (refwh->access & (~whitem->access))
+ /* provided access cannot have more than the exception rule */
+ if (access & (~ex->access))
continue;
- return 1;
+ return true;
}
- return 0;
+ return false;
+}
+
+/**
+ * match_exception_partial - iterates the exception list trying to find a partial match
+ * @exceptions: list of exceptions
+ * @type: device type (DEV_BLOCK or DEV_CHAR)
+ * @major: device file major number, ~0 to match all
+ * @minor: device file minor number, ~0 to match all
+ * @access: permission mask (ACC_READ, ACC_WRITE, ACC_MKNOD)
+ *
+ * It is considered a partial match if an exception's range is found to
+ * contain *any* of the devices specified by the provided parameters. This is
+ * used to make sure no extra access is being granted that is forbidden by
+ * any exception in the list.
+ *
+ * Return: true in case the provided range partially matches an exception
+ */
+static bool match_exception_partial(struct list_head *exceptions, short type,
+ u32 major, u32 minor, short access)
+{
+ struct dev_exception_item *ex;
+
+ list_for_each_entry_rcu(ex, exceptions, list) {
+ if ((type & DEV_BLOCK) && !(ex->type & DEV_BLOCK))
+ continue;
+ if ((type & DEV_CHAR) && !(ex->type & DEV_CHAR))
+ continue;
+ /*
+ * We must be sure that both the exception and the provided
+ * range aren't masking all devices
+ */
+ if (ex->major != ~0 && major != ~0 && ex->major != major)
+ continue;
+ if (ex->minor != ~0 && minor != ~0 && ex->minor != minor)
+ continue;
+ /*
+		 * The provided range only counts as matching an
+		 * exception if at least one of its access bits
+		 * overlaps the exception's access bits
+ */
+ if (!(access & ex->access))
+ continue;
+ return true;
+ }
+ return false;
+}
+
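
The two matchers above differ only in how strict they are: match_exception() requires the request to be fully covered by one exception (every access bit included, no wildcard mismatch), while match_exception_partial() reports any overlap between a possibly wildcarded range and an exception. Below is a stand-alone model of the two checks, with ~0 playing the wildcard role as in the patch; everything else (names, flag values) is illustrative.

#include <stdbool.h>
#include <stdio.h>

#define WILD (~0u)
#define ACC_MODEL_READ  1	/* sketch-local flags, not the kernel's ACC_* */
#define ACC_MODEL_WRITE 2

struct model_ex { unsigned major, minor; unsigned access; };

static bool full_match(const struct model_ex *e, unsigned maj, unsigned min, unsigned acc)
{
	if (e->major != WILD && e->major != maj) return false;
	if (e->minor != WILD && e->minor != min) return false;
	return (acc & ~e->access) == 0;		/* every requested bit covered */
}

static bool partial_match(const struct model_ex *e, unsigned maj, unsigned min, unsigned acc)
{
	if (e->major != WILD && maj != WILD && e->major != maj) return false;
	if (e->minor != WILD && min != WILD && e->minor != min) return false;
	return (acc & e->access) != 0;		/* any requested bit overlaps */
}

int main(void)
{
	struct model_ex e = { .major = 8, .minor = WILD, .access = ACC_MODEL_READ };

	/* 0: the write bit is not covered by the exception */
	printf("%d\n", full_match(&e, 8, 1, ACC_MODEL_READ | ACC_MODEL_WRITE));
	/* 1: the read bit overlaps, so the wildcarded range partially matches */
	printf("%d\n", partial_match(&e, 8, WILD, ACC_MODEL_READ | ACC_MODEL_WRITE));
	return 0;
}
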
+/**
+ * verify_new_ex - verifies if a new exception is allowed by parent cgroup's permissions
+ * @dev_cgroup: dev cgroup to be tested against
+ * @refex: new exception
+ * @behavior: behavior of the exception's dev_cgroup
+ *
+ * This is used to make sure a child cgroup won't have more privileges
+ * than its parent
+ */
+static bool verify_new_ex(struct dev_cgroup *dev_cgroup,
+ struct dev_exception_item *refex,
+ enum devcg_behavior behavior)
+{
+ bool match = false;
+
+ rcu_lockdep_assert(rcu_read_lock_held() ||
+ lockdep_is_held(&devcgroup_mutex),
+ "device_cgroup:verify_new_ex called without proper synchronization");
+
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ if (behavior == DEVCG_DEFAULT_ALLOW) {
+ /*
+			 * a new exception in the child doesn't matter,
+			 * it only adds extra restrictions
+ */
+ return true;
+ } else {
+ /*
+			 * a new exception in the child will add more devices
+			 * that can be accessed, so it can't match any of the
+			 * parent's exceptions, even slightly
+ */
+ match = match_exception_partial(&dev_cgroup->exceptions,
+ refex->type,
+ refex->major,
+ refex->minor,
+ refex->access);
+
+ if (match)
+ return false;
+ return true;
+ }
+ } else {
+ /*
+		 * Only behavior == DEVCG_DEFAULT_DENY is allowed here, therefore
+		 * the new exception will add access to more devices and must
+		 * be contained completely in a parent's exception to be
+ * allowed
+ */
+ match = match_exception(&dev_cgroup->exceptions, refex->type,
+ refex->major, refex->minor,
+ refex->access);
+
+ if (match)
+ /* parent has an exception that matches the proposed */
+ return true;
+ else
+ return false;
+ }
+ return false;
}
/*
* parent_has_perm:
- * when adding a new allow rule to a device whitelist, the rule
+ * when adding a new allow rule to a device exception list, the rule
* must be allowed in the parent device
*/
static int parent_has_perm(struct dev_cgroup *childcg,
- struct dev_whitelist_item *wh)
+ struct dev_exception_item *ex)
{
- struct cgroup *pcg = childcg->css.cgroup->parent;
- struct dev_cgroup *parent;
+ struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
+
+ if (!parent)
+ return 1;
+ return verify_new_ex(parent, ex, childcg->behavior);
+}
+
+/**
+ * parent_allows_removal - verify if it's ok to remove an exception
+ * @childcg: child cgroup from where the exception will be removed
+ * @ex: exception being removed
+ *
+ * When removing an exception in cgroups with default ALLOW policy, it must
+ * be checked if removing it will give the child cgroup more access than the
+ * parent.
+ *
+ * Return: true if it's ok to remove exception, false otherwise
+ */
+static bool parent_allows_removal(struct dev_cgroup *childcg,
+ struct dev_exception_item *ex)
+{
+ struct dev_cgroup *parent = css_to_devcgroup(childcg->css.parent);
+
+ if (!parent)
+ return true;
- if (!pcg)
+ /* It's always allowed to remove access to devices */
+ if (childcg->behavior == DEVCG_DEFAULT_DENY)
+ return true;
+
+ /*
+	 * Make sure you're not removing part or all of an exception that exists in
+ * the parent cgroup
+ */
+ return !match_exception_partial(&parent->exceptions, ex->type,
+ ex->major, ex->minor, ex->access);
+}
+
+/**
+ * may_allow_all - checks if it's possible to change the behavior to
+ * allow based on parent's rules.
+ * @parent: device cgroup's parent
+ * returns: != 0 in case it's allowed, 0 otherwise
+ */
+static inline int may_allow_all(struct dev_cgroup *parent)
+{
+ if (!parent)
return 1;
- parent = cgroup_to_devcgroup(pcg);
- return may_access_whitelist(parent, wh);
+ return parent->behavior == DEVCG_DEFAULT_ALLOW;
+}
+
+/**
+ * revalidate_active_exceptions - walks through the active exception list and
+ * revalidates the exceptions based on parent's
+ * behavior and exceptions. The exceptions that
+ * are no longer valid will be removed.
+ * Called with devcgroup_mutex held.
+ * @devcg: cgroup which exceptions will be checked
+ *
+ * This is one of the three key functions for hierarchy implementation.
+ * This function is responsible for re-evaluating all the cgroup's active
+ * exceptions due to a parent's exception change.
+ * Refer to Documentation/cgroups/devices.txt for more details.
+ */
+static void revalidate_active_exceptions(struct dev_cgroup *devcg)
+{
+ struct dev_exception_item *ex;
+ struct list_head *this, *tmp;
+
+ list_for_each_safe(this, tmp, &devcg->exceptions) {
+ ex = container_of(this, struct dev_exception_item, list);
+ if (!parent_has_perm(devcg, ex))
+ dev_exception_rm(devcg, ex);
+ }
+}
+
+/**
+ * propagate_exception - propagates a new exception to the children
+ * @devcg_root: device cgroup that added a new exception
+ * @ex: new exception to be propagated
+ *
+ * returns: 0 in case of success, != 0 in case of error
+ */
+static int propagate_exception(struct dev_cgroup *devcg_root,
+ struct dev_exception_item *ex)
+{
+ struct cgroup_subsys_state *pos;
+ int rc = 0;
+
+ rcu_read_lock();
+
+ css_for_each_descendant_pre(pos, &devcg_root->css) {
+ struct dev_cgroup *devcg = css_to_devcgroup(pos);
+
+ /*
+ * Because devcgroup_mutex is held, no devcg will become
+ * online or offline during the tree walk (see on/offline
+ * methods), and online ones are safe to access outside RCU
+ * read lock without bumping refcnt.
+ */
+ if (pos == &devcg_root->css || !is_devcg_online(devcg))
+ continue;
+
+ rcu_read_unlock();
+
+ /*
+		 * in case both the root's and the devcg's behavior is allow, a new
+ * restriction means adding to the exception list
+ */
+ if (devcg_root->behavior == DEVCG_DEFAULT_ALLOW &&
+ devcg->behavior == DEVCG_DEFAULT_ALLOW) {
+ rc = dev_exception_add(devcg, ex);
+ if (rc)
+ break;
+ } else {
+ /*
+ * in the other possible cases:
+ * root's behavior: allow, devcg's: deny
+ * root's behavior: deny, devcg's: deny
+ * the exception will be removed
+ */
+ dev_exception_rm(devcg, ex);
+ }
+ revalidate_active_exceptions(devcg);
+
+ rcu_read_lock();
+ }
+
+ rcu_read_unlock();
+ return rc;
}
/*
- * Modify the whitelist using allow/deny rules.
+ * Modify the exception list using allow/deny rules.
* CAP_SYS_ADMIN is needed for this. It's at least separate from CAP_MKNOD
* so we can give a container CAP_MKNOD to let it create devices but not
- * modify the whitelist.
+ * modify the exception list.
* It seems likely we'll want to add a CAP_CONTAINER capability to allow
* us to also grant CAP_SYS_ADMIN to containers without giving away the
- * device whitelist controls, but for now we'll stick with CAP_SYS_ADMIN
+ * device exception list controls, but for now we'll stick with CAP_SYS_ADMIN
*
* Taking rules away is always allowed (given CAP_SYS_ADMIN). Granting
* new access is only allowed if you're in the top-level cgroup, or your
* parent cgroup has the access you're asking for.
*/
static int devcgroup_update_access(struct dev_cgroup *devcgroup,
- int filetype, const char *buffer)
+ int filetype, char *buffer)
{
const char *b;
- char *endp;
- int count;
- struct dev_whitelist_item wh;
+ char temp[12]; /* 11 + 1 characters needed for a u32 */
+ int count, rc = 0;
+ struct dev_exception_item ex;
+ struct dev_cgroup *parent = css_to_devcgroup(devcgroup->css.parent);
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- memset(&wh, 0, sizeof(wh));
+ memset(&ex, 0, sizeof(ex));
b = buffer;
switch (*b) {
case 'a':
- wh.type = DEV_ALL;
- wh.access = ACC_MASK;
- wh.major = ~0;
- wh.minor = ~0;
- goto handle;
+ switch (filetype) {
+ case DEVCG_ALLOW:
+ if (css_has_online_children(&devcgroup->css))
+ return -EINVAL;
+
+ if (!may_allow_all(parent))
+ return -EPERM;
+ dev_exception_clean(devcgroup);
+ devcgroup->behavior = DEVCG_DEFAULT_ALLOW;
+ if (!parent)
+ break;
+
+ rc = dev_exceptions_copy(&devcgroup->exceptions,
+ &parent->exceptions);
+ if (rc)
+ return rc;
+ break;
+ case DEVCG_DENY:
+ if (css_has_online_children(&devcgroup->css))
+ return -EINVAL;
+
+ dev_exception_clean(devcgroup);
+ devcgroup->behavior = DEVCG_DEFAULT_DENY;
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
case 'b':
- wh.type = DEV_BLOCK;
+ ex.type = DEV_BLOCK;
break;
case 'c':
- wh.type = DEV_CHAR;
+ ex.type = DEV_CHAR;
break;
default:
return -EINVAL;
@@ -362,11 +659,19 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
return -EINVAL;
b++;
if (*b == '*') {
- wh.major = ~0;
+ ex.major = ~0;
b++;
} else if (isdigit(*b)) {
- wh.major = simple_strtoul(b, &endp, 10);
- b = endp;
+ memset(temp, 0, sizeof(temp));
+ for (count = 0; count < sizeof(temp) - 1; count++) {
+ temp[count] = *b;
+ b++;
+ if (!isdigit(*b))
+ break;
+ }
+ rc = kstrtou32(temp, 10, &ex.major);
+ if (rc)
+ return -EINVAL;
} else {
return -EINVAL;
}
@@ -376,11 +681,19 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
/* read minor */
if (*b == '*') {
- wh.minor = ~0;
+ ex.minor = ~0;
b++;
} else if (isdigit(*b)) {
- wh.minor = simple_strtoul(b, &endp, 10);
- b = endp;
+ memset(temp, 0, sizeof(temp));
+ for (count = 0; count < sizeof(temp) - 1; count++) {
+ temp[count] = *b;
+ b++;
+ if (!isdigit(*b))
+ break;
+ }
+ rc = kstrtou32(temp, 10, &ex.minor);
+ if (rc)
+ return -EINVAL;
} else {
return -EINVAL;
}
@@ -389,13 +702,13 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
for (b++, count = 0; count < 3; count++, b++) {
switch (*b) {
case 'r':
- wh.access |= ACC_READ;
+ ex.access |= ACC_READ;
break;
case 'w':
- wh.access |= ACC_WRITE;
+ ex.access |= ACC_WRITE;
break;
case 'm':
- wh.access |= ACC_MKNOD;
+ ex.access |= ACC_MKNOD;
break;
case '\n':
case '\0':
@@ -406,134 +719,150 @@ static int devcgroup_update_access(struct dev_cgroup *devcgroup,
}
}
-handle:
switch (filetype) {
case DEVCG_ALLOW:
- if (!parent_has_perm(devcgroup, &wh))
+ /*
+ * If the default policy is to allow by default, try to remove
+ * an matching exception instead. And be silent about it: we
+ * don't want to break compatibility
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_ALLOW) {
+ /* Check if the parent allows removing it first */
+ if (!parent_allows_removal(devcgroup, &ex))
+ return -EPERM;
+ dev_exception_rm(devcgroup, &ex);
+ break;
+ }
+
+ if (!parent_has_perm(devcgroup, &ex))
return -EPERM;
- return dev_whitelist_add(devcgroup, &wh);
+ rc = dev_exception_add(devcgroup, &ex);
+ break;
case DEVCG_DENY:
- dev_whitelist_rm(devcgroup, &wh);
+ /*
+ * If the default policy is to deny by default, try to remove
+ * an matching exception instead. And be silent about it: we
+ * don't want to break compatibility
+ */
+ if (devcgroup->behavior == DEVCG_DEFAULT_DENY)
+ dev_exception_rm(devcgroup, &ex);
+ else
+ rc = dev_exception_add(devcgroup, &ex);
+
+ if (rc)
+ break;
+ /* we only propagate new restrictions */
+ rc = propagate_exception(devcgroup, &ex);
break;
default:
- return -EINVAL;
+ rc = -EINVAL;
}
- return 0;
+ return rc;
}
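
As a worked example of what the parser above produces: writing the rule "c 1:3 rw" to devices.allow or devices.deny yields an exception item roughly like the initializer below. The struct and the flag values here are a stripped-down illustration, not the kernel's dev_exception_item or ACC_* definitions.

#include <stdio.h>

#define EX_READ  0x1	/* illustrative flags for this sketch only */
#define EX_WRITE 0x2
#define EX_MKNOD 0x4

struct example_exception {
	unsigned major, minor;
	char type;		/* 'a', 'b' or 'c', as in the rule string */
	unsigned access;	/* sketch-local bits, not the kernel's values */
};

int main(void)
{
	/* "c 1:3 rw"  ->  char device, major 1, minor 3, read + write */
	struct example_exception ex = {
		.type = 'c', .major = 1, .minor = 3, .access = EX_READ | EX_WRITE,
	};

	printf("%c %u:%u access=0x%x\n", ex.type, ex.major, ex.minor, ex.access);
	return 0;
}
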
-static int devcgroup_access_write(struct cgroup *cgrp, struct cftype *cft,
- const char *buffer)
+static ssize_t devcgroup_access_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes, loff_t off)
{
int retval;
mutex_lock(&devcgroup_mutex);
- retval = devcgroup_update_access(cgroup_to_devcgroup(cgrp),
- cft->private, buffer);
+ retval = devcgroup_update_access(css_to_devcgroup(of_css(of)),
+ of_cft(of)->private, strstrip(buf));
mutex_unlock(&devcgroup_mutex);
- return retval;
+ return retval ?: nbytes;
}
static struct cftype dev_cgroup_files[] = {
{
.name = "allow",
- .write_string = devcgroup_access_write,
+ .write = devcgroup_access_write,
.private = DEVCG_ALLOW,
},
{
.name = "deny",
- .write_string = devcgroup_access_write,
+ .write = devcgroup_access_write,
.private = DEVCG_DENY,
},
{
.name = "list",
- .read_seq_string = devcgroup_seq_read,
+ .seq_show = devcgroup_seq_show,
.private = DEVCG_LIST,
},
+ { } /* terminate */
};
-static int devcgroup_populate(struct cgroup_subsys *ss,
- struct cgroup *cgroup)
-{
- return cgroup_add_files(cgroup, ss, dev_cgroup_files,
- ARRAY_SIZE(dev_cgroup_files));
-}
-
-struct cgroup_subsys devices_subsys = {
- .name = "devices",
- .can_attach = devcgroup_can_attach,
- .create = devcgroup_create,
- .destroy = devcgroup_destroy,
- .populate = devcgroup_populate,
- .subsys_id = devices_subsys_id,
+struct cgroup_subsys devices_cgrp_subsys = {
+ .css_alloc = devcgroup_css_alloc,
+ .css_free = devcgroup_css_free,
+ .css_online = devcgroup_online,
+ .css_offline = devcgroup_offline,
+ .base_cftypes = dev_cgroup_files,
};
-int __devcgroup_inode_permission(struct inode *inode, int mask)
+/**
+ * __devcgroup_check_permission - checks if an inode operation is permitted
+ * @dev_cgroup: the dev cgroup to be tested against
+ * @type: device type
+ * @major: device major number
+ * @minor: device minor number
+ * @access: combination of ACC_WRITE, ACC_READ and ACC_MKNOD
+ *
+ * returns 0 on success, -EPERM in case the operation is not permitted
+ */
+static int __devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
{
struct dev_cgroup *dev_cgroup;
- struct dev_whitelist_item *wh;
+ bool rc;
rcu_read_lock();
-
dev_cgroup = task_devcgroup(current);
+ if (dev_cgroup->behavior == DEVCG_DEFAULT_ALLOW)
+ /* Can't match any of the exceptions, even partially */
+ rc = !match_exception_partial(&dev_cgroup->exceptions,
+ type, major, minor, access);
+ else
+ /* Need to match completely one exception to be allowed */
+ rc = match_exception(&dev_cgroup->exceptions, type, major,
+ minor, access);
+ rcu_read_unlock();
- list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
- if (wh->type & DEV_ALL)
- goto found;
- if ((wh->type & DEV_BLOCK) && !S_ISBLK(inode->i_mode))
- continue;
- if ((wh->type & DEV_CHAR) && !S_ISCHR(inode->i_mode))
- continue;
- if (wh->major != ~0 && wh->major != imajor(inode))
- continue;
- if (wh->minor != ~0 && wh->minor != iminor(inode))
- continue;
-
- if ((mask & MAY_WRITE) && !(wh->access & ACC_WRITE))
- continue;
- if ((mask & MAY_READ) && !(wh->access & ACC_READ))
- continue;
-found:
- rcu_read_unlock();
- return 0;
- }
+ if (!rc)
+ return -EPERM;
- rcu_read_unlock();
+ return 0;
+}
- return -EPERM;
+int __devcgroup_inode_permission(struct inode *inode, int mask)
+{
+ short type, access = 0;
+
+ if (S_ISBLK(inode->i_mode))
+ type = DEV_BLOCK;
+ if (S_ISCHR(inode->i_mode))
+ type = DEV_CHAR;
+ if (mask & MAY_WRITE)
+ access |= ACC_WRITE;
+ if (mask & MAY_READ)
+ access |= ACC_READ;
+
+ return __devcgroup_check_permission(type, imajor(inode), iminor(inode),
+ access);
}
int devcgroup_inode_mknod(int mode, dev_t dev)
{
- struct dev_cgroup *dev_cgroup;
- struct dev_whitelist_item *wh;
+ short type;
if (!S_ISBLK(mode) && !S_ISCHR(mode))
return 0;
- rcu_read_lock();
-
- dev_cgroup = task_devcgroup(current);
-
- list_for_each_entry_rcu(wh, &dev_cgroup->whitelist, list) {
- if (wh->type & DEV_ALL)
- goto found;
- if ((wh->type & DEV_BLOCK) && !S_ISBLK(mode))
- continue;
- if ((wh->type & DEV_CHAR) && !S_ISCHR(mode))
- continue;
- if (wh->major != ~0 && wh->major != MAJOR(dev))
- continue;
- if (wh->minor != ~0 && wh->minor != MINOR(dev))
- continue;
-
- if (!(wh->access & ACC_MKNOD))
- continue;
-found:
- rcu_read_unlock();
- return 0;
- }
+ if (S_ISBLK(mode))
+ type = DEV_BLOCK;
+ else
+ type = DEV_CHAR;
- rcu_read_unlock();
+ return __devcgroup_check_permission(type, MAJOR(dev), MINOR(dev),
+ ACC_MKNOD);
- return -EPERM;
}
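
After the refactoring above, __devcgroup_inode_permission() and devcgroup_inode_mknod() are just translation steps: inode mode maps to DEV_BLOCK/DEV_CHAR, the MAY_*/mknod request maps to access bits, and one call into __devcgroup_check_permission() does the matching. A stand-alone model of that translation; the constants are stand-ins, not the kernel's.

#include <stdio.h>

#define T_BLOCK 1	/* sketch-local stand-ins for DEV_BLOCK/DEV_CHAR */
#define T_CHAR  2
#define A_READ  1	/* sketch-local stand-ins for ACC_READ/ACC_WRITE */
#define A_WRITE 2

struct model_request { int type; int access; };

static struct model_request translate(int is_blockdev, int may_read, int may_write)
{
	struct model_request r = { .type = is_blockdev ? T_BLOCK : T_CHAR, .access = 0 };

	if (may_read)
		r.access |= A_READ;
	if (may_write)
		r.access |= A_WRITE;
	return r;		/* would then be handed to the single checker */
}

int main(void)
{
	struct model_request r = translate(0, 1, 1);	/* char device, read+write */

	printf("type=%d access=%d\n", r.type, r.access);
	return 0;
}
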
diff --git a/security/integrity/Kconfig b/security/integrity/Kconfig
index 5bd1cc1b4a5..245c6d92065 100644
--- a/security/integrity/Kconfig
+++ b/security/integrity/Kconfig
@@ -17,5 +17,32 @@ config INTEGRITY_SIGNATURE
This is useful for evm and module keyrings, when keys are
usually only added from initramfs.
+config INTEGRITY_AUDIT
+	bool "Enable integrity auditing support"
+ depends on INTEGRITY && AUDIT
+ default y
+ help
+ In addition to enabling integrity auditing support, this
+ option adds a kernel parameter 'integrity_audit', which
+ controls the level of integrity auditing messages.
+ 0 - basic integrity auditing messages (default)
+ 1 - additional integrity auditing messages
+
+ Additional informational integrity auditing messages would
+ be enabled by specifying 'integrity_audit=1' on the kernel
+ command line.
+
+config INTEGRITY_ASYMMETRIC_KEYS
+ boolean "Enable asymmetric keys support"
+ depends on INTEGRITY_SIGNATURE
+ default n
+ select ASYMMETRIC_KEY_TYPE
+ select ASYMMETRIC_PUBLIC_KEY_SUBTYPE
+ select PUBLIC_KEY_ALGO_RSA
+ select X509_CERTIFICATE_PARSER
+ help
+ This option enables digital signature verification using
+ asymmetric keys.
+
source security/integrity/ima/Kconfig
source security/integrity/evm/Kconfig
diff --git a/security/integrity/Makefile b/security/integrity/Makefile
index d43799cc14f..0793f4811cb 100644
--- a/security/integrity/Makefile
+++ b/security/integrity/Makefile
@@ -3,11 +3,13 @@
#
obj-$(CONFIG_INTEGRITY) += integrity.o
+obj-$(CONFIG_INTEGRITY_AUDIT) += integrity_audit.o
obj-$(CONFIG_INTEGRITY_SIGNATURE) += digsig.o
+obj-$(CONFIG_INTEGRITY_ASYMMETRIC_KEYS) += digsig_asymmetric.o
integrity-y := iint.o
subdir-$(CONFIG_IMA) += ima
-obj-$(CONFIG_IMA) += ima/built-in.o
+obj-$(CONFIG_IMA) += ima/
subdir-$(CONFIG_EVM) += evm
-obj-$(CONFIG_EVM) += evm/built-in.o
+obj-$(CONFIG_EVM) += evm/
diff --git a/security/integrity/digsig.c b/security/integrity/digsig.c
index 2dc167d7cde..b4af4ebc5be 100644
--- a/security/integrity/digsig.c
+++ b/security/integrity/digsig.c
@@ -28,7 +28,7 @@ static const char *keyring_name[INTEGRITY_KEYRING_MAX] = {
};
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
- const char *digest, int digestlen)
+ const char *digest, int digestlen)
{
if (id >= INTEGRITY_KEYRING_MAX)
return -EINVAL;
@@ -44,5 +44,15 @@ int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
}
}
- return digsig_verify(keyring[id], sig, siglen, digest, digestlen);
+ switch (sig[1]) {
+ case 1:
+	case 1:
+		/* v1 API expects the signature without the xattr type */
+ return digsig_verify(keyring[id], sig + 1, siglen - 1,
+ digest, digestlen);
+ case 2:
+ return asymmetric_verify(keyring[id], sig, siglen,
+ digest, digestlen);
+ }
+
+ return -EOPNOTSUPP;
}
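
integrity_digsig_verify() above now treats the second byte of the signature blob as a version selector: 1 routes to the old digsig path (with the leading xattr type byte stripped), 2 routes to the new asymmetric-key path, anything else is rejected. A minimal dispatcher modelling just that branch; the verify functions are stubs and the blob contents are made up, not the real signature formats.

#include <stdio.h>

static int verify_v1(const char *sig, int len) { (void)sig; (void)len; return 0; }
static int verify_v2(const char *sig, int len) { (void)sig; (void)len; return 0; }

static int dispatch_verify(const char *sig, int siglen)
{
	if (siglen < 2)
		return -1;

	switch (sig[1]) {
	case 1:
		return verify_v1(sig + 1, siglen - 1);	/* strip the xattr type byte */
	case 2:
		return verify_v2(sig, siglen);
	default:
		return -95;	/* mirrors -EOPNOTSUPP on Linux */
	}
}

int main(void)
{
	char blob[] = { 0x03, 0x02, 0x00 };	/* made-up v2-style header */

	printf("%d\n", dispatch_verify(blob, sizeof(blob)));	/* prints 0 */
	return 0;
}
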
diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c
new file mode 100644
index 00000000000..9eae4809006
--- /dev/null
+++ b/security/integrity/digsig_asymmetric.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2013 Intel Corporation
+ *
+ * Author:
+ * Dmitry Kasatkin <dmitry.kasatkin@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/err.h>
+#include <linux/key-type.h>
+#include <crypto/public_key.h>
+#include <keys/asymmetric-type.h>
+
+#include "integrity.h"
+
+/*
+ * Request an asymmetric key.
+ */
+static struct key *request_asymmetric_key(struct key *keyring, uint32_t keyid)
+{
+ struct key *key;
+ char name[12];
+
+ sprintf(name, "id:%x", keyid);
+
+ pr_debug("key search: \"%s\"\n", name);
+
+ if (keyring) {
+ /* search in specific keyring */
+ key_ref_t kref;
+ kref = keyring_search(make_key_ref(keyring, 1),
+ &key_type_asymmetric, name);
+ if (IS_ERR(kref))
+ key = ERR_CAST(kref);
+ else
+ key = key_ref_to_ptr(kref);
+ } else {
+ key = request_key(&key_type_asymmetric, name, NULL);
+ }
+
+ if (IS_ERR(key)) {
+ pr_warn("Request for unknown key '%s' err %ld\n",
+ name, PTR_ERR(key));
+ switch (PTR_ERR(key)) {
+ /* Hide some search errors */
+ case -EACCES:
+ case -ENOTDIR:
+ case -EAGAIN:
+ return ERR_PTR(-ENOKEY);
+ default:
+ return key;
+ }
+ }
+
+ pr_debug("%s() = 0 [%x]\n", __func__, key_serial(key));
+
+ return key;
+}
+
+int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen)
+{
+ struct public_key_signature pks;
+ struct signature_v2_hdr *hdr = (struct signature_v2_hdr *)sig;
+ struct key *key;
+ int ret = -ENOMEM;
+
+ if (siglen <= sizeof(*hdr))
+ return -EBADMSG;
+
+ siglen -= sizeof(*hdr);
+
+ if (siglen != __be16_to_cpu(hdr->sig_size))
+ return -EBADMSG;
+
+ if (hdr->hash_algo >= PKEY_HASH__LAST)
+ return -ENOPKG;
+
+ key = request_asymmetric_key(keyring, __be32_to_cpu(hdr->keyid));
+ if (IS_ERR(key))
+ return PTR_ERR(key);
+
+ memset(&pks, 0, sizeof(pks));
+
+ pks.pkey_hash_algo = hdr->hash_algo;
+ pks.digest = (u8 *)data;
+ pks.digest_size = datalen;
+ pks.nr_mpi = 1;
+ pks.rsa.s = mpi_read_raw_data(hdr->sig, siglen);
+
+ if (pks.rsa.s)
+ ret = verify_signature(key, &pks);
+
+ mpi_free(pks.rsa.s);
+ key_put(key);
+ pr_debug("%s() = %d\n", __func__, ret);
+ return ret;
+}
diff --git a/security/integrity/evm/Kconfig b/security/integrity/evm/Kconfig
index afbb59dd262..d606f3d12d6 100644
--- a/security/integrity/evm/Kconfig
+++ b/security/integrity/evm/Kconfig
@@ -1,13 +1,52 @@
config EVM
boolean "EVM support"
- depends on SECURITY && KEYS && (TRUSTED_KEYS=y || TRUSTED_KEYS=n)
+ depends on SECURITY
+ select KEYS
+ select ENCRYPTED_KEYS
select CRYPTO_HMAC
- select CRYPTO_MD5
select CRYPTO_SHA1
- select ENCRYPTED_KEYS
default n
help
EVM protects a file's security extended attributes against
integrity attacks.
If you are unsure how to answer this question, answer N.
+
+if EVM
+
+menu "EVM options"
+
+config EVM_ATTR_FSUUID
+ bool "FSUUID (version 2)"
+ default y
+ depends on EVM
+ help
+ Include filesystem UUID for HMAC calculation.
+
+	  The default value is 'selected', which is the former version 2;
+	  if 'not selected', it is the former version 1.
+
+ WARNING: changing the HMAC calculation method or adding
+	  additional info to the calculation requires existing EVM
+ labeled file systems to be relabeled.
+
+config EVM_EXTRA_SMACK_XATTRS
+ bool "Additional SMACK xattrs"
+ depends on EVM && SECURITY_SMACK
+ default n
+ help
+ Include additional SMACK xattrs for HMAC calculation.
+
+	  In addition to the original security xattrs (e.g. security.selinux,
+ security.SMACK64, security.capability, and security.ima) included
+ in the HMAC calculation, enabling this option includes newly defined
+ Smack xattrs: security.SMACK64EXEC, security.SMACK64TRANSMUTE and
+ security.SMACK64MMAP.
+
+ WARNING: changing the HMAC calculation method or adding
+	  additional info to the calculation requires existing EVM
+ labeled file systems to be relabeled.
+
+endmenu
+
+endif
diff --git a/security/integrity/evm/evm.h b/security/integrity/evm/evm.h
index c885247ebcf..88bfe77efa1 100644
--- a/security/integrity/evm/evm.h
+++ b/security/integrity/evm/evm.h
@@ -25,26 +25,29 @@ extern int evm_initialized;
extern char *evm_hmac;
extern char *evm_hash;
+#define EVM_ATTR_FSUUID 0x0001
+
+extern int evm_hmac_attrs;
+
extern struct crypto_shash *hmac_tfm;
extern struct crypto_shash *hash_tfm;
/* List of EVM protected security xattrs */
extern char *evm_config_xattrnames[];
-extern int evm_init_key(void);
-extern int evm_update_evmxattr(struct dentry *dentry,
- const char *req_xattr_name,
- const char *req_xattr_value,
- size_t req_xattr_value_len);
-extern int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
- const char *req_xattr_value,
- size_t req_xattr_value_len, char *digest);
-extern int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
- const char *req_xattr_value,
- size_t req_xattr_value_len, char *digest);
-extern int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
- char *hmac_val);
-extern int evm_init_secfs(void);
-extern void evm_cleanup_secfs(void);
+int evm_init_key(void);
+int evm_update_evmxattr(struct dentry *dentry,
+ const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len);
+int evm_calc_hmac(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char *digest);
+int evm_calc_hash(struct dentry *dentry, const char *req_xattr_name,
+ const char *req_xattr_value,
+ size_t req_xattr_value_len, char *digest);
+int evm_init_hmac(struct inode *inode, const struct xattr *xattr,
+ char *hmac_val);
+int evm_init_secfs(void);
#endif
diff --git a/security/integrity/evm/evm_crypto.c b/security/integrity/evm/evm_crypto.c
index 49a464f5595..5e9687f02e1 100644
--- a/security/integrity/evm/evm_crypto.c
+++ b/security/integrity/evm/evm_crypto.c
@@ -13,6 +13,8 @@
* Using root's kernel master key (kmk), calculate the HMAC
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/crypto.h>
#include <linux/xattr.h>
@@ -103,13 +105,16 @@ static void hmac_add_misc(struct shash_desc *desc, struct inode *inode,
umode_t mode;
} hmac_misc;
- memset(&hmac_misc, 0, sizeof hmac_misc);
+ memset(&hmac_misc, 0, sizeof(hmac_misc));
hmac_misc.ino = inode->i_ino;
hmac_misc.generation = inode->i_generation;
- hmac_misc.uid = inode->i_uid;
- hmac_misc.gid = inode->i_gid;
+ hmac_misc.uid = from_kuid(&init_user_ns, inode->i_uid);
+ hmac_misc.gid = from_kgid(&init_user_ns, inode->i_gid);
hmac_misc.mode = inode->i_mode;
- crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof hmac_misc);
+ crypto_shash_update(desc, (const u8 *)&hmac_misc, sizeof(hmac_misc));
+ if (evm_hmac_attrs & EVM_ATTR_FSUUID)
+ crypto_shash_update(desc, inode->i_sb->s_uuid,
+ sizeof(inode->i_sb->s_uuid));
crypto_shash_final(desc, digest);
}
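
What the hunk above changes in the HMAC input: the misc block now carries uid/gid values translated through from_kuid()/from_kgid() into the initial namespace, and the filesystem UUID is appended to the hash only when the EVM_ATTR_FSUUID flag is configured. The structure below is a simplified illustration of that input layout, not the kernel's definition.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct hmac_input_model {
	uint64_t ino;
	uint32_t generation;
	uint32_t uid;		/* result of a from_kuid()-style translation */
	uint32_t gid;
	uint16_t mode;
};

int main(void)
{
	struct hmac_input_model misc;
	int use_fsuuid = 1;	/* models CONFIG_EVM_ATTR_FSUUID being enabled */

	memset(&misc, 0, sizeof(misc));
	misc.ino = 12345;
	misc.uid = 1000;

	/* the misc block is always hashed; the 16-byte fs uuid only optionally */
	printf("hashed bytes: %zu%s\n", sizeof(misc),
	       use_fsuuid ? " + 16 (fs uuid)" : "");
	return 0;
}
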
@@ -134,7 +139,7 @@ static int evm_calc_hmac_or_hash(struct dentry *dentry,
int error;
int size;
- if (!inode->i_op || !inode->i_op->getxattr)
+ if (!inode->i_op->getxattr)
return -EOPNOTSUPP;
desc = init_desc(type);
if (IS_ERR(desc))
@@ -205,9 +210,9 @@ int evm_update_evmxattr(struct dentry *dentry, const char *xattr_name,
rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_EVM,
&xattr_data,
sizeof(xattr_data), 0);
- }
- else if (rc == -ENODATA)
+ } else if (rc == -ENODATA && inode->i_op->removexattr) {
rc = inode->i_op->removexattr(dentry, XATTR_NAME_EVM);
+ }
return rc;
}
@@ -218,7 +223,7 @@ int evm_init_hmac(struct inode *inode, const struct xattr *lsm_xattr,
desc = init_desc(EVM_XATTR_HMAC);
if (IS_ERR(desc)) {
- printk(KERN_INFO "init_desc failed\n");
+ pr_info("init_desc failed\n");
return PTR_ERR(desc);
}
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 8901501425f..3bcb80df4d0 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -14,8 +14,11 @@
* evm_inode_removexattr, and evm_verifyxattr
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/crypto.h>
+#include <linux/audit.h>
#include <linux/xattr.h>
#include <linux/integrity.h>
#include <linux/evm.h>
@@ -24,8 +27,12 @@
int evm_initialized;
+static char *integrity_status_msg[] = {
+ "pass", "fail", "no_label", "no_xattrs", "unknown"
+};
char *evm_hmac = "hmac(sha1)";
char *evm_hash = "sha1";
+int evm_hmac_attrs;
char *evm_config_xattrnames[] = {
#ifdef CONFIG_SECURITY_SELINUX
@@ -33,6 +40,14 @@ char *evm_config_xattrnames[] = {
#endif
#ifdef CONFIG_SECURITY_SMACK
XATTR_NAME_SMACK,
+#ifdef CONFIG_EVM_EXTRA_SMACK_XATTRS
+ XATTR_NAME_SMACKEXEC,
+ XATTR_NAME_SMACKTRANSMUTE,
+ XATTR_NAME_SMACKMMAP,
+#endif
+#endif
+#ifdef CONFIG_IMA_APPRAISE
+ XATTR_NAME_IMA,
#endif
XATTR_NAME_CAPS,
NULL
@@ -47,6 +62,14 @@ static int __init evm_set_fixmode(char *str)
}
__setup("evm=", evm_set_fixmode);
+static void __init evm_init_config(void)
+{
+#ifdef CONFIG_EVM_ATTR_FSUUID
+ evm_hmac_attrs |= EVM_ATTR_FSUUID;
+#endif
+ pr_info("HMAC attrs: 0x%x\n", evm_hmac_attrs);
+}
+
static int evm_find_protected_xattrs(struct dentry *dentry)
{
struct inode *inode = dentry->d_inode;
@@ -54,7 +77,7 @@ static int evm_find_protected_xattrs(struct dentry *dentry)
int error;
int count = 0;
- if (!inode->i_op || !inode->i_op->getxattr)
+ if (!inode->i_op->getxattr)
return -EOPNOTSUPP;
for (xattr = evm_config_xattrnames; *xattr != NULL; xattr++) {
@@ -115,7 +138,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
goto out;
}
- xattr_len = rc - 1;
+ xattr_len = rc;
/* check value type */
switch (xattr_data->type) {
@@ -135,7 +158,7 @@ static enum integrity_status evm_verify_hmac(struct dentry *dentry,
if (rc)
break;
rc = integrity_digsig_verify(INTEGRITY_KEYRING_EVM,
- xattr_data->digest, xattr_len,
+ (const char *)xattr_data, xattr_len,
calc.digest, sizeof(calc.digest));
if (!rc) {
/* we probably want to replace rsa with hmac here */
@@ -258,9 +281,15 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
if ((evm_status == INTEGRITY_PASS) ||
(evm_status == INTEGRITY_NOXATTRS))
return 0;
- return -EPERM;
+ goto out;
}
evm_status = evm_verify_current_integrity(dentry);
+out:
+ if (evm_status != INTEGRITY_PASS)
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
+ dentry->d_name.name, "appraise_metadata",
+ integrity_status_msg[evm_status],
+ -EPERM, 0);
return evm_status == INTEGRITY_PASS ? 0 : -EPERM;
}
@@ -271,12 +300,20 @@ static int evm_protect_xattr(struct dentry *dentry, const char *xattr_name,
* @xattr_value: pointer to the new extended attribute value
* @xattr_value_len: pointer to the new extended attribute value length
*
- * Updating 'security.evm' requires CAP_SYS_ADMIN privileges and that
- * the current value is valid.
+ * Before allowing the 'security.evm' protected xattr to be updated,
+ * verify the existing value is valid. As only the kernel should have
+ * access to the EVM encrypted key needed to calculate the HMAC, prevent
+ * userspace from writing the HMAC value. Writing 'security.evm'
+ * requires CAP_SYS_ADMIN privileges.
*/
int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
const void *xattr_value, size_t xattr_value_len)
{
+ const struct evm_ima_xattr_data *xattr_data = xattr_value;
+
+ if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0)
+ && (xattr_data->type == EVM_XATTR_HMAC))
+ return -EPERM;
return evm_protect_xattr(dentry, xattr_name, xattr_value,
xattr_value_len);
}
@@ -353,6 +390,9 @@ int evm_inode_setattr(struct dentry *dentry, struct iattr *attr)
if ((evm_status == INTEGRITY_PASS) ||
(evm_status == INTEGRITY_NOXATTRS))
return 0;
+ integrity_audit_msg(AUDIT_INTEGRITY_METADATA, dentry->d_inode,
+ dentry->d_name.name, "appraise_metadata",
+ integrity_status_msg[evm_status], -EPERM, 0);
return -EPERM;
}
@@ -401,7 +441,7 @@ int evm_inode_init_security(struct inode *inode,
evm_xattr->value = xattr_data;
evm_xattr->value_len = sizeof(*xattr_data);
- evm_xattr->name = kstrdup(XATTR_EVM_SUFFIX, GFP_NOFS);
+ evm_xattr->name = XATTR_EVM_SUFFIX;
return 0;
out:
kfree(xattr_data);
@@ -413,9 +453,11 @@ static int __init init_evm(void)
{
int error;
+ evm_init_config();
+
error = evm_init_secfs();
if (error < 0) {
- printk(KERN_INFO "EVM: Error registering secfs\n");
+ pr_info("Error registering secfs\n");
goto err;
}
@@ -424,15 +466,6 @@ err:
return error;
}
-static void __exit cleanup_evm(void)
-{
- evm_cleanup_secfs();
- if (hmac_tfm)
- crypto_free_shash(hmac_tfm);
- if (hash_tfm)
- crypto_free_shash(hash_tfm);
-}
-
/*
* evm_display_config - list the EVM protected security extended attributes
*/
@@ -441,7 +474,7 @@ static int __init evm_display_config(void)
char **xattrname;
for (xattrname = evm_config_xattrnames; *xattrname != NULL; xattrname++)
- printk(KERN_INFO "EVM: %s\n", *xattrname);
+ pr_info("%s\n", *xattrname);
return 0;
}
diff --git a/security/integrity/evm/evm_posix_acl.c b/security/integrity/evm/evm_posix_acl.c
index b1753e98bf9..46408b9e62e 100644
--- a/security/integrity/evm/evm_posix_acl.c
+++ b/security/integrity/evm/evm_posix_acl.c
@@ -11,8 +11,9 @@
#include <linux/module.h>
#include <linux/xattr.h>
+#include <linux/evm.h>
-int posix_xattr_acl(char *xattr)
+int posix_xattr_acl(const char *xattr)
{
int xattr_len = strlen(xattr);
diff --git a/security/integrity/evm/evm_secfs.c b/security/integrity/evm/evm_secfs.c
index ac762995057..cf12a04717d 100644
--- a/security/integrity/evm/evm_secfs.c
+++ b/security/integrity/evm/evm_secfs.c
@@ -13,6 +13,8 @@
* - Get the key and enable EVM
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/uaccess.h>
#include <linux/module.h>
#include "evm.h"
@@ -79,9 +81,9 @@ static ssize_t evm_write_key(struct file *file, const char __user *buf,
error = evm_init_key();
if (!error) {
evm_initialized = 1;
- pr_info("EVM: initialized\n");
+ pr_info("initialized\n");
} else
- pr_err("EVM: initialization failed\n");
+ pr_err("initialization failed\n");
return count;
}
@@ -100,9 +102,3 @@ int __init evm_init_secfs(void)
error = -EFAULT;
return error;
}
-
-void __exit evm_cleanup_secfs(void)
-{
- if (evm_init_tpm)
- securityfs_remove(evm_init_tpm);
-}
diff --git a/security/integrity/iint.c b/security/integrity/iint.c
index 399641c3e84..a521edf4cbd 100644
--- a/security/integrity/iint.c
+++ b/security/integrity/iint.c
@@ -22,7 +22,7 @@
#include "integrity.h"
static struct rb_root integrity_iint_tree = RB_ROOT;
-static DEFINE_SPINLOCK(integrity_iint_lock);
+static DEFINE_RWLOCK(integrity_iint_lock);
static struct kmem_cache *iint_cache __read_mostly;
int iint_initialized;
@@ -35,8 +35,6 @@ static struct integrity_iint_cache *__integrity_iint_find(struct inode *inode)
struct integrity_iint_cache *iint;
struct rb_node *n = integrity_iint_tree.rb_node;
- assert_spin_locked(&integrity_iint_lock);
-
while (n) {
iint = rb_entry(n, struct integrity_iint_cache, rb_node);
@@ -63,70 +61,69 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode)
if (!IS_IMA(inode))
return NULL;
- spin_lock(&integrity_iint_lock);
+ read_lock(&integrity_iint_lock);
iint = __integrity_iint_find(inode);
- spin_unlock(&integrity_iint_lock);
+ read_unlock(&integrity_iint_lock);
return iint;
}
static void iint_free(struct integrity_iint_cache *iint)
{
+ kfree(iint->ima_hash);
+ iint->ima_hash = NULL;
iint->version = 0;
iint->flags = 0UL;
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_module_status = INTEGRITY_UNKNOWN;
iint->evm_status = INTEGRITY_UNKNOWN;
kmem_cache_free(iint_cache, iint);
}
/**
- * integrity_inode_alloc - allocate an iint associated with an inode
+ * integrity_inode_get - find or allocate an iint associated with an inode
* @inode: pointer to the inode
+ * @return: allocated iint
+ *
+ * Caller must lock i_mutex
*/
-int integrity_inode_alloc(struct inode *inode)
+struct integrity_iint_cache *integrity_inode_get(struct inode *inode)
{
struct rb_node **p;
- struct rb_node *new_node, *parent = NULL;
- struct integrity_iint_cache *new_iint, *test_iint;
- int rc;
+ struct rb_node *node, *parent = NULL;
+ struct integrity_iint_cache *iint, *test_iint;
- new_iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
- if (!new_iint)
- return -ENOMEM;
+ iint = integrity_iint_find(inode);
+ if (iint)
+ return iint;
- new_iint->inode = inode;
- new_node = &new_iint->rb_node;
+ iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
+ if (!iint)
+ return NULL;
- mutex_lock(&inode->i_mutex); /* i_flags */
- spin_lock(&integrity_iint_lock);
+ write_lock(&integrity_iint_lock);
p = &integrity_iint_tree.rb_node;
while (*p) {
parent = *p;
test_iint = rb_entry(parent, struct integrity_iint_cache,
rb_node);
- rc = -EEXIST;
if (inode < test_iint->inode)
p = &(*p)->rb_left;
- else if (inode > test_iint->inode)
- p = &(*p)->rb_right;
else
- goto out_err;
+ p = &(*p)->rb_right;
}
+ iint->inode = inode;
+ node = &iint->rb_node;
inode->i_flags |= S_IMA;
- rb_link_node(new_node, parent, p);
- rb_insert_color(new_node, &integrity_iint_tree);
+ rb_link_node(node, parent, p);
+ rb_insert_color(node, &integrity_iint_tree);
- spin_unlock(&integrity_iint_lock);
- mutex_unlock(&inode->i_mutex); /* i_flags */
-
- return 0;
-out_err:
- spin_unlock(&integrity_iint_lock);
- mutex_unlock(&inode->i_mutex); /* i_flags */
- iint_free(new_iint);
-
- return rc;
+ write_unlock(&integrity_iint_lock);
+ return iint;
}
/**
@@ -142,10 +139,10 @@ void integrity_inode_free(struct inode *inode)
if (!IS_IMA(inode))
return;
- spin_lock(&integrity_iint_lock);
+ write_lock(&integrity_iint_lock);
iint = __integrity_iint_find(inode);
rb_erase(&iint->rb_node, &integrity_iint_tree);
- spin_unlock(&integrity_iint_lock);
+ write_unlock(&integrity_iint_lock);
iint_free(iint);
}
@@ -154,10 +151,13 @@ static void init_once(void *foo)
{
struct integrity_iint_cache *iint = foo;
- memset(iint, 0, sizeof *iint);
+ memset(iint, 0, sizeof(*iint));
iint->version = 0;
iint->flags = 0UL;
- mutex_init(&iint->mutex);
+ iint->ima_file_status = INTEGRITY_UNKNOWN;
+ iint->ima_mmap_status = INTEGRITY_UNKNOWN;
+ iint->ima_bprm_status = INTEGRITY_UNKNOWN;
+ iint->ima_module_status = INTEGRITY_UNKNOWN;
iint->evm_status = INTEGRITY_UNKNOWN;
}
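
The integrity_inode_get() rework above follows a lookup-then-insert pattern; the skeleton below (an editorial sketch, not part of the patch) isolates the locking: rbtree lookups nest under the new read lock, insertion takes the write lock, and, as the kerneldoc above requires, the caller's i_mutex is what prevents two tasks from allocating an iint for the same inode.

    struct integrity_iint_cache *iint;

    iint = integrity_iint_find(inode);      /* read_lock()ed lookup */
    if (iint)
        return iint;

    iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
    if (!iint)
        return NULL;

    write_lock(&integrity_iint_lock);
    /* walk integrity_iint_tree, then rb_link_node()/rb_insert_color() */
    write_unlock(&integrity_iint_lock);
    return iint;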
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 4f554f20dc9..81a27971d88 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -9,8 +9,10 @@ config IMA
select CRYPTO_HMAC
select CRYPTO_MD5
select CRYPTO_SHA1
- select TCG_TPM if !S390 && !UML
- select TCG_TIS if TCG_TPM
+ select CRYPTO_HASH_INFO
+ select TCG_TPM if HAS_IOMEM && !UML
+ select TCG_TIS if TCG_TPM && X86
+ select TCG_IBMVTPM if TCG_TPM && PPC64
help
The Trusted Computing Group(TCG) runtime Integrity
Measurement Architecture(IMA) maintains a list of hash
@@ -37,20 +39,87 @@ config IMA_MEASURE_PCR_IDX
that IMA uses to maintain the integrity aggregate of the
measurement list. If unsure, use the default 10.
-config IMA_AUDIT
- bool
- depends on IMA
- default y
- help
- This option adds a kernel parameter 'ima_audit', which
- allows informational auditing messages to be enabled
- at boot. If this option is selected, informational integrity
- auditing messages can be enabled with 'ima_audit=1' on
- the kernel command line.
-
config IMA_LSM_RULES
bool
depends on IMA && AUDIT && (SECURITY_SELINUX || SECURITY_SMACK)
default y
help
Disabling this option will disregard LSM based policy rules.
+
+choice
+ prompt "Default template"
+ default IMA_NG_TEMPLATE
+ depends on IMA
+ help
+ Select the default IMA measurement template.
+
+ The original 'ima' measurement list template contains a
+ hash, defined as 20 bytes, and a null terminated pathname,
+ limited to 255 characters. The 'ima-ng' measurement list
+ template permits both larger hash digests and longer
+ pathnames.
+
+ config IMA_TEMPLATE
+ bool "ima"
+ config IMA_NG_TEMPLATE
+ bool "ima-ng (default)"
+ config IMA_SIG_TEMPLATE
+ bool "ima-sig"
+endchoice
+
+config IMA_DEFAULT_TEMPLATE
+ string
+ depends on IMA
+ default "ima" if IMA_TEMPLATE
+ default "ima-ng" if IMA_NG_TEMPLATE
+ default "ima-sig" if IMA_SIG_TEMPLATE
+
+choice
+ prompt "Default integrity hash algorithm"
+ default IMA_DEFAULT_HASH_SHA1
+ depends on IMA
+ help
+ Select the default hash algorithm used for the measurement
+ list, integrity appraisal and audit log. The compiled default
+ hash algorithm can be overwritten using the kernel command
+ line 'ima_hash=' option.
+
+ config IMA_DEFAULT_HASH_SHA1
+ bool "SHA1 (default)"
+ depends on CRYPTO_SHA1
+
+ config IMA_DEFAULT_HASH_SHA256
+ bool "SHA256"
+ depends on CRYPTO_SHA256 && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_SHA512
+ bool "SHA512"
+ depends on CRYPTO_SHA512 && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_WP512
+ bool "WP512"
+ depends on CRYPTO_WP512 && !IMA_TEMPLATE
+endchoice
+
+config IMA_DEFAULT_HASH
+ string
+ depends on IMA
+ default "sha1" if IMA_DEFAULT_HASH_SHA1
+ default "sha256" if IMA_DEFAULT_HASH_SHA256
+ default "sha512" if IMA_DEFAULT_HASH_SHA512
+ default "wp512" if IMA_DEFAULT_HASH_WP512
+
+config IMA_APPRAISE
+ bool "Appraise integrity measurements"
+ depends on IMA
+ default n
+ help
+ This option enables local measurement integrity appraisal.
+ It requires the system to be labeled with a security extended
+ attribute containing the file hash measurement. To protect
+ the security extended attributes from offline attack, enable
+ and configure EVM.
+
+ For more information on integrity appraisal refer to:
+ <http://linux-ima.sourceforge.net>
+ If unsure, say N.
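
As a concrete illustration of the options above (an example configuration, not a recommendation): selecting IMA_NG_TEMPLATE and IMA_DEFAULT_HASH_SHA256 compiles in the 'ima-ng' template with sha256 as the default measurement/appraisal hash, and, per the help text, the hash can still be overridden at boot with the 'ima_hash=' kernel parameter (e.g. ima_hash=sha512).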
diff --git a/security/integrity/ima/Makefile b/security/integrity/ima/Makefile
index 5690c021de8..d79263d2fdb 100644
--- a/security/integrity/ima/Makefile
+++ b/security/integrity/ima/Makefile
@@ -6,4 +6,5 @@
obj-$(CONFIG_IMA) += ima.o
ima-y := ima_fs.o ima_queue.o ima_init.o ima_main.o ima_crypto.o ima_api.o \
- ima_policy.o ima_audit.o
+ ima_policy.o ima_template.o ima_template_lib.o
+ima-$(CONFIG_IMA_APPRAISE) += ima_appraise.o
diff --git a/security/integrity/ima/ima.h b/security/integrity/ima/ima.h
index 3ccf7acac6d..f79fa8be203 100644
--- a/security/integrity/ima/ima.h
+++ b/security/integrity/ima/ima.h
@@ -26,7 +26,8 @@
#include "../integrity.h"
-enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_ASCII };
+enum ima_show_type { IMA_SHOW_BINARY, IMA_SHOW_BINARY_NO_FIELD_LEN,
+ IMA_SHOW_BINARY_OLD_STRING_FMT, IMA_SHOW_ASCII };
enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
/* digest size for IMA, fits SHA1 or MD5 */
@@ -36,22 +37,48 @@ enum tpm_pcrs { TPM_PCR0 = 0, TPM_PCR8 = 8 };
#define IMA_HASH_BITS 9
#define IMA_MEASURE_HTABLE_SIZE (1 << IMA_HASH_BITS)
+#define IMA_TEMPLATE_FIELD_ID_MAX_LEN 16
+#define IMA_TEMPLATE_NUM_FIELDS_MAX 15
+
+#define IMA_TEMPLATE_IMA_NAME "ima"
+#define IMA_TEMPLATE_IMA_FMT "d|n"
+
/* set during initialization */
extern int ima_initialized;
extern int ima_used_chip;
-extern char *ima_hash;
+extern int ima_hash_algo;
+extern int ima_appraise;
+
+/* IMA template field data definition */
+struct ima_field_data {
+ u8 *data;
+ u32 len;
+};
-/* IMA inode template definition */
-struct ima_template_data {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1/md5 measurement hash */
- char file_name[IMA_EVENT_NAME_LEN_MAX + 1]; /* name + \0 */
+/* IMA template field definition */
+struct ima_template_field {
+ const char field_id[IMA_TEMPLATE_FIELD_ID_MAX_LEN];
+ int (*field_init) (struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data);
+ void (*field_show) (struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+};
+
+/* IMA template descriptor definition */
+struct ima_template_desc {
+ char *name;
+ char *fmt;
+ int num_fields;
+ struct ima_template_field **fields;
};
struct ima_template_entry {
- u8 digest[IMA_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
- const char *template_name;
- int template_len;
- struct ima_template_data template;
+ u8 digest[TPM_DIGEST_SIZE]; /* sha1 or md5 measurement hash */
+ struct ima_template_desc *template_desc; /* template descriptor */
+ u32 template_data_len;
+ struct ima_field_data template_data[0]; /* template related data */
};
struct ima_queue_entry {
@@ -61,11 +88,6 @@ struct ima_queue_entry {
};
extern struct list_head ima_measurements; /* list of all measurements */
-/* declarations */
-void integrity_audit_msg(int audit_msgno, struct inode *inode,
- const unsigned char *fname, const char *op,
- const char *cause, int result, int info);
-
/* Internal IMA function definitions */
int ima_init(void);
void ima_cleanup(void);
@@ -73,12 +95,22 @@ int ima_fs_init(void);
void ima_fs_cleanup(void);
int ima_inode_alloc(struct inode *inode);
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode);
-int ima_calc_hash(struct file *file, char *digest);
-int ima_calc_template_hash(int template_len, void *template, char *digest);
-int ima_calc_boot_aggregate(char *digest);
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
+ const char *op, struct inode *inode,
+ const unsigned char *filename);
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash);
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash);
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash);
+void ima_add_violation(struct file *file, const unsigned char *filename,
const char *op, const char *cause);
+int ima_init_crypto(void);
+void ima_putc(struct seq_file *m, void *data, int datalen);
+void ima_print_digest(struct seq_file *m, u8 *digest, int size);
+struct ima_template_desc *ima_template_desc_current(void);
+int ima_init_template(void);
+
/*
* used to protect h_table and sha_table
@@ -98,14 +130,26 @@ static inline unsigned long ima_hash_key(u8 *digest)
}
/* LIM API function definitions */
+int ima_get_action(struct inode *inode, int mask, int function);
int ima_must_measure(struct inode *inode, int mask, int function);
int ima_collect_measurement(struct integrity_iint_cache *iint,
- struct file *file);
+ struct file *file,
+ struct evm_ima_xattr_data **xattr_value,
+ int *xattr_len);
void ima_store_measurement(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
+void ima_audit_measurement(struct integrity_iint_cache *iint,
const unsigned char *filename);
+int ima_alloc_init_template(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_template_entry **entry);
int ima_store_template(struct ima_template_entry *entry, int violation,
- struct inode *inode);
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show);
+ struct inode *inode, const unsigned char *filename);
+void ima_free_template_entry(struct ima_template_entry *entry);
+const char *ima_d_path(struct path *path, char **pathbuf);
/* rbtree tree calls to lookup, insert, delete
* integrity data associated with an inode.
@@ -114,14 +158,76 @@ struct integrity_iint_cache *integrity_iint_insert(struct inode *inode);
struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
/* IMA policy related functions */
-enum ima_hooks { FILE_CHECK = 1, FILE_MMAP, BPRM_CHECK };
+enum ima_hooks { FILE_CHECK = 1, MMAP_CHECK, BPRM_CHECK, MODULE_CHECK, POST_SETATTR };
-int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask);
+int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+ int flags);
void ima_init_policy(void);
void ima_update_policy(void);
ssize_t ima_parse_add_rule(char *);
void ima_delete_rules(void);
+/* Appraise integrity measurements */
+#define IMA_APPRAISE_ENFORCE 0x01
+#define IMA_APPRAISE_FIX 0x02
+#define IMA_APPRAISE_MODULES 0x04
+
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len);
+int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func);
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file);
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+ int func);
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_digest_data *hash);
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value);
+
+#else
+static inline int ima_appraise_measurement(int func,
+ struct integrity_iint_cache *iint,
+ struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ return INTEGRITY_UNKNOWN;
+}
+
+static inline int ima_must_appraise(struct inode *inode, int mask,
+ enum ima_hooks func)
+{
+ return 0;
+}
+
+static inline void ima_update_xattr(struct integrity_iint_cache *iint,
+ struct file *file)
+{
+}
+
+static inline enum integrity_status ima_get_cache_status(struct integrity_iint_cache
+ *iint, int func)
+{
+ return INTEGRITY_UNKNOWN;
+}
+
+static inline void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value,
+ int xattr_len,
+ struct ima_digest_data *hash)
+{
+}
+
+static inline int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ return 0;
+}
+
+#endif
+
/* LSM based policy rules require audit */
#ifdef CONFIG_IMA_LSM_RULES
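
To make the new template structures above concrete, here is a minimal sketch of how a descriptor ties a format string to its fields; every identifier prefixed with sketch_ is hypothetical, and only the types, prototypes and the "n" field id come from the header above.

    /* Hypothetical "name only" field, matching the field_init/field_show
     * prototypes declared in ima.h above. */
    static int sketch_init_name(struct integrity_iint_cache *iint,
                                struct file *file,
                                const unsigned char *filename,
                                struct evm_ima_xattr_data *xattr_value,
                                int xattr_len,
                                struct ima_field_data *field_data)
    {
        u32 len = strlen(filename) + 1;

        field_data->data = kmemdup(filename, len, GFP_NOFS);
        if (!field_data->data)
            return -ENOMEM;
        field_data->len = len;
        return 0;
    }

    static void sketch_show_name(struct seq_file *m, enum ima_show_type show,
                                 struct ima_field_data *field_data)
    {
        ima_putc(m, field_data->data, field_data->len);
    }

    static struct ima_template_field sketch_name_field = {
        .field_id = "n",
        .field_init = sketch_init_name,
        .field_show = sketch_show_name,
    };
    static struct ima_template_field *sketch_fields[] = { &sketch_name_field };

    static struct ima_template_desc sketch_desc = {
        .name = "ima-sketch",           /* hypothetical template name */
        .fmt = "n",
        .num_fields = ARRAY_SIZE(sketch_fields),
        .fields = sketch_fields,
    };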
diff --git a/security/integrity/ima/ima_api.c b/security/integrity/ima/ima_api.c
index 88a2788b981..d9cd5ce14d2 100644
--- a/security/integrity/ima/ima_api.c
+++ b/security/integrity/ima/ima_api.c
@@ -9,14 +9,68 @@
* License.
*
* File: ima_api.c
- * Implements must_measure, collect_measurement, store_measurement,
- * and store_template.
+ * Implements must_appraise_or_measure, collect_measurement,
+ * appraise_measurement, store_measurement and store_template.
*/
#include <linux/module.h>
#include <linux/slab.h>
-
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/evm.h>
+#include <crypto/hash_info.h>
#include "ima.h"
-static const char *IMA_TEMPLATE_NAME = "ima";
+
+/*
+ * ima_free_template_entry - free an existing template entry
+ */
+void ima_free_template_entry(struct ima_template_entry *entry)
+{
+ int i;
+
+ for (i = 0; i < entry->template_desc->num_fields; i++)
+ kfree(entry->template_data[i].data);
+
+ kfree(entry);
+}
+
+/*
+ * ima_alloc_init_template - create and initialize a new template entry
+ */
+int ima_alloc_init_template(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_template_entry **entry)
+{
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i, result = 0;
+
+ *entry = kzalloc(sizeof(**entry) + template_desc->num_fields *
+ sizeof(struct ima_field_data), GFP_NOFS);
+ if (!*entry)
+ return -ENOMEM;
+
+ (*entry)->template_desc = template_desc;
+ for (i = 0; i < template_desc->num_fields; i++) {
+ struct ima_template_field *field = template_desc->fields[i];
+ u32 len;
+
+ result = field->field_init(iint, file, filename,
+ xattr_value, xattr_len,
+ &((*entry)->template_data[i]));
+ if (result != 0)
+ goto out;
+
+ len = (*entry)->template_data[i].len;
+ (*entry)->template_data_len += sizeof(len);
+ (*entry)->template_data_len += len;
+ }
+ return 0;
+out:
+ ima_free_template_entry(*entry);
+ *entry = NULL;
+ return result;
+}
/*
* ima_store_template - store ima template measurements
@@ -35,28 +89,35 @@ static const char *IMA_TEMPLATE_NAME = "ima";
* Returns 0 on success, error code otherwise
*/
int ima_store_template(struct ima_template_entry *entry,
- int violation, struct inode *inode)
+ int violation, struct inode *inode,
+ const unsigned char *filename)
{
- const char *op = "add_template_measure";
- const char *audit_cause = "hashing_error";
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "hashing_error";
+ char *template_name = entry->template_desc->name;
int result;
-
- memset(entry->digest, 0, sizeof(entry->digest));
- entry->template_name = IMA_TEMPLATE_NAME;
- entry->template_len = sizeof(entry->template);
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
if (!violation) {
- result = ima_calc_template_hash(entry->template_len,
- &entry->template,
- entry->digest);
+ int num_fields = entry->template_desc->num_fields;
+
+ /* this function uses default algo */
+ hash.hdr.algo = HASH_ALGO_SHA1;
+ result = ima_calc_field_array_hash(&entry->template_data[0],
+ entry->template_desc,
+ num_fields, &hash.hdr);
if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template_name, op,
+ template_name, op,
audit_cause, result, 0);
return result;
}
+ memcpy(entry->digest, hash.hdr.digest, hash.hdr.length);
}
- result = ima_add_template_entry(entry, violation, op, inode);
+ result = ima_add_template_entry(entry, violation, op, inode, filename);
return result;
}
@@ -67,53 +128,60 @@ int ima_store_template(struct ima_template_entry *entry,
* By extending the PCR with 0xFF's instead of with zeroes, the PCR
* value is invalidated.
*/
-void ima_add_violation(struct inode *inode, const unsigned char *filename,
+void ima_add_violation(struct file *file, const unsigned char *filename,
const char *op, const char *cause)
{
struct ima_template_entry *entry;
+ struct inode *inode = file_inode(file);
int violation = 1;
int result;
/* can overflow, only indicator */
atomic_long_inc(&ima_htable.violations);
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ result = ima_alloc_init_template(NULL, file, filename,
+ NULL, 0, &entry);
+ if (result < 0) {
result = -ENOMEM;
goto err_out;
}
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
- result = ima_store_template(entry, violation, inode);
+ result = ima_store_template(entry, violation, inode, filename);
if (result < 0)
- kfree(entry);
+ ima_free_template_entry(entry);
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, cause, result, 0);
}
/**
- * ima_must_measure - measure decision based on policy.
+ * ima_get_action - appraise & measure decision based on policy.
* @inode: pointer to inode to measure
* @mask: contains the permission mask (MAY_READ, MAY_WRITE, MAY_EXECUTE)
- * @function: calling function (FILE_CHECK, BPRM_CHECK, FILE_MMAP)
+ * @function: calling function (FILE_CHECK, BPRM_CHECK, MMAP_CHECK, MODULE_CHECK)
*
* The policy is defined in terms of keypairs:
- * subj=, obj=, type=, func=, mask=, fsmagic=
+ * subj=, obj=, type=, func=, mask=, fsmagic=
* subj,obj, and type: are LSM specific.
- * func: FILE_CHECK | BPRM_CHECK | FILE_MMAP
- * mask: contains the permission mask
+ * func: FILE_CHECK | BPRM_CHECK | MMAP_CHECK | MODULE_CHECK
+ * mask: contains the permission mask
* fsmagic: hex value
*
- * Return 0 to measure. For matching a DONT_MEASURE policy, no policy,
- * or other error, return an error code.
-*/
-int ima_must_measure(struct inode *inode, int mask, int function)
+ * Returns IMA_MEASURE, IMA_APPRAISE mask.
+ *
+ */
+int ima_get_action(struct inode *inode, int mask, int function)
{
- int must_measure;
+ int flags = IMA_MEASURE | IMA_AUDIT | IMA_APPRAISE;
+
+ if (!ima_appraise)
+ flags &= ~IMA_APPRAISE;
+
+ return ima_match_policy(inode, function, mask, flags);
+}
- must_measure = ima_match_policy(inode, function, mask);
- return must_measure ? 0 : -EACCES;
+int ima_must_measure(struct inode *inode, int mask, int function)
+{
+ return ima_match_policy(inode, function, mask, IMA_MEASURE);
}
/*
@@ -127,18 +195,56 @@ int ima_must_measure(struct inode *inode, int mask, int function)
* Return 0 on success, error code otherwise
*/
int ima_collect_measurement(struct integrity_iint_cache *iint,
- struct file *file)
+ struct file *file,
+ struct evm_ima_xattr_data **xattr_value,
+ int *xattr_len)
{
- int result = -EEXIST;
+ const char *audit_cause = "failed";
+ struct inode *inode = file_inode(file);
+ const char *filename = file->f_dentry->d_name.name;
+ int result = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+
+ if (xattr_value)
+ *xattr_len = ima_read_xattr(file->f_dentry, xattr_value);
+
+ if (!(iint->flags & IMA_COLLECTED)) {
+ u64 i_version = file_inode(file)->i_version;
+
+ if (file->f_flags & O_DIRECT) {
+ audit_cause = "failed(directio)";
+ result = -EACCES;
+ goto out;
+ }
+
+ /* use default hash algorithm */
+ hash.hdr.algo = ima_hash_algo;
- if (!(iint->flags & IMA_MEASURED)) {
- u64 i_version = file->f_dentry->d_inode->i_version;
+ if (xattr_value)
+ ima_get_hash_algo(*xattr_value, *xattr_len, &hash.hdr);
- memset(iint->digest, 0, IMA_DIGEST_SIZE);
- result = ima_calc_hash(file, iint->digest);
- if (!result)
- iint->version = i_version;
+ result = ima_calc_file_hash(file, &hash.hdr);
+ if (!result) {
+ int length = sizeof(hash.hdr) + hash.hdr.length;
+ void *tmpbuf = krealloc(iint->ima_hash, length,
+ GFP_NOFS);
+ if (tmpbuf) {
+ iint->ima_hash = tmpbuf;
+ memcpy(iint->ima_hash, &hash, length);
+ iint->version = i_version;
+ iint->flags |= IMA_COLLECTED;
+ } else
+ result = -ENOMEM;
+ }
}
+out:
+ if (result)
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data", audit_cause,
+ result, 0);
return result;
}
@@ -150,7 +256,7 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
*
* We only get here if the inode has not already been measured,
* but the measurement could already exist:
- * - multiple copies of the same file on either the same or
+ * - multiple copies of the same file on either the same or
* different filesystems.
* - the inode was previously flushed as well as the iint info,
* containing the hashing info.
@@ -158,28 +264,81 @@ int ima_collect_measurement(struct integrity_iint_cache *iint,
* Must be called with iint->mutex held.
*/
void ima_store_measurement(struct integrity_iint_cache *iint,
- struct file *file, const unsigned char *filename)
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
{
- const char *op = "add_template_measure";
- const char *audit_cause = "ENOMEM";
+ static const char op[] = "add_template_measure";
+ static const char audit_cause[] = "ENOMEM";
int result = -ENOMEM;
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct ima_template_entry *entry;
int violation = 0;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry) {
+ if (iint->flags & IMA_MEASURED)
+ return;
+
+ result = ima_alloc_init_template(iint, file, filename,
+ xattr_value, xattr_len, &entry);
+ if (result < 0) {
integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, 0);
return;
}
- memset(&entry->template, 0, sizeof(entry->template));
- memcpy(entry->template.digest, iint->digest, IMA_DIGEST_SIZE);
- strncpy(entry->template.file_name, filename, IMA_EVENT_NAME_LEN_MAX);
- result = ima_store_template(entry, violation, inode);
+ result = ima_store_template(entry, violation, inode, filename);
if (!result || result == -EEXIST)
iint->flags |= IMA_MEASURED;
if (result < 0)
- kfree(entry);
+ ima_free_template_entry(entry);
+}
+
+void ima_audit_measurement(struct integrity_iint_cache *iint,
+ const unsigned char *filename)
+{
+ struct audit_buffer *ab;
+ char hash[(iint->ima_hash->length * 2) + 1];
+ const char *algo_name = hash_algo_name[iint->ima_hash->algo];
+ char algo_hash[sizeof(hash) + strlen(algo_name) + 2];
+ int i;
+
+ if (iint->flags & IMA_AUDITED)
+ return;
+
+ for (i = 0; i < iint->ima_hash->length; i++)
+ hex_byte_pack(hash + (i * 2), iint->ima_hash->digest[i]);
+ hash[i * 2] = '\0';
+
+ ab = audit_log_start(current->audit_context, GFP_KERNEL,
+ AUDIT_INTEGRITY_RULE);
+ if (!ab)
+ return;
+
+ audit_log_format(ab, "file=");
+ audit_log_untrustedstring(ab, filename);
+ audit_log_format(ab, " hash=");
+ snprintf(algo_hash, sizeof(algo_hash), "%s:%s", algo_name, hash);
+ audit_log_untrustedstring(ab, algo_hash);
+
+ audit_log_task_info(ab, current);
+ audit_log_end(ab);
+
+ iint->flags |= IMA_AUDITED;
+}
+
+const char *ima_d_path(struct path *path, char **pathbuf)
+{
+ char *pathname = NULL;
+
+ /* We will allow 11 spaces for ' (deleted)' to be appended */
+ *pathbuf = kmalloc(PATH_MAX + 11, GFP_KERNEL);
+ if (*pathbuf) {
+ pathname = d_path(path, *pathbuf, PATH_MAX + 11);
+ if (IS_ERR(pathname)) {
+ kfree(*pathbuf);
+ *pathbuf = NULL;
+ pathname = NULL;
+ }
+ }
+ return pathname ?: (const char *)path->dentry->d_name.name;
}
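
A condensed sketch of the call sequence the new API above expects of its callers, mirroring ima_add_violation() and ima_store_measurement(); variable names are illustrative:

    struct ima_template_entry *entry;
    int err;

    err = ima_alloc_init_template(iint, file, filename,
                                  xattr_value, xattr_len, &entry);
    if (err < 0)
        return;                           /* nothing was allocated */

    err = ima_store_template(entry, 0 /* no violation */, inode, filename);
    if (err < 0)                          /* includes -EEXIST: not stored */
        ima_free_template_entry(entry);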
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
new file mode 100644
index 00000000000..d3113d4aaa3
--- /dev/null
+++ b/security/integrity/ima/ima_appraise.c
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2011 IBM Corporation
+ *
+ * Author:
+ * Mimi Zohar <zohar@us.ibm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, version 2 of the License.
+ */
+#include <linux/module.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/xattr.h>
+#include <linux/magic.h>
+#include <linux/ima.h>
+#include <linux/evm.h>
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+
+static int __init default_appraise_setup(char *str)
+{
+ if (strncmp(str, "off", 3) == 0)
+ ima_appraise = 0;
+ else if (strncmp(str, "fix", 3) == 0)
+ ima_appraise = IMA_APPRAISE_FIX;
+ return 1;
+}
+
+__setup("ima_appraise=", default_appraise_setup);
+
+/*
+ * ima_must_appraise - set appraise flag
+ *
+ * Return 1 to appraise
+ */
+int ima_must_appraise(struct inode *inode, int mask, enum ima_hooks func)
+{
+ if (!ima_appraise)
+ return 0;
+
+ return ima_match_policy(inode, func, mask, IMA_APPRAISE);
+}
+
+static int ima_fix_xattr(struct dentry *dentry,
+ struct integrity_iint_cache *iint)
+{
+ int rc, offset;
+ u8 algo = iint->ima_hash->algo;
+
+ if (algo <= HASH_ALGO_SHA1) {
+ offset = 1;
+ iint->ima_hash->xattr.sha1.type = IMA_XATTR_DIGEST;
+ } else {
+ offset = 0;
+ iint->ima_hash->xattr.ng.type = IMA_XATTR_DIGEST_NG;
+ iint->ima_hash->xattr.ng.algo = algo;
+ }
+ rc = __vfs_setxattr_noperm(dentry, XATTR_NAME_IMA,
+ &iint->ima_hash->xattr.data[offset],
+ (sizeof(iint->ima_hash->xattr) - offset) +
+ iint->ima_hash->length, 0);
+ return rc;
+}
+
+/* Return specific func appraised cached result */
+enum integrity_status ima_get_cache_status(struct integrity_iint_cache *iint,
+ int func)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ return iint->ima_mmap_status;
+ case BPRM_CHECK:
+ return iint->ima_bprm_status;
+ case MODULE_CHECK:
+ return iint->ima_module_status;
+ case FILE_CHECK:
+ default:
+ return iint->ima_file_status;
+ }
+}
+
+static void ima_set_cache_status(struct integrity_iint_cache *iint,
+ int func, enum integrity_status status)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ iint->ima_mmap_status = status;
+ break;
+ case BPRM_CHECK:
+ iint->ima_bprm_status = status;
+ break;
+ case MODULE_CHECK:
+ iint->ima_module_status = status;
+ break;
+ case FILE_CHECK:
+ default:
+ iint->ima_file_status = status;
+ break;
+ }
+}
+
+static void ima_cache_flags(struct integrity_iint_cache *iint, int func)
+{
+ switch (func) {
+ case MMAP_CHECK:
+ iint->flags |= (IMA_MMAP_APPRAISED | IMA_APPRAISED);
+ break;
+ case BPRM_CHECK:
+ iint->flags |= (IMA_BPRM_APPRAISED | IMA_APPRAISED);
+ break;
+ case MODULE_CHECK:
+ iint->flags |= (IMA_MODULE_APPRAISED | IMA_APPRAISED);
+ break;
+ case FILE_CHECK:
+ default:
+ iint->flags |= (IMA_FILE_APPRAISED | IMA_APPRAISED);
+ break;
+ }
+}
+
+void ima_get_hash_algo(struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_digest_data *hash)
+{
+ struct signature_v2_hdr *sig;
+
+ if (!xattr_value || xattr_len < 2)
+ return;
+
+ switch (xattr_value->type) {
+ case EVM_IMA_XATTR_DIGSIG:
+ sig = (typeof(sig))xattr_value;
+ if (sig->version != 2 || xattr_len <= sizeof(*sig))
+ return;
+ hash->algo = sig->hash_algo;
+ break;
+ case IMA_XATTR_DIGEST_NG:
+ hash->algo = xattr_value->digest[0];
+ break;
+ case IMA_XATTR_DIGEST:
+ /* this is for backward compatibility */
+ if (xattr_len == 21) {
+ unsigned int zero = 0;
+ if (!memcmp(&xattr_value->digest[16], &zero, 4))
+ hash->algo = HASH_ALGO_MD5;
+ else
+ hash->algo = HASH_ALGO_SHA1;
+ } else if (xattr_len == 17)
+ hash->algo = HASH_ALGO_MD5;
+ break;
+ }
+}
+
+int ima_read_xattr(struct dentry *dentry,
+ struct evm_ima_xattr_data **xattr_value)
+{
+ struct inode *inode = dentry->d_inode;
+
+ if (!inode->i_op->getxattr)
+ return 0;
+
+ return vfs_getxattr_alloc(dentry, XATTR_NAME_IMA, (char **)xattr_value,
+ 0, GFP_NOFS);
+}
+
+/*
+ * ima_appraise_measurement - appraise file measurement
+ *
+ * Call evm_verifyxattr() to verify the integrity of 'security.ima'.
+ * Assuming success, compare the xattr hash with the collected measurement.
+ *
+ * Return 0 on success, error code otherwise
+ */
+int ima_appraise_measurement(int func, struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len)
+{
+ static const char op[] = "appraise_data";
+ char *cause = "unknown";
+ struct dentry *dentry = file->f_dentry;
+ struct inode *inode = dentry->d_inode;
+ enum integrity_status status = INTEGRITY_UNKNOWN;
+ int rc = xattr_len, hash_start = 0;
+
+ if (!ima_appraise)
+ return 0;
+ if (!inode->i_op->getxattr)
+ return INTEGRITY_UNKNOWN;
+
+ if (rc <= 0) {
+ if (rc && rc != -ENODATA)
+ goto out;
+
+ cause = "missing-hash";
+ status =
+ (inode->i_size == 0) ? INTEGRITY_PASS : INTEGRITY_NOLABEL;
+ goto out;
+ }
+
+ status = evm_verifyxattr(dentry, XATTR_NAME_IMA, xattr_value, rc, iint);
+ if ((status != INTEGRITY_PASS) && (status != INTEGRITY_UNKNOWN)) {
+ if ((status == INTEGRITY_NOLABEL)
+ || (status == INTEGRITY_NOXATTRS))
+ cause = "missing-HMAC";
+ else if (status == INTEGRITY_FAIL)
+ cause = "invalid-HMAC";
+ goto out;
+ }
+ switch (xattr_value->type) {
+ case IMA_XATTR_DIGEST_NG:
+ /* first byte contains algorithm id */
+ hash_start = 1;
+ case IMA_XATTR_DIGEST:
+ if (iint->flags & IMA_DIGSIG_REQUIRED) {
+ cause = "IMA signature required";
+ status = INTEGRITY_FAIL;
+ break;
+ }
+ if (xattr_len - sizeof(xattr_value->type) - hash_start >=
+ iint->ima_hash->length)
+ /* xattr length may be longer. md5 hash in previous
+ version occupied 20 bytes in xattr, instead of 16
+ */
+ rc = memcmp(&xattr_value->digest[hash_start],
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ else
+ rc = -EINVAL;
+ if (rc) {
+ cause = "invalid-hash";
+ status = INTEGRITY_FAIL;
+ break;
+ }
+ status = INTEGRITY_PASS;
+ break;
+ case EVM_IMA_XATTR_DIGSIG:
+ iint->flags |= IMA_DIGSIG;
+ rc = integrity_digsig_verify(INTEGRITY_KEYRING_IMA,
+ (const char *)xattr_value, rc,
+ iint->ima_hash->digest,
+ iint->ima_hash->length);
+ if (rc == -EOPNOTSUPP) {
+ status = INTEGRITY_UNKNOWN;
+ } else if (rc) {
+ cause = "invalid-signature";
+ status = INTEGRITY_FAIL;
+ } else {
+ status = INTEGRITY_PASS;
+ }
+ break;
+ default:
+ status = INTEGRITY_UNKNOWN;
+ cause = "unknown-ima-data";
+ break;
+ }
+
+out:
+ if (status != INTEGRITY_PASS) {
+ if ((ima_appraise & IMA_APPRAISE_FIX) &&
+ (!xattr_value ||
+ xattr_value->type != EVM_IMA_XATTR_DIGSIG)) {
+ if (!ima_fix_xattr(dentry, iint))
+ status = INTEGRITY_PASS;
+ }
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode, filename,
+ op, cause, rc, 0);
+ } else {
+ ima_cache_flags(iint, func);
+ }
+ ima_set_cache_status(iint, func, status);
+ return status;
+}
+
+/*
+ * ima_update_xattr - update 'security.ima' hash value
+ */
+void ima_update_xattr(struct integrity_iint_cache *iint, struct file *file)
+{
+ struct dentry *dentry = file->f_dentry;
+ int rc = 0;
+
+ /* do not collect and update hash for digital signatures */
+ if (iint->flags & IMA_DIGSIG)
+ return;
+
+ rc = ima_collect_measurement(iint, file, NULL, NULL);
+ if (rc < 0)
+ return;
+
+ ima_fix_xattr(dentry, iint);
+}
+
+/**
+ * ima_inode_post_setattr - reflect file metadata changes
+ * @dentry: pointer to the affected dentry
+ *
+ * Changes to a dentry's metadata might result in needing to appraise.
+ *
+ * This function is called from notify_change(), which expects the caller
+ * to lock the inode's i_mutex.
+ */
+void ima_inode_post_setattr(struct dentry *dentry)
+{
+ struct inode *inode = dentry->d_inode;
+ struct integrity_iint_cache *iint;
+ int must_appraise, rc;
+
+ if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode)
+ || !inode->i_op->removexattr)
+ return;
+
+ must_appraise = ima_must_appraise(inode, MAY_ACCESS, POST_SETATTR);
+ iint = integrity_iint_find(inode);
+ if (iint) {
+ iint->flags &= ~(IMA_APPRAISE | IMA_APPRAISED |
+ IMA_APPRAISE_SUBMASK | IMA_APPRAISED_SUBMASK |
+ IMA_ACTION_FLAGS);
+ if (must_appraise)
+ iint->flags |= IMA_APPRAISE;
+ }
+ if (!must_appraise)
+ rc = inode->i_op->removexattr(dentry, XATTR_NAME_IMA);
+ return;
+}
+
+/*
+ * ima_protect_xattr - protect 'security.ima'
+ *
+ * Ensure that not just anyone can modify or remove 'security.ima'.
+ */
+static int ima_protect_xattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ if (strcmp(xattr_name, XATTR_NAME_IMA) == 0) {
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ return 1;
+ }
+ return 0;
+}
+
+static void ima_reset_appraise_flags(struct inode *inode, int digsig)
+{
+ struct integrity_iint_cache *iint;
+
+ if (!ima_initialized || !ima_appraise || !S_ISREG(inode->i_mode))
+ return;
+
+ iint = integrity_iint_find(inode);
+ if (!iint)
+ return;
+
+ iint->flags &= ~IMA_DONE_MASK;
+ if (digsig)
+ iint->flags |= IMA_DIGSIG;
+ return;
+}
+
+int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
+ const void *xattr_value, size_t xattr_value_len)
+{
+ const struct evm_ima_xattr_data *xvalue = xattr_value;
+ int result;
+
+ result = ima_protect_xattr(dentry, xattr_name, xattr_value,
+ xattr_value_len);
+ if (result == 1) {
+ ima_reset_appraise_flags(dentry->d_inode,
+ (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
+ result = 0;
+ }
+ return result;
+}
+
+int ima_inode_removexattr(struct dentry *dentry, const char *xattr_name)
+{
+ int result;
+
+ result = ima_protect_xattr(dentry, xattr_name, NULL, 0);
+ if (result == 1) {
+ ima_reset_appraise_flags(dentry->d_inode, 0);
+ result = 0;
+ }
+ return result;
+}
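
For reference, a small sketch of the two 'security.ima' hash layouts that ima_get_hash_algo(), ima_appraise_measurement() and ima_fix_xattr() above distinguish; the helper name is hypothetical and the layout is inferred from the code, not quoted from a header:

    /*
     * IMA_XATTR_DIGEST:    [type][digest ...]        legacy md5/sha1
     * IMA_XATTR_DIGEST_NG: [type][algo][digest ...]  algorithm id precedes hash
     */
    static int sketch_digest_offset(const struct evm_ima_xattr_data *xattr_value)
    {
        /* offset into xattr_value->digest[] at which the raw hash starts */
        return (xattr_value->type == IMA_XATTR_DIGEST_NG) ? 1 : 0;
    }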
diff --git a/security/integrity/ima/ima_crypto.c b/security/integrity/ima/ima_crypto.c
index 9b3ade7468b..ccd0ac8fa9a 100644
--- a/security/integrity/ima/ima_crypto.c
+++ b/security/integrity/ima/ima_crypto.c
@@ -10,60 +10,131 @@
* the Free Software Foundation, version 2 of the License.
*
* File: ima_crypto.c
- * Calculates md5/sha1 file hash, template hash, boot-aggreate hash
+ * Calculates md5/sha1 file hash, template hash, boot-aggregate hash
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/kernel.h>
#include <linux/file.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <crypto/hash.h>
+#include <crypto/hash_info.h>
#include "ima.h"
-static int init_desc(struct hash_desc *desc)
+static struct crypto_shash *ima_shash_tfm;
+
+/**
+ * ima_kernel_read - read file content
+ *
+ * This is a function for reading file content instead of kernel_read().
+ * It does not perform locking checks to ensure it cannot be blocked.
+ * It does not perform security checks because it is irrelevant for IMA.
+ *
+ */
+static int ima_kernel_read(struct file *file, loff_t offset,
+ char *addr, unsigned long count)
{
- int rc;
+ mm_segment_t old_fs;
+ char __user *buf = addr;
+ ssize_t ret;
+
+ if (!(file->f_mode & FMODE_READ))
+ return -EBADF;
+ if (!file->f_op->read && !file->f_op->aio_read)
+ return -EINVAL;
+
+ old_fs = get_fs();
+ set_fs(get_ds());
+ if (file->f_op->read)
+ ret = file->f_op->read(file, buf, count, &offset);
+ else
+ ret = do_sync_read(file, buf, count, &offset);
+ set_fs(old_fs);
+ return ret;
+}
- desc->tfm = crypto_alloc_hash(ima_hash, 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR(desc->tfm)) {
- pr_info("IMA: failed to load %s transform: %ld\n",
- ima_hash, PTR_ERR(desc->tfm));
- rc = PTR_ERR(desc->tfm);
+int ima_init_crypto(void)
+{
+ long rc;
+
+ ima_shash_tfm = crypto_alloc_shash(hash_algo_name[ima_hash_algo], 0, 0);
+ if (IS_ERR(ima_shash_tfm)) {
+ rc = PTR_ERR(ima_shash_tfm);
+ pr_err("Can not allocate %s (reason: %ld)\n",
+ hash_algo_name[ima_hash_algo], rc);
return rc;
}
- desc->flags = 0;
- rc = crypto_hash_init(desc);
- if (rc)
- crypto_free_hash(desc->tfm);
- return rc;
+ return 0;
+}
+
+static struct crypto_shash *ima_alloc_tfm(enum hash_algo algo)
+{
+ struct crypto_shash *tfm = ima_shash_tfm;
+ int rc;
+
+ if (algo != ima_hash_algo && algo < HASH_ALGO__LAST) {
+ tfm = crypto_alloc_shash(hash_algo_name[algo], 0, 0);
+ if (IS_ERR(tfm)) {
+ rc = PTR_ERR(tfm);
+ pr_err("Can not allocate %s (reason: %d)\n",
+ hash_algo_name[algo], rc);
+ }
+ }
+ return tfm;
+}
+
+static void ima_free_tfm(struct crypto_shash *tfm)
+{
+ if (tfm != ima_shash_tfm)
+ crypto_free_shash(tfm);
}
/*
* Calculate the MD5/SHA1 file digest
*/
-int ima_calc_hash(struct file *file, char *digest)
+static int ima_calc_file_hash_tfm(struct file *file,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
{
- struct hash_desc desc;
- struct scatterlist sg[1];
loff_t i_size, offset = 0;
char *rbuf;
- int rc;
+ int rc, read = 0;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm)];
+ } desc;
- rc = init_desc(&desc);
+ desc.shash.tfm = tfm;
+ desc.shash.flags = 0;
+
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(&desc.shash);
if (rc != 0)
return rc;
- rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
- if (!rbuf) {
- rc = -ENOMEM;
+ i_size = i_size_read(file_inode(file));
+
+ if (i_size == 0)
goto out;
+
+ rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!rbuf)
+ return -ENOMEM;
+
+ if (!(file->f_mode & FMODE_READ)) {
+ file->f_mode |= FMODE_READ;
+ read = 1;
}
- i_size = i_size_read(file->f_dentry->d_inode);
+
while (offset < i_size) {
int rbuf_len;
- rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
+ rbuf_len = ima_kernel_read(file, offset, rbuf, PAGE_SIZE);
if (rbuf_len < 0) {
rc = rbuf_len;
break;
@@ -71,38 +142,103 @@ int ima_calc_hash(struct file *file, char *digest)
if (rbuf_len == 0)
break;
offset += rbuf_len;
- sg_init_one(sg, rbuf, rbuf_len);
- rc = crypto_hash_update(&desc, sg, rbuf_len);
+ rc = crypto_shash_update(&desc.shash, rbuf, rbuf_len);
if (rc)
break;
}
+ if (read)
+ file->f_mode &= ~FMODE_READ;
kfree(rbuf);
- if (!rc)
- rc = crypto_hash_final(&desc, digest);
out:
- crypto_free_hash(desc.tfm);
+ if (!rc)
+ rc = crypto_shash_final(&desc.shash, hash->digest);
+ return rc;
+}
+
+int ima_calc_file_hash(struct file *file, struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_file_hash_tfm(file, hash, tfm);
+
+ ima_free_tfm(tfm);
+
return rc;
}
/*
- * Calculate the hash of a given template
+ * Calculate the hash of template data
*/
-int ima_calc_template_hash(int template_len, void *template, char *digest)
+static int ima_calc_field_array_hash_tfm(struct ima_field_data *field_data,
+ struct ima_template_desc *td,
+ int num_fields,
+ struct ima_digest_data *hash,
+ struct crypto_shash *tfm)
{
- struct hash_desc desc;
- struct scatterlist sg[1];
- int rc;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm)];
+ } desc;
+ int rc, i;
+
+ desc.shash.tfm = tfm;
+ desc.shash.flags = 0;
- rc = init_desc(&desc);
+ hash->length = crypto_shash_digestsize(tfm);
+
+ rc = crypto_shash_init(&desc.shash);
if (rc != 0)
return rc;
- sg_init_one(sg, template, template_len);
- rc = crypto_hash_update(&desc, sg, template_len);
+ for (i = 0; i < num_fields; i++) {
+ u8 buffer[IMA_EVENT_NAME_LEN_MAX + 1] = { 0 };
+ u8 *data_to_hash = field_data[i].data;
+ u32 datalen = field_data[i].len;
+
+ if (strcmp(td->name, IMA_TEMPLATE_IMA_NAME) != 0) {
+ rc = crypto_shash_update(&desc.shash,
+ (const u8 *) &field_data[i].len,
+ sizeof(field_data[i].len));
+ if (rc)
+ break;
+ } else if (strcmp(td->fields[i]->field_id, "n") == 0) {
+ memcpy(buffer, data_to_hash, datalen);
+ data_to_hash = buffer;
+ datalen = IMA_EVENT_NAME_LEN_MAX + 1;
+ }
+ rc = crypto_shash_update(&desc.shash, data_to_hash, datalen);
+ if (rc)
+ break;
+ }
+
if (!rc)
- rc = crypto_hash_final(&desc, digest);
- crypto_free_hash(desc.tfm);
+ rc = crypto_shash_final(&desc.shash, hash->digest);
+
+ return rc;
+}
+
+int ima_calc_field_array_hash(struct ima_field_data *field_data,
+ struct ima_template_desc *desc, int num_fields,
+ struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ rc = ima_calc_field_array_hash_tfm(field_data, desc, num_fields,
+ hash, tfm);
+
+ ima_free_tfm(tfm);
+
return rc;
}
@@ -112,20 +248,26 @@ static void __init ima_pcrread(int idx, u8 *pcr)
return;
if (tpm_pcr_read(TPM_ANY_NUM, idx, pcr) != 0)
- pr_err("IMA: Error Communicating to TPM chip\n");
+ pr_err("Error Communicating to TPM chip\n");
}
/*
* Calculate the boot aggregate hash
*/
-int __init ima_calc_boot_aggregate(char *digest)
+static int __init ima_calc_boot_aggregate_tfm(char *digest,
+ struct crypto_shash *tfm)
{
- struct hash_desc desc;
- struct scatterlist sg;
- u8 pcr_i[IMA_DIGEST_SIZE];
+ u8 pcr_i[TPM_DIGEST_SIZE];
int rc, i;
+ struct {
+ struct shash_desc shash;
+ char ctx[crypto_shash_descsize(tfm)];
+ } desc;
+
+ desc.shash.tfm = tfm;
+ desc.shash.flags = 0;
- rc = init_desc(&desc);
+ rc = crypto_shash_init(&desc.shash);
if (rc != 0)
return rc;
@@ -133,11 +275,26 @@ int __init ima_calc_boot_aggregate(char *digest)
for (i = TPM_PCR0; i < TPM_PCR8; i++) {
ima_pcrread(i, pcr_i);
/* now accumulate with current aggregate */
- sg_init_one(&sg, pcr_i, IMA_DIGEST_SIZE);
- rc = crypto_hash_update(&desc, &sg, IMA_DIGEST_SIZE);
+ rc = crypto_shash_update(&desc.shash, pcr_i, TPM_DIGEST_SIZE);
}
if (!rc)
- crypto_hash_final(&desc, digest);
- crypto_free_hash(desc.tfm);
+ crypto_shash_final(&desc.shash, digest);
+ return rc;
+}
+
+int __init ima_calc_boot_aggregate(struct ima_digest_data *hash)
+{
+ struct crypto_shash *tfm;
+ int rc;
+
+ tfm = ima_alloc_tfm(hash->algo);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ hash->length = crypto_shash_digestsize(tfm);
+ rc = ima_calc_boot_aggregate_tfm(hash->digest, tfm);
+
+ ima_free_tfm(tfm);
+
return rc;
}
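
The per-call shash pattern introduced above reduces to the skeleton below (an editorial sketch assuming the same <crypto/hash.h> helpers; 'out' must hold at least crypto_shash_digestsize(tfm) bytes). crypto_shash_digest() would collapse the three steps into one call, but the file-hash loop above needs the incremental update form.

    static int sketch_digest_buffer(struct crypto_shash *tfm,
                                    const u8 *buf, unsigned int len, u8 *out)
    {
        struct {
            struct shash_desc shash;
            char ctx[crypto_shash_descsize(tfm)];
        } desc;
        int rc;

        desc.shash.tfm = tfm;
        desc.shash.flags = 0;

        rc = crypto_shash_init(&desc.shash);
        if (!rc)
            rc = crypto_shash_update(&desc.shash, buf, len);
        if (!rc)
            rc = crypto_shash_final(&desc.shash, out);
        return rc;
    }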
diff --git a/security/integrity/ima/ima_fs.c b/security/integrity/ima/ima_fs.c
index e1aa2b482dd..da92fcc08d1 100644
--- a/security/integrity/ima/ima_fs.c
+++ b/security/integrity/ima/ima_fs.c
@@ -88,8 +88,7 @@ static void *ima_measurements_next(struct seq_file *m, void *v, loff_t *pos)
* against concurrent list-extension
*/
rcu_read_lock();
- qe = list_entry_rcu(qe->later.next,
- struct ima_queue_entry, later);
+ qe = list_entry_rcu(qe->later.next, struct ima_queue_entry, later);
rcu_read_unlock();
(*pos)++;
@@ -100,7 +99,7 @@ static void ima_measurements_stop(struct seq_file *m, void *v)
{
}
-static void ima_putc(struct seq_file *m, void *data, int datalen)
+void ima_putc(struct seq_file *m, void *data, int datalen)
{
while (datalen--)
seq_putc(m, *(char *)data++);
@@ -111,6 +110,7 @@ static void ima_putc(struct seq_file *m, void *data, int datalen)
* char[20]=template digest
* 32bit-le=template name size
* char[n]=template name
+ * [eventdata length]
* eventdata[n]=template specific data
*/
static int ima_measurements_show(struct seq_file *m, void *v)
@@ -120,6 +120,8 @@ static int ima_measurements_show(struct seq_file *m, void *v)
struct ima_template_entry *e;
int namelen;
u32 pcr = CONFIG_IMA_MEASURE_PCR_IDX;
+ bool is_ima_template = false;
+ int i;
/* get entry */
e = qe->entry;
@@ -131,21 +133,37 @@ static int ima_measurements_show(struct seq_file *m, void *v)
* PCR used is always the same (config option) in
* little-endian format
*/
- ima_putc(m, &pcr, sizeof pcr);
+ ima_putc(m, &pcr, sizeof(pcr));
/* 2nd: template digest */
- ima_putc(m, e->digest, IMA_DIGEST_SIZE);
+ ima_putc(m, e->digest, TPM_DIGEST_SIZE);
/* 3rd: template name size */
- namelen = strlen(e->template_name);
- ima_putc(m, &namelen, sizeof namelen);
+ namelen = strlen(e->template_desc->name);
+ ima_putc(m, &namelen, sizeof(namelen));
/* 4th: template name */
- ima_putc(m, (void *)e->template_name, namelen);
-
- /* 5th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_BINARY);
+ ima_putc(m, e->template_desc->name, namelen);
+
+ /* 5th: template length (except for 'ima' template) */
+ if (strcmp(e->template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0)
+ is_ima_template = true;
+
+ if (!is_ima_template)
+ ima_putc(m, &e->template_data_len,
+ sizeof(e->template_data_len));
+
+ /* 6th: template specific data */
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ enum ima_show_type show = IMA_SHOW_BINARY;
+ struct ima_template_field *field = e->template_desc->fields[i];
+
+ if (is_ima_template && strcmp(field->field_id, "d") == 0)
+ show = IMA_SHOW_BINARY_NO_FIELD_LEN;
+ if (is_ima_template && strcmp(field->field_id, "n") == 0)
+ show = IMA_SHOW_BINARY_OLD_STRING_FMT;
+ field->field_show(m, show, &e->template_data[i]);
+ }
return 0;
}
@@ -168,41 +186,21 @@ static const struct file_operations ima_measurements_ops = {
.release = seq_release,
};
-static void ima_print_digest(struct seq_file *m, u8 *digest)
+void ima_print_digest(struct seq_file *m, u8 *digest, int size)
{
int i;
- for (i = 0; i < IMA_DIGEST_SIZE; i++)
+ for (i = 0; i < size; i++)
seq_printf(m, "%02x", *(digest + i));
}
-void ima_template_show(struct seq_file *m, void *e, enum ima_show_type show)
-{
- struct ima_template_data *entry = e;
- int namelen;
-
- switch (show) {
- case IMA_SHOW_ASCII:
- ima_print_digest(m, entry->digest);
- seq_printf(m, " %s\n", entry->file_name);
- break;
- case IMA_SHOW_BINARY:
- ima_putc(m, entry->digest, IMA_DIGEST_SIZE);
-
- namelen = strlen(entry->file_name);
- ima_putc(m, &namelen, sizeof namelen);
- ima_putc(m, entry->file_name, namelen);
- default:
- break;
- }
-}
-
/* print in ascii */
static int ima_ascii_measurements_show(struct seq_file *m, void *v)
{
/* the list never shrinks, so we don't need a lock here */
struct ima_queue_entry *qe = v;
struct ima_template_entry *e;
+ int i;
/* get entry */
e = qe->entry;
@@ -213,14 +211,21 @@ static int ima_ascii_measurements_show(struct seq_file *m, void *v)
seq_printf(m, "%2d ", CONFIG_IMA_MEASURE_PCR_IDX);
/* 2nd: SHA1 template hash */
- ima_print_digest(m, e->digest);
+ ima_print_digest(m, e->digest, TPM_DIGEST_SIZE);
 /* 3rd: template name */
- seq_printf(m, " %s ", e->template_name);
+ seq_printf(m, " %s", e->template_desc->name);
/* 4th: template specific data */
- ima_template_show(m, (struct ima_template_data *)&e->template,
- IMA_SHOW_ASCII);
+ for (i = 0; i < e->template_desc->num_fields; i++) {
+ seq_puts(m, " ");
+ if (e->template_data[i].len == 0)
+ continue;
+
+ e->template_desc->fields[i]->field_show(m, IMA_SHOW_ASCII,
+ &e->template_data[i]);
+ }
+ seq_puts(m, "\n");
return 0;
}
@@ -287,7 +292,7 @@ static atomic_t policy_opencount = ATOMIC_INIT(1);
/*
* ima_open_policy: sequentialize access to the policy file
*/
-static int ima_open_policy(struct inode * inode, struct file * filp)
+static int ima_open_policy(struct inode *inode, struct file *filp)
{
/* No point in being allowed to open it if you aren't going to write */
if (!(filp->f_flags & O_WRONLY))
@@ -367,20 +372,11 @@ int __init ima_fs_init(void)
return 0;
out:
- securityfs_remove(runtime_measurements_count);
- securityfs_remove(ascii_runtime_measurements);
- securityfs_remove(binary_runtime_measurements);
- securityfs_remove(ima_dir);
- securityfs_remove(ima_policy);
- return -1;
-}
-
-void __exit ima_fs_cleanup(void)
-{
securityfs_remove(violations);
securityfs_remove(runtime_measurements_count);
securityfs_remove(ascii_runtime_measurements);
securityfs_remove(binary_runtime_measurements);
securityfs_remove(ima_dir);
securityfs_remove(ima_policy);
+ return -1;
}
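
Putting the pieces of ima_measurements_show() above together, one binary record now looks like this (an editorial summary, not a struct copied from the kernel; integers are emitted via ima_putc() in native byte order, little-endian on the platforms the comment above assumes):

    /*
     *   u32  pcr;                          CONFIG_IMA_MEASURE_PCR_IDX
     *   u8   template_digest[20];          TPM_DIGEST_SIZE template hash
     *   u32  template_name_len;
     *   char template_name[template_name_len];
     *   u32  template_data_len;            omitted for the legacy "ima" template
     *   u8   template_data[template_data_len];  per-field data, each field
     *                                           length-prefixed except in the
     *                                           legacy "ima" format
     */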
diff --git a/security/integrity/ima/ima_init.c b/security/integrity/ima/ima_init.c
index 17f1f060306..e8f9d70a465 100644
--- a/security/integrity/ima/ima_init.c
+++ b/security/integrity/ima/ima_init.c
@@ -14,10 +14,14 @@
* File: ima_init.c
* initialization and cleanup functions
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/err.h>
+#include <crypto/hash_info.h>
#include "ima.h"
/* name for boot aggregate entry */
@@ -41,31 +45,40 @@ int ima_used_chip;
*/
static void __init ima_add_boot_aggregate(void)
{
- struct ima_template_entry *entry;
- const char *op = "add_boot_aggregate";
+ static const char op[] = "add_boot_aggregate";
const char *audit_cause = "ENOMEM";
+ struct ima_template_entry *entry;
+ struct integrity_iint_cache tmp_iint, *iint = &tmp_iint;
int result = -ENOMEM;
- int violation = 1;
+ int violation = 0;
+ struct {
+ struct ima_digest_data hdr;
+ char digest[TPM_DIGEST_SIZE];
+ } hash;
- entry = kmalloc(sizeof(*entry), GFP_KERNEL);
- if (!entry)
- goto err_out;
+ memset(iint, 0, sizeof(*iint));
+ memset(&hash, 0, sizeof(hash));
+ iint->ima_hash = &hash.hdr;
+ iint->ima_hash->algo = HASH_ALGO_SHA1;
+ iint->ima_hash->length = SHA1_DIGEST_SIZE;
- memset(&entry->template, 0, sizeof(entry->template));
- strncpy(entry->template.file_name, boot_aggregate_name,
- IMA_EVENT_NAME_LEN_MAX);
if (ima_used_chip) {
- violation = 0;
- result = ima_calc_boot_aggregate(entry->template.digest);
+ result = ima_calc_boot_aggregate(&hash.hdr);
if (result < 0) {
audit_cause = "hashing_error";
- kfree(entry);
goto err_out;
}
}
- result = ima_store_template(entry, violation, NULL);
+
+ result = ima_alloc_init_template(iint, NULL, boot_aggregate_name,
+ NULL, 0, &entry);
if (result < 0)
- kfree(entry);
+ return;
+
+ result = ima_store_template(entry, violation, NULL,
+ boot_aggregate_name);
+ if (result < 0)
+ ima_free_template_entry(entry);
return;
err_out:
integrity_audit_msg(AUDIT_INTEGRITY_PCR, NULL, boot_aggregate_name, op,
@@ -74,7 +87,7 @@ err_out:
int __init ima_init(void)
{
- u8 pcr_i[IMA_DIGEST_SIZE];
+ u8 pcr_i[TPM_DIGEST_SIZE];
int rc;
ima_used_chip = 0;
@@ -83,15 +96,17 @@ int __init ima_init(void)
ima_used_chip = 1;
if (!ima_used_chip)
- pr_info("IMA: No TPM chip found, activating TPM-bypass!\n");
+ pr_info("No TPM chip found, activating TPM-bypass!\n");
+
+ rc = ima_init_crypto();
+ if (rc)
+ return rc;
+ rc = ima_init_template();
+ if (rc != 0)
+ return rc;
ima_add_boot_aggregate(); /* boot aggregate must be first entry */
ima_init_policy();
return ima_fs_init();
}
-
-void __exit ima_cleanup(void)
-{
- ima_fs_cleanup();
-}
diff --git a/security/integrity/ima/ima_main.c b/security/integrity/ima/ima_main.c
index 1eff5cb001e..09baa335ebc 100644
--- a/security/integrity/ima/ima_main.c
+++ b/security/integrity/ima/ima_main.c
@@ -22,17 +22,47 @@
#include <linux/mount.h>
#include <linux/mman.h>
#include <linux/slab.h>
+#include <linux/xattr.h>
#include <linux/ima.h>
+#include <crypto/hash_info.h>
#include "ima.h"
int ima_initialized;
-char *ima_hash = "sha1";
+#ifdef CONFIG_IMA_APPRAISE
+int ima_appraise = IMA_APPRAISE_ENFORCE;
+#else
+int ima_appraise;
+#endif
+
+int ima_hash_algo = HASH_ALGO_SHA1;
+static int hash_setup_done;
+
static int __init hash_setup(char *str)
{
- if (strncmp(str, "md5", 3) == 0)
- ima_hash = "md5";
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ int i;
+
+ if (hash_setup_done)
+ return 1;
+
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (strncmp(str, "sha1", 4) == 0)
+ ima_hash_algo = HASH_ALGO_SHA1;
+ else if (strncmp(str, "md5", 3) == 0)
+ ima_hash_algo = HASH_ALGO_MD5;
+ goto out;
+ }
+
+ for (i = 0; i < HASH_ALGO__LAST; i++) {
+ if (strcmp(str, hash_algo_name[i]) == 0) {
+ ima_hash_algo = i;
+ break;
+ }
+ }
+out:
+ hash_setup_done = 1;
return 1;
}
__setup("ima_hash=", hash_setup);
@@ -41,19 +71,19 @@ __setup("ima_hash=", hash_setup);
* ima_rdwr_violation_check
*
* Only invalidate the PCR for measured files:
- * - Opening a file for write when already open for read,
+ * - Opening a file for write when already open for read,
* results in a time of measure, time of use (ToMToU) error.
* - Opening a file for read when already open for write,
- * could result in a file measurement error.
+ * could result in a file measurement error.
*
*/
static void ima_rdwr_violation_check(struct file *file)
{
- struct dentry *dentry = file->f_path.dentry;
- struct inode *inode = dentry->d_inode;
+ struct inode *inode = file_inode(file);
fmode_t mode = file->f_mode;
- int rc;
bool send_tomtou = false, send_writers = false;
+ char *pathbuf = NULL;
+ const char *pathname;
if (!S_ISREG(inode->i_mode) || !ima_initialized)
return;
@@ -61,40 +91,50 @@ static void ima_rdwr_violation_check(struct file *file)
mutex_lock(&inode->i_mutex); /* file metadata: permissions, xattr */
if (mode & FMODE_WRITE) {
- if (atomic_read(&inode->i_readcount) && IS_IMA(inode))
- send_tomtou = true;
- goto out;
+ if (atomic_read(&inode->i_readcount) && IS_IMA(inode)) {
+ struct integrity_iint_cache *iint;
+ iint = integrity_iint_find(inode);
+ /* IMA_MEASURE is set from reader side */
+ if (iint && (iint->flags & IMA_MEASURE))
+ send_tomtou = true;
+ }
+ } else {
+ if ((atomic_read(&inode->i_writecount) > 0) &&
+ ima_must_measure(inode, MAY_READ, FILE_CHECK))
+ send_writers = true;
}
- rc = ima_must_measure(inode, MAY_READ, FILE_CHECK);
- if (rc < 0)
- goto out;
-
- if (atomic_read(&inode->i_writecount) > 0)
- send_writers = true;
-out:
mutex_unlock(&inode->i_mutex);
+ if (!send_tomtou && !send_writers)
+ return;
+
+ pathname = ima_d_path(&file->f_path, &pathbuf);
+
if (send_tomtou)
- ima_add_violation(inode, dentry->d_name.name, "invalid_pcr",
- "ToMToU");
+ ima_add_violation(file, pathname, "invalid_pcr", "ToMToU");
if (send_writers)
- ima_add_violation(inode, dentry->d_name.name, "invalid_pcr",
- "open_writers");
+ ima_add_violation(file, pathname,
+ "invalid_pcr", "open_writers");
+ kfree(pathbuf);
}
static void ima_check_last_writer(struct integrity_iint_cache *iint,
- struct inode *inode,
- struct file *file)
+ struct inode *inode, struct file *file)
{
fmode_t mode = file->f_mode;
- mutex_lock(&iint->mutex);
- if (mode & FMODE_WRITE &&
- atomic_read(&inode->i_writecount) == 1 &&
- iint->version != inode->i_version)
- iint->flags &= ~IMA_MEASURED;
- mutex_unlock(&iint->mutex);
+ if (!(mode & FMODE_WRITE))
+ return;
+
+ mutex_lock(&inode->i_mutex);
+ if (atomic_read(&inode->i_writecount) == 1 &&
+ iint->version != inode->i_version) {
+ iint->flags &= ~IMA_DONE_MASK;
+ if (iint->flags & IMA_APPRAISE)
+ ima_update_xattr(iint, file);
+ }
+ mutex_unlock(&inode->i_mutex);
}
/**
@@ -105,7 +145,7 @@ static void ima_check_last_writer(struct integrity_iint_cache *iint,
*/
void ima_file_free(struct file *file)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint;
if (!iint_initialized || !S_ISREG(inode->i_mode))
@@ -118,40 +158,88 @@ void ima_file_free(struct file *file)
ima_check_last_writer(iint, inode, file);
}
-static int process_measurement(struct file *file, const unsigned char *filename,
+static int process_measurement(struct file *file, const char *filename,
int mask, int function)
{
- struct inode *inode = file->f_dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct integrity_iint_cache *iint;
- int rc = 0;
+ struct ima_template_desc *template_desc = ima_template_desc_current();
+ char *pathbuf = NULL;
+ const char *pathname = NULL;
+ int rc = -ENOMEM, action, must_appraise, _func;
+ struct evm_ima_xattr_data *xattr_value = NULL, **xattr_ptr = NULL;
+ int xattr_len = 0;
if (!ima_initialized || !S_ISREG(inode->i_mode))
return 0;
- rc = ima_must_measure(inode, mask, function);
- if (rc != 0)
- return rc;
-retry:
- iint = integrity_iint_find(inode);
- if (!iint) {
- rc = integrity_inode_alloc(inode);
- if (!rc || rc == -EEXIST)
- goto retry;
- return rc;
- }
+ /* Return an IMA_MEASURE, IMA_APPRAISE, IMA_AUDIT action
+ * bitmask based on the appraise/audit/measurement policy.
+ * Included is the appraise submask.
+ */
+ action = ima_get_action(inode, mask, function);
+ if (!action)
+ return 0;
+
+ must_appraise = action & IMA_APPRAISE;
+
+ /* Is the appraise rule hook specific? */
+ _func = (action & IMA_FILE_APPRAISE) ? FILE_CHECK : function;
- mutex_lock(&iint->mutex);
+ mutex_lock(&inode->i_mutex);
- rc = iint->flags & IMA_MEASURED ? 1 : 0;
- if (rc != 0)
+ iint = integrity_inode_get(inode);
+ if (!iint)
goto out;
- rc = ima_collect_measurement(iint, file);
- if (!rc)
- ima_store_measurement(iint, file, filename);
+ /* Determine if already appraised/measured based on bitmask
+ * (IMA_MEASURE, IMA_MEASURED, IMA_XXXX_APPRAISE, IMA_XXXX_APPRAISED,
+ * IMA_AUDIT, IMA_AUDITED)
+ */
+ iint->flags |= action;
+ action &= IMA_DO_MASK;
+ action &= ~((iint->flags & IMA_DONE_MASK) >> 1);
+
+ /* Nothing to do, just return existing appraised status */
+ if (!action) {
+ if (must_appraise)
+ rc = ima_get_cache_status(iint, _func);
+ goto out_digsig;
+ }
+
+ if (strcmp(template_desc->name, IMA_TEMPLATE_IMA_NAME) == 0) {
+ if (action & IMA_APPRAISE_SUBMASK)
+ xattr_ptr = &xattr_value;
+ } else
+ xattr_ptr = &xattr_value;
+
+ rc = ima_collect_measurement(iint, file, xattr_ptr, &xattr_len);
+ if (rc != 0) {
+ if (file->f_flags & O_DIRECT)
+ rc = (iint->flags & IMA_PERMIT_DIRECTIO) ? 0 : -EACCES;
+ goto out_digsig;
+ }
+
+ pathname = filename ?: ima_d_path(&file->f_path, &pathbuf);
+
+ if (action & IMA_MEASURE)
+ ima_store_measurement(iint, file, pathname,
+ xattr_value, xattr_len);
+ if (action & IMA_APPRAISE_SUBMASK)
+ rc = ima_appraise_measurement(_func, iint, file, pathname,
+ xattr_value, xattr_len);
+ if (action & IMA_AUDIT)
+ ima_audit_measurement(iint, pathname);
+ kfree(pathbuf);
+out_digsig:
+ if ((mask & MAY_WRITE) && (iint->flags & IMA_DIGSIG))
+ rc = -EACCES;
out:
- mutex_unlock(&iint->mutex);
- return rc;
+ mutex_unlock(&inode->i_mutex);
+ kfree(xattr_value);
+ if ((rc && must_appraise) && (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES;
+ return 0;
}
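
The masking above works because each "done" flag added to integrity.h later in this diff sits one bit to the left of its corresponding "do" flag, so shifting the done bits right by one clears exactly the actions that have already completed. A minimal standalone sketch of that step (not part of the patch; the mask is trimmed to the file hook for brevity, values copied from the integrity.h hunk below):

    #include <stdio.h>

    #define IMA_MEASURE        0x00000001
    #define IMA_MEASURED       0x00000002
    #define IMA_APPRAISE       0x00000004
    #define IMA_APPRAISED      0x00000008
    #define IMA_COLLECTED      0x00000020
    #define IMA_AUDIT          0x00000040
    #define IMA_AUDITED        0x00000080
    #define IMA_FILE_APPRAISE  0x00000100
    #define IMA_FILE_APPRAISED 0x00000200

    #define IMA_DO_MASK   (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | IMA_FILE_APPRAISE)
    #define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
                           IMA_COLLECTED | IMA_FILE_APPRAISED)

    int main(void)
    {
            /* policy asked for measure + file appraise; file already measured */
            unsigned long iint_flags = IMA_MEASURED | IMA_COLLECTED;
            int action = IMA_MEASURE | IMA_APPRAISE | IMA_FILE_APPRAISE;

            action &= IMA_DO_MASK;
            action &= ~((iint_flags & IMA_DONE_MASK) >> 1);

            /* prints 0x104: measurement is skipped, appraisal still runs */
            printf("remaining action = 0x%x\n", action);
            return 0;
    }
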
/**
@@ -162,18 +250,13 @@ out:
* Measure files being mmapped executable based on the ima_must_measure()
* policy decision.
*
- * Return 0 on success, an error code on failure.
- * (Based on the results of appraise_measurement().)
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
int ima_file_mmap(struct file *file, unsigned long prot)
{
- int rc;
-
- if (!file)
- return 0;
- if (prot & PROT_EXEC)
- rc = process_measurement(file, file->f_dentry->d_name.name,
- MAY_EXEC, FILE_MMAP);
+ if (file && (prot & PROT_EXEC))
+ return process_measurement(file, NULL, MAY_EXEC, MMAP_CHECK);
return 0;
}
@@ -187,16 +270,15 @@ int ima_file_mmap(struct file *file, unsigned long prot)
* So we can be certain that what we verify and measure here is actually
* what is being executed.
*
- * Return 0 on success, an error code on failure.
- * (Based on the results of appraise_measurement().)
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
int ima_bprm_check(struct linux_binprm *bprm)
{
- int rc;
-
- rc = process_measurement(bprm->file, bprm->filename,
- MAY_EXEC, BPRM_CHECK);
- return 0;
+ return process_measurement(bprm->file,
+ (strcmp(bprm->filename, bprm->interp) == 0) ?
+ bprm->filename : bprm->interp,
+ MAY_EXEC, BPRM_CHECK);
}
/**
@@ -206,35 +288,51 @@ int ima_bprm_check(struct linux_binprm *bprm)
*
* Measure files based on the ima_must_measure() policy decision.
*
- * Always return 0 and audit dentry_open failures.
- * (Return code will be based upon measurement appraisal.)
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
*/
int ima_file_check(struct file *file, int mask)
{
- int rc;
-
ima_rdwr_violation_check(file);
- rc = process_measurement(file, file->f_dentry->d_name.name,
- mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
- FILE_CHECK);
- return 0;
+ return process_measurement(file, NULL,
+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC),
+ FILE_CHECK);
}
EXPORT_SYMBOL_GPL(ima_file_check);
+/**
+ * ima_module_check - based on policy, collect/store/appraise measurement.
+ * @file: pointer to the file to be measured/appraised
+ *
+ * Measure/appraise kernel modules based on policy.
+ *
+ * On success return 0. On integrity appraisal error, assuming the file
+ * is in policy and IMA-appraisal is in enforcing mode, return -EACCES.
+ */
+int ima_module_check(struct file *file)
+{
+ if (!file) {
+#ifndef CONFIG_MODULE_SIG_FORCE
+ if ((ima_appraise & IMA_APPRAISE_MODULES) &&
+ (ima_appraise & IMA_APPRAISE_ENFORCE))
+ return -EACCES; /* INTEGRITY_UNKNOWN */
+#endif
+ return 0; /* We rely on module signature checking */
+ }
+ return process_measurement(file, NULL, MAY_EXEC, MODULE_CHECK);
+}
+
static int __init init_ima(void)
{
int error;
+ hash_setup(CONFIG_IMA_DEFAULT_HASH);
error = ima_init();
- ima_initialized = 1;
+ if (!error)
+ ima_initialized = 1;
return error;
}
-static void __exit cleanup_ima(void)
-{
- ima_cleanup();
-}
-
late_initcall(init_ima); /* Start IMA after the TPM is available */
MODULE_DESCRIPTION("Integrity Measurement Architecture");
diff --git a/security/integrity/ima/ima_policy.c b/security/integrity/ima/ima_policy.c
index d45061d02fe..40a7488f672 100644
--- a/security/integrity/ima/ima_policy.c
+++ b/security/integrity/ima/ima_policy.c
@@ -7,7 +7,7 @@
* the Free Software Foundation, version 2 of the License.
*
* ima_policy.c
- * - initialize default measure policy rules
+ * - initialize default measure policy rules
*
*/
#include <linux/module.h>
@@ -16,39 +16,50 @@
#include <linux/magic.h>
#include <linux/parser.h>
#include <linux/slab.h>
+#include <linux/genhd.h>
#include "ima.h"
/* flags definitions */
-#define IMA_FUNC 0x0001
-#define IMA_MASK 0x0002
+#define IMA_FUNC 0x0001
+#define IMA_MASK 0x0002
#define IMA_FSMAGIC 0x0004
#define IMA_UID 0x0008
+#define IMA_FOWNER 0x0010
+#define IMA_FSUUID 0x0020
-enum ima_action { UNKNOWN = -1, DONT_MEASURE = 0, MEASURE };
+#define UNKNOWN 0
+#define MEASURE 0x0001 /* same as IMA_MEASURE */
+#define DONT_MEASURE 0x0002
+#define APPRAISE 0x0004 /* same as IMA_APPRAISE */
+#define DONT_APPRAISE 0x0008
+#define AUDIT 0x0040
#define MAX_LSM_RULES 6
enum lsm_rule_types { LSM_OBJ_USER, LSM_OBJ_ROLE, LSM_OBJ_TYPE,
LSM_SUBJ_USER, LSM_SUBJ_ROLE, LSM_SUBJ_TYPE
};
-struct ima_measure_rule_entry {
+struct ima_rule_entry {
struct list_head list;
- enum ima_action action;
+ int action;
unsigned int flags;
enum ima_hooks func;
int mask;
unsigned long fsmagic;
- uid_t uid;
+ u8 fsuuid[16];
+ kuid_t uid;
+ kuid_t fowner;
struct {
void *rule; /* LSM file metadata specific */
+ void *args_p; /* audit value */
int type; /* audit type */
} lsm[MAX_LSM_RULES];
};
/*
* Without LSM specific knowledge, the default policy can only be
- * written in terms of .action, .func, .mask, .fsmagic, and .uid
+ * written in terms of .action, .func, .mask, .fsmagic, .uid, and .fowner
*/
/*
@@ -57,34 +68,88 @@ struct ima_measure_rule_entry {
* normal users can easily run the machine out of memory simply building
* and running executables.
*/
-static struct ima_measure_rule_entry default_rules[] = {
- {.action = DONT_MEASURE,.fsmagic = PROC_SUPER_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SYSFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = DEBUGFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = TMPFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SECURITYFS_MAGIC,.flags = IMA_FSMAGIC},
- {.action = DONT_MEASURE,.fsmagic = SELINUX_MAGIC,.flags = IMA_FSMAGIC},
- {.action = MEASURE,.func = FILE_MMAP,.mask = MAY_EXEC,
+static struct ima_rule_entry default_rules[] = {
+ {.action = DONT_MEASURE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_MEASURE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = MEASURE, .func = MMAP_CHECK, .mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
- {.action = MEASURE,.func = BPRM_CHECK,.mask = MAY_EXEC,
+ {.action = MEASURE, .func = BPRM_CHECK, .mask = MAY_EXEC,
.flags = IMA_FUNC | IMA_MASK},
- {.action = MEASURE,.func = FILE_CHECK,.mask = MAY_READ,.uid = 0,
+ {.action = MEASURE, .func = FILE_CHECK, .mask = MAY_READ, .uid = GLOBAL_ROOT_UID,
.flags = IMA_FUNC | IMA_MASK | IMA_UID},
+ {.action = MEASURE, .func = MODULE_CHECK, .flags = IMA_FUNC},
};
-static LIST_HEAD(measure_default_rules);
-static LIST_HEAD(measure_policy_rules);
-static struct list_head *ima_measure;
+static struct ima_rule_entry default_appraise_rules[] = {
+ {.action = DONT_APPRAISE, .fsmagic = PROC_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SYSFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEBUGFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = TMPFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = RAMFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = DEVPTS_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = BINFMTFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SECURITYFS_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = SELINUX_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = DONT_APPRAISE, .fsmagic = CGROUP_SUPER_MAGIC, .flags = IMA_FSMAGIC},
+ {.action = APPRAISE, .fowner = GLOBAL_ROOT_UID, .flags = IMA_FOWNER},
+};
+
+static LIST_HEAD(ima_default_rules);
+static LIST_HEAD(ima_policy_rules);
+static struct list_head *ima_rules;
-static DEFINE_MUTEX(ima_measure_mutex);
+static DEFINE_MUTEX(ima_rules_mutex);
static bool ima_use_tcb __initdata;
-static int __init default_policy_setup(char *str)
+static int __init default_measure_policy_setup(char *str)
{
ima_use_tcb = 1;
return 1;
}
-__setup("ima_tcb", default_policy_setup);
+__setup("ima_tcb", default_measure_policy_setup);
+
+static bool ima_use_appraise_tcb __initdata;
+static int __init default_appraise_policy_setup(char *str)
+{
+ ima_use_appraise_tcb = 1;
+ return 1;
+}
+__setup("ima_appraise_tcb", default_appraise_policy_setup);
+
+/*
+ * Although the IMA policy does not change, the LSM policy can be
+ * reloaded, leaving the IMA LSM based rules referring to the old,
+ * stale LSM policy.
+ *
+ * Update the IMA LSM based rules to reflect the reloaded LSM policy.
+ * We assume the rules still exist, and BUG_ON() if they don't.
+ */
+static void ima_lsm_update_rules(void)
+{
+ struct ima_rule_entry *entry, *tmp;
+ int result;
+ int i;
+
+ mutex_lock(&ima_rules_mutex);
+ list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
+ for (i = 0; i < MAX_LSM_RULES; i++) {
+ if (!entry->lsm[i].rule)
+ continue;
+ result = security_filter_rule_init(entry->lsm[i].type,
+ Audit_equal,
+ entry->lsm[i].args_p,
+ &entry->lsm[i].rule);
+ BUG_ON(!entry->lsm[i].rule);
+ }
+ }
+ mutex_unlock(&ima_rules_mutex);
+}
/**
* ima_match_rules - determine whether an inode matches the measure rule.
@@ -95,29 +160,37 @@ __setup("ima_tcb", default_policy_setup);
*
* Returns true on rule match, false on failure.
*/
-static bool ima_match_rules(struct ima_measure_rule_entry *rule,
+static bool ima_match_rules(struct ima_rule_entry *rule,
struct inode *inode, enum ima_hooks func, int mask)
{
struct task_struct *tsk = current;
const struct cred *cred = current_cred();
int i;
- if ((rule->flags & IMA_FUNC) && rule->func != func)
+ if ((rule->flags & IMA_FUNC) &&
+ (rule->func != func && func != POST_SETATTR))
return false;
- if ((rule->flags & IMA_MASK) && rule->mask != mask)
+ if ((rule->flags & IMA_MASK) &&
+ (rule->mask != mask && func != POST_SETATTR))
return false;
if ((rule->flags & IMA_FSMAGIC)
&& rule->fsmagic != inode->i_sb->s_magic)
return false;
- if ((rule->flags & IMA_UID) && rule->uid != cred->uid)
+ if ((rule->flags & IMA_FSUUID) &&
+ memcmp(rule->fsuuid, inode->i_sb->s_uuid, sizeof(rule->fsuuid)))
+ return false;
+ if ((rule->flags & IMA_UID) && !uid_eq(rule->uid, cred->uid))
+ return false;
+ if ((rule->flags & IMA_FOWNER) && !uid_eq(rule->fowner, inode->i_uid))
return false;
for (i = 0; i < MAX_LSM_RULES; i++) {
int rc = 0;
u32 osid, sid;
+ int retried = 0;
if (!rule->lsm[i].rule)
continue;
-
+retry:
switch (i) {
case LSM_OBJ_USER:
case LSM_OBJ_ROLE:
@@ -141,12 +214,39 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
default:
break;
}
+ if ((rc < 0) && (!retried)) {
+ retried = 1;
+ ima_lsm_update_rules();
+ goto retry;
+ }
if (!rc)
return false;
}
return true;
}
+/*
+ * In addition to knowing that we need to appraise the file in general,
+ * we need to differentiate between calling hooks, for hook specific rules.
+ */
+static int get_subaction(struct ima_rule_entry *rule, int func)
+{
+ if (!(rule->flags & IMA_FUNC))
+ return IMA_FILE_APPRAISE;
+
+ switch (func) {
+ case MMAP_CHECK:
+ return IMA_MMAP_APPRAISE;
+ case BPRM_CHECK:
+ return IMA_BPRM_APPRAISE;
+ case MODULE_CHECK:
+ return IMA_MODULE_APPRAISE;
+ case FILE_CHECK:
+ default:
+ return IMA_FILE_APPRAISE;
+ }
+}
+
/**
* ima_match_policy - decision based on LSM and other conditions
* @inode: pointer to an inode for which the policy decision is being made
@@ -160,39 +260,66 @@ static bool ima_match_rules(struct ima_measure_rule_entry *rule,
* as elements in the list are never deleted, nor does the list
* change.)
*/
-int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask)
+int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask,
+ int flags)
{
- struct ima_measure_rule_entry *entry;
+ struct ima_rule_entry *entry;
+ int action = 0, actmask = flags | (flags << 1);
+
+ list_for_each_entry(entry, ima_rules, list) {
+
+ if (!(entry->action & actmask))
+ continue;
+
+ if (!ima_match_rules(entry, inode, func, mask))
+ continue;
+
+ action |= entry->flags & IMA_ACTION_FLAGS;
- list_for_each_entry(entry, ima_measure, list) {
- bool rc;
+ action |= entry->action & IMA_DO_MASK;
+ if (entry->action & IMA_APPRAISE)
+ action |= get_subaction(entry, func);
- rc = ima_match_rules(entry, inode, func, mask);
- if (rc)
- return entry->action;
+ if (entry->action & IMA_DO_MASK)
+ actmask &= ~(entry->action | entry->action << 1);
+ else
+ actmask &= ~(entry->action | entry->action >> 1);
+
+ if (!actmask)
+ break;
}
- return 0;
+
+ return action;
}
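
In ima_match_policy() above, actmask = flags | (flags << 1) pairs each requested action bit with its "dont_" counterpart (MEASURE 0x1 with DONT_MEASURE 0x2, APPRAISE 0x4 with DONT_APPRAISE 0x8), so whichever rule matches first, positive or negative, retires that whole action class and later rules for it are skipped. A rough standalone sketch of the mask bookkeeping (illustrative only, not part of the patch):

    #include <stdio.h>

    #define MEASURE       0x0001
    #define DONT_MEASURE  0x0002
    #define APPRAISE      0x0004
    #define DONT_APPRAISE 0x0008
    #define IMA_DO_MASK   (MEASURE | APPRAISE)

    int main(void)
    {
            int flags = MEASURE | APPRAISE;        /* caller wants both decisions */
            int actmask = flags | (flags << 1);    /* 0xf: do + dont bits         */
            int rule_action = DONT_MEASURE;        /* first matching rule         */

            if (rule_action & IMA_DO_MASK)
                    actmask &= ~(rule_action | rule_action << 1);
            else
                    actmask &= ~(rule_action | rule_action >> 1);

            /* prints 0xc: measurement is settled, only appraise rules remain */
            printf("actmask = 0x%x\n", actmask);
            return 0;
    }
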
/**
* ima_init_policy - initialize the default measure rules.
*
- * ima_measure points to either the measure_default_rules or the
- * the new measure_policy_rules.
+ * ima_rules points to either the ima_default_rules or the
+ * the new ima_policy_rules.
*/
void __init ima_init_policy(void)
{
- int i, entries;
+ int i, measure_entries, appraise_entries;
/* if !ima_use_tcb set entries = 0 so we load NO default rules */
- if (ima_use_tcb)
- entries = ARRAY_SIZE(default_rules);
- else
- entries = 0;
-
- for (i = 0; i < entries; i++)
- list_add_tail(&default_rules[i].list, &measure_default_rules);
- ima_measure = &measure_default_rules;
+ measure_entries = ima_use_tcb ? ARRAY_SIZE(default_rules) : 0;
+ appraise_entries = ima_use_appraise_tcb ?
+ ARRAY_SIZE(default_appraise_rules) : 0;
+
+ for (i = 0; i < measure_entries + appraise_entries; i++) {
+ if (i < measure_entries)
+ list_add_tail(&default_rules[i].list,
+ &ima_default_rules);
+ else {
+ int j = i - measure_entries;
+
+ list_add_tail(&default_appraise_rules[j].list,
+ &ima_default_rules);
+ }
+ }
+
+ ima_rules = &ima_default_rules;
}
/**
@@ -204,13 +331,13 @@ void __init ima_init_policy(void)
*/
void ima_update_policy(void)
{
- const char *op = "policy_update";
+ static const char op[] = "policy_update";
const char *cause = "already exists";
int result = 1;
int audit_info = 0;
- if (ima_measure == &measure_default_rules) {
- ima_measure = &measure_policy_rules;
+ if (ima_rules == &ima_default_rules) {
+ ima_rules = &ima_policy_rules;
cause = "complete";
result = 0;
}
@@ -221,14 +348,20 @@ void ima_update_policy(void)
enum {
Opt_err = -1,
Opt_measure = 1, Opt_dont_measure,
+ Opt_appraise, Opt_dont_appraise,
+ Opt_audit,
Opt_obj_user, Opt_obj_role, Opt_obj_type,
Opt_subj_user, Opt_subj_role, Opt_subj_type,
- Opt_func, Opt_mask, Opt_fsmagic, Opt_uid
+ Opt_func, Opt_mask, Opt_fsmagic, Opt_uid, Opt_fowner,
+ Opt_appraise_type, Opt_fsuuid, Opt_permit_directio
};
static match_table_t policy_tokens = {
{Opt_measure, "measure"},
{Opt_dont_measure, "dont_measure"},
+ {Opt_appraise, "appraise"},
+ {Opt_dont_appraise, "dont_appraise"},
+ {Opt_audit, "audit"},
{Opt_obj_user, "obj_user=%s"},
{Opt_obj_role, "obj_role=%s"},
{Opt_obj_type, "obj_type=%s"},
@@ -238,24 +371,36 @@ static match_table_t policy_tokens = {
{Opt_func, "func=%s"},
{Opt_mask, "mask=%s"},
{Opt_fsmagic, "fsmagic=%s"},
+ {Opt_fsuuid, "fsuuid=%s"},
{Opt_uid, "uid=%s"},
+ {Opt_fowner, "fowner=%s"},
+ {Opt_appraise_type, "appraise_type=%s"},
+ {Opt_permit_directio, "permit_directio"},
{Opt_err, NULL}
};
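
Rule strings accepted by ima_parse_rule() below are built from these tokens, one rule per line written to the IMA policy file. A small userspace sketch, assuming securityfs is mounted at its conventional /sys/kernel/security location (the rule lines are examples consistent with the token table, not taken from the patch):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* each rule is one line combining the tokens listed above */
    static const char *rules[] = {
            "dont_measure fsmagic=0x9fa0\n",               /* PROC_SUPER_MAGIC */
            "measure func=BPRM_CHECK mask=MAY_EXEC\n",
            "appraise fowner=0 appraise_type=imasig\n",
            "audit func=FILE_CHECK mask=MAY_READ uid=0\n",
    };

    int main(void)
    {
            /* adjust the path if securityfs is mounted elsewhere */
            int fd = open("/sys/kernel/security/ima/policy", O_WRONLY);
            size_t i;

            if (fd < 0) {
                    perror("open");
                    return 1;
            }
            for (i = 0; i < sizeof(rules) / sizeof(rules[0]); i++)
                    if (write(fd, rules[i], strlen(rules[i])) < 0)
                            perror("write");
            close(fd);
            return 0;
    }
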
-static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry,
- char *args, int lsm_rule, int audit_type)
+static int ima_lsm_rule_init(struct ima_rule_entry *entry,
+ substring_t *args, int lsm_rule, int audit_type)
{
int result;
if (entry->lsm[lsm_rule].rule)
return -EINVAL;
+ entry->lsm[lsm_rule].args_p = match_strdup(args);
+ if (!entry->lsm[lsm_rule].args_p)
+ return -ENOMEM;
+
entry->lsm[lsm_rule].type = audit_type;
result = security_filter_rule_init(entry->lsm[lsm_rule].type,
- Audit_equal, args,
+ Audit_equal,
+ entry->lsm[lsm_rule].args_p,
&entry->lsm[lsm_rule].rule);
- if (!entry->lsm[lsm_rule].rule)
+ if (!entry->lsm[lsm_rule].rule) {
+ kfree(entry->lsm[lsm_rule].args_p);
return -EINVAL;
+ }
+
return result;
}
@@ -266,7 +411,7 @@ static void ima_log_string(struct audit_buffer *ab, char *key, char *value)
audit_log_format(ab, " ");
}
-static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
+static int ima_parse_rule(char *rule, struct ima_rule_entry *entry)
{
struct audit_buffer *ab;
char *p;
@@ -274,7 +419,8 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);
- entry->uid = -1;
+ entry->uid = INVALID_UID;
+ entry->fowner = INVALID_UID;
entry->action = UNKNOWN;
while ((p = strsep(&rule, " \t")) != NULL) {
substring_t args[MAX_OPT_ARGS];
@@ -303,19 +449,46 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
entry->action = DONT_MEASURE;
break;
+ case Opt_appraise:
+ ima_log_string(ab, "action", "appraise");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = APPRAISE;
+ break;
+ case Opt_dont_appraise:
+ ima_log_string(ab, "action", "dont_appraise");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = DONT_APPRAISE;
+ break;
+ case Opt_audit:
+ ima_log_string(ab, "action", "audit");
+
+ if (entry->action != UNKNOWN)
+ result = -EINVAL;
+
+ entry->action = AUDIT;
+ break;
case Opt_func:
ima_log_string(ab, "func", args[0].from);
if (entry->func)
- result = -EINVAL;
+ result = -EINVAL;
if (strcmp(args[0].from, "FILE_CHECK") == 0)
entry->func = FILE_CHECK;
/* PATH_CHECK is for backwards compat */
else if (strcmp(args[0].from, "PATH_CHECK") == 0)
entry->func = FILE_CHECK;
- else if (strcmp(args[0].from, "FILE_MMAP") == 0)
- entry->func = FILE_MMAP;
+ else if (strcmp(args[0].from, "MODULE_CHECK") == 0)
+ entry->func = MODULE_CHECK;
+ else if ((strcmp(args[0].from, "FILE_MMAP") == 0)
+ || (strcmp(args[0].from, "MMAP_CHECK") == 0))
+ entry->func = MMAP_CHECK;
else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
entry->func = BPRM_CHECK;
else
@@ -350,64 +523,109 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
break;
}
- result = strict_strtoul(args[0].from, 16,
- &entry->fsmagic);
+ result = kstrtoul(args[0].from, 16, &entry->fsmagic);
if (!result)
entry->flags |= IMA_FSMAGIC;
break;
+ case Opt_fsuuid:
+ ima_log_string(ab, "fsuuid", args[0].from);
+
+ if (memchr_inv(entry->fsuuid, 0x00,
+ sizeof(entry->fsuuid))) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = blk_part_pack_uuid(args[0].from,
+ entry->fsuuid);
+ if (!result)
+ entry->flags |= IMA_FSUUID;
+ break;
case Opt_uid:
ima_log_string(ab, "uid", args[0].from);
- if (entry->uid != -1) {
+ if (uid_valid(entry->uid)) {
result = -EINVAL;
break;
}
- result = strict_strtoul(args[0].from, 10, &lnum);
+ result = kstrtoul(args[0].from, 10, &lnum);
if (!result) {
- entry->uid = (uid_t) lnum;
- if (entry->uid != lnum)
+ entry->uid = make_kuid(current_user_ns(), (uid_t)lnum);
+ if (!uid_valid(entry->uid) || (((uid_t)lnum) != lnum))
result = -EINVAL;
else
entry->flags |= IMA_UID;
}
break;
+ case Opt_fowner:
+ ima_log_string(ab, "fowner", args[0].from);
+
+ if (uid_valid(entry->fowner)) {
+ result = -EINVAL;
+ break;
+ }
+
+ result = kstrtoul(args[0].from, 10, &lnum);
+ if (!result) {
+ entry->fowner = make_kuid(current_user_ns(), (uid_t)lnum);
+ if (!uid_valid(entry->fowner) || (((uid_t)lnum) != lnum))
+ result = -EINVAL;
+ else
+ entry->flags |= IMA_FOWNER;
+ }
+ break;
case Opt_obj_user:
ima_log_string(ab, "obj_user", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ result = ima_lsm_rule_init(entry, args,
LSM_OBJ_USER,
AUDIT_OBJ_USER);
break;
case Opt_obj_role:
ima_log_string(ab, "obj_role", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ result = ima_lsm_rule_init(entry, args,
LSM_OBJ_ROLE,
AUDIT_OBJ_ROLE);
break;
case Opt_obj_type:
ima_log_string(ab, "obj_type", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ result = ima_lsm_rule_init(entry, args,
LSM_OBJ_TYPE,
AUDIT_OBJ_TYPE);
break;
case Opt_subj_user:
ima_log_string(ab, "subj_user", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ result = ima_lsm_rule_init(entry, args,
LSM_SUBJ_USER,
AUDIT_SUBJ_USER);
break;
case Opt_subj_role:
ima_log_string(ab, "subj_role", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ result = ima_lsm_rule_init(entry, args,
LSM_SUBJ_ROLE,
AUDIT_SUBJ_ROLE);
break;
case Opt_subj_type:
ima_log_string(ab, "subj_type", args[0].from);
- result = ima_lsm_rule_init(entry, args[0].from,
+ result = ima_lsm_rule_init(entry, args,
LSM_SUBJ_TYPE,
AUDIT_SUBJ_TYPE);
break;
+ case Opt_appraise_type:
+ if (entry->action != APPRAISE) {
+ result = -EINVAL;
+ break;
+ }
+
+ ima_log_string(ab, "appraise_type", args[0].from);
+ if ((strcmp(args[0].from, "imasig")) == 0)
+ entry->flags |= IMA_DIGSIG_REQUIRED;
+ else
+ result = -EINVAL;
+ break;
+ case Opt_permit_directio:
+ entry->flags |= IMA_PERMIT_DIRECTIO;
+ break;
case Opt_err:
ima_log_string(ab, "UNKNOWN", p);
result = -EINVAL;
@@ -416,14 +634,15 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
}
if (!result && (entry->action == UNKNOWN))
result = -EINVAL;
-
- audit_log_format(ab, "res=%d", !!result);
+ else if (entry->func == MODULE_CHECK)
+ ima_appraise |= IMA_APPRAISE_MODULES;
+ audit_log_format(ab, "res=%d", !result);
audit_log_end(ab);
return result;
}
/**
- * ima_parse_add_rule - add a rule to measure_policy_rules
+ * ima_parse_add_rule - add a rule to ima_policy_rules
* @rule - ima measurement policy rule
*
* Uses a mutex to protect the policy list from multiple concurrent writers.
@@ -431,14 +650,14 @@ static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
*/
ssize_t ima_parse_add_rule(char *rule)
{
- const char *op = "update_policy";
+ static const char op[] = "update_policy";
char *p;
- struct ima_measure_rule_entry *entry;
+ struct ima_rule_entry *entry;
ssize_t result, len;
int audit_info = 0;
/* Prevent installed policy from changing */
- if (ima_measure != &measure_default_rules) {
+ if (ima_rules != &ima_default_rules) {
integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
NULL, op, "already exists",
-EACCES, audit_info);
@@ -471,9 +690,9 @@ ssize_t ima_parse_add_rule(char *rule)
return result;
}
- mutex_lock(&ima_measure_mutex);
- list_add_tail(&entry->list, &measure_policy_rules);
- mutex_unlock(&ima_measure_mutex);
+ mutex_lock(&ima_rules_mutex);
+ list_add_tail(&entry->list, &ima_policy_rules);
+ mutex_unlock(&ima_rules_mutex);
return len;
}
@@ -481,12 +700,16 @@ ssize_t ima_parse_add_rule(char *rule)
/* ima_delete_rules called to cleanup invalid policy */
void ima_delete_rules(void)
{
- struct ima_measure_rule_entry *entry, *tmp;
+ struct ima_rule_entry *entry, *tmp;
+ int i;
+
+ mutex_lock(&ima_rules_mutex);
+ list_for_each_entry_safe(entry, tmp, &ima_policy_rules, list) {
+ for (i = 0; i < MAX_LSM_RULES; i++)
+ kfree(entry->lsm[i].args_p);
- mutex_lock(&ima_measure_mutex);
- list_for_each_entry_safe(entry, tmp, &measure_policy_rules, list) {
list_del(&entry->list);
kfree(entry);
}
- mutex_unlock(&ima_measure_mutex);
+ mutex_unlock(&ima_rules_mutex);
}
diff --git a/security/integrity/ima/ima_queue.c b/security/integrity/ima/ima_queue.c
index 55a6271bce7..552705d5a78 100644
--- a/security/integrity/ima/ima_queue.c
+++ b/security/integrity/ima/ima_queue.c
@@ -18,6 +18,9 @@
* The measurement list is append-only. No entry is
* ever removed or changed during the boot-cycle.
*/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
@@ -45,13 +48,12 @@ static struct ima_queue_entry *ima_lookup_digest_entry(u8 *digest_value)
{
struct ima_queue_entry *qe, *ret = NULL;
unsigned int key;
- struct hlist_node *pos;
int rc;
key = ima_hash_key(digest_value);
rcu_read_lock();
- hlist_for_each_entry_rcu(qe, pos, &ima_htable.queue[key], hnext) {
- rc = memcmp(qe->entry->digest, digest_value, IMA_DIGEST_SIZE);
+ hlist_for_each_entry_rcu(qe, &ima_htable.queue[key], hnext) {
+ rc = memcmp(qe->entry->digest, digest_value, TPM_DIGEST_SIZE);
if (rc == 0) {
ret = qe;
break;
@@ -73,7 +75,7 @@ static int ima_add_digest_entry(struct ima_template_entry *entry)
qe = kmalloc(sizeof(*qe), GFP_KERNEL);
if (qe == NULL) {
- pr_err("IMA: OUT OF MEMORY ERROR creating queue entry.\n");
+ pr_err("OUT OF MEMORY ERROR creating queue entry\n");
return -ENOMEM;
}
qe->entry = entry;
@@ -96,8 +98,7 @@ static int ima_pcr_extend(const u8 *hash)
result = tpm_pcr_extend(TPM_ANY_NUM, CONFIG_IMA_MEASURE_PCR_IDX, hash);
if (result != 0)
- pr_err("IMA: Error Communicating to TPM chip, result: %d\n",
- result);
+ pr_err("Error Communicating to TPM chip, result: %d\n", result);
return result;
}
@@ -105,9 +106,10 @@ static int ima_pcr_extend(const u8 *hash)
* and extend the pcr.
*/
int ima_add_template_entry(struct ima_template_entry *entry, int violation,
- const char *op, struct inode *inode)
+ const char *op, struct inode *inode,
+ const unsigned char *filename)
{
- u8 digest[IMA_DIGEST_SIZE];
+ u8 digest[TPM_DIGEST_SIZE];
const char *audit_cause = "hash_added";
char tpm_audit_cause[AUDIT_CAUSE_LEN_MAX];
int audit_info = 1;
@@ -115,7 +117,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
mutex_lock(&ima_extend_list_mutex);
if (!violation) {
- memcpy(digest, entry->digest, sizeof digest);
+ memcpy(digest, entry->digest, sizeof(digest));
if (ima_lookup_digest_entry(digest)) {
audit_cause = "hash_exists";
result = -EEXIST;
@@ -131,7 +133,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
if (violation) /* invalidate pcr */
- memset(digest, 0xff, sizeof digest);
+ memset(digest, 0xff, sizeof(digest));
tpmresult = ima_pcr_extend(digest);
if (tpmresult != 0) {
@@ -142,8 +144,7 @@ int ima_add_template_entry(struct ima_template_entry *entry, int violation,
}
out:
mutex_unlock(&ima_extend_list_mutex);
- integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode,
- entry->template.file_name,
+ integrity_audit_msg(AUDIT_INTEGRITY_PCR, inode, filename,
op, audit_cause, result, audit_info);
return result;
}
diff --git a/security/integrity/ima/ima_template.c b/security/integrity/ima/ima_template.c
new file mode 100644
index 00000000000..a076a967ec4
--- /dev/null
+++ b/security/integrity/ima/ima_template.c
@@ -0,0 +1,190 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template.c
+ * Helpers to manage template descriptors.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <crypto/hash_info.h>
+
+#include "ima.h"
+#include "ima_template_lib.h"
+
+static struct ima_template_desc defined_templates[] = {
+ {.name = IMA_TEMPLATE_IMA_NAME, .fmt = IMA_TEMPLATE_IMA_FMT},
+ {.name = "ima-ng", .fmt = "d-ng|n-ng"},
+ {.name = "ima-sig", .fmt = "d-ng|n-ng|sig"},
+};
+
+static struct ima_template_field supported_fields[] = {
+ {.field_id = "d", .field_init = ima_eventdigest_init,
+ .field_show = ima_show_template_digest},
+ {.field_id = "n", .field_init = ima_eventname_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "d-ng", .field_init = ima_eventdigest_ng_init,
+ .field_show = ima_show_template_digest_ng},
+ {.field_id = "n-ng", .field_init = ima_eventname_ng_init,
+ .field_show = ima_show_template_string},
+ {.field_id = "sig", .field_init = ima_eventsig_init,
+ .field_show = ima_show_template_sig},
+};
+
+static struct ima_template_desc *ima_template;
+static struct ima_template_desc *lookup_template_desc(const char *name);
+
+static int __init ima_template_setup(char *str)
+{
+ struct ima_template_desc *template_desc;
+ int template_len = strlen(str);
+
+ /*
+ * Verify that a template with the supplied name exists.
+ * If not, use CONFIG_IMA_DEFAULT_TEMPLATE.
+ */
+ template_desc = lookup_template_desc(str);
+ if (!template_desc)
+ return 1;
+
+ /*
+ * Verify whether the current hash algorithm is supported
+ * by the 'ima' template.
+ */
+ if (template_len == 3 && strcmp(str, IMA_TEMPLATE_IMA_NAME) == 0 &&
+ ima_hash_algo != HASH_ALGO_SHA1 && ima_hash_algo != HASH_ALGO_MD5) {
+ pr_err("template does not support hash alg\n");
+ return 1;
+ }
+
+ ima_template = template_desc;
+ return 1;
+}
+__setup("ima_template=", ima_template_setup);
+
+static struct ima_template_desc *lookup_template_desc(const char *name)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+ if (strcmp(defined_templates[i].name, name) == 0)
+ return defined_templates + i;
+ }
+
+ return NULL;
+}
+
+static struct ima_template_field *lookup_template_field(const char *field_id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(supported_fields); i++)
+ if (strncmp(supported_fields[i].field_id, field_id,
+ IMA_TEMPLATE_FIELD_ID_MAX_LEN) == 0)
+ return &supported_fields[i];
+ return NULL;
+}
+
+static int template_fmt_size(const char *template_fmt)
+{
+ char c;
+ int template_fmt_len = strlen(template_fmt);
+ int i = 0, j = 0;
+
+ while (i < template_fmt_len) {
+ c = template_fmt[i];
+ if (c == '|')
+ j++;
+ i++;
+ }
+
+ return j + 1;
+}
+
+static int template_desc_init_fields(const char *template_fmt,
+ struct ima_template_field ***fields,
+ int *num_fields)
+{
+ char *c, *template_fmt_copy, *template_fmt_ptr;
+ int template_num_fields = template_fmt_size(template_fmt);
+ int i, result = 0;
+
+ if (template_num_fields > IMA_TEMPLATE_NUM_FIELDS_MAX)
+ return -EINVAL;
+
+ /* copying is needed as strsep() modifies the original buffer */
+ template_fmt_copy = kstrdup(template_fmt, GFP_KERNEL);
+ if (template_fmt_copy == NULL)
+ return -ENOMEM;
+
+ *fields = kzalloc(template_num_fields * sizeof(*fields), GFP_KERNEL);
+ if (*fields == NULL) {
+ result = -ENOMEM;
+ goto out;
+ }
+
+ template_fmt_ptr = template_fmt_copy;
+ for (i = 0; (c = strsep(&template_fmt_ptr, "|")) != NULL &&
+ i < template_num_fields; i++) {
+ struct ima_template_field *f = lookup_template_field(c);
+
+ if (!f) {
+ result = -ENOENT;
+ goto out;
+ }
+ (*fields)[i] = f;
+ }
+ *num_fields = i;
+out:
+ if (result < 0) {
+ kfree(*fields);
+ *fields = NULL;
+ }
+ kfree(template_fmt_copy);
+ return result;
+}
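
The "fmt" strings in defined_templates are simply '|'-separated field identifiers: template_fmt_size() counts the separators and template_desc_init_fields() resolves each identifier against supported_fields. A quick userspace illustration of the same strsep() split (hypothetical, just to show the parsing):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
            /* the "ima-sig" descriptor from the table above */
            char *fmt = strdup("d-ng|n-ng|sig");
            char *p = fmt, *field;
            int n = 0;

            while ((field = strsep(&p, "|")) != NULL)
                    printf("field %d: %s\n", n++, field);   /* d-ng, n-ng, sig */

            printf("num_fields = %d\n", n);                 /* 3 */
            free(fmt);
            return 0;
    }
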
+
+static int init_defined_templates(void)
+{
+ int i = 0;
+ int result = 0;
+
+ /* Init defined templates. */
+ for (i = 0; i < ARRAY_SIZE(defined_templates); i++) {
+ struct ima_template_desc *template = &defined_templates[i];
+
+ result = template_desc_init_fields(template->fmt,
+ &(template->fields),
+ &(template->num_fields));
+ if (result < 0)
+ return result;
+ }
+ return result;
+}
+
+struct ima_template_desc *ima_template_desc_current(void)
+{
+ if (!ima_template)
+ ima_template =
+ lookup_template_desc(CONFIG_IMA_DEFAULT_TEMPLATE);
+ return ima_template;
+}
+
+int ima_init_template(void)
+{
+ int result;
+
+ result = init_defined_templates();
+ if (result < 0)
+ return result;
+
+ return 0;
+}
diff --git a/security/integrity/ima/ima_template_lib.c b/security/integrity/ima/ima_template_lib.c
new file mode 100644
index 00000000000..1506f024857
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.c
@@ -0,0 +1,342 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.c
+ * Library of supported template fields.
+ */
+#include <crypto/hash_info.h>
+
+#include "ima_template_lib.h"
+
+static bool ima_template_hash_algo_allowed(u8 algo)
+{
+ if (algo == HASH_ALGO_SHA1 || algo == HASH_ALGO_MD5)
+ return true;
+
+ return false;
+}
+
+enum data_formats {
+ DATA_FMT_DIGEST = 0,
+ DATA_FMT_DIGEST_WITH_ALGO,
+ DATA_FMT_STRING,
+ DATA_FMT_HEX
+};
+
+static int ima_write_template_field_data(const void *data, const u32 datalen,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf, *buf_ptr;
+ u32 buflen = datalen;
+
+ if (datafmt == DATA_FMT_STRING)
+ buflen = datalen + 1;
+
+ buf = kzalloc(buflen, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ memcpy(buf, data, datalen);
+
+ /*
+ * Replace all space characters with underscore for event names and
+ * strings. This prevents filenames that contain spaces, or that end
+ * with the suffix ' (deleted)', from being split into multiple
+ * template fields when a measurement list is parsed (the space is the
+ * delimiter character for measurement lists in ASCII format); e.g.
+ * 'a file (deleted)' is recorded as 'a_file_(deleted)'.
+ */
+ if (datafmt == DATA_FMT_STRING) {
+ for (buf_ptr = buf; buf_ptr - buf < datalen; buf_ptr++)
+ if (*buf_ptr == ' ')
+ *buf_ptr = '_';
+ }
+
+ field_data->data = buf;
+ field_data->len = buflen;
+ return 0;
+}
+
+static void ima_show_template_data_ascii(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u8 *buf_ptr = field_data->data, buflen = field_data->len;
+
+ switch (datafmt) {
+ case DATA_FMT_DIGEST_WITH_ALGO:
+ buf_ptr = strnchr(field_data->data, buflen, ':');
+ if (buf_ptr != field_data->data)
+ seq_printf(m, "%s", field_data->data);
+
+ /* skip ':' and '\0' */
+ buf_ptr += 2;
+ buflen -= buf_ptr - field_data->data;
+ case DATA_FMT_DIGEST:
+ case DATA_FMT_HEX:
+ if (!buflen)
+ break;
+ ima_print_digest(m, buf_ptr, buflen);
+ break;
+ case DATA_FMT_STRING:
+ seq_printf(m, "%s", buf_ptr);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ima_show_template_data_binary(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ u32 len = (show == IMA_SHOW_BINARY_OLD_STRING_FMT) ?
+ strlen(field_data->data) : field_data->len;
+
+ if (show != IMA_SHOW_BINARY_NO_FIELD_LEN)
+ ima_putc(m, &len, sizeof(len));
+
+ if (!len)
+ return;
+
+ ima_putc(m, field_data->data, len);
+}
+
+static void ima_show_template_field_data(struct seq_file *m,
+ enum ima_show_type show,
+ enum data_formats datafmt,
+ struct ima_field_data *field_data)
+{
+ switch (show) {
+ case IMA_SHOW_ASCII:
+ ima_show_template_data_ascii(m, show, datafmt, field_data);
+ break;
+ case IMA_SHOW_BINARY:
+ case IMA_SHOW_BINARY_NO_FIELD_LEN:
+ case IMA_SHOW_BINARY_OLD_STRING_FMT:
+ ima_show_template_data_binary(m, show, datafmt, field_data);
+ break;
+ default:
+ break;
+ }
+}
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST, field_data);
+}
+
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_DIGEST_WITH_ALGO,
+ field_data);
+}
+
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_STRING, field_data);
+}
+
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data)
+{
+ ima_show_template_field_data(m, show, DATA_FMT_HEX, field_data);
+}
+
+static int ima_eventdigest_init_common(u8 *digest, u32 digestsize, u8 hash_algo,
+ struct ima_field_data *field_data)
+{
+ /*
+ * digest formats:
+ * - DATA_FMT_DIGEST: digest
+ * - DATA_FMT_DIGEST_WITH_ALGO: [<hash algo>] + ':' + '\0' + digest,
+ * where <hash algo> is provided if the hash algorithm is not
+ * SHA1 or MD5
+ */
+ u8 buffer[CRYPTO_MAX_ALG_NAME + 2 + IMA_MAX_DIGEST_SIZE] = { 0 };
+ enum data_formats fmt = DATA_FMT_DIGEST;
+ u32 offset = 0;
+
+ if (hash_algo < HASH_ALGO__LAST) {
+ fmt = DATA_FMT_DIGEST_WITH_ALGO;
+ offset += snprintf(buffer, CRYPTO_MAX_ALG_NAME + 1, "%s",
+ hash_algo_name[hash_algo]);
+ buffer[offset] = ':';
+ offset += 2;
+ }
+
+ if (digest)
+ memcpy(buffer + offset, digest, digestsize);
+ else
+ /*
+ * If digest is NULL, the event being recorded is a violation.
+ * Make room for the digest by increasing the offset by
+ * IMA_DIGEST_SIZE.
+ */
+ offset += IMA_DIGEST_SIZE;
+
+ return ima_write_template_field_data(buffer, offset + digestsize,
+ fmt, field_data);
+}
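
For the DATA_FMT_DIGEST_WITH_ALGO case the field data is therefore "<algo name>" ':' '\0' followed by the raw digest; with SHA-256, for example, the digest starts at offset 8 and the stored field length is 8 + 32 bytes. A small standalone sketch of the same prefix construction, using a fixed algorithm name in place of hash_algo_name[]:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            unsigned char buffer[64 + 32] = { 0 };
            const char *algo = "sha256";     /* stand-in for hash_algo_name[algo] */
            unsigned char digest[32] = { 0xaa, 0xbb /* ... */ };
            int offset;

            offset = snprintf((char *)buffer, sizeof(buffer), "%s", algo);
            buffer[offset] = ':';
            offset += 2;                     /* keep the '\0' terminator in place */
            memcpy(buffer + offset, digest, sizeof(digest));

            /* total field length, as stored by ima_write_template_field_data() */
            printf("field length = %d\n", offset + (int)sizeof(digest));   /* 40 */
            return 0;
    }
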
+
+/*
+ * This function writes the digest of an event (with size limit).
+ */
+int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ struct {
+ struct ima_digest_data hdr;
+ char digest[IMA_MAX_DIGEST_SIZE];
+ } hash;
+ u8 *cur_digest = NULL;
+ u32 cur_digestsize = 0;
+ struct inode *inode;
+ int result;
+
+ memset(&hash, 0, sizeof(hash));
+
+ if (!iint) /* recording a violation. */
+ goto out;
+
+ if (ima_template_hash_algo_allowed(iint->ima_hash->algo)) {
+ cur_digest = iint->ima_hash->digest;
+ cur_digestsize = iint->ima_hash->length;
+ goto out;
+ }
+
+ if (!file) /* missing info to re-calculate the digest */
+ return -EINVAL;
+
+ inode = file_inode(file);
+ hash.hdr.algo = ima_template_hash_algo_allowed(ima_hash_algo) ?
+ ima_hash_algo : HASH_ALGO_SHA1;
+ result = ima_calc_file_hash(file, &hash.hdr);
+ if (result) {
+ integrity_audit_msg(AUDIT_INTEGRITY_DATA, inode,
+ filename, "collect_data",
+ "failed", result, 0);
+ return result;
+ }
+ cur_digest = hash.hdr.digest;
+ cur_digestsize = hash.hdr.length;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ HASH_ALGO__LAST, field_data);
+}
+
+/*
+ * This function writes the digest of an event (without size limit).
+ */
+int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data)
+{
+ u8 *cur_digest = NULL, hash_algo = HASH_ALGO_SHA1;
+ u32 cur_digestsize = 0;
+
+ /* If iint is NULL, we are recording a violation. */
+ if (!iint)
+ goto out;
+
+ cur_digest = iint->ima_hash->digest;
+ cur_digestsize = iint->ima_hash->length;
+
+ hash_algo = iint->ima_hash->algo;
+out:
+ return ima_eventdigest_init_common(cur_digest, cur_digestsize,
+ hash_algo, field_data);
+}
+
+static int ima_eventname_init_common(struct integrity_iint_cache *iint,
+ struct file *file,
+ const unsigned char *filename,
+ struct ima_field_data *field_data,
+ bool size_limit)
+{
+ const char *cur_filename = NULL;
+ u32 cur_filename_len = 0;
+
+ BUG_ON(filename == NULL && file == NULL);
+
+ if (filename) {
+ cur_filename = filename;
+ cur_filename_len = strlen(filename);
+
+ if (!size_limit || cur_filename_len <= IMA_EVENT_NAME_LEN_MAX)
+ goto out;
+ }
+
+ if (file) {
+ cur_filename = file->f_dentry->d_name.name;
+ cur_filename_len = strlen(cur_filename);
+ } else
+ /*
+ * Truncate filename if the latter is too long and
+ * the file descriptor is not available.
+ */
+ cur_filename_len = IMA_EVENT_NAME_LEN_MAX;
+out:
+ return ima_write_template_field_data(cur_filename, cur_filename_len,
+ DATA_FMT_STRING, field_data);
+}
+
+/*
+ * This function writes the name of an event (with size limit).
+ */
+int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(iint, file, filename,
+ field_data, true);
+}
+
+/*
+ * This function writes the name of an event (without size limit).
+ */
+int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ return ima_eventname_init_common(iint, file, filename,
+ field_data, false);
+}
+
+/*
+ * ima_eventsig_init - include the file signature as part of the template data
+ */
+int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data)
+{
+ enum data_formats fmt = DATA_FMT_HEX;
+ int rc = 0;
+
+ if ((!xattr_value) || (xattr_value->type != EVM_IMA_XATTR_DIGSIG))
+ goto out;
+
+ rc = ima_write_template_field_data(xattr_value, xattr_len, fmt,
+ field_data);
+out:
+ return rc;
+}
diff --git a/security/integrity/ima/ima_template_lib.h b/security/integrity/ima/ima_template_lib.h
new file mode 100644
index 00000000000..63f6b52cb1c
--- /dev/null
+++ b/security/integrity/ima/ima_template_lib.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2013 Politecnico di Torino, Italy
+ * TORSEC group -- http://security.polito.it
+ *
+ * Author: Roberto Sassu <roberto.sassu@polito.it>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation, version 2 of the
+ * License.
+ *
+ * File: ima_template_lib.h
+ * Header for the library of supported template fields.
+ */
+#ifndef __LINUX_IMA_TEMPLATE_LIB_H
+#define __LINUX_IMA_TEMPLATE_LIB_H
+
+#include <linux/seq_file.h>
+#include "ima.h"
+
+void ima_show_template_digest(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_digest_ng(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_string(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+void ima_show_template_sig(struct seq_file *m, enum ima_show_type show,
+ struct ima_field_data *field_data);
+int ima_eventdigest_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventname_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventdigest_ng_init(struct integrity_iint_cache *iint,
+ struct file *file, const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value,
+ int xattr_len, struct ima_field_data *field_data);
+int ima_eventname_ng_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+int ima_eventsig_init(struct integrity_iint_cache *iint, struct file *file,
+ const unsigned char *filename,
+ struct evm_ima_xattr_data *xattr_value, int xattr_len,
+ struct ima_field_data *field_data);
+#endif /* __LINUX_IMA_TEMPLATE_LIB_H */
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index 7a25ecec5aa..33c0a70f6b1 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -14,30 +14,98 @@
#include <linux/types.h>
#include <linux/integrity.h>
#include <crypto/sha.h>
+#include <linux/key.h>
+
+/* iint action cache flags */
+#define IMA_MEASURE 0x00000001
+#define IMA_MEASURED 0x00000002
+#define IMA_APPRAISE 0x00000004
+#define IMA_APPRAISED 0x00000008
+/*#define IMA_COLLECT 0x00000010 do not use this flag */
+#define IMA_COLLECTED 0x00000020
+#define IMA_AUDIT 0x00000040
+#define IMA_AUDITED 0x00000080
/* iint cache flags */
-#define IMA_MEASURED 0x01
+#define IMA_ACTION_FLAGS 0xff000000
+#define IMA_DIGSIG 0x01000000
+#define IMA_DIGSIG_REQUIRED 0x02000000
+#define IMA_PERMIT_DIRECTIO 0x04000000
+
+#define IMA_DO_MASK (IMA_MEASURE | IMA_APPRAISE | IMA_AUDIT | \
+ IMA_APPRAISE_SUBMASK)
+#define IMA_DONE_MASK (IMA_MEASURED | IMA_APPRAISED | IMA_AUDITED | \
+ IMA_COLLECTED | IMA_APPRAISED_SUBMASK)
+
+/* iint subaction appraise cache flags */
+#define IMA_FILE_APPRAISE 0x00000100
+#define IMA_FILE_APPRAISED 0x00000200
+#define IMA_MMAP_APPRAISE 0x00000400
+#define IMA_MMAP_APPRAISED 0x00000800
+#define IMA_BPRM_APPRAISE 0x00001000
+#define IMA_BPRM_APPRAISED 0x00002000
+#define IMA_MODULE_APPRAISE 0x00004000
+#define IMA_MODULE_APPRAISED 0x00008000
+#define IMA_APPRAISE_SUBMASK (IMA_FILE_APPRAISE | IMA_MMAP_APPRAISE | \
+ IMA_BPRM_APPRAISE | IMA_MODULE_APPRAISE)
+#define IMA_APPRAISED_SUBMASK (IMA_FILE_APPRAISED | IMA_MMAP_APPRAISED | \
+ IMA_BPRM_APPRAISED | IMA_MODULE_APPRAISED)
enum evm_ima_xattr_type {
IMA_XATTR_DIGEST = 0x01,
EVM_XATTR_HMAC,
EVM_IMA_XATTR_DIGSIG,
+ IMA_XATTR_DIGEST_NG,
};
struct evm_ima_xattr_data {
u8 type;
u8 digest[SHA1_DIGEST_SIZE];
-} __attribute__((packed));
+} __packed;
+
+#define IMA_MAX_DIGEST_SIZE 64
+
+struct ima_digest_data {
+ u8 algo;
+ u8 length;
+ union {
+ struct {
+ u8 unused;
+ u8 type;
+ } sha1;
+ struct {
+ u8 type;
+ u8 algo;
+ } ng;
+ u8 data[2];
+ } xattr;
+ u8 digest[0];
+} __packed;
+
+/*
+ * signature format v2 - for using with asymmetric keys
+ */
+struct signature_v2_hdr {
+ uint8_t type; /* xattr type */
+ uint8_t version; /* signature format version */
+ uint8_t hash_algo; /* Digest algorithm [enum pkey_hash_algo] */
+ uint32_t keyid; /* IMA key identifier - not X509/PGP specific */
+ uint16_t sig_size; /* signature size */
+ uint8_t sig[0]; /* signature payload */
+} __packed;
/* integrity data associated with an inode */
struct integrity_iint_cache {
- struct rb_node rb_node; /* rooted in integrity_iint_tree */
+ struct rb_node rb_node; /* rooted in integrity_iint_tree */
struct inode *inode; /* back pointer to inode in question */
u64 version; /* track inode changes */
- unsigned char flags;
- u8 digest[SHA1_DIGEST_SIZE];
- struct mutex mutex; /* protects: version, flags, digest */
- enum integrity_status evm_status;
+ unsigned long flags;
+ enum integrity_status ima_file_status:4;
+ enum integrity_status ima_mmap_status:4;
+ enum integrity_status ima_bprm_status:4;
+ enum integrity_status ima_module_status:4;
+ enum integrity_status evm_status:4;
+ struct ima_digest_data *ima_hash;
};
/* rbtree tree calls to lookup, insert, delete
@@ -54,7 +122,7 @@ struct integrity_iint_cache *integrity_iint_find(struct inode *inode);
#ifdef CONFIG_INTEGRITY_SIGNATURE
int integrity_digsig_verify(const unsigned int id, const char *sig, int siglen,
- const char *digest, int digestlen);
+ const char *digest, int digestlen);
#else
@@ -67,5 +135,30 @@ static inline int integrity_digsig_verify(const unsigned int id,
#endif /* CONFIG_INTEGRITY_SIGNATURE */
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen);
+#else
+static inline int asymmetric_verify(struct key *keyring, const char *sig,
+ int siglen, const char *data, int datalen)
+{
+ return -EOPNOTSUPP;
+}
+#endif
+
+#ifdef CONFIG_INTEGRITY_AUDIT
+/* declarations */
+void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname, const char *op,
+ const char *cause, int result, int info);
+#else
+static inline void integrity_audit_msg(int audit_msgno, struct inode *inode,
+ const unsigned char *fname,
+ const char *op, const char *cause,
+ int result, int info)
+{
+}
+#endif
+
/* set during initialization */
extern int iint_initialized;
diff --git a/security/integrity/ima/ima_audit.c b/security/integrity/integrity_audit.c
index 2ad942fb1e2..90987d15b6f 100644
--- a/security/integrity/ima/ima_audit.c
+++ b/security/integrity/integrity_audit.c
@@ -7,43 +7,42 @@
* the Free Software Foundation, version 2 of the License.
*
* File: integrity_audit.c
- * Audit calls for the integrity subsystem
+ * Audit calls for the integrity subsystem
*/
#include <linux/fs.h>
#include <linux/gfp.h>
#include <linux/audit.h>
-#include "ima.h"
+#include "integrity.h"
-static int ima_audit;
-
-#ifdef CONFIG_IMA_AUDIT
+static int integrity_audit_info;
/* ima_audit_setup - enable informational auditing messages */
-static int __init ima_audit_setup(char *str)
+static int __init integrity_audit_setup(char *str)
{
unsigned long audit;
- if (!strict_strtoul(str, 0, &audit))
- ima_audit = audit ? 1 : 0;
+ if (!kstrtoul(str, 0, &audit))
+ integrity_audit_info = audit ? 1 : 0;
return 1;
}
-__setup("ima_audit=", ima_audit_setup);
-#endif
+__setup("integrity_audit=", integrity_audit_setup);
void integrity_audit_msg(int audit_msgno, struct inode *inode,
const unsigned char *fname, const char *op,
const char *cause, int result, int audit_info)
{
struct audit_buffer *ab;
+ char name[TASK_COMM_LEN];
- if (!ima_audit && audit_info == 1) /* Skip informational messages */
+ if (!integrity_audit_info && audit_info == 1) /* Skip info messages */
return;
ab = audit_log_start(current->audit_context, GFP_KERNEL, audit_msgno);
audit_log_format(ab, "pid=%d uid=%u auid=%u ses=%u",
- current->pid, current_cred()->uid,
- audit_get_loginuid(current),
+ task_pid_nr(current),
+ from_kuid(&init_user_ns, current_cred()->uid),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
audit_log_task_context(ab);
audit_log_format(ab, " op=");
@@ -51,7 +50,7 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
audit_log_format(ab, " cause=");
audit_log_string(ab, cause);
audit_log_format(ab, " comm=");
- audit_log_untrustedstring(ab, current->comm);
+ audit_log_untrustedstring(ab, get_task_comm(name, current));
if (fname) {
audit_log_format(ab, " name=");
audit_log_untrustedstring(ab, fname);
@@ -61,6 +60,6 @@ void integrity_audit_msg(int audit_msgno, struct inode *inode,
audit_log_untrustedstring(ab, inode->i_sb->s_id);
audit_log_format(ab, " ino=%lu", inode->i_ino);
}
- audit_log_format(ab, " res=%d", !result ? 0 : 1);
+ audit_log_format(ab, " res=%d", !result);
audit_log_end(ab);
}
diff --git a/security/keys/Kconfig b/security/keys/Kconfig
new file mode 100644
index 00000000000..a4f3f8c48d6
--- /dev/null
+++ b/security/keys/Kconfig
@@ -0,0 +1,100 @@
+#
+# Key management configuration
+#
+
+config KEYS
+ bool "Enable access key retention support"
+ select ASSOCIATIVE_ARRAY
+ help
+ This option provides support for retaining authentication tokens and
+ access keys in the kernel.
+
+ It also includes provision of methods by which such keys might be
+ associated with a process so that network filesystems, encryption
+ support and the like can find them.
+
+ Furthermore, a special type of key is available that acts as a keyring:
+ a searchable sequence of keys. Each process is equipped with access
+ to five standard keyrings: UID-specific, GID-specific, session,
+ process and thread.
+
+ If you are unsure as to whether this is required, answer N.
+
+config PERSISTENT_KEYRINGS
+ bool "Enable register of persistent per-UID keyrings"
+ depends on KEYS
+ help
+ This option provides a register of persistent per-UID keyrings,
+ primarily aimed at Kerberos key storage. The keyrings are persistent
+ in the sense that they stay around after all processes of that UID
+ have exited, not that they survive the machine being rebooted.
+
+ A particular keyring may be accessed by either the user whose keyring
+ it is or by a process with administrative privileges. The active
+ LSMs get to rule on which admin-level processes get to access the
+ cache.
+
+ Keyrings are created and added into the register upon demand and get
+ removed if they expire (a default timeout is set upon creation).
+
+config BIG_KEYS
+ bool "Large payload keys"
+ depends on KEYS
+ depends on TMPFS
+ help
+ This option provides support for holding large keys within the kernel
+ (for example Kerberos ticket caches). The data may be stored out to
+ swapspace by tmpfs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config TRUSTED_KEYS
+ tristate "TRUSTED KEYS"
+ depends on KEYS && TCG_TPM
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_SHA1
+ help
+ This option provides support for creating, sealing, and unsealing
+ keys in the kernel. Trusted keys are random number symmetric keys,
+ generated and RSA-sealed by the TPM. The TPM only unseals the keys,
+ if the boot PCRs and other criteria match. Userspace will only ever
+ see encrypted blobs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config ENCRYPTED_KEYS
+ tristate "ENCRYPTED KEYS"
+ depends on KEYS
+ select CRYPTO
+ select CRYPTO_HMAC
+ select CRYPTO_AES
+ select CRYPTO_CBC
+ select CRYPTO_SHA256
+ select CRYPTO_RNG
+ help
+ This option provides support for creating/encrypting/decrypting keys
+ in the kernel. Encrypted keys are kernel generated random numbers,
+ which are encrypted/decrypted with a 'master' symmetric key. The
+ 'master' key can be either a trusted-key or user-key type.
+ Userspace only ever sees/stores encrypted blobs.
+
+ If you are unsure as to whether this is required, answer N.
+
+config KEYS_DEBUG_PROC_KEYS
+ bool "Enable the /proc/keys file by which keys may be viewed"
+ depends on KEYS
+ help
+ This option turns on support for the /proc/keys file, through which
+ all the keys on the system that are viewable by the reading process
+ can be listed.
+
+ The only keys included in the list are those that grant View
+ permission to the reading process whether or not it possesses them.
+ Note that LSM security checks are still performed, and may further
+ filter out keys that the current process is not authorised to view.
+
+ Only key attributes are listed here; key payloads are not included in
+ the resulting table.
+
+ If you are unsure as to whether this is required, answer N.
diff --git a/security/keys/Makefile b/security/keys/Makefile
index a56f1ffdc64..dfb3a7beded 100644
--- a/security/keys/Makefile
+++ b/security/keys/Makefile
@@ -2,6 +2,9 @@
# Makefile for key management
#
+#
+# Core
+#
obj-y := \
gc.o \
key.o \
@@ -12,9 +15,14 @@ obj-y := \
request_key.o \
request_key_auth.o \
user_defined.o
-
-obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
-obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
obj-$(CONFIG_KEYS_COMPAT) += compat.o
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSCTL) += sysctl.o
+obj-$(CONFIG_PERSISTENT_KEYRINGS) += persistent.o
+
+#
+# Key types
+#
+obj-$(CONFIG_BIG_KEYS) += big_key.o
+obj-$(CONFIG_TRUSTED_KEYS) += trusted.o
+obj-$(CONFIG_ENCRYPTED_KEYS) += encrypted-keys/
diff --git a/security/keys/big_key.c b/security/keys/big_key.c
new file mode 100644
index 00000000000..8137b27d641
--- /dev/null
+++ b/security/keys/big_key.c
@@ -0,0 +1,207 @@
+/* Large capacity key type
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/file.h>
+#include <linux/shmem_fs.h>
+#include <linux/err.h>
+#include <keys/user-type.h>
+#include <keys/big_key-type.h>
+
+MODULE_LICENSE("GPL");
+
+/*
+ * If the data is under this limit, there's no point creating a shm file to
+ * hold it as the permanently resident metadata for the shmem fs will be at
+ * least as large as the data.
+ */
+#define BIG_KEY_FILE_THRESHOLD (sizeof(struct inode) + sizeof(struct dentry))
+
+/*
+ * big_key defined keys take an arbitrary string as the description and an
+ * arbitrary blob of data as the payload
+ */
+struct key_type key_type_big_key = {
+ .name = "big_key",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = big_key_instantiate,
+ .match = user_match,
+ .revoke = big_key_revoke,
+ .destroy = big_key_destroy,
+ .describe = big_key_describe,
+ .read = big_key_read,
+};
+
+/*
+ * Instantiate a big key
+ */
+int big_key_instantiate(struct key *key, struct key_preparsed_payload *prep)
+{
+ struct path *path = (struct path *)&key->payload.data2;
+ struct file *file;
+ ssize_t written;
+ size_t datalen = prep->datalen;
+ int ret;
+
+ ret = -EINVAL;
+ if (datalen <= 0 || datalen > 1024 * 1024 || !prep->data)
+ goto error;
+
+ /* Set an arbitrary quota */
+ ret = key_payload_reserve(key, 16);
+ if (ret < 0)
+ goto error;
+
+ key->type_data.x[1] = datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ /* Create a shmem file to store the data in. This will permit the data
+ * to be swapped out if needed.
+ *
+ * TODO: Encrypt the stored data with a temporary key.
+ */
+ file = shmem_kernel_file_setup("", datalen, 0);
+ if (IS_ERR(file)) {
+ ret = PTR_ERR(file);
+ goto err_quota;
+ }
+
+ written = kernel_write(file, prep->data, prep->datalen, 0);
+ if (written != datalen) {
+ ret = written;
+ if (written >= 0)
+ ret = -ENOMEM;
+ goto err_fput;
+ }
+
+ /* Pin the mount and dentry to the key so that we can open it again
+ * later
+ */
+ *path = file->f_path;
+ path_get(path);
+ fput(file);
+ } else {
+ /* Just store the data in a buffer */
+ void *data = kmalloc(datalen, GFP_KERNEL);
+ if (!data) {
+ ret = -ENOMEM;
+ goto err_quota;
+ }
+
+ key->payload.data = memcpy(data, prep->data, prep->datalen);
+ }
+ return 0;
+
+err_fput:
+ fput(file);
+err_quota:
+ key_payload_reserve(key, 0);
+error:
+ return ret;
+}
+
+/*
+ * clear the quota and truncate the backing file when a big_key is revoked
+ * - called with the key sem write-locked
+ */
+void big_key_revoke(struct key *key)
+{
+ struct path *path = (struct path *)&key->payload.data2;
+
+ /* clear the quota */
+ key_payload_reserve(key, 0);
+ if (key_is_instantiated(key) && key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD)
+ vfs_truncate(path, 0);
+}
+
+/*
+ * dispose of the data dangling from the corpse of a big_key key
+ */
+void big_key_destroy(struct key *key)
+{
+ if (key->type_data.x[1] > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data2;
+ path_put(path);
+ path->mnt = NULL;
+ path->dentry = NULL;
+ } else {
+ kfree(key->payload.data);
+ key->payload.data = NULL;
+ }
+}
+
+/*
+ * describe the big_key key
+ */
+void big_key_describe(const struct key *key, struct seq_file *m)
+{
+ unsigned long datalen = key->type_data.x[1];
+
+ seq_puts(m, key->description);
+
+ if (key_is_instantiated(key))
+ seq_printf(m, ": %lu [%s]",
+ datalen,
+ datalen > BIG_KEY_FILE_THRESHOLD ? "file" : "buff");
+}
+
+/*
+ * read the key data
+ * - the key's semaphore is read-locked
+ */
+long big_key_read(const struct key *key, char __user *buffer, size_t buflen)
+{
+ unsigned long datalen = key->type_data.x[1];
+ long ret;
+
+ if (!buffer || buflen < datalen)
+ return datalen;
+
+ if (datalen > BIG_KEY_FILE_THRESHOLD) {
+ struct path *path = (struct path *)&key->payload.data2;
+ struct file *file;
+ loff_t pos;
+
+ file = dentry_open(path, O_RDONLY, current_cred());
+ if (IS_ERR(file))
+ return PTR_ERR(file);
+
+ pos = 0;
+ ret = vfs_read(file, buffer, datalen, &pos);
+ fput(file);
+ if (ret >= 0 && ret != datalen)
+ ret = -EIO;
+ } else {
+ ret = datalen;
+ if (copy_to_user(buffer, key->payload.data, datalen) != 0)
+ ret = -EFAULT;
+ }
+
+ return ret;
+}
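big_key_read() follows the usual read convention: called without a (large
enough) buffer it just reports the payload size. A user-space sketch of the
resulting two-call pattern, assuming libkeyutils and a key ID passed on the
command line; it is illustrative only.

/* Sketch: size the buffer with a first keyctl_read() call, then fetch. */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
        key_serial_t key = argc > 1 ? atoi(argv[1]) : 0;
        long len;
        char *buf;

        len = keyctl_read(key, NULL, 0);        /* first call: length only */
        if (len < 0) {
                perror("keyctl_read(size)");
                return 1;
        }
        buf = malloc(len);
        if (!buf)
                return 1;
        if (keyctl_read(key, buf, len) < 0) {   /* second call: the payload */
                perror("keyctl_read(data)");
                free(buf);
                return 1;
        }
        printf("read %ld bytes\n", len);
        free(buf);
        return 0;
}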
+
+/*
+ * Module stuff
+ */
+static int __init big_key_init(void)
+{
+ return register_key_type(&key_type_big_key);
+}
+
+static void __exit big_key_cleanup(void)
+{
+ unregister_key_type(&key_type_big_key);
+}
+
+module_init(big_key_init);
+module_exit(big_key_cleanup);
diff --git a/security/keys/compat.c b/security/keys/compat.c
index 4c48e13448f..347896548ad 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -24,7 +24,7 @@
*
* If successful, 0 will be returned.
*/
-long compat_keyctl_instantiate_key_iov(
+static long compat_keyctl_instantiate_key_iov(
key_serial_t id,
const struct compat_iovec __user *_payload_iov,
unsigned ioc,
@@ -33,19 +33,19 @@ long compat_keyctl_instantiate_key_iov(
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
long ret;
- if (_payload_iov == 0 || ioc == 0)
+ if (!_payload_iov || !ioc)
goto no_payload;
ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
ARRAY_SIZE(iovstack),
- iovstack, &iov, 1);
+ iovstack, &iov);
if (ret < 0)
- return ret;
+ goto err;
if (ret == 0)
goto no_payload_free;
ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-
+err:
if (iov != iovstack)
kfree(iov);
return ret;
@@ -65,8 +65,8 @@ no_payload:
* taking a 32-bit syscall are zero. If you can, you should call sys_keyctl()
* directly.
*/
-asmlinkage long compat_sys_keyctl(u32 option,
- u32 arg2, u32 arg3, u32 arg4, u32 arg5)
+COMPAT_SYSCALL_DEFINE5(keyctl, u32, option,
+ u32, arg2, u32, arg3, u32, arg4, u32, arg5)
{
switch (option) {
case KEYCTL_GET_KEYRING_ID:
@@ -135,6 +135,12 @@ asmlinkage long compat_sys_keyctl(u32 option,
return compat_keyctl_instantiate_key_iov(
arg2, compat_ptr(arg3), arg4, arg5);
+ case KEYCTL_INVALIDATE:
+ return keyctl_invalidate_key(arg2);
+
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent(arg2, arg3);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/encrypted-keys/encrypted.c b/security/keys/encrypted-keys/encrypted.c
index 2d1bb8af769..5fe443d120a 100644
--- a/security/keys/encrypted-keys/encrypted.c
+++ b/security/keys/encrypted-keys/encrypted.c
@@ -609,7 +609,7 @@ static struct encrypted_key_payload *encrypted_key_alloc(struct key *key,
long dlen;
int ret;
- ret = strict_strtol(datalen, 10, &dlen);
+ ret = kstrtol(datalen, 10, &dlen);
if (ret < 0 || dlen < MIN_DATA_SIZE || dlen > MAX_DATA_SIZE)
return ERR_PTR(-EINVAL);
@@ -773,8 +773,8 @@ static int encrypted_init(struct encrypted_key_payload *epayload,
*
* On success, return 0. Otherwise return errno.
*/
-static int encrypted_instantiate(struct key *key, const void *data,
- size_t datalen)
+static int encrypted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
{
struct encrypted_key_payload *epayload = NULL;
char *datablob = NULL;
@@ -782,16 +782,17 @@ static int encrypted_instantiate(struct key *key, const void *data,
char *master_desc = NULL;
char *decrypted_datalen = NULL;
char *hex_encoded_iv = NULL;
+ size_t datalen = prep->datalen;
int ret;
- if (datalen <= 0 || datalen > 32767 || !data)
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
datablob = kmalloc(datalen + 1, GFP_KERNEL);
if (!datablob)
return -ENOMEM;
datablob[datalen] = 0;
- memcpy(datablob, data, datalen);
+ memcpy(datablob, prep->data, datalen);
ret = datablob_parse(datablob, &format, &master_desc,
&decrypted_datalen, &hex_encoded_iv);
if (ret < 0)
@@ -834,16 +835,17 @@ static void encrypted_rcu_free(struct rcu_head *rcu)
*
* On success, return 0. Otherwise return errno.
*/
-static int encrypted_update(struct key *key, const void *data, size_t datalen)
+static int encrypted_update(struct key *key, struct key_preparsed_payload *prep)
{
struct encrypted_key_payload *epayload = key->payload.data;
struct encrypted_key_payload *new_epayload;
char *buf;
char *new_master_desc = NULL;
const char *format = NULL;
+ size_t datalen = prep->datalen;
int ret = 0;
- if (datalen <= 0 || datalen > 32767 || !data)
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
buf = kmalloc(datalen + 1, GFP_KERNEL);
@@ -851,7 +853,7 @@ static int encrypted_update(struct key *key, const void *data, size_t datalen)
return -ENOMEM;
buf[datalen] = 0;
- memcpy(buf, data, datalen);
+ memcpy(buf, prep->data, datalen);
ret = datablob_parse(buf, &format, &new_master_desc, NULL, NULL);
if (ret < 0)
goto out;
diff --git a/security/keys/gc.c b/security/keys/gc.c
index a42b45531aa..d3222b6d7d5 100644
--- a/security/keys/gc.c
+++ b/security/keys/gc.c
@@ -62,7 +62,7 @@ void key_schedule_gc(time_t gc_at)
if (gc_at <= now || test_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags)) {
kdebug("IMMEDIATE");
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
} else if (gc_at < key_gc_next_run) {
kdebug("DEFERRED");
key_gc_next_run = gc_at;
@@ -72,6 +72,15 @@ void key_schedule_gc(time_t gc_at)
}
/*
+ * Schedule a dead links collection run.
+ */
+void key_schedule_gc_links(void)
+{
+ set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
+ schedule_work(&key_gc_work);
+}
+
+/*
* Some key's cleanup time was met after it expired, so we need to get the
* reaper to go through a cycle finding expired keys.
*/
@@ -79,8 +88,7 @@ static void key_gc_timer_func(unsigned long data)
{
kenter("");
key_gc_next_run = LONG_MAX;
- set_bit(KEY_GC_KEY_EXPIRED, &key_gc_flags);
- queue_work(system_nrt_wq, &key_gc_work);
+ key_schedule_gc_links();
}
/*
@@ -112,7 +120,7 @@ void key_gc_keytype(struct key_type *ktype)
set_bit(KEY_GC_REAP_KEYTYPE, &key_gc_flags);
kdebug("schedule");
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
kdebug("sleep");
wait_on_bit(&key_gc_flags, KEY_GC_REAPING_KEYTYPE, key_gc_wait_bit,
@@ -123,83 +131,45 @@ void key_gc_keytype(struct key_type *ktype)
}
/*
- * Garbage collect pointers from a keyring.
- *
- * Not called with any locks held. The keyring's key struct will not be
- * deallocated under us as only our caller may deallocate it.
- */
-static void key_gc_keyring(struct key *keyring, time_t limit)
-{
- struct keyring_list *klist;
- struct key *key;
- int loop;
-
- kenter("%x", key_serial(keyring));
-
- if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- goto dont_gc;
-
- /* scan the keyring looking for dead keys */
- rcu_read_lock();
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (!klist)
- goto unlock_dont_gc;
-
- loop = klist->nkeys;
- smp_rmb();
- for (loop--; loop >= 0; loop--) {
- key = klist->keys[loop];
- if (test_bit(KEY_FLAG_DEAD, &key->flags) ||
- (key->expiry > 0 && key->expiry <= limit))
- goto do_gc;
- }
-
-unlock_dont_gc:
- rcu_read_unlock();
-dont_gc:
- kleave(" [no gc]");
- return;
-
-do_gc:
- rcu_read_unlock();
-
- keyring_gc(keyring, limit);
- kleave(" [gc]");
-}
-
-/*
- * Garbage collect an unreferenced, detached key
+ * Garbage collect a list of unreferenced, detached keys
*/
-static noinline void key_gc_unused_key(struct key *key)
+static noinline void key_gc_unused_keys(struct list_head *keys)
{
- key_check(key);
-
- security_key_free(key);
-
- /* deal with the user's key tracking and quota */
- if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- spin_lock(&key->user->lock);
- key->user->qnkeys--;
- key->user->qnbytes -= key->quotalen;
- spin_unlock(&key->user->lock);
- }
+ while (!list_empty(keys)) {
+ struct key *key =
+ list_entry(keys->next, struct key, graveyard_link);
+ list_del(&key->graveyard_link);
+
+ kdebug("- %u", key->serial);
+ key_check(key);
+
+ security_key_free(key);
+
+ /* deal with the user's key tracking and quota */
+ if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
+ spin_lock(&key->user->lock);
+ key->user->qnkeys--;
+ key->user->qnbytes -= key->quotalen;
+ spin_unlock(&key->user->lock);
+ }
- atomic_dec(&key->user->nkeys);
- if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
- atomic_dec(&key->user->nikeys);
+ atomic_dec(&key->user->nkeys);
+ if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
+ atomic_dec(&key->user->nikeys);
- key_user_put(key->user);
+ key_user_put(key->user);
- /* now throw away the key memory */
- if (key->type->destroy)
- key->type->destroy(key);
+ /* now throw away the key memory */
+ if (key->type->destroy)
+ key->type->destroy(key);
- kfree(key->description);
+ kfree(key->description);
#ifdef KEY_DEBUGGING
- key->magic = KEY_DEBUG_MAGIC_X;
+ key->magic = KEY_DEBUG_MAGIC_X;
#endif
- kmem_cache_free(key_jar, key);
+ kmem_cache_free(key_jar, key);
+ }
}
/*
@@ -211,6 +181,7 @@ static noinline void key_gc_unused_key(struct key *key)
*/
static void key_garbage_collector(struct work_struct *work)
{
+ static LIST_HEAD(graveyard);
static u8 gc_state; /* Internal persistent state */
#define KEY_GC_REAP_AGAIN 0x01 /* - Need another cycle */
#define KEY_GC_REAPING_LINKS 0x02 /* - We need to reap links */
@@ -316,15 +287,22 @@ maybe_resched:
key_schedule_gc(new_timer);
}
- if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2)) {
- /* Make sure everyone revalidates their keys if we marked a
- * bunch as being dead and make sure all keyring ex-payloads
- * are destroyed.
+ if (unlikely(gc_state & KEY_GC_REAPING_DEAD_2) ||
+ !list_empty(&graveyard)) {
+ /* Make sure that all pending keyring payload destructions are
+ * fulfilled and that people aren't now looking at dead or
+ * dying keys that they don't have a reference upon or a link
+ * to.
*/
- kdebug("dead sync");
+ kdebug("gc sync");
synchronize_rcu();
}
+ if (!list_empty(&graveyard)) {
+ kdebug("gc keys");
+ key_gc_unused_keys(&graveyard);
+ }
+
if (unlikely(gc_state & (KEY_GC_REAPING_DEAD_1 |
KEY_GC_REAPING_DEAD_2))) {
if (!(gc_state & KEY_GC_FOUND_DEAD_KEY)) {
@@ -347,7 +325,7 @@ maybe_resched:
}
if (gc_state & KEY_GC_REAP_AGAIN)
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
kleave(" [end %x]", gc_state);
return;
@@ -359,7 +337,7 @@ found_unreferenced_key:
rb_erase(&key->serial_node, &key_serial_tree);
spin_unlock(&key_serial_lock);
- key_gc_unused_key(key);
+ list_add_tail(&key->graveyard_link, &graveyard);
gc_state |= KEY_GC_REAP_AGAIN;
goto maybe_resched;
@@ -370,8 +348,7 @@ found_unreferenced_key:
*/
found_keyring:
spin_unlock(&key_serial_lock);
- kdebug("scan keyring %d", key->serial);
- key_gc_keyring(key, limit);
+ keyring_gc(key, limit);
goto maybe_resched;
/* We found a dead key that is still referenced. Reset its type and
diff --git a/security/keys/internal.h b/security/keys/internal.h
index 65647f82558..5f20da01fd8 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -14,6 +14,9 @@
#include <linux/sched.h>
#include <linux/key-type.h>
+#include <linux/task_work.h>
+
+struct iovec;
#ifdef __KDEBUG
#define kenter(FMT, ...) \
@@ -51,8 +54,7 @@ struct key_user {
atomic_t usage; /* for accessing qnkeys & qnbytes */
atomic_t nkeys; /* number of keys */
atomic_t nikeys; /* number of instantiated keys */
- uid_t uid;
- struct user_namespace *user_ns;
+ kuid_t uid;
int qnkeys; /* number of keys allocated to this user */
int qnbytes; /* number of bytes allocated to this user */
};
@@ -61,8 +63,7 @@ extern struct rb_root key_user_tree;
extern spinlock_t key_user_lock;
extern struct key_user root_key_user;
-extern struct key_user *key_user_lookup(uid_t uid,
- struct user_namespace *user_ns);
+extern struct key_user *key_user_lookup(kuid_t uid);
extern void key_user_put(struct key_user *user);
/*
@@ -88,42 +89,53 @@ extern struct key_type *key_type_lookup(const char *type);
extern void key_type_put(struct key_type *ktype);
extern int __key_link_begin(struct key *keyring,
- const struct key_type *type,
- const char *description,
- unsigned long *_prealloc);
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit);
extern int __key_link_check_live_key(struct key *keyring, struct key *key);
-extern void __key_link(struct key *keyring, struct key *key,
- unsigned long *_prealloc);
+extern void __key_link(struct key *key, struct assoc_array_edit **_edit);
extern void __key_link_end(struct key *keyring,
- struct key_type *type,
- unsigned long prealloc);
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit);
-extern key_ref_t __keyring_search_one(key_ref_t keyring_ref,
- const struct key_type *type,
- const char *description,
- key_perm_t perm);
+extern key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key);
extern struct key *keyring_search_instkey(struct key *keyring,
key_serial_t target_id);
+extern int iterate_over_keyring(const struct key *keyring,
+ int (*func)(const struct key *key, void *data),
+ void *data);
+
typedef int (*key_match_func_t)(const struct key *, const void *);
+struct keyring_search_context {
+ struct keyring_index_key index_key;
+ const struct cred *cred;
+ key_match_func_t match;
+ const void *match_data;
+ unsigned flags;
+#define KEYRING_SEARCH_LOOKUP_TYPE 0x0001 /* [as type->def_lookup_type] */
+#define KEYRING_SEARCH_NO_STATE_CHECK 0x0002 /* Skip state checks */
+#define KEYRING_SEARCH_DO_STATE_CHECK 0x0004 /* Override NO_STATE_CHECK */
+#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0008 /* Don't update times */
+#define KEYRING_SEARCH_NO_CHECK_PERM 0x0010 /* Don't check permissions */
+#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0020 /* Give an error on excessive depth */
+
+ int (*iterator)(const void *object, void *iterator_data);
+
+ /* Internal stuff */
+ int skipped_ret;
+ bool possessed;
+ key_ref_t result;
+ struct timespec now;
+};
+
extern key_ref_t keyring_search_aux(key_ref_t keyring_ref,
- const struct cred *cred,
- struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check);
-
-extern key_ref_t search_my_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check,
- const struct cred *cred);
-extern key_ref_t search_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- const struct cred *cred);
+ struct keyring_search_context *ctx);
+
+extern key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx);
+extern key_ref_t search_process_keyrings(struct keyring_search_context *ctx);
extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
@@ -148,11 +160,13 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
#define KEY_LOOKUP_FOR_UNLINK 0x04
extern long join_session_keyring(const char *name);
+extern void key_change_session_keyring(struct callback_head *twork);
extern struct work_struct key_gc_work;
extern unsigned key_gc_delay;
extern void keyring_gc(struct key *keyring, time_t limit);
-extern void key_schedule_gc(time_t expiry_at);
+extern void key_schedule_gc(time_t gc_at);
+extern void key_schedule_gc_links(void);
extern void key_gc_keytype(struct key_type *ktype);
extern int key_task_permission(const key_ref_t key_ref,
@@ -162,20 +176,11 @@ extern int key_task_permission(const key_ref_t key_ref,
/*
* Check to see whether permission is granted to use a key in the desired way.
*/
-static inline int key_permission(const key_ref_t key_ref, key_perm_t perm)
+static inline int key_permission(const key_ref_t key_ref, unsigned perm)
{
return key_task_permission(key_ref, current_cred(), perm);
}
-/* required permissions */
-#define KEY_VIEW 0x01 /* require permission to view attributes */
-#define KEY_READ 0x02 /* require permission to read content */
-#define KEY_WRITE 0x04 /* require permission to update / modify */
-#define KEY_SEARCH 0x08 /* require permission to search (keyring) or find (key) */
-#define KEY_LINK 0x10 /* require permission to link */
-#define KEY_SETATTR 0x20 /* require permission to change attributes */
-#define KEY_ALL 0x3f /* all the above permissions */
-
/*
* Authorisation record for request_key().
*/
@@ -197,6 +202,17 @@ extern struct key *request_key_auth_new(struct key *target,
extern struct key *key_get_instantiation_authkey(key_serial_t target_id);
/*
+ * Determine whether a key is dead.
+ */
+static inline bool key_is_dead(const struct key *key, time_t limit)
+{
+ return
+ key->flags & ((1 << KEY_FLAG_DEAD) |
+ (1 << KEY_FLAG_INVALIDATED)) ||
+ (key->expiry > 0 && key->expiry <= limit);
+}
+
+/*
* keyctl() functions
*/
extern long keyctl_get_keyring_ID(key_serial_t, int);
@@ -225,10 +241,20 @@ extern long keyctl_reject_key(key_serial_t, unsigned, unsigned, key_serial_t);
extern long keyctl_instantiate_key_iov(key_serial_t,
const struct iovec __user *,
unsigned, key_serial_t);
+extern long keyctl_invalidate_key(key_serial_t);
extern long keyctl_instantiate_key_common(key_serial_t,
- const struct iovec __user *,
+ const struct iovec *,
unsigned, size_t, key_serial_t);
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+extern long keyctl_get_persistent(uid_t, key_serial_t);
+extern unsigned persistent_keyring_expiry;
+#else
+static inline long keyctl_get_persistent(uid_t uid, key_serial_t destring)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/*
* Debugging key validation
diff --git a/security/keys/key.c b/security/keys/key.c
index 7ada8019be1..2048a110e7f 100644
--- a/security/keys/key.c
+++ b/security/keys/key.c
@@ -18,7 +18,6 @@
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
-#include <linux/user_namespace.h>
#include "internal.h"
struct kmem_cache *key_jar;
@@ -52,7 +51,7 @@ void __key_check(const struct key *key)
* Get the key quota record for a user, allocating a new record if one doesn't
* already exist.
*/
-struct key_user *key_user_lookup(uid_t uid, struct user_namespace *user_ns)
+struct key_user *key_user_lookup(kuid_t uid)
{
struct key_user *candidate = NULL, *user;
struct rb_node *parent = NULL;
@@ -67,13 +66,9 @@ try_again:
parent = *p;
user = rb_entry(parent, struct key_user, node);
- if (uid < user->uid)
+ if (uid_lt(uid, user->uid))
p = &(*p)->rb_left;
- else if (uid > user->uid)
- p = &(*p)->rb_right;
- else if (user_ns < user->user_ns)
- p = &(*p)->rb_left;
- else if (user_ns > user->user_ns)
+ else if (uid_gt(uid, user->uid))
p = &(*p)->rb_right;
else
goto found;
@@ -102,7 +97,6 @@ try_again:
atomic_set(&candidate->nkeys, 0);
atomic_set(&candidate->nikeys, 0);
candidate->uid = uid;
- candidate->user_ns = get_user_ns(user_ns);
candidate->qnkeys = 0;
candidate->qnbytes = 0;
spin_lock_init(&candidate->lock);
@@ -131,7 +125,6 @@ void key_user_put(struct key_user *user)
if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
rb_erase(&user->node, &key_user_tree);
spin_unlock(&key_user_lock);
- put_user_ns(user->user_ns);
kfree(user);
}
@@ -229,7 +222,7 @@ serial_exists:
* key_alloc() calls don't race with module unloading.
*/
struct key *key_alloc(struct key_type *type, const char *desc,
- uid_t uid, gid_t gid, const struct cred *cred,
+ kuid_t uid, kgid_t gid, const struct cred *cred,
key_perm_t perm, unsigned long flags)
{
struct key_user *user = NULL;
@@ -249,20 +242,20 @@ struct key *key_alloc(struct key_type *type, const char *desc,
}
}
- desclen = strlen(desc) + 1;
- quotalen = desclen + type->def_datalen;
+ desclen = strlen(desc);
+ quotalen = desclen + 1 + type->def_datalen;
/* get hold of the key tracking for this user */
- user = key_user_lookup(uid, cred->user->user_ns);
+ user = key_user_lookup(uid);
if (!user)
goto no_memory_1;
/* check that the user's quota permits allocation of another key and
* its description */
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
- unsigned maxkeys = (uid == 0) ?
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (uid == 0) ?
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&user->lock);
@@ -279,12 +272,13 @@ struct key *key_alloc(struct key_type *type, const char *desc,
}
/* allocate and initialise the key and its description */
- key = kmem_cache_alloc(key_jar, GFP_KERNEL);
+ key = kmem_cache_zalloc(key_jar, GFP_KERNEL);
if (!key)
goto no_memory_2;
if (desc) {
- key->description = kmemdup(desc, desclen, GFP_KERNEL);
+ key->index_key.desc_len = desclen;
+ key->index_key.description = kmemdup(desc, desclen + 1, GFP_KERNEL);
if (!key->description)
goto no_memory_3;
}
@@ -292,22 +286,18 @@ struct key *key_alloc(struct key_type *type, const char *desc,
atomic_set(&key->usage, 1);
init_rwsem(&key->sem);
lockdep_set_class(&key->sem, &type->lock_class);
- key->type = type;
+ key->index_key.type = type;
key->user = user;
key->quotalen = quotalen;
key->datalen = type->def_datalen;
key->uid = uid;
key->gid = gid;
key->perm = perm;
- key->flags = 0;
- key->expiry = 0;
- key->payload.data = NULL;
- key->security = NULL;
if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
key->flags |= 1 << KEY_FLAG_IN_QUOTA;
-
- memset(&key->type_data, 0, sizeof(key->type_data));
+ if (flags & KEY_ALLOC_TRUSTED)
+ key->flags |= 1 << KEY_FLAG_TRUSTED;
#ifdef KEY_DEBUGGING
key->magic = KEY_DEBUG_MAGIC;
@@ -380,7 +370,7 @@ int key_payload_reserve(struct key *key, size_t datalen)
/* contemplate the quota adjustment */
if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- unsigned maxbytes = (key->user->uid == 0) ?
+ unsigned maxbytes = uid_eq(key->user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&key->user->lock);
@@ -412,11 +402,10 @@ EXPORT_SYMBOL(key_payload_reserve);
* key_construction_mutex.
*/
static int __key_instantiate_and_link(struct key *key,
- const void *data,
- size_t datalen,
+ struct key_preparsed_payload *prep,
struct key *keyring,
struct key *authkey,
- unsigned long *_prealloc)
+ struct assoc_array_edit **_edit)
{
int ret, awaken;
@@ -431,7 +420,7 @@ static int __key_instantiate_and_link(struct key *key,
/* can't instantiate twice */
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* instantiate the key */
- ret = key->type->instantiate(key, data, datalen);
+ ret = key->type->instantiate(key, prep);
if (ret == 0) {
/* mark the key as being instantiated */
@@ -443,7 +432,7 @@ static int __key_instantiate_and_link(struct key *key,
/* and link it into the destination keyring */
if (keyring)
- __key_link(keyring, key, _prealloc);
+ __key_link(key, _edit);
/* disable the authorisation key */
if (authkey)
@@ -482,22 +471,35 @@ int key_instantiate_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
- unsigned long prealloc;
+ struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit;
int ret;
+ memset(&prep, 0, sizeof(prep));
+ prep.data = data;
+ prep.datalen = datalen;
+ prep.quotalen = key->type->def_datalen;
+ if (key->type->preparse) {
+ ret = key->type->preparse(&prep);
+ if (ret < 0)
+ goto error;
+ }
+
if (keyring) {
- ret = __key_link_begin(keyring, key->type, key->description,
- &prealloc);
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret < 0)
- return ret;
+ goto error_free_preparse;
}
- ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey,
- &prealloc);
+ ret = __key_instantiate_and_link(key, &prep, keyring, authkey, &edit);
if (keyring)
- __key_link_end(keyring, key->type, prealloc);
+ __key_link_end(keyring, &key->index_key, edit);
+error_free_preparse:
+ if (key->type->preparse)
+ key->type->free_preparse(&prep);
+error:
return ret;
}
@@ -530,7 +532,7 @@ int key_reject_and_link(struct key *key,
struct key *keyring,
struct key *authkey)
{
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
struct timespec now;
int ret, awaken, link_ret = 0;
@@ -541,8 +543,7 @@ int key_reject_and_link(struct key *key,
ret = -EBUSY;
if (keyring)
- link_ret = __key_link_begin(keyring, key->type,
- key->description, &prealloc);
+ link_ret = __key_link_begin(keyring, &key->index_key, &edit);
mutex_lock(&key_construction_mutex);
@@ -550,9 +551,10 @@ int key_reject_and_link(struct key *key,
if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
/* mark the key as being negatively instantiated */
atomic_inc(&key->user->nikeys);
+ key->type_data.reject_error = -error;
+ smp_wmb();
set_bit(KEY_FLAG_NEGATIVE, &key->flags);
set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
- key->type_data.reject_error = -error;
now = current_kernel_time();
key->expiry = now.tv_sec + timeout;
key_schedule_gc(key->expiry + key_gc_delay);
@@ -564,7 +566,7 @@ int key_reject_and_link(struct key *key,
/* and link it into the destination keyring */
if (keyring && link_ret == 0)
- __key_link(keyring, key, &prealloc);
+ __key_link(key, &edit);
/* disable the authorisation key */
if (authkey)
@@ -574,7 +576,7 @@ int key_reject_and_link(struct key *key,
mutex_unlock(&key_construction_mutex);
if (keyring)
- __key_link_end(keyring, key->type, prealloc);
+ __key_link_end(keyring, &key->index_key, edit);
/* wake up anyone waiting for a key to be constructed */
if (awaken)
@@ -598,7 +600,7 @@ void key_put(struct key *key)
key_check(key);
if (atomic_dec_and_test(&key->usage))
- queue_work(system_nrt_wq, &key_gc_work);
+ schedule_work(&key_gc_work);
}
}
EXPORT_SYMBOL(key_put);
@@ -638,7 +640,7 @@ found:
/* this races with key_put(), but that doesn't matter since key_put()
* doesn't actually change the key
*/
- atomic_inc(&key->usage);
+ __key_get(key);
error:
spin_unlock(&key_serial_lock);
@@ -671,6 +673,26 @@ found_kernel_type:
return ktype;
}
+void key_set_timeout(struct key *key, unsigned timeout)
+{
+ struct timespec now;
+ time_t expiry = 0;
+
+ /* make the changes with the locks held to prevent races */
+ down_write(&key->sem);
+
+ if (timeout > 0) {
+ now = current_kernel_time();
+ expiry = now.tv_sec + timeout;
+ }
+
+ key->expiry = expiry;
+ key_schedule_gc(key->expiry + key_gc_delay);
+
+ up_write(&key->sem);
+}
+EXPORT_SYMBOL_GPL(key_set_timeout);
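key_set_timeout() is now exported, so in-kernel key types can expire keys they
instantiate rather than relying on KEYCTL_SET_TIMEOUT from user space. A
hypothetical sketch of such a type's ->instantiate() hook (the "example" type
is made up and not part of this patch):

/* Hypothetical key type giving its keys a 60-second lifetime. */
#include <linux/key-type.h>
#include <linux/key.h>

static int example_instantiate(struct key *key,
                               struct key_preparsed_payload *prep)
{
        int ret;

        /* accept the payload as-is for the sake of the example */
        ret = key_payload_reserve(key, prep->datalen);
        if (ret < 0)
                return ret;

        key_set_timeout(key, 60);       /* expire one minute after creation */
        return 0;
}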
+
/*
* Unlock a key type locked by key_type_lookup().
*/
@@ -686,13 +708,13 @@ void key_type_put(struct key_type *ktype)
* if we get an error.
*/
static inline key_ref_t __key_update(key_ref_t key_ref,
- const void *payload, size_t plen)
+ struct key_preparsed_payload *prep)
{
struct key *key = key_ref_to_ptr(key_ref);
int ret;
/* need write permission on the key to update it */
- ret = key_permission(key_ref, KEY_WRITE);
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
goto error;
@@ -702,7 +724,7 @@ static inline key_ref_t __key_update(key_ref_t key_ref,
down_write(&key->sem);
- ret = key->type->update(key, payload, plen);
+ ret = key->type->update(key, prep);
if (ret == 0)
/* updating a negative key instantiates it */
clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
@@ -753,24 +775,28 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_perm_t perm,
unsigned long flags)
{
- unsigned long prealloc;
+ struct keyring_index_key index_key = {
+ .description = description,
+ };
+ struct key_preparsed_payload prep;
+ struct assoc_array_edit *edit;
const struct cred *cred = current_cred();
- struct key_type *ktype;
struct key *keyring, *key = NULL;
key_ref_t key_ref;
int ret;
/* look up the key type to see if it's one of the registered kernel
* types */
- ktype = key_type_lookup(type);
- if (IS_ERR(ktype)) {
+ index_key.type = key_type_lookup(type);
+ if (IS_ERR(index_key.type)) {
key_ref = ERR_PTR(-ENODEV);
goto error;
}
key_ref = ERR_PTR(-EINVAL);
- if (!ktype->match || !ktype->instantiate)
- goto error_2;
+ if (!index_key.type->match || !index_key.type->instantiate ||
+ (!index_key.description && !index_key.type->preparse))
+ goto error_put_type;
keyring = key_ref_to_ptr(keyring_ref);
@@ -778,78 +804,105 @@ key_ref_t key_create_or_update(key_ref_t keyring_ref,
key_ref = ERR_PTR(-ENOTDIR);
if (keyring->type != &key_type_keyring)
- goto error_2;
+ goto error_put_type;
+
+ memset(&prep, 0, sizeof(prep));
+ prep.data = payload;
+ prep.datalen = plen;
+ prep.quotalen = index_key.type->def_datalen;
+ prep.trusted = flags & KEY_ALLOC_TRUSTED;
+ if (index_key.type->preparse) {
+ ret = index_key.type->preparse(&prep);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_put_type;
+ }
+ if (!index_key.description)
+ index_key.description = prep.description;
+ key_ref = ERR_PTR(-EINVAL);
+ if (!index_key.description)
+ goto error_free_prep;
+ }
+ index_key.desc_len = strlen(index_key.description);
- ret = __key_link_begin(keyring, ktype, description, &prealloc);
- if (ret < 0)
- goto error_2;
+ key_ref = ERR_PTR(-EPERM);
+ if (!prep.trusted && test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags))
+ goto error_free_prep;
+ flags |= prep.trusted ? KEY_ALLOC_TRUSTED : 0;
+
+ ret = __key_link_begin(keyring, &index_key, &edit);
+ if (ret < 0) {
+ key_ref = ERR_PTR(ret);
+ goto error_free_prep;
+ }
/* if we're going to allocate a new key, we're going to have
* to modify the keyring */
- ret = key_permission(keyring_ref, KEY_WRITE);
+ ret = key_permission(keyring_ref, KEY_NEED_WRITE);
if (ret < 0) {
key_ref = ERR_PTR(ret);
- goto error_3;
+ goto error_link_end;
}
/* if it's possible to update this type of key, search for an existing
* key of the same type and description in the destination keyring and
* update that instead if possible
*/
- if (ktype->update) {
- key_ref = __keyring_search_one(keyring_ref, ktype, description,
- 0);
- if (!IS_ERR(key_ref))
+ if (index_key.type->update) {
+ key_ref = find_key_to_update(keyring_ref, &index_key);
+ if (key_ref)
goto found_matching_key;
}
/* if the client doesn't provide, decide on the permissions we want */
if (perm == KEY_PERM_UNDEF) {
perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
- perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;
+ perm |= KEY_USR_VIEW;
- if (ktype->read)
- perm |= KEY_POS_READ | KEY_USR_READ;
+ if (index_key.type->read)
+ perm |= KEY_POS_READ;
- if (ktype == &key_type_keyring || ktype->update)
- perm |= KEY_USR_WRITE;
+ if (index_key.type == &key_type_keyring ||
+ index_key.type->update)
+ perm |= KEY_POS_WRITE;
}
/* allocate a new key */
- key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
- perm, flags);
+ key = key_alloc(index_key.type, index_key.description,
+ cred->fsuid, cred->fsgid, cred, perm, flags);
if (IS_ERR(key)) {
key_ref = ERR_CAST(key);
- goto error_3;
+ goto error_link_end;
}
/* instantiate it and link it into the target keyring */
- ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL,
- &prealloc);
+ ret = __key_instantiate_and_link(key, &prep, keyring, NULL, &edit);
if (ret < 0) {
key_put(key);
key_ref = ERR_PTR(ret);
- goto error_3;
+ goto error_link_end;
}
key_ref = make_key_ref(key, is_key_possessed(keyring_ref));
- error_3:
- __key_link_end(keyring, ktype, prealloc);
- error_2:
- key_type_put(ktype);
- error:
+error_link_end:
+ __key_link_end(keyring, &index_key, edit);
+error_free_prep:
+ if (index_key.type->preparse)
+ index_key.type->free_preparse(&prep);
+error_put_type:
+ key_type_put(index_key.type);
+error:
return key_ref;
found_matching_key:
/* we found a matching key, so we're going to try to update it
* - we can drop the locks first as we have the key pinned
*/
- __key_link_end(keyring, ktype, prealloc);
- key_type_put(ktype);
+ __key_link_end(keyring, &index_key, edit);
- key_ref = __key_update(key_ref, payload, plen);
- goto error;
+ key_ref = __key_update(key_ref, &prep);
+ goto error_free_prep;
}
EXPORT_SYMBOL(key_create_or_update);
@@ -868,30 +921,44 @@ EXPORT_SYMBOL(key_create_or_update);
*/
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
+ struct key_preparsed_payload prep;
struct key *key = key_ref_to_ptr(key_ref);
int ret;
key_check(key);
/* the key must be writable */
- ret = key_permission(key_ref, KEY_WRITE);
+ ret = key_permission(key_ref, KEY_NEED_WRITE);
if (ret < 0)
goto error;
/* attempt to update it if supported */
ret = -EOPNOTSUPP;
- if (key->type->update) {
- down_write(&key->sem);
-
- ret = key->type->update(key, payload, plen);
- if (ret == 0)
- /* updating a negative key instantiates it */
- clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+ if (!key->type->update)
+ goto error;
- up_write(&key->sem);
+ memset(&prep, 0, sizeof(prep));
+ prep.data = payload;
+ prep.datalen = plen;
+ prep.quotalen = key->type->def_datalen;
+ if (key->type->preparse) {
+ ret = key->type->preparse(&prep);
+ if (ret < 0)
+ goto error;
}
- error:
+ down_write(&key->sem);
+
+ ret = key->type->update(key, &prep);
+ if (ret == 0)
+ /* updating a negative key instantiates it */
+ clear_bit(KEY_FLAG_NEGATIVE, &key->flags);
+
+ up_write(&key->sem);
+
+ if (key->type->preparse)
+ key->type->free_preparse(&prep);
+error:
return ret;
}
EXPORT_SYMBOL(key_update);
@@ -935,6 +1002,28 @@ void key_revoke(struct key *key)
EXPORT_SYMBOL(key_revoke);
/**
+ * key_invalidate - Invalidate a key.
+ * @key: The key to be invalidated.
+ *
+ * Mark a key as being invalidated and have it cleaned up immediately. The key
+ * is ignored by all searches and other operations from this point.
+ */
+void key_invalidate(struct key *key)
+{
+ kenter("%d", key_serial(key));
+
+ key_check(key);
+
+ if (!test_bit(KEY_FLAG_INVALIDATED, &key->flags)) {
+ down_write_nested(&key->sem, 1);
+ if (!test_and_set_bit(KEY_FLAG_INVALIDATED, &key->flags))
+ key_schedule_gc_links();
+ up_write(&key->sem);
+ }
+}
+EXPORT_SYMBOL(key_invalidate);
+
+/**
* register_key_type - Register a type of key.
* @ktype: The new key type.
*
@@ -960,6 +1049,8 @@ int register_key_type(struct key_type *ktype)
/* store the type */
list_add(&ktype->link, &key_types_list);
+
+ pr_notice("Key type %s registered\n", ktype->name);
ret = 0;
out:
@@ -982,6 +1073,7 @@ void unregister_key_type(struct key_type *ktype)
list_del_init(&ktype->link);
downgrade_write(&key_types_sem);
key_gc_keytype(ktype);
+ pr_notice("Key type %s unregistered\n", ktype->name);
up_read(&key_types_sem);
}
EXPORT_SYMBOL(unregister_key_type);
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 0b3f5d72af1..cd5bd0cef25 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -14,6 +14,7 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/syscalls.h>
+#include <linux/key.h>
#include <linux/keyctl.h>
#include <linux/fs.h>
#include <linux/capability.h>
@@ -21,6 +22,7 @@
#include <linux/err.h>
#include <linux/vmalloc.h>
#include <linux/security.h>
+#include <linux/uio.h>
#include <asm/uaccess.h>
#include "internal.h"
@@ -45,6 +47,9 @@ static int key_get_type_from_user(char *type,
* Extract the description of a new key from userspace and either add it as a
* new key to the specified keyring or update a matching key in that keyring.
*
+ * If the description is NULL or an empty string, the key type is asked to
+ * generate one from the payload.
+ *
* The keyring must be writable so that we can attach the key to it.
*
* If successful, the new key's serial number is returned, otherwise an error
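A hedged user-space sketch of the NULL-description case, using the asymmetric
key type purely as an example of a type whose preparser can derive a
description from the payload; the certificate buffer is supplied by the caller
and the helper name is made up.

/* Sketch: add a DER-encoded certificate, letting the type name the key. */
#include <keyutils.h>
#include <stddef.h>
#include <stdio.h>

static key_serial_t add_cert(const void *cert_der, size_t cert_len)
{
        key_serial_t key;

        key = add_key("asymmetric", NULL, cert_der, cert_len,
                      KEY_SPEC_SESSION_KEYRING);
        if (key == -1)
                perror("add_key(asymmetric)");
        return key;
}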
@@ -71,10 +76,17 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
if (ret < 0)
goto error;
- description = strndup_user(_description, PAGE_SIZE);
- if (IS_ERR(description)) {
- ret = PTR_ERR(description);
- goto error;
+ description = NULL;
+ if (_description) {
+ description = strndup_user(_description, PAGE_SIZE);
+ if (IS_ERR(description)) {
+ ret = PTR_ERR(description);
+ goto error;
+ }
+ if (!*description) {
+ kfree(description);
+ description = NULL;
+ }
}
/* pull the payload in if one was supplied */
@@ -83,7 +95,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
vm = false;
if (_payload) {
ret = -ENOMEM;
- payload = kmalloc(plen, GFP_KERNEL);
+ payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
if (!payload) {
if (plen <= PAGE_SIZE)
goto error2;
@@ -99,7 +111,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
}
/* find the target keyring (which must be writable) */
- keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error3;
@@ -183,7 +195,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
- KEY_WRITE);
+ KEY_NEED_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
@@ -241,7 +253,7 @@ long keyctl_get_keyring_ID(key_serial_t id, int create)
long ret;
lflags = create ? KEY_LOOKUP_CREATE : 0;
- key_ref = lookup_user_key(id, lflags, KEY_SEARCH);
+ key_ref = lookup_user_key(id, lflags, KEY_NEED_SEARCH);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -322,7 +334,7 @@ long keyctl_update_key(key_serial_t id,
}
/* find the target key (which must be writable) */
- key_ref = lookup_user_key(id, 0, KEY_WRITE);
+ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
@@ -353,12 +365,12 @@ long keyctl_revoke_key(key_serial_t id)
key_ref_t key_ref;
long ret;
- key_ref = lookup_user_key(id, 0, KEY_WRITE);
+ key_ref = lookup_user_key(id, 0, KEY_NEED_WRITE);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
if (ret != -EACCES)
goto error;
- key_ref = lookup_user_key(id, 0, KEY_SETATTR);
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -374,6 +386,37 @@ error:
}
/*
+ * Invalidate a key.
+ *
+ * The key must grant the caller Invalidate permission for this to work.
+ * The key and any links to the key will be automatically garbage collected
+ * immediately.
+ *
+ * If successful, 0 is returned.
+ */
+long keyctl_invalidate_key(key_serial_t id)
+{
+ key_ref_t key_ref;
+ long ret;
+
+ kenter("%d", id);
+
+ key_ref = lookup_user_key(id, 0, KEY_NEED_SEARCH);
+ if (IS_ERR(key_ref)) {
+ ret = PTR_ERR(key_ref);
+ goto error;
+ }
+
+ key_invalidate(key_ref_to_ptr(key_ref));
+ ret = 0;
+
+ key_ref_put(key_ref);
+error:
+ kleave(" = %ld", ret);
+ return ret;
+}
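A user-space sketch of driving this operation through the raw keyctl()
wrapper, assuming libkeyutils; the KEYCTL_INVALIDATE definition is provided
locally in case the installed headers predate this operation, and the value
mirrors the one added to the kernel UAPI by this series.

/* Sketch: invalidate a key named on the command line. */
#include <keyutils.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef KEYCTL_INVALIDATE
#define KEYCTL_INVALIDATE 21
#endif

int main(int argc, char *argv[])
{
        key_serial_t key = argc > 1 ? atoi(argv[1]) : 0;

        if (keyctl(KEYCTL_INVALIDATE, key) < 0) {
                perror("keyctl(KEYCTL_INVALIDATE)");
                return 1;
        }
        /* the key and any links to it will now be garbage collected */
        return 0;
}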
+
+/*
* Clear the specified keyring, creating an empty process keyring if one of the
* special keyring IDs is used.
*
@@ -385,14 +428,27 @@ long keyctl_keyring_clear(key_serial_t ringid)
key_ref_t keyring_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
+
+ /* Root is permitted to clear certain special keyrings */
+ if (capable(CAP_SYS_ADMIN)) {
+ keyring_ref = lookup_user_key(ringid, 0, 0);
+ if (IS_ERR(keyring_ref))
+ goto error;
+ if (test_bit(KEY_FLAG_ROOT_CAN_CLEAR,
+ &key_ref_to_ptr(keyring_ref)->flags))
+ goto clear;
+ goto error_put;
+ }
+
goto error;
}
+clear:
ret = keyring_clear(key_ref_to_ptr(keyring_ref));
-
+error_put:
key_ref_put(keyring_ref);
error:
return ret;
@@ -414,13 +470,13 @@ long keyctl_keyring_link(key_serial_t id, key_serial_t ringid)
key_ref_t keyring_ref, key_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
}
- key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_LINK);
+ key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE, KEY_NEED_LINK);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error2;
@@ -449,7 +505,7 @@ long keyctl_keyring_unlink(key_serial_t id, key_serial_t ringid)
key_ref_t keyring_ref, key_ref;
long ret;
- keyring_ref = lookup_user_key(ringid, 0, KEY_WRITE);
+ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_WRITE);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error;
@@ -492,7 +548,7 @@ long keyctl_describe_key(key_serial_t keyid,
char *tmpbuf;
long ret;
- key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW);
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
/* viewing a key under construction is permitted if we have the
* authorisation token handy */
@@ -524,8 +580,8 @@ okay:
ret = snprintf(tmpbuf, PAGE_SIZE - 1,
"%s;%d;%d;%08x;%s",
key->type->name,
- key->uid,
- key->gid,
+ from_kuid_munged(current_user_ns(), key->uid),
+ from_kgid_munged(current_user_ns(), key->gid),
key->perm,
key->description ?: "");
@@ -583,7 +639,7 @@ long keyctl_keyring_search(key_serial_t ringid,
}
/* get the keyring at which to begin the search */
- keyring_ref = lookup_user_key(ringid, 0, KEY_SEARCH);
+ keyring_ref = lookup_user_key(ringid, 0, KEY_NEED_SEARCH);
if (IS_ERR(keyring_ref)) {
ret = PTR_ERR(keyring_ref);
goto error2;
@@ -593,7 +649,7 @@ long keyctl_keyring_search(key_serial_t ringid,
dest_ref = NULL;
if (destringid) {
dest_ref = lookup_user_key(destringid, KEY_LOOKUP_CREATE,
- KEY_WRITE);
+ KEY_NEED_WRITE);
if (IS_ERR(dest_ref)) {
ret = PTR_ERR(dest_ref);
goto error3;
@@ -620,7 +676,7 @@ long keyctl_keyring_search(key_serial_t ringid,
/* link the resulting key to the destination keyring if we can */
if (dest_ref) {
- ret = key_permission(key_ref, KEY_LINK);
+ ret = key_permission(key_ref, KEY_NEED_LINK);
if (ret < 0)
goto error6;
@@ -671,7 +727,7 @@ long keyctl_read_key(key_serial_t keyid, char __user *buffer, size_t buflen)
key = key_ref_to_ptr(key_ref);
/* see if we can read it directly */
- ret = key_permission(key_ref, KEY_READ);
+ ret = key_permission(key_ref, KEY_NEED_READ);
if (ret == 0)
goto can_read_key;
if (ret != -EACCES)
@@ -721,19 +777,29 @@ error:
*
* If successful, 0 will be returned.
*/
-long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
+long keyctl_chown_key(key_serial_t id, uid_t user, gid_t group)
{
struct key_user *newowner, *zapowner = NULL;
struct key *key;
key_ref_t key_ref;
long ret;
+ kuid_t uid;
+ kgid_t gid;
+
+ uid = make_kuid(current_user_ns(), user);
+ gid = make_kgid(current_user_ns(), group);
+ ret = -EINVAL;
+ if ((user != (uid_t) -1) && !uid_valid(uid))
+ goto error;
+ if ((group != (gid_t) -1) && !gid_valid(gid))
+ goto error;
ret = 0;
- if (uid == (uid_t) -1 && gid == (gid_t) -1)
+ if (user == (uid_t) -1 && group == (gid_t) -1)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
- KEY_SETATTR);
+ KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -747,27 +813,27 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
if (!capable(CAP_SYS_ADMIN)) {
/* only the sysadmin can chown a key to some other UID */
- if (uid != (uid_t) -1 && key->uid != uid)
+ if (user != (uid_t) -1 && !uid_eq(key->uid, uid))
goto error_put;
/* only the sysadmin can set the key's GID to a group other
* than one of those that the current process subscribes to */
- if (gid != (gid_t) -1 && gid != key->gid && !in_group_p(gid))
+ if (group != (gid_t) -1 && !gid_eq(gid, key->gid) && !in_group_p(gid))
goto error_put;
}
/* change the UID */
- if (uid != (uid_t) -1 && uid != key->uid) {
+ if (user != (uid_t) -1 && !uid_eq(uid, key->uid)) {
ret = -ENOMEM;
- newowner = key_user_lookup(uid, current_user_ns());
+ newowner = key_user_lookup(uid);
if (!newowner)
goto error_put;
/* transfer the quota burden to the new user */
if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
- unsigned maxkeys = (uid == 0) ?
+ unsigned maxkeys = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (uid == 0) ?
+ unsigned maxbytes = uid_eq(uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
spin_lock(&newowner->lock);
@@ -801,7 +867,7 @@ long keyctl_chown_key(key_serial_t id, uid_t uid, gid_t gid)
}
/* change the GID */
- if (gid != (gid_t) -1)
+ if (group != (gid_t) -1)
key->gid = gid;
ret = 0;
@@ -839,7 +905,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
goto error;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
- KEY_SETATTR);
+ KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
ret = PTR_ERR(key_ref);
goto error;
@@ -852,7 +918,7 @@ long keyctl_setperm_key(key_serial_t id, key_perm_t perm)
down_write(&key->sem);
/* if we're not the sysadmin, we can only change a key that we own */
- if (capable(CAP_SYS_ADMIN) || key->uid == current_fsuid()) {
+ if (capable(CAP_SYS_ADMIN) || uid_eq(key->uid, current_fsuid())) {
key->perm = perm;
ret = 0;
}
@@ -881,7 +947,7 @@ static long get_instantiation_keyring(key_serial_t ringid,
/* if a specific keyring is nominated by ID, then use that */
if (ringid > 0) {
- dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_WRITE);
+ dkref = lookup_user_key(ringid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
if (IS_ERR(dkref))
return PTR_ERR(dkref);
*_dest_keyring = key_ref_to_ptr(dkref);
@@ -1061,18 +1127,18 @@ long keyctl_instantiate_key_iov(key_serial_t id,
struct iovec iovstack[UIO_FASTIOV], *iov = iovstack;
long ret;
- if (_payload_iov == 0 || ioc == 0)
+ if (!_payload_iov || !ioc)
goto no_payload;
ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
- ARRAY_SIZE(iovstack), iovstack, &iov, 1);
+ ARRAY_SIZE(iovstack), iovstack, &iov);
if (ret < 0)
- return ret;
+ goto err;
if (ret == 0)
goto no_payload_free;
ret = keyctl_instantiate_key_common(id, iov, ioc, ret, ringid);
-
+err:
if (iov != iovstack)
kfree(iov);
return ret;
@@ -1244,14 +1310,12 @@ error:
*/
long keyctl_set_timeout(key_serial_t id, unsigned timeout)
{
- struct timespec now;
struct key *key, *instkey;
key_ref_t key_ref;
- time_t expiry;
long ret;
key_ref = lookup_user_key(id, KEY_LOOKUP_CREATE | KEY_LOOKUP_PARTIAL,
- KEY_SETATTR);
+ KEY_NEED_SETATTR);
if (IS_ERR(key_ref)) {
/* setting the timeout on a key under construction is permitted
* if we have the authorisation token handy */
@@ -1273,20 +1337,7 @@ long keyctl_set_timeout(key_serial_t id, unsigned timeout)
okay:
key = key_ref_to_ptr(key_ref);
-
- /* make the changes with the locks held to prevent races */
- down_write(&key->sem);
-
- expiry = 0;
- if (timeout > 0) {
- now = current_kernel_time();
- expiry = now.tv_sec + timeout;
- }
-
- key->expiry = expiry;
- key_schedule_gc(key->expiry + key_gc_delay);
-
- up_write(&key->sem);
+ key_set_timeout(key, timeout);
key_put(key);
ret = 0;
@@ -1367,7 +1418,7 @@ long keyctl_get_security(key_serial_t keyid,
char *context;
long ret;
- key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_VIEW);
+ key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
if (IS_ERR(key_ref)) {
if (PTR_ERR(key_ref) != -EACCES)
return PTR_ERR(key_ref);
@@ -1424,103 +1475,93 @@ long keyctl_get_security(key_serial_t keyid,
*/
long keyctl_session_to_parent(void)
{
-#ifdef TIF_NOTIFY_RESUME
struct task_struct *me, *parent;
const struct cred *mycred, *pcred;
- struct cred *cred, *oldcred;
+ struct callback_head *newwork, *oldwork;
key_ref_t keyring_r;
+ struct cred *cred;
int ret;
- keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
+ keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_NEED_LINK);
if (IS_ERR(keyring_r))
return PTR_ERR(keyring_r);
+ ret = -ENOMEM;
+
/* our parent is going to need a new cred struct, a new tgcred struct
* and new security data, so we allocate them here to prevent ENOMEM in
* our parent */
- ret = -ENOMEM;
cred = cred_alloc_blank();
if (!cred)
goto error_keyring;
+ newwork = &cred->rcu;
- cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
+ cred->session_keyring = key_ref_to_ptr(keyring_r);
keyring_r = NULL;
+ init_task_work(newwork, key_change_session_keyring);
me = current;
rcu_read_lock();
write_lock_irq(&tasklist_lock);
- parent = me->real_parent;
ret = -EPERM;
+ oldwork = NULL;
+ parent = me->real_parent;
/* the parent mustn't be init and mustn't be a kernel thread */
if (parent->pid <= 1 || !parent->mm)
- goto not_permitted;
+ goto unlock;
/* the parent must be single threaded */
if (!thread_group_empty(parent))
- goto not_permitted;
+ goto unlock;
/* the parent and the child must have different session keyrings or
* there's no point */
mycred = current_cred();
pcred = __task_cred(parent);
if (mycred == pcred ||
- mycred->tgcred->session_keyring == pcred->tgcred->session_keyring)
- goto already_same;
+ mycred->session_keyring == pcred->session_keyring) {
+ ret = 0;
+ goto unlock;
+ }
/* the parent must have the same effective ownership and mustn't be
* SUID/SGID */
- if (pcred->uid != mycred->euid ||
- pcred->euid != mycred->euid ||
- pcred->suid != mycred->euid ||
- pcred->gid != mycred->egid ||
- pcred->egid != mycred->egid ||
- pcred->sgid != mycred->egid)
- goto not_permitted;
+ if (!uid_eq(pcred->uid, mycred->euid) ||
+ !uid_eq(pcred->euid, mycred->euid) ||
+ !uid_eq(pcred->suid, mycred->euid) ||
+ !gid_eq(pcred->gid, mycred->egid) ||
+ !gid_eq(pcred->egid, mycred->egid) ||
+ !gid_eq(pcred->sgid, mycred->egid))
+ goto unlock;
/* the keyrings must have the same UID */
- if ((pcred->tgcred->session_keyring &&
- pcred->tgcred->session_keyring->uid != mycred->euid) ||
- mycred->tgcred->session_keyring->uid != mycred->euid)
- goto not_permitted;
+ if ((pcred->session_keyring &&
+ !uid_eq(pcred->session_keyring->uid, mycred->euid)) ||
+ !uid_eq(mycred->session_keyring->uid, mycred->euid))
+ goto unlock;
- /* if there's an already pending keyring replacement, then we replace
- * that */
- oldcred = parent->replacement_session_keyring;
+ /* cancel an already pending keyring replacement */
+ oldwork = task_work_cancel(parent, key_change_session_keyring);
/* the replacement session keyring is applied just prior to userspace
* restarting */
- parent->replacement_session_keyring = cred;
- cred = NULL;
- set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME);
-
+ ret = task_work_add(parent, newwork, true);
+ if (!ret)
+ newwork = NULL;
+unlock:
write_unlock_irq(&tasklist_lock);
rcu_read_unlock();
- if (oldcred)
- put_cred(oldcred);
- return 0;
-
-already_same:
- ret = 0;
-not_permitted:
- write_unlock_irq(&tasklist_lock);
- rcu_read_unlock();
- put_cred(cred);
+ if (oldwork)
+ put_cred(container_of(oldwork, struct cred, rcu));
+ if (newwork)
+ put_cred(cred);
return ret;
error_keyring:
key_ref_put(keyring_r);
return ret;
-
-#else /* !TIF_NOTIFY_RESUME */
- /*
- * To be removed when TIF_NOTIFY_RESUME has been implemented on
- * m68k/xtensa
- */
-#warning TIF_NOTIFY_RESUME not implemented
- return -EOPNOTSUPP;
-#endif /* !TIF_NOTIFY_RESUME */
}
/*
@@ -1623,6 +1664,12 @@ SYSCALL_DEFINE5(keyctl, int, option, unsigned long, arg2, unsigned long, arg3,
(unsigned) arg4,
(key_serial_t) arg5);
+ case KEYCTL_INVALIDATE:
+ return keyctl_invalidate_key((key_serial_t) arg2);
+
+ case KEYCTL_GET_PERSISTENT:
+ return keyctl_get_persistent((uid_t)arg2, (key_serial_t)arg3);
+
default:
return -EOPNOTSUPP;
}
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index d605f75292e..9cf2575f0d9 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1,6 +1,6 @@
/* Keyring handling
*
- * Copyright (C) 2004-2005, 2008 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2004-2005, 2008, 2013 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -17,16 +17,11 @@
#include <linux/seq_file.h>
#include <linux/err.h>
#include <keys/keyring-type.h>
+#include <keys/user-type.h>
+#include <linux/assoc_array_priv.h>
#include <linux/uaccess.h>
#include "internal.h"
-#define rcu_dereference_locked_keyring(keyring) \
- (rcu_dereference_protected( \
- (keyring)->payload.subscriptions, \
- rwsem_is_locked((struct rw_semaphore *)&(keyring)->sem)))
-
-#define KEY_LINK_FIXQUOTA 1UL
-
/*
* When plumbing the depths of the key tree, this sets a hard limit
* set on how deep we're willing to go.
@@ -38,6 +33,28 @@
*/
#define KEYRING_NAME_HASH_SIZE (1 << 5)
+/*
+ * We mark pointers we pass to the associative array with bit 1 set if
+ * they're keyrings and clear otherwise.
+ */
+#define KEYRING_PTR_SUBTYPE 0x2UL
+
+static inline bool keyring_ptr_is_keyring(const struct assoc_array_ptr *x)
+{
+ return (unsigned long)x & KEYRING_PTR_SUBTYPE;
+}
+static inline struct key *keyring_ptr_to_key(const struct assoc_array_ptr *x)
+{
+ void *object = assoc_array_ptr_to_leaf(x);
+ return (struct key *)((unsigned long)object & ~KEYRING_PTR_SUBTYPE);
+}
+static inline void *keyring_key_to_ptr(struct key *key)
+{
+ if (key->type == &key_type_keyring)
+ return (void *)((unsigned long)key | KEYRING_PTR_SUBTYPE);
+ return key;
+}
+
static struct list_head keyring_name_hash[KEYRING_NAME_HASH_SIZE];
static DEFINE_RWLOCK(keyring_name_lock);
@@ -57,8 +74,7 @@ static inline unsigned keyring_hash(const char *desc)
* operations.
*/
static int keyring_instantiate(struct key *keyring,
- const void *data, size_t datalen);
-static int keyring_match(const struct key *keyring, const void *criterion);
+ struct key_preparsed_payload *prep);
static void keyring_revoke(struct key *keyring);
static void keyring_destroy(struct key *keyring);
static void keyring_describe(const struct key *keyring, struct seq_file *m);
@@ -67,9 +83,9 @@ static long keyring_read(const struct key *keyring,
struct key_type key_type_keyring = {
.name = "keyring",
- .def_datalen = sizeof(struct keyring_list),
+ .def_datalen = 0,
.instantiate = keyring_instantiate,
- .match = keyring_match,
+ .match = user_match,
.revoke = keyring_revoke,
.destroy = keyring_destroy,
.describe = keyring_describe,
@@ -112,12 +128,13 @@ static void keyring_publish_name(struct key *keyring)
* Returns 0 on success, -EINVAL if given any data.
*/
static int keyring_instantiate(struct key *keyring,
- const void *data, size_t datalen)
+ struct key_preparsed_payload *prep)
{
int ret;
ret = -EINVAL;
- if (datalen == 0) {
+ if (prep->datalen == 0) {
+ assoc_array_init(&keyring->keys);
/* make the keyring available by name if it has one */
keyring_publish_name(keyring);
ret = 0;
@@ -127,23 +144,235 @@ static int keyring_instantiate(struct key *keyring,
}
/*
- * Match keyrings on their name
+ * Multiply 64 bits by 32 bits to 96 bits and fold back to 64 bits. Ideally we'd
+ * fold the carry back too, but that requires inline asm.
+ */
+static u64 mult_64x32_and_fold(u64 x, u32 y)
+{
+ u64 hi = (u64)(u32)(x >> 32) * y;
+ u64 lo = (u64)(u32)(x) * y;
+ return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
+}
+
+/*
+ * Hash a key type and description.
*/
-static int keyring_match(const struct key *keyring, const void *description)
+static unsigned long hash_key_type_and_desc(const struct keyring_index_key *index_key)
{
- return keyring->description &&
- strcmp(keyring->description, description) == 0;
+ const unsigned level_shift = ASSOC_ARRAY_LEVEL_STEP;
+ const unsigned long fan_mask = ASSOC_ARRAY_FAN_MASK;
+ const char *description = index_key->description;
+ unsigned long hash, type;
+ u32 piece;
+ u64 acc;
+ int n, desc_len = index_key->desc_len;
+
+ type = (unsigned long)index_key->type;
+
+ acc = mult_64x32_and_fold(type, desc_len + 13);
+ acc = mult_64x32_and_fold(acc, 9207);
+ for (;;) {
+ n = desc_len;
+ if (n <= 0)
+ break;
+ if (n > 4)
+ n = 4;
+ piece = 0;
+ memcpy(&piece, description, n);
+ description += n;
+ desc_len -= n;
+ acc = mult_64x32_and_fold(acc, piece);
+ acc = mult_64x32_and_fold(acc, 9207);
+ }
+
+ /* Fold the hash down to 32 bits if need be. */
+ hash = acc;
+ if (ASSOC_ARRAY_KEY_CHUNK_SIZE == 32)
+ hash ^= acc >> 32;
+
+ /* Squidge all the keyrings into a part of the tree separate from
+ * ordinary keys by making sure the lowest level segment in the hash is
+ * zero for keyrings and non-zero otherwise.
+ */
+ if (index_key->type != &key_type_keyring && (hash & fan_mask) == 0)
+ return hash | (hash >> (ASSOC_ARRAY_KEY_CHUNK_SIZE - level_shift)) | 1;
+ if (index_key->type == &key_type_keyring && (hash & fan_mask) != 0)
+ return (hash + (hash << level_shift)) & ~fan_mask;
+ return hash;
}
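The hashing above bottoms out in mult_64x32_and_fold(). As a sanity check, the primitive can be lifted verbatim into a small userspace harness; this is illustration only, and the pointer value fed in below is just a sample, not anything the kernel would produce.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;
typedef uint32_t u32;

/* Same multiply-and-fold as in the patch: 64x32 -> 96 bits, folded to 64. */
static u64 mult_64x32_and_fold(u64 x, u32 y)
{
	u64 hi = (u64)(u32)(x >> 32) * y;
	u64 lo = (u64)(u32)(x) * y;
	return lo + ((u64)(u32)hi << 32) + (u32)(hi >> 32);
}

int main(void)
{
	/* Mimic hash_key_type_and_desc(): seed with a (sample) type pointer
	 * and a 7-byte description length, then stir in the 9207 constant. */
	u64 acc = mult_64x32_and_fold(0xffffffff81234567ULL, 7 + 13);
	acc = mult_64x32_and_fold(acc, 9207);
	printf("acc = %#llx\n", (unsigned long long)acc);
	return 0;
}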
/*
+ * Build the next index key chunk.
+ *
+ * On 32-bit systems the index key is laid out as:
+ *
+ * 0 4 5 9...
+ * hash desclen typeptr desc[]
+ *
+ * On 64-bit systems:
+ *
+ * 0 8 9 17...
+ * hash desclen typeptr desc[]
+ *
+ * We return it one word-sized chunk at a time.
+ */
+static unsigned long keyring_get_key_chunk(const void *data, int level)
+{
+ const struct keyring_index_key *index_key = data;
+ unsigned long chunk = 0;
+ long offset = 0;
+ int desc_len = index_key->desc_len, n = sizeof(chunk);
+
+ level /= ASSOC_ARRAY_KEY_CHUNK_SIZE;
+ switch (level) {
+ case 0:
+ return hash_key_type_and_desc(index_key);
+ case 1:
+ return ((unsigned long)index_key->type << 8) | desc_len;
+ case 2:
+ if (desc_len == 0)
+ return (u8)((unsigned long)index_key->type >>
+ (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+ n--;
+ offset = 1;
+ default:
+ offset += sizeof(chunk) - 1;
+ offset += (level - 3) * sizeof(chunk);
+ if (offset >= desc_len)
+ return 0;
+ desc_len -= offset;
+ if (desc_len > n)
+ desc_len = n;
+ offset += desc_len;
+ do {
+ chunk <<= 8;
+ chunk |= ((u8*)index_key->description)[--offset];
+ } while (--desc_len > 0);
+
+ if (level == 2) {
+ chunk <<= 8;
+ chunk |= (u8)((unsigned long)index_key->type >>
+ (ASSOC_ARRAY_KEY_CHUNK_SIZE - 8));
+ }
+ return chunk;
+ }
+}
+
+static unsigned long keyring_get_object_key_chunk(const void *object, int level)
+{
+ const struct key *key = keyring_ptr_to_key(object);
+ return keyring_get_key_chunk(&key->index_key, level);
+}
+
+static bool keyring_compare_object(const void *object, const void *data)
+{
+ const struct keyring_index_key *index_key = data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ return key->index_key.type == index_key->type &&
+ key->index_key.desc_len == index_key->desc_len &&
+ memcmp(key->index_key.description, index_key->description,
+ index_key->desc_len) == 0;
+}
+
+/*
+ * Compare the index keys of a pair of objects and determine the bit position
+ * at which they differ - if they differ.
+ */
+static int keyring_diff_objects(const void *object, const void *data)
+{
+ const struct key *key_a = keyring_ptr_to_key(object);
+ const struct keyring_index_key *a = &key_a->index_key;
+ const struct keyring_index_key *b = data;
+ unsigned long seg_a, seg_b;
+ int level, i;
+
+ level = 0;
+ seg_a = hash_key_type_and_desc(a);
+ seg_b = hash_key_type_and_desc(b);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ /* The number of bits contributed by the hash is controlled by a
+ * constant in the assoc_array headers. Everything else thereafter we
+ * can deal with as being machine word-size dependent.
+ */
+ level += ASSOC_ARRAY_KEY_CHUNK_SIZE / 8;
+ seg_a = a->desc_len;
+ seg_b = b->desc_len;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ /* The next bit may not work on big endian */
+ level++;
+ seg_a = (unsigned long)a->type;
+ seg_b = (unsigned long)b->type;
+ if ((seg_a ^ seg_b) != 0)
+ goto differ;
+
+ level += sizeof(unsigned long);
+ if (a->desc_len == 0)
+ goto same;
+
+ i = 0;
+ if (((unsigned long)a->description | (unsigned long)b->description) &
+ (sizeof(unsigned long) - 1)) {
+ do {
+ seg_a = *(unsigned long *)(a->description + i);
+ seg_b = *(unsigned long *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ i += sizeof(unsigned long);
+ } while (i < (a->desc_len & (sizeof(unsigned long) - 1)));
+ }
+
+ for (; i < a->desc_len; i++) {
+ seg_a = *(unsigned char *)(a->description + i);
+ seg_b = *(unsigned char *)(b->description + i);
+ if ((seg_a ^ seg_b) != 0)
+ goto differ_plus_i;
+ }
+
+same:
+ return -1;
+
+differ_plus_i:
+ level += i;
+differ:
+ i = level * 8 + __ffs(seg_a ^ seg_b);
+ return i;
+}
+
+/*
+ * Free an object after stripping the keyring flag off of the pointer.
+ */
+static void keyring_free_object(void *object)
+{
+ key_put(keyring_ptr_to_key(object));
+}
+
+/*
+ * Operations for keyring management by the index-tree routines.
+ */
+static const struct assoc_array_ops keyring_assoc_array_ops = {
+ .get_key_chunk = keyring_get_key_chunk,
+ .get_object_key_chunk = keyring_get_object_key_chunk,
+ .compare_object = keyring_compare_object,
+ .diff_objects = keyring_diff_objects,
+ .free_object = keyring_free_object,
+};
+
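This ops table is the only glue the generic assoc_array code needs. A direct lookup, for instance, is just assoc_array_find() keyed by the index key, which is exactly what search_keyring() and find_key_to_update() do later in this patch. A hypothetical helper (illustration only, not part of the patch) would look like this:

static struct key *example_direct_lookup(struct key *keyring,
					 const struct keyring_index_key *index_key)
{
	const void *object;

	/* Walk the index tree straight to the matching leaf, if any. */
	object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
				  index_key);
	if (!object)
		return NULL;

	/* Leaf pointers carry the keyring-subtype bit; strip it off. */
	return keyring_ptr_to_key(object);
}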
+/*
* Clean up a keyring when it is destroyed. Unpublish its name if it had one
* and dispose of its data.
+ *
+ * The garbage collector detects the final key_put(), removes the keyring from
+ * the serial number tree and then does RCU synchronisation before coming here,
+ * so we shouldn't need to worry about code poking around here with the RCU
+ * readlock held by this time.
*/
static void keyring_destroy(struct key *keyring)
{
- struct keyring_list *klist;
- int loop;
-
if (keyring->description) {
write_lock(&keyring_name_lock);
@@ -154,13 +383,7 @@ static void keyring_destroy(struct key *keyring)
write_unlock(&keyring_name_lock);
}
- klist = rcu_dereference_check(keyring->payload.subscriptions,
- atomic_read(&keyring->usage) == 0);
- if (klist) {
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- key_put(klist->keys[loop]);
- kfree(klist);
- }
+ assoc_array_destroy(&keyring->keys, &keyring_assoc_array_ops);
}
/*
@@ -168,92 +391,102 @@ static void keyring_destroy(struct key *keyring)
*/
static void keyring_describe(const struct key *keyring, struct seq_file *m)
{
- struct keyring_list *klist;
-
if (keyring->description)
seq_puts(m, keyring->description);
else
seq_puts(m, "[anon]");
if (key_is_instantiated(keyring)) {
- rcu_read_lock();
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist)
- seq_printf(m, ": %u/%u", klist->nkeys, klist->maxkeys);
+ if (keyring->keys.nr_leaves_on_tree != 0)
+ seq_printf(m, ": %lu", keyring->keys.nr_leaves_on_tree);
else
seq_puts(m, ": empty");
- rcu_read_unlock();
}
}
+struct keyring_read_iterator_context {
+ size_t qty;
+ size_t count;
+ key_serial_t __user *buffer;
+};
+
+static int keyring_read_iterator(const void *object, void *data)
+{
+ struct keyring_read_iterator_context *ctx = data;
+ const struct key *key = keyring_ptr_to_key(object);
+ int ret;
+
+ kenter("{%s,%d},,{%zu/%zu}",
+ key->type->name, key->serial, ctx->count, ctx->qty);
+
+ if (ctx->count >= ctx->qty)
+ return 1;
+
+ ret = put_user(key->serial, ctx->buffer);
+ if (ret < 0)
+ return ret;
+ ctx->buffer++;
+ ctx->count += sizeof(key->serial);
+ return 0;
+}
+
/*
* Read a list of key IDs from the keyring's contents in binary form
*
- * The keyring's semaphore is read-locked by the caller.
+ * The keyring's semaphore is read-locked by the caller. This prevents someone
+ * from modifying it under us - which could cause us to read key IDs multiple
+ * times.
*/
static long keyring_read(const struct key *keyring,
char __user *buffer, size_t buflen)
{
- struct keyring_list *klist;
- struct key *key;
- size_t qty, tmp;
- int loop, ret;
+ struct keyring_read_iterator_context ctx;
+ unsigned long nr_keys;
+ int ret;
- ret = 0;
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* calculate how much data we could return */
- qty = klist->nkeys * sizeof(key_serial_t);
-
- if (buffer && buflen > 0) {
- if (buflen > qty)
- buflen = qty;
-
- /* copy the IDs of the subscribed keys into the
- * buffer */
- ret = -EFAULT;
-
- for (loop = 0; loop < klist->nkeys; loop++) {
- key = klist->keys[loop];
-
- tmp = sizeof(key_serial_t);
- if (tmp > buflen)
- tmp = buflen;
-
- if (copy_to_user(buffer,
- &key->serial,
- tmp) != 0)
- goto error;
-
- buflen -= tmp;
- if (buflen == 0)
- break;
- buffer += tmp;
- }
- }
+ kenter("{%d},,%zu", key_serial(keyring), buflen);
+
+ if (buflen & (sizeof(key_serial_t) - 1))
+ return -EINVAL;
- ret = qty;
+ nr_keys = keyring->keys.nr_leaves_on_tree;
+ if (nr_keys == 0)
+ return 0;
+
+ /* Calculate how much data we could return */
+ ctx.qty = nr_keys * sizeof(key_serial_t);
+
+ if (!buffer || !buflen)
+ return ctx.qty;
+
+ if (buflen > ctx.qty)
+ ctx.qty = buflen;
+
+ /* Copy the IDs of the subscribed keys into the buffer */
+ ctx.buffer = (key_serial_t __user *)buffer;
+ ctx.count = 0;
+ ret = assoc_array_iterate(&keyring->keys, keyring_read_iterator, &ctx);
+ if (ret < 0) {
+ kleave(" = %d [iterate]", ret);
+ return ret;
}
-error:
- return ret;
+ kleave(" = %zu [ok]", ctx.count);
+ return ctx.count;
}
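From userspace this path is reached with KEYCTL_READ on a keyring, which now yields the serial numbers collected by the iterator above. A rough sketch follows (not part of the patch, error handling trimmed):

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

int main(void)
{
	int32_t ids[64];
	long n, i;

	/* Returns the number of bytes available, copying at most sizeof(ids). */
	n = syscall(__NR_keyctl, KEYCTL_READ, KEY_SPEC_SESSION_KEYRING,
		    (char *)ids, sizeof(ids));
	if (n < 0) {
		perror("KEYCTL_READ");
		return 1;
	}
	for (i = 0; i < n / (long)sizeof(int32_t) && i < 64; i++)
		printf("linked key: %d\n", ids[i]);
	return 0;
}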
/*
* Allocate a keyring and link into the destination keyring.
*/
-struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
- const struct cred *cred, unsigned long flags,
- struct key *dest)
+struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid,
+ const struct cred *cred, key_perm_t perm,
+ unsigned long flags, struct key *dest)
{
struct key *keyring;
int ret;
keyring = key_alloc(&key_type_keyring, description,
- uid, gid, cred,
- (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
- flags);
-
+ uid, gid, cred, perm, flags);
if (!IS_ERR(keyring)) {
ret = key_instantiate_and_link(keyring, NULL, 0, dest, NULL);
if (ret < 0) {
@@ -264,215 +497,363 @@ struct key *keyring_alloc(const char *description, uid_t uid, gid_t gid,
return keyring;
}
+EXPORT_SYMBOL(keyring_alloc);
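With the extra perm parameter, in-kernel callers that want the permissions keyring_alloc() used to hard-code must now pass them explicitly. A minimal sketch of such a caller (hypothetical helper name, not part of the patch):

static struct key *example_make_keyring(const struct cred *cred)
{
	/* Root-owned keyring, old default permission mask, outside quota. */
	return keyring_alloc("example", KUIDT_INIT(0), KGIDT_INIT(0),
			     cred,
			     (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL,
			     KEY_ALLOC_NOT_IN_QUOTA, NULL);
}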
-/**
- * keyring_search_aux - Search a keyring tree for a key matching some criteria
- * @keyring_ref: A pointer to the keyring with possession indicator.
- * @cred: The credentials to use for permissions checks.
- * @type: The type of key to search for.
- * @description: Parameter for @match.
- * @match: Function to rule on whether or not a key is the one required.
- * @no_state_check: Don't check if a matching key is bad
- *
- * Search the supplied keyring tree for a key that matches the criteria given.
- * The root keyring and any linked keyrings must grant Search permission to the
- * caller to be searchable and keys can only be found if they too grant Search
- * to the caller. The possession flag on the root keyring pointer controls use
- * of the possessor bits in permissions checking of the entire tree. In
- * addition, the LSM gets to forbid keyring searches and key matches.
- *
- * The search is performed as a breadth-then-depth search up to the prescribed
- * limit (KEYRING_SEARCH_MAX_DEPTH).
- *
- * Keys are matched to the type provided and are then filtered by the match
- * function, which is given the description to use in any way it sees fit. The
- * match function may use any attributes of a key that it wishes to to
- * determine the match. Normally the match function from the key type would be
- * used.
- *
- * RCU is used to prevent the keyring key lists from disappearing without the
- * need to take lots of locks.
- *
- * Returns a pointer to the found key and increments the key usage count if
- * successful; -EAGAIN if no matching keys were found, or if expired or revoked
- * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
- * specified keyring wasn't a keyring.
- *
- * In the case of a successful return, the possession attribute from
- * @keyring_ref is propagated to the returned key reference.
+/*
+ * Iteration function to consider each key found.
*/
-key_ref_t keyring_search_aux(key_ref_t keyring_ref,
- const struct cred *cred,
- struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check)
+static int keyring_search_iterator(const void *object, void *iterator_data)
{
- struct {
- struct keyring_list *keylist;
- int kix;
- } stack[KEYRING_SEARCH_MAX_DEPTH];
-
- struct keyring_list *keylist;
- struct timespec now;
- unsigned long possessed, kflags;
- struct key *keyring, *key;
- key_ref_t key_ref;
- long err;
- int sp, nkeys, kix;
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+ unsigned long kflags = key->flags;
- keyring = key_ref_to_ptr(keyring_ref);
- possessed = is_key_possessed(keyring_ref);
- key_check(keyring);
+ kenter("{%d}", key->serial);
- /* top keyring must have search permission to begin the search */
- err = key_task_permission(keyring_ref, cred, KEY_SEARCH);
- if (err < 0) {
- key_ref = ERR_PTR(err);
- goto error;
+ /* ignore keys not of this type */
+ if (key->type != ctx->index_key.type) {
+ kleave(" = 0 [!type]");
+ return 0;
}
- key_ref = ERR_PTR(-ENOTDIR);
- if (keyring->type != &key_type_keyring)
- goto error;
-
- rcu_read_lock();
+ /* skip invalidated, revoked and expired keys */
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ if (kflags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ ctx->result = ERR_PTR(-EKEYREVOKED);
+ kleave(" = %d [invrev]", ctx->skipped_ret);
+ goto skipped;
+ }
- now = current_kernel_time();
- err = -EAGAIN;
- sp = 0;
-
- /* firstly we should check to see if this top-level keyring is what we
- * are looking for */
- key_ref = ERR_PTR(-EAGAIN);
- kflags = keyring->flags;
- if (keyring->type == type && match(keyring, description)) {
- key = keyring;
- if (no_state_check)
- goto found;
+ if (key->expiry && ctx->now.tv_sec >= key->expiry) {
+ ctx->result = ERR_PTR(-EKEYEXPIRED);
+ kleave(" = %d [expire]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
- /* check it isn't negative and hasn't expired or been
- * revoked */
- if (kflags & (1 << KEY_FLAG_REVOKED))
- goto error_2;
- if (key->expiry && now.tv_sec >= key->expiry)
- goto error_2;
- key_ref = ERR_PTR(key->type_data.reject_error);
- if (kflags & (1 << KEY_FLAG_NEGATIVE))
- goto error_2;
- goto found;
+ /* keys that don't match */
+ if (!ctx->match(key, ctx->match_data)) {
+ kleave(" = 0 [!match]");
+ return 0;
}
- /* otherwise, the top keyring must not be revoked, expired, or
- * negatively instantiated if we are to search it */
- key_ref = ERR_PTR(-EAGAIN);
- if (kflags & ((1 << KEY_FLAG_REVOKED) | (1 << KEY_FLAG_NEGATIVE)) ||
- (keyring->expiry && now.tv_sec >= keyring->expiry))
- goto error_2;
+ /* key must have search permissions */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_NEED_SEARCH) < 0) {
+ ctx->result = ERR_PTR(-EACCES);
+ kleave(" = %d [!perm]", ctx->skipped_ret);
+ goto skipped;
+ }
- /* start processing a new keyring */
-descend:
- if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
- goto not_this_keyring;
+ if (ctx->flags & KEYRING_SEARCH_DO_STATE_CHECK) {
+ /* we set a different error code if we pass a negative key */
+ if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
+ smp_rmb();
+ ctx->result = ERR_PTR(key->type_data.reject_error);
+ kleave(" = %d [neg]", ctx->skipped_ret);
+ goto skipped;
+ }
+ }
- keylist = rcu_dereference(keyring->payload.subscriptions);
- if (!keylist)
- goto not_this_keyring;
+ /* Found */
+ ctx->result = make_key_ref(key, ctx->possessed);
+ kleave(" = 1 [found]");
+ return 1;
- /* iterate through the keys in this keyring first */
- nkeys = keylist->nkeys;
- smp_rmb();
- for (kix = 0; kix < nkeys; kix++) {
- key = keylist->keys[kix];
- kflags = key->flags;
+skipped:
+ return ctx->skipped_ret;
+}
- /* ignore keys not of this type */
- if (key->type != type)
- continue;
+/*
+ * Search inside a keyring for a key. We can search by walking to it
+ * directly based on its index-key or we can iterate over the entire
+ * tree looking for it, based on the match function.
+ */
+static int search_keyring(struct key *keyring, struct keyring_search_context *ctx)
+{
+ if ((ctx->flags & KEYRING_SEARCH_LOOKUP_TYPE) ==
+ KEYRING_SEARCH_LOOKUP_DIRECT) {
+ const void *object;
+
+ object = assoc_array_find(&keyring->keys,
+ &keyring_assoc_array_ops,
+ &ctx->index_key);
+ return object ? ctx->iterator(object, ctx) : 0;
+ }
+ return assoc_array_iterate(&keyring->keys, ctx->iterator, ctx);
+}
- /* skip revoked keys and expired keys */
- if (!no_state_check) {
- if (kflags & (1 << KEY_FLAG_REVOKED))
- continue;
+/*
+ * Search a tree of keyrings that point to other keyrings up to the maximum
+ * depth.
+ */
+static bool search_nested_keyrings(struct key *keyring,
+ struct keyring_search_context *ctx)
+{
+ struct {
+ struct key *keyring;
+ struct assoc_array_node *node;
+ int slot;
+ } stack[KEYRING_SEARCH_MAX_DEPTH];
- if (key->expiry && now.tv_sec >= key->expiry)
- continue;
- }
+ struct assoc_array_shortcut *shortcut;
+ struct assoc_array_node *node;
+ struct assoc_array_ptr *ptr;
+ struct key *key;
+ int sp = 0, slot;
- /* keys that don't match */
- if (!match(key, description))
- continue;
+ kenter("{%d},{%s,%s}",
+ keyring->serial,
+ ctx->index_key.type->name,
+ ctx->index_key.description);
- /* key must have search permissions */
- if (key_task_permission(make_key_ref(key, possessed),
- cred, KEY_SEARCH) < 0)
- continue;
+ if (ctx->index_key.description)
+ ctx->index_key.desc_len = strlen(ctx->index_key.description);
- if (no_state_check)
+ /* Check to see if this top-level keyring is what we are looking for
+ * and whether it is valid or not.
+ */
+ if (ctx->flags & KEYRING_SEARCH_LOOKUP_ITERATE ||
+ keyring_compare_object(keyring, &ctx->index_key)) {
+ ctx->skipped_ret = 2;
+ ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
+ switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
+ case 1:
goto found;
-
- /* we set a different error code if we pass a negative key */
- if (kflags & (1 << KEY_FLAG_NEGATIVE)) {
- err = key->type_data.reject_error;
- continue;
+ case 2:
+ return false;
+ default:
+ break;
}
+ }
+
+ ctx->skipped_ret = 0;
+ if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
+ ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
+ /* Start processing a new keyring */
+descend_to_keyring:
+ kdebug("descend to %d", keyring->serial);
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto not_this_keyring;
+
+ /* Search through the keys in this keyring before searching its
+ * subtrees.
+ */
+ if (search_keyring(keyring, ctx))
goto found;
- }
- /* search through the keyrings nested in this one */
- kix = 0;
-ascend:
- nkeys = keylist->nkeys;
- smp_rmb();
- for (; kix < nkeys; kix++) {
- key = keylist->keys[kix];
- if (key->type != &key_type_keyring)
- continue;
+ /* Then manually iterate through the keyrings nested in this one.
+ *
+ * Start from the root node of the index tree. Because of the way the
+ * hash function has been set up, keyrings cluster on the leftmost
+ * branch of the root node (root slot 0) or in the root node itself.
+ * Non-keyrings avoid the leftmost branch of the root entirely (root
+ * slots 1-15).
+ */
+ ptr = ACCESS_ONCE(keyring->keys.root);
+ if (!ptr)
+ goto not_this_keyring;
- /* recursively search nested keyrings
- * - only search keyrings for which we have search permission
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ /* If the root is a shortcut, either the keyring only contains
+ * keyring pointers (everything clusters behind root slot 0) or
+ * doesn't contain any keyring pointers.
*/
- if (sp >= KEYRING_SEARCH_MAX_DEPTH)
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ if ((shortcut->index_key[0] & ASSOC_ARRAY_FAN_MASK) != 0)
+ goto not_this_keyring;
+
+ ptr = ACCESS_ONCE(shortcut->next_node);
+ node = assoc_array_ptr_to_node(ptr);
+ goto begin_node;
+ }
+
+ node = assoc_array_ptr_to_node(ptr);
+ smp_read_barrier_depends();
+
+ ptr = node->slots[0];
+ if (!assoc_array_ptr_is_meta(ptr))
+ goto begin_node;
+
+descend_to_node:
+ /* Descend to a more distal node in this keyring's content tree and go
+ * through that.
+ */
+ kdebug("descend");
+ if (assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ ptr = ACCESS_ONCE(shortcut->next_node);
+ BUG_ON(!assoc_array_ptr_is_node(ptr));
+ }
+ node = assoc_array_ptr_to_node(ptr);
+
+begin_node:
+ kdebug("begin_node");
+ smp_read_barrier_depends();
+ slot = 0;
+ascend_to_node:
+ /* Go through the slots in a node */
+ for (; slot < ASSOC_ARRAY_FAN_OUT; slot++) {
+ ptr = ACCESS_ONCE(node->slots[slot]);
+
+ if (assoc_array_ptr_is_meta(ptr) && node->back_pointer)
+ goto descend_to_node;
+
+ if (!keyring_ptr_is_keyring(ptr))
continue;
- if (key_task_permission(make_key_ref(key, possessed),
- cred, KEY_SEARCH) < 0)
+ key = keyring_ptr_to_key(ptr);
+
+ if (sp >= KEYRING_SEARCH_MAX_DEPTH) {
+ if (ctx->flags & KEYRING_SEARCH_DETECT_TOO_DEEP) {
+ ctx->result = ERR_PTR(-ELOOP);
+ return false;
+ }
+ goto not_this_keyring;
+ }
+
+ /* Search a nested keyring */
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM) &&
+ key_task_permission(make_key_ref(key, ctx->possessed),
+ ctx->cred, KEY_NEED_SEARCH) < 0)
continue;
/* stack the current position */
- stack[sp].keylist = keylist;
- stack[sp].kix = kix;
+ stack[sp].keyring = keyring;
+ stack[sp].node = node;
+ stack[sp].slot = slot;
sp++;
/* begin again with the new keyring */
keyring = key;
- goto descend;
+ goto descend_to_keyring;
+ }
+
+ /* We've dealt with all the slots in the current node, so now we need
+ * to ascend to the parent and continue processing there.
+ */
+ ptr = ACCESS_ONCE(node->back_pointer);
+ slot = node->parent_slot;
+
+ if (ptr && assoc_array_ptr_is_shortcut(ptr)) {
+ shortcut = assoc_array_ptr_to_shortcut(ptr);
+ smp_read_barrier_depends();
+ ptr = ACCESS_ONCE(shortcut->back_pointer);
+ slot = shortcut->parent_slot;
+ }
+ if (!ptr)
+ goto not_this_keyring;
+ node = assoc_array_ptr_to_node(ptr);
+ smp_read_barrier_depends();
+ slot++;
+
+ /* If we've ascended to the root (zero backpointer), we must have just
+ * finished processing the leftmost branch rather than the root slots -
+ * so there can't be any more keyrings for us to find.
+ */
+ if (node->back_pointer) {
+ kdebug("ascend %d", slot);
+ goto ascend_to_node;
}
- /* the keyring we're looking at was disqualified or didn't contain a
- * matching key */
+ /* The keyring we're looking at was disqualified or didn't contain a
+ * matching key.
+ */
not_this_keyring:
- if (sp > 0) {
- /* resume the processing of a keyring higher up in the tree */
- sp--;
- keylist = stack[sp].keylist;
- kix = stack[sp].kix + 1;
- goto ascend;
+ kdebug("not_this_keyring %d", sp);
+ if (sp <= 0) {
+ kleave(" = false");
+ return false;
}
- key_ref = ERR_PTR(err);
- goto error_2;
+ /* Resume the processing of a keyring higher up in the tree */
+ sp--;
+ keyring = stack[sp].keyring;
+ node = stack[sp].node;
+ slot = stack[sp].slot + 1;
+ kdebug("ascend to %d [%d]", keyring->serial, slot);
+ goto ascend_to_node;
- /* we found a viable match */
+ /* We found a viable match */
found:
- atomic_inc(&key->usage);
+ key = key_ref_to_ptr(ctx->result);
key_check(key);
- key_ref = make_key_ref(key, possessed);
-error_2:
+ if (!(ctx->flags & KEYRING_SEARCH_NO_UPDATE_TIME)) {
+ key->last_used_at = ctx->now.tv_sec;
+ keyring->last_used_at = ctx->now.tv_sec;
+ while (sp > 0)
+ stack[--sp].keyring->last_used_at = ctx->now.tv_sec;
+ }
+ kleave(" = true");
+ return true;
+}
+
+/**
+ * keyring_search_aux - Search a keyring tree for a key matching some criteria
+ * @keyring_ref: A pointer to the keyring with possession indicator.
+ * @ctx: The keyring search context.
+ *
+ * Search the supplied keyring tree for a key that matches the criteria given.
+ * The root keyring and any linked keyrings must grant Search permission to the
+ * caller to be searchable and keys can only be found if they too grant Search
+ * to the caller. The possession flag on the root keyring pointer controls use
+ * of the possessor bits in permissions checking of the entire tree. In
+ * addition, the LSM gets to forbid keyring searches and key matches.
+ *
+ * The search is performed as a breadth-then-depth search up to the prescribed
+ * limit (KEYRING_SEARCH_MAX_DEPTH).
+ *
+ * Keys are matched to the type provided and are then filtered by the match
+ * function, which is given the description to use in any way it sees fit. The
+ * match function may use any attributes of a key that it wishes in order to
+ * determine the match. Normally the match function from the key type would be
+ * used.
+ *
+ * RCU can be used to prevent the keyring key lists from disappearing without
+ * the need to take lots of locks.
+ *
+ * Returns a pointer to the found key and increments the key usage count if
+ * successful; -EAGAIN if no matching keys were found, or if expired or revoked
+ * keys were found; -ENOKEY if only negative keys were found; -ENOTDIR if the
+ * specified keyring wasn't a keyring.
+ *
+ * In the case of a successful return, the possession attribute from
+ * @keyring_ref is propagated to the returned key reference.
+ */
+key_ref_t keyring_search_aux(key_ref_t keyring_ref,
+ struct keyring_search_context *ctx)
+{
+ struct key *keyring;
+ long err;
+
+ ctx->iterator = keyring_search_iterator;
+ ctx->possessed = is_key_possessed(keyring_ref);
+ ctx->result = ERR_PTR(-EAGAIN);
+
+ keyring = key_ref_to_ptr(keyring_ref);
+ key_check(keyring);
+
+ if (keyring->type != &key_type_keyring)
+ return ERR_PTR(-ENOTDIR);
+
+ if (!(ctx->flags & KEYRING_SEARCH_NO_CHECK_PERM)) {
+ err = key_task_permission(keyring_ref, ctx->cred, KEY_NEED_SEARCH);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
+ rcu_read_lock();
+ ctx->now = current_kernel_time();
+ if (search_nested_keyrings(keyring, ctx))
+ __key_get(key_ref_to_ptr(ctx->result));
rcu_read_unlock();
-error:
- return key_ref;
+ return ctx->result;
}
/**
@@ -482,75 +863,73 @@ error:
* @description: The name of the keyring we want to find.
*
* As keyring_search_aux() above, but using the current task's credentials and
- * type's default matching function.
+ * type's default matching function and preferred search method.
*/
key_ref_t keyring_search(key_ref_t keyring,
struct key_type *type,
const char *description)
{
- if (!type->match)
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = type->match,
+ .match_data = description,
+ .flags = (type->def_lookup_type |
+ KEYRING_SEARCH_DO_STATE_CHECK),
+ };
+
+ if (!ctx.match)
return ERR_PTR(-ENOKEY);
- return keyring_search_aux(keyring, current->cred,
- type, description, type->match, false);
+ return keyring_search_aux(keyring, &ctx);
}
EXPORT_SYMBOL(keyring_search);
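A caller that just wants the type's default matching (the common case) still goes through keyring_search(); roughly, with a hypothetical wrapper that is not part of the patch:

static struct key *example_find_user_key(struct key *keyring, const char *desc)
{
	key_ref_t ref;

	/* Possession flag set: possessor permission bits apply to the walk. */
	ref = keyring_search(make_key_ref(keyring, true),
			     &key_type_user, desc);
	if (IS_ERR(ref))
		return ERR_CAST(ref);
	return key_ref_to_ptr(ref);
}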
/*
- * Search the given keyring only (no recursion).
+ * Search the given keyring for a key that might be updated.
*
* The caller must guarantee that the keyring is a keyring and that the
- * permission is granted to search the keyring as no check is made here.
- *
- * RCU is used to make it unnecessary to lock the keyring key list here.
+ * permission is granted to modify the keyring as no check is made here. The
+ * caller must also hold a lock on the keyring semaphore.
*
* Returns a pointer to the found key with usage count incremented if
- * successful and returns -ENOKEY if not found. Revoked keys and keys not
- * providing the requested permission are skipped over.
+ * successful and returns NULL if not found. Revoked and invalidated keys are
+ * skipped over.
*
* If successful, the possession indicator is propagated from the keyring ref
* to the returned key reference.
*/
-key_ref_t __keyring_search_one(key_ref_t keyring_ref,
- const struct key_type *ktype,
- const char *description,
- key_perm_t perm)
+key_ref_t find_key_to_update(key_ref_t keyring_ref,
+ const struct keyring_index_key *index_key)
{
- struct keyring_list *klist;
- unsigned long possessed;
struct key *keyring, *key;
- int nkeys, loop;
+ const void *object;
keyring = key_ref_to_ptr(keyring_ref);
- possessed = is_key_possessed(keyring_ref);
- rcu_read_lock();
+ kenter("{%d},{%s,%s}",
+ keyring->serial, index_key->type->name, index_key->description);
- klist = rcu_dereference(keyring->payload.subscriptions);
- if (klist) {
- nkeys = klist->nkeys;
- smp_rmb();
- for (loop = 0; loop < nkeys ; loop++) {
- key = klist->keys[loop];
-
- if (key->type == ktype &&
- (!key->type->match ||
- key->type->match(key, description)) &&
- key_permission(make_key_ref(key, possessed),
- perm) == 0 &&
- !test_bit(KEY_FLAG_REVOKED, &key->flags)
- )
- goto found;
- }
- }
+ object = assoc_array_find(&keyring->keys, &keyring_assoc_array_ops,
+ index_key);
- rcu_read_unlock();
- return ERR_PTR(-ENOKEY);
+ if (object)
+ goto found;
+
+ kleave(" = NULL");
+ return NULL;
found:
- atomic_inc(&key->usage);
- rcu_read_unlock();
- return make_key_ref(key, possessed);
+ key = keyring_ptr_to_key(object);
+ if (key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED))) {
+ kleave(" = NULL [x]");
+ return NULL;
+ }
+ __key_get(key);
+ kleave(" = {%d}", key->serial);
+ return make_key_ref(key, is_key_possessed(keyring_ref));
}
/*
@@ -583,7 +962,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
&keyring_name_hash[bucket],
type_data.link
) {
- if (keyring->user->user_ns != current_user_ns())
+ if (!kuid_has_mapping(current_user_ns(), keyring->user->uid))
continue;
if (test_bit(KEY_FLAG_REVOKED, &keyring->flags))
@@ -594,7 +973,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
if (!skip_perm_check &&
key_permission(make_key_ref(keyring, 0),
- KEY_SEARCH) < 0)
+ KEY_NEED_SEARCH) < 0)
continue;
/* we've got a match but we might end up racing with
@@ -602,6 +981,7 @@ struct key *find_keyring_by_name(const char *name, bool skip_perm_check)
* (ie. it has a zero usage count) */
if (!atomic_inc_not_zero(&keyring->usage))
continue;
+ keyring->last_used_at = current_kernel_time().tv_sec;
goto out;
}
}
@@ -612,6 +992,23 @@ out:
return keyring;
}
+static int keyring_detect_cycle_iterator(const void *object,
+ void *iterator_data)
+{
+ struct keyring_search_context *ctx = iterator_data;
+ const struct key *key = keyring_ptr_to_key(object);
+
+ kenter("{%d}", key->serial);
+
+ /* We might get a keyring with a matching index-key that is nonetheless a
+ * different keyring. */
+ if (key != ctx->match_data)
+ return 0;
+
+ ctx->result = ERR_PTR(-EDEADLK);
+ return 1;
+}
+
/*
 * See if a cycle will be created by inserting acyclic tree B in acyclic
* tree A at the topmost level (ie: as a direct child of A).
@@ -621,114 +1018,39 @@ out:
*/
static int keyring_detect_cycle(struct key *A, struct key *B)
{
- struct {
- struct keyring_list *keylist;
- int kix;
- } stack[KEYRING_SEARCH_MAX_DEPTH];
-
- struct keyring_list *keylist;
- struct key *subtree, *key;
- int sp, nkeys, kix, ret;
+ struct keyring_search_context ctx = {
+ .index_key = A->index_key,
+ .match_data = A,
+ .iterator = keyring_detect_cycle_iterator,
+ .flags = (KEYRING_SEARCH_LOOKUP_DIRECT |
+ KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_NO_UPDATE_TIME |
+ KEYRING_SEARCH_NO_CHECK_PERM |
+ KEYRING_SEARCH_DETECT_TOO_DEEP),
+ };
rcu_read_lock();
-
- ret = -EDEADLK;
- if (A == B)
- goto cycle_detected;
-
- subtree = B;
- sp = 0;
-
- /* start processing a new keyring */
-descend:
- if (test_bit(KEY_FLAG_REVOKED, &subtree->flags))
- goto not_this_keyring;
-
- keylist = rcu_dereference(subtree->payload.subscriptions);
- if (!keylist)
- goto not_this_keyring;
- kix = 0;
-
-ascend:
- /* iterate through the remaining keys in this keyring */
- nkeys = keylist->nkeys;
- smp_rmb();
- for (; kix < nkeys; kix++) {
- key = keylist->keys[kix];
-
- if (key == A)
- goto cycle_detected;
-
- /* recursively check nested keyrings */
- if (key->type == &key_type_keyring) {
- if (sp >= KEYRING_SEARCH_MAX_DEPTH)
- goto too_deep;
-
- /* stack the current position */
- stack[sp].keylist = keylist;
- stack[sp].kix = kix;
- sp++;
-
- /* begin again with the new keyring */
- subtree = key;
- goto descend;
- }
- }
-
- /* the keyring we're looking at was disqualified or didn't contain a
- * matching key */
-not_this_keyring:
- if (sp > 0) {
- /* resume the checking of a keyring higher up in the tree */
- sp--;
- keylist = stack[sp].keylist;
- kix = stack[sp].kix + 1;
- goto ascend;
- }
-
- ret = 0; /* no cycles detected */
-
-error:
+ search_nested_keyrings(B, &ctx);
rcu_read_unlock();
- return ret;
-
-too_deep:
- ret = -ELOOP;
- goto error;
-
-cycle_detected:
- ret = -EDEADLK;
- goto error;
-}
-
-/*
- * Dispose of a keyring list after the RCU grace period, freeing the unlinked
- * key
- */
-static void keyring_unlink_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist =
- container_of(rcu, struct keyring_list, rcu);
-
- if (klist->delkey != USHRT_MAX)
- key_put(klist->keys[klist->delkey]);
- kfree(klist);
+ return PTR_ERR(ctx.result) == -EAGAIN ? 0 : PTR_ERR(ctx.result);
}
/*
 * Preallocate memory so that a key can be linked into a keyring.
*/
-int __key_link_begin(struct key *keyring, const struct key_type *type,
- const char *description, unsigned long *_prealloc)
+int __key_link_begin(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit **_edit)
__acquires(&keyring->sem)
+ __acquires(&keyring_serialise_link_sem)
{
- struct keyring_list *klist, *nklist;
- unsigned long prealloc;
- unsigned max;
- size_t size;
- int loop, ret;
+ struct assoc_array_edit *edit;
+ int ret;
+
+ kenter("%d,%s,%s,",
+ keyring->serial, index_key->type->name, index_key->description);
- kenter("%d,%s,%s,", key_serial(keyring), type->name, description);
+ BUG_ON(index_key->desc_len == 0);
if (keyring->type != &key_type_keyring)
return -ENOTDIR;
@@ -741,93 +1063,39 @@ int __key_link_begin(struct key *keyring, const struct key_type *type,
/* serialise link/link calls to prevent parallel calls causing a cycle
* when linking two keyring in opposite orders */
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
down_write(&keyring_serialise_link_sem);
- klist = rcu_dereference_locked_keyring(keyring);
-
- /* see if there's a matching key we can displace */
- if (klist && klist->nkeys > 0) {
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- if (klist->keys[loop]->type == type &&
- strcmp(klist->keys[loop]->description,
- description) == 0
- ) {
- /* found a match - we'll replace this one with
- * the new key */
- size = sizeof(struct key *) * klist->maxkeys;
- size += sizeof(*klist);
- BUG_ON(size > PAGE_SIZE);
-
- ret = -ENOMEM;
- nklist = kmemdup(klist, size, GFP_KERNEL);
- if (!nklist)
- goto error_sem;
-
- /* note replacement slot */
- klist->delkey = nklist->delkey = loop;
- prealloc = (unsigned long)nklist;
- goto done;
- }
- }
- }
-
- /* check that we aren't going to overrun the user's quota */
- ret = key_payload_reserve(keyring,
- keyring->datalen + KEYQUOTA_LINK_BYTES);
- if (ret < 0)
+ /* Create an edit script that will insert/replace the key in the
+ * keyring tree.
+ */
+ edit = assoc_array_insert(&keyring->keys,
+ &keyring_assoc_array_ops,
+ index_key,
+ NULL);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
goto error_sem;
+ }
- if (klist && klist->nkeys < klist->maxkeys) {
- /* there's sufficient slack space to append directly */
- nklist = NULL;
- prealloc = KEY_LINK_FIXQUOTA;
- } else {
- /* grow the key list */
- max = 4;
- if (klist)
- max += klist->maxkeys;
-
- ret = -ENFILE;
- if (max > USHRT_MAX - 1)
- goto error_quota;
- size = sizeof(*klist) + sizeof(struct key *) * max;
- if (size > PAGE_SIZE)
- goto error_quota;
-
- ret = -ENOMEM;
- nklist = kmalloc(size, GFP_KERNEL);
- if (!nklist)
- goto error_quota;
-
- nklist->maxkeys = max;
- if (klist) {
- memcpy(nklist->keys, klist->keys,
- sizeof(struct key *) * klist->nkeys);
- nklist->delkey = klist->nkeys;
- nklist->nkeys = klist->nkeys + 1;
- klist->delkey = USHRT_MAX;
- } else {
- nklist->nkeys = 1;
- nklist->delkey = 0;
- }
-
- /* add the key into the new space */
- nklist->keys[nklist->delkey] = NULL;
+ /* If we're not replacing a link in-place then we're going to need some
+ * extra quota.
+ */
+ if (!edit->dead_leaf) {
+ ret = key_payload_reserve(keyring,
+ keyring->datalen + KEYQUOTA_LINK_BYTES);
+ if (ret < 0)
+ goto error_cancel;
}
- prealloc = (unsigned long)nklist | KEY_LINK_FIXQUOTA;
-done:
- *_prealloc = prealloc;
+ *_edit = edit;
kleave(" = 0");
return 0;
-error_quota:
- /* undo the quota changes */
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
+error_cancel:
+ assoc_array_cancel_edit(edit);
error_sem:
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
error_krsem:
up_write(&keyring->sem);
@@ -858,43 +1126,12 @@ int __key_link_check_live_key(struct key *keyring, struct key *key)
* holds at most one link to any given key of a particular type+description
* combination.
*/
-void __key_link(struct key *keyring, struct key *key,
- unsigned long *_prealloc)
+void __key_link(struct key *key, struct assoc_array_edit **_edit)
{
- struct keyring_list *klist, *nklist;
-
- nklist = (struct keyring_list *)(*_prealloc & ~KEY_LINK_FIXQUOTA);
- *_prealloc = 0;
-
- kenter("%d,%d,%p", keyring->serial, key->serial, nklist);
-
- klist = rcu_dereference_locked_keyring(keyring);
-
- atomic_inc(&key->usage);
-
- /* there's a matching key we can displace or an empty slot in a newly
- * allocated list we can fill */
- if (nklist) {
- kdebug("replace %hu/%hu/%hu",
- nklist->delkey, nklist->nkeys, nklist->maxkeys);
-
- nklist->keys[nklist->delkey] = key;
-
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- /* dispose of the old keyring list and, if there was one, the
- * displaced key */
- if (klist) {
- kdebug("dispose %hu/%hu/%hu",
- klist->delkey, klist->nkeys, klist->maxkeys);
- call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
- }
- } else {
- /* there's sufficient slack space to append directly */
- klist->keys[klist->nkeys] = key;
- smp_wmb();
- klist->nkeys++;
- }
+ __key_get(key);
+ assoc_array_insert_set_object(*_edit, keyring_key_to_ptr(key));
+ assoc_array_apply_edit(*_edit);
+ *_edit = NULL;
}
/*
@@ -902,23 +1139,22 @@ void __key_link(struct key *keyring, struct key *key,
*
* Must be called with __key_link_begin() having being called.
*/
-void __key_link_end(struct key *keyring, struct key_type *type,
- unsigned long prealloc)
+void __key_link_end(struct key *keyring,
+ const struct keyring_index_key *index_key,
+ struct assoc_array_edit *edit)
__releases(&keyring->sem)
+ __releases(&keyring_serialise_link_sem)
{
- BUG_ON(type == NULL);
- BUG_ON(type->name == NULL);
- kenter("%d,%s,%lx", keyring->serial, type->name, prealloc);
+ BUG_ON(index_key->type == NULL);
+ kenter("%d,%s,", keyring->serial, index_key->type->name);
- if (type == &key_type_keyring)
+ if (index_key->type == &key_type_keyring)
up_write(&keyring_serialise_link_sem);
- if (prealloc) {
- if (prealloc & KEY_LINK_FIXQUOTA)
- key_payload_reserve(keyring,
- keyring->datalen -
- KEYQUOTA_LINK_BYTES);
- kfree((struct keyring_list *)(prealloc & ~KEY_LINK_FIXQUOTA));
+ if (edit && !edit->dead_leaf) {
+ key_payload_reserve(keyring,
+ keyring->datalen - KEYQUOTA_LINK_BYTES);
+ assoc_array_cancel_edit(edit);
}
up_write(&keyring->sem);
}
@@ -945,20 +1181,28 @@ void __key_link_end(struct key *keyring, struct key_type *type,
*/
int key_link(struct key *keyring, struct key *key)
{
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
int ret;
+ kenter("{%d,%d}", keyring->serial, atomic_read(&keyring->usage));
+
key_check(keyring);
key_check(key);
- ret = __key_link_begin(keyring, key->type, key->description, &prealloc);
+ if (test_bit(KEY_FLAG_TRUSTED_ONLY, &keyring->flags) &&
+ !test_bit(KEY_FLAG_TRUSTED, &key->flags))
+ return -EPERM;
+
+ ret = __key_link_begin(keyring, &key->index_key, &edit);
if (ret == 0) {
+ kdebug("begun {%d,%d}", keyring->serial, atomic_read(&keyring->usage));
ret = __key_link_check_live_key(keyring, key);
if (ret == 0)
- __key_link(keyring, key, &prealloc);
- __key_link_end(keyring, key->type, prealloc);
+ __key_link(key, &edit);
+ __key_link_end(keyring, &key->index_key, edit);
}
+ kleave(" = %d {%d,%d}", ret, keyring->serial, atomic_read(&keyring->usage));
return ret;
}
EXPORT_SYMBOL(key_link);
@@ -982,90 +1226,37 @@ EXPORT_SYMBOL(key_link);
*/
int key_unlink(struct key *keyring, struct key *key)
{
- struct keyring_list *klist, *nklist;
- int loop, ret;
+ struct assoc_array_edit *edit;
+ int ret;
key_check(keyring);
key_check(key);
- ret = -ENOTDIR;
if (keyring->type != &key_type_keyring)
- goto error;
+ return -ENOTDIR;
down_write(&keyring->sem);
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* search the keyring for the key */
- for (loop = 0; loop < klist->nkeys; loop++)
- if (klist->keys[loop] == key)
- goto key_is_present;
+ edit = assoc_array_delete(&keyring->keys, &keyring_assoc_array_ops,
+ &key->index_key);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ goto error;
}
-
- up_write(&keyring->sem);
ret = -ENOENT;
- goto error;
-
-key_is_present:
- /* we need to copy the key list for RCU purposes */
- nklist = kmalloc(sizeof(*klist) +
- sizeof(struct key *) * klist->maxkeys,
- GFP_KERNEL);
- if (!nklist)
- goto nomem;
- nklist->maxkeys = klist->maxkeys;
- nklist->nkeys = klist->nkeys - 1;
-
- if (loop > 0)
- memcpy(&nklist->keys[0],
- &klist->keys[0],
- loop * sizeof(struct key *));
-
- if (loop < nklist->nkeys)
- memcpy(&nklist->keys[loop],
- &klist->keys[loop + 1],
- (nklist->nkeys - loop) * sizeof(struct key *));
-
- /* adjust the user's quota */
- key_payload_reserve(keyring,
- keyring->datalen - KEYQUOTA_LINK_BYTES);
-
- rcu_assign_pointer(keyring->payload.subscriptions, nklist);
-
- up_write(&keyring->sem);
-
- /* schedule for later cleanup */
- klist->delkey = loop;
- call_rcu(&klist->rcu, keyring_unlink_rcu_disposal);
+ if (edit == NULL)
+ goto error;
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, keyring->datalen - KEYQUOTA_LINK_BYTES);
ret = 0;
error:
- return ret;
-nomem:
- ret = -ENOMEM;
up_write(&keyring->sem);
- goto error;
+ return ret;
}
EXPORT_SYMBOL(key_unlink);
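The link/unlink pair maps straight onto KEYCTL_LINK and KEYCTL_UNLINK from userspace, and the cycle check above is what turns a back-link into -EDEADLK. A rough sketch (not part of the patch):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/keyctl.h>

static long xkeyctl3(int cmd, long a, long b)
{
	return syscall(__NR_keyctl, cmd, a, b);
}

int main(void)
{
	/* Create two empty keyrings in the session keyring (sample names). */
	long a = syscall(__NR_add_key, "keyring", "ring-a", NULL, 0,
			 KEY_SPEC_SESSION_KEYRING);
	long b = syscall(__NR_add_key, "keyring", "ring-b", NULL, 0,
			 KEY_SPEC_SESSION_KEYRING);
	if (a < 0 || b < 0)
		return 1;

	if (xkeyctl3(KEYCTL_LINK, b, a) < 0)	/* link B into A: fine */
		perror("link b->a");
	if (xkeyctl3(KEYCTL_LINK, a, b) < 0)	/* link A into B: -EDEADLK */
		perror("link a->b");
	if (xkeyctl3(KEYCTL_UNLINK, b, a) < 0)	/* remove B from A again */
		perror("unlink");
	return 0;
}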
-/*
- * Dispose of a keyring list after the RCU grace period, releasing the keys it
- * links to.
- */
-static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
-{
- struct keyring_list *klist;
- int loop;
-
- klist = container_of(rcu, struct keyring_list, rcu);
-
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- key_put(klist->keys[loop]);
-
- kfree(klist);
-}
-
/**
* keyring_clear - Clear a keyring
* @keyring: The keyring to clear.
@@ -1076,33 +1267,25 @@ static void keyring_clear_rcu_disposal(struct rcu_head *rcu)
*/
int keyring_clear(struct key *keyring)
{
- struct keyring_list *klist;
+ struct assoc_array_edit *edit;
int ret;
- ret = -ENOTDIR;
- if (keyring->type == &key_type_keyring) {
- /* detach the pointer block with the locks held */
- down_write(&keyring->sem);
-
- klist = rcu_dereference_locked_keyring(keyring);
- if (klist) {
- /* adjust the quota */
- key_payload_reserve(keyring,
- sizeof(struct keyring_list));
-
- rcu_assign_pointer(keyring->payload.subscriptions,
- NULL);
- }
-
- up_write(&keyring->sem);
+ if (keyring->type != &key_type_keyring)
+ return -ENOTDIR;
- /* free the keys after the locks have been dropped */
- if (klist)
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
+ down_write(&keyring->sem);
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (IS_ERR(edit)) {
+ ret = PTR_ERR(edit);
+ } else {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
ret = 0;
}
+ up_write(&keyring->sem);
return ret;
}
EXPORT_SYMBOL(keyring_clear);
@@ -1114,119 +1297,68 @@ EXPORT_SYMBOL(keyring_clear);
*/
static void keyring_revoke(struct key *keyring)
{
- struct keyring_list *klist;
+ struct assoc_array_edit *edit;
- klist = rcu_dereference_locked_keyring(keyring);
+ edit = assoc_array_clear(&keyring->keys, &keyring_assoc_array_ops);
+ if (!IS_ERR(edit)) {
+ if (edit)
+ assoc_array_apply_edit(edit);
+ key_payload_reserve(keyring, 0);
+ }
+}
- /* adjust the quota */
- key_payload_reserve(keyring, 0);
+static bool keyring_gc_select_iterator(void *object, void *iterator_data)
+{
+ struct key *key = keyring_ptr_to_key(object);
+ time_t *limit = iterator_data;
- if (klist) {
- rcu_assign_pointer(keyring->payload.subscriptions, NULL);
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
- }
+ if (key_is_dead(key, *limit))
+ return false;
+ key_get(key);
+ return true;
}
-/*
- * Determine whether a key is dead.
- */
-static bool key_is_dead(struct key *key, time_t limit)
+static int keyring_gc_check_iterator(const void *object, void *iterator_data)
{
- return test_bit(KEY_FLAG_DEAD, &key->flags) ||
- (key->expiry > 0 && key->expiry <= limit);
+ const struct key *key = keyring_ptr_to_key(object);
+ time_t *limit = iterator_data;
+
+ key_check(key);
+ return key_is_dead(key, *limit);
}
/*
- * Collect garbage from the contents of a keyring, replacing the old list with
- * a new one with the pointers all shuffled down.
+ * Garbage collect pointers from a keyring.
*
- * Dead keys are classed as oned that are flagged as being dead or are revoked,
- * expired or negative keys that were revoked or expired before the specified
- * limit.
+ * Not called with any locks held. The keyring's key struct will not be
+ * deallocated under us as only our caller may deallocate it.
*/
void keyring_gc(struct key *keyring, time_t limit)
{
- struct keyring_list *klist, *new;
- struct key *key;
- int loop, keep, max;
-
- kenter("{%x,%s}", key_serial(keyring), keyring->description);
-
- down_write(&keyring->sem);
-
- klist = rcu_dereference_locked_keyring(keyring);
- if (!klist)
- goto no_klist;
-
- /* work out how many subscriptions we're keeping */
- keep = 0;
- for (loop = klist->nkeys - 1; loop >= 0; loop--)
- if (!key_is_dead(klist->keys[loop], limit))
- keep++;
-
- if (keep == klist->nkeys)
- goto just_return;
-
- /* allocate a new keyring payload */
- max = roundup(keep, 4);
- new = kmalloc(sizeof(struct keyring_list) + max * sizeof(struct key *),
- GFP_KERNEL);
- if (!new)
- goto nomem;
- new->maxkeys = max;
- new->nkeys = 0;
- new->delkey = 0;
-
- /* install the live keys
- * - must take care as expired keys may be updated back to life
- */
- keep = 0;
- for (loop = klist->nkeys - 1; loop >= 0; loop--) {
- key = klist->keys[loop];
- if (!key_is_dead(key, limit)) {
- if (keep >= max)
- goto discard_new;
- new->keys[keep++] = key_get(key);
- }
- }
- new->nkeys = keep;
-
- /* adjust the quota */
- key_payload_reserve(keyring,
- sizeof(struct keyring_list) +
- KEYQUOTA_LINK_BYTES * keep);
+ int result;
- if (keep == 0) {
- rcu_assign_pointer(keyring->payload.subscriptions, NULL);
- kfree(new);
- } else {
- rcu_assign_pointer(keyring->payload.subscriptions, new);
- }
+ kenter("%x{%s}", keyring->serial, keyring->description ?: "");
- up_write(&keyring->sem);
+ if (keyring->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED)))
+ goto dont_gc;
- call_rcu(&klist->rcu, keyring_clear_rcu_disposal);
- kleave(" [yes]");
- return;
-
-discard_new:
- new->nkeys = keep;
- keyring_clear_rcu_disposal(&new->rcu);
- up_write(&keyring->sem);
- kleave(" [discard]");
- return;
-
-just_return:
- up_write(&keyring->sem);
- kleave(" [no dead]");
- return;
+ /* scan the keyring looking for dead keys */
+ rcu_read_lock();
+ result = assoc_array_iterate(&keyring->keys,
+ keyring_gc_check_iterator, &limit);
+ rcu_read_unlock();
+ if (result == true)
+ goto do_gc;
-no_klist:
- up_write(&keyring->sem);
- kleave(" [no_klist]");
+dont_gc:
+ kleave(" [no gc]");
return;
-nomem:
+do_gc:
+ down_write(&keyring->sem);
+ assoc_array_gc(&keyring->keys, &keyring_assoc_array_ops,
+ keyring_gc_select_iterator, &limit);
up_write(&keyring->sem);
- kleave(" [oom]");
+ kleave(" [gc]");
}
diff --git a/security/keys/permission.c b/security/keys/permission.c
index c35b5229e3c..732cc0beffd 100644
--- a/security/keys/permission.c
+++ b/security/keys/permission.c
@@ -28,7 +28,7 @@
* permissions bits or the LSM check.
*/
int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
- key_perm_t perm)
+ unsigned perm)
{
struct key *key;
key_perm_t kperm;
@@ -36,19 +36,16 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
key = key_ref_to_ptr(key_ref);
- if (key->user->user_ns != cred->user->user_ns)
- goto use_other_perms;
-
/* use the second 8-bits of permissions for keys the caller owns */
- if (key->uid == cred->fsuid) {
+ if (uid_eq(key->uid, cred->fsuid)) {
kperm = key->perm >> 16;
goto use_these_perms;
}
/* use the third 8-bits of permissions for keys the caller has a group
* membership in common with */
- if (key->gid != -1 && key->perm & KEY_GRP_ALL) {
- if (key->gid == cred->fsgid) {
+ if (gid_valid(key->gid) && key->perm & KEY_GRP_ALL) {
+ if (gid_eq(key->gid, cred->fsgid)) {
kperm = key->perm >> 8;
goto use_these_perms;
}
@@ -60,8 +57,6 @@ int key_task_permission(const key_ref_t key_ref, const struct cred *cred,
}
}
-use_other_perms:
-
/* otherwise use the least-significant 8-bits */
kperm = key->perm;
@@ -73,7 +68,7 @@ use_these_perms:
if (is_key_possessed(key_ref))
kperm |= key->perm >> 24;
- kperm = kperm & perm & KEY_ALL;
+ kperm = kperm & perm & KEY_NEED_ALL;
if (kperm != perm)
return -EACCES;
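For reference, key->perm packs four 8-bit permission sets (possessor, user, group, other) from the top byte down, which is why the shifts above are 24, 16 and 8. A simplified sketch of the selection logic (supplementary-group handling omitted; not part of the patch):

static unsigned example_applicable_perms(const struct key *key,
					 const struct cred *cred,
					 bool possessed)
{
	unsigned kperm;

	if (uid_eq(key->uid, cred->fsuid))
		kperm = key->perm >> 16;	/* user byte */
	else if (gid_valid(key->gid) && (key->perm & KEY_GRP_ALL) &&
		 gid_eq(key->gid, cred->fsgid))
		kperm = key->perm >> 8;		/* group byte */
	else
		kperm = key->perm;		/* other byte */

	if (possessed)
		kperm |= key->perm >> 24;	/* possessor byte */

	return kperm & KEY_NEED_ALL;
}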
@@ -87,32 +82,29 @@ EXPORT_SYMBOL(key_task_permission);
* key_validate - Validate a key.
* @key: The key to be validated.
*
- * Check that a key is valid, returning 0 if the key is okay, -EKEYREVOKED if
- * the key's type has been removed or if the key has been revoked or
- * -EKEYEXPIRED if the key has expired.
+ * Check that a key is valid, returning 0 if the key is okay, -ENOKEY if the
+ * key is invalidated, -EKEYREVOKED if the key's type has been removed or if
+ * the key has been revoked or -EKEYEXPIRED if the key has expired.
*/
-int key_validate(struct key *key)
+int key_validate(const struct key *key)
{
- struct timespec now;
- int ret = 0;
-
- if (key) {
- /* check it's still accessible */
- ret = -EKEYREVOKED;
- if (test_bit(KEY_FLAG_REVOKED, &key->flags) ||
- test_bit(KEY_FLAG_DEAD, &key->flags))
- goto error;
-
- /* check it hasn't expired */
- ret = 0;
- if (key->expiry) {
- now = current_kernel_time();
- if (now.tv_sec >= key->expiry)
- ret = -EKEYEXPIRED;
- }
+ unsigned long flags = key->flags;
+
+ if (flags & (1 << KEY_FLAG_INVALIDATED))
+ return -ENOKEY;
+
+ /* check it's still accessible */
+ if (flags & ((1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD)))
+ return -EKEYREVOKED;
+
+ /* check it hasn't expired */
+ if (key->expiry) {
+ struct timespec now = current_kernel_time();
+ if (now.tv_sec >= key->expiry)
+ return -EKEYEXPIRED;
}
-error:
- return ret;
+ return 0;
}
EXPORT_SYMBOL(key_validate);
diff --git a/security/keys/persistent.c b/security/keys/persistent.c
new file mode 100644
index 00000000000..c9fae5ea89f
--- /dev/null
+++ b/security/keys/persistent.c
@@ -0,0 +1,167 @@
+/* General persistent per-UID keyrings register
+ *
+ * Copyright (C) 2013 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/user_namespace.h>
+#include "internal.h"
+
+unsigned persistent_keyring_expiry = 3 * 24 * 3600; /* Expire after 3 days of non-use */
+
+/*
+ * Create the persistent keyring register for the current user namespace.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static int key_create_persistent_register(struct user_namespace *ns)
+{
+ struct key *reg = keyring_alloc(".persistent_register",
+ KUIDT_INIT(0), KGIDT_INIT(0),
+ current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA, NULL);
+ if (IS_ERR(reg))
+ return PTR_ERR(reg);
+
+ ns->persistent_keyring_register = reg;
+ return 0;
+}
+
+/*
+ * Create the persistent keyring for the specified user.
+ *
+ * Called with the namespace's sem locked for writing.
+ */
+static key_ref_t key_create_persistent(struct user_namespace *ns, kuid_t uid,
+ struct keyring_index_key *index_key)
+{
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+
+ if (!ns->persistent_keyring_register) {
+ long err = key_create_persistent_register(ns);
+ if (err < 0)
+ return ERR_PTR(err);
+ } else {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ persistent_ref = find_key_to_update(reg_ref, index_key);
+ if (persistent_ref)
+ return persistent_ref;
+ }
+
+ persistent = keyring_alloc(index_key->description,
+ uid, INVALID_GID, current_cred(),
+ ((KEY_POS_ALL & ~KEY_POS_SETATTR) |
+ KEY_USR_VIEW | KEY_USR_READ),
+ KEY_ALLOC_NOT_IN_QUOTA,
+ ns->persistent_keyring_register);
+ if (IS_ERR(persistent))
+ return ERR_CAST(persistent);
+
+ return make_key_ref(persistent, true);
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+static long key_get_persistent(struct user_namespace *ns, kuid_t uid,
+ key_ref_t dest_ref)
+{
+ struct keyring_index_key index_key;
+ struct key *persistent;
+ key_ref_t reg_ref, persistent_ref;
+ char buf[32];
+ long ret;
+
+ /* Look in the register if it exists */
+ index_key.type = &key_type_keyring;
+ index_key.description = buf;
+ index_key.desc_len = sprintf(buf, "_persistent.%u", from_kuid(ns, uid));
+
+ if (ns->persistent_keyring_register) {
+ reg_ref = make_key_ref(ns->persistent_keyring_register, true);
+ down_read(&ns->persistent_keyring_register_sem);
+ persistent_ref = find_key_to_update(reg_ref, &index_key);
+ up_read(&ns->persistent_keyring_register_sem);
+
+ if (persistent_ref)
+ goto found;
+ }
+
+ /* It wasn't in the register, so we'll need to create it. We might
+ * also need to create the register.
+ */
+ down_write(&ns->persistent_keyring_register_sem);
+ persistent_ref = key_create_persistent(ns, uid, &index_key);
+ up_write(&ns->persistent_keyring_register_sem);
+ if (!IS_ERR(persistent_ref))
+ goto found;
+
+ return PTR_ERR(persistent_ref);
+
+found:
+ ret = key_task_permission(persistent_ref, current_cred(), KEY_NEED_LINK);
+ if (ret == 0) {
+ persistent = key_ref_to_ptr(persistent_ref);
+ ret = key_link(key_ref_to_ptr(dest_ref), persistent);
+ if (ret == 0) {
+ key_set_timeout(persistent, persistent_keyring_expiry);
+ ret = persistent->serial;
+ }
+ }
+
+ key_ref_put(persistent_ref);
+ return ret;
+}
+
+/*
+ * Get the persistent keyring for a specific UID and link it to the nominated
+ * keyring.
+ */
+long keyctl_get_persistent(uid_t _uid, key_serial_t destid)
+{
+ struct user_namespace *ns = current_user_ns();
+ key_ref_t dest_ref;
+ kuid_t uid;
+ long ret;
+
+ /* -1 indicates the current user */
+ if (_uid == (uid_t)-1) {
+ uid = current_uid();
+ } else {
+ uid = make_kuid(ns, _uid);
+ if (!uid_valid(uid))
+ return -EINVAL;
+
+ /* You can only see your own persistent cache if you're not
+ * sufficiently privileged.
+ */
+ if (!uid_eq(uid, current_uid()) &&
+ !uid_eq(uid, current_euid()) &&
+ !ns_capable(ns, CAP_SETUID))
+ return -EPERM;
+ }
+
+ /* There must be a destination keyring */
+ dest_ref = lookup_user_key(destid, KEY_LOOKUP_CREATE, KEY_NEED_WRITE);
+ if (IS_ERR(dest_ref))
+ return PTR_ERR(dest_ref);
+ if (key_ref_to_ptr(dest_ref)->type != &key_type_keyring) {
+ ret = -ENOTDIR;
+ goto out_put_dest;
+ }
+
+ ret = key_get_persistent(ns, uid, dest_ref);
+
+out_put_dest:
+ key_ref_put(dest_ref);
+ return ret;
+}
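For reference, userspace reaches this code through the keyctl(2) multiplexor. A minimal sketch, assuming the KEYCTL_GET_PERSISTENT command value and the raw-syscall route (the keyutils wrapper keyctl_get_persistent(), where packaged, is the usual interface):

    /* Sketch: fetch the caller's persistent keyring and link it into the
     * session keyring.  KEYCTL_GET_PERSISTENT == 22 is assumed to match
     * the uapi value added alongside this series. */
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <sys/types.h>
    #include <unistd.h>

    #ifndef KEYCTL_GET_PERSISTENT
    #define KEYCTL_GET_PERSISTENT    22    /* assumed uapi value */
    #endif
    #define KEY_SPEC_SESSION_KEYRING -3

    int main(void)
    {
        /* a uid of -1 means "the current user", as in keyctl_get_persistent() above */
        long serial = syscall(SYS_keyctl, KEYCTL_GET_PERSISTENT,
                              (uid_t)-1, KEY_SPEC_SESSION_KEYRING);
        if (serial < 0) {
            perror("keyctl(KEYCTL_GET_PERSISTENT)");
            return 1;
        }
        printf("persistent keyring serial: %ld\n", serial);
        return 0;
    }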
diff --git a/security/keys/proc.c b/security/keys/proc.c
index 49bbc97943a..d3f6f2fd21d 100644
--- a/security/keys/proc.c
+++ b/security/keys/proc.c
@@ -88,14 +88,14 @@ __initcall(key_proc_init);
*/
#ifdef CONFIG_KEYS_DEBUG_PROC_KEYS
-static struct rb_node *key_serial_next(struct rb_node *n)
+static struct rb_node *key_serial_next(struct seq_file *p, struct rb_node *n)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = seq_user_ns(p);
n = rb_next(n);
while (n) {
struct key *key = rb_entry(n, struct key, serial_node);
- if (key->user->user_ns == user_ns)
+ if (kuid_has_mapping(user_ns, key->user->uid))
break;
n = rb_next(n);
}
@@ -107,9 +107,9 @@ static int proc_keys_open(struct inode *inode, struct file *file)
return seq_open(file, &proc_keys_ops);
}
-static struct key *find_ge_key(key_serial_t id)
+static struct key *find_ge_key(struct seq_file *p, key_serial_t id)
{
- struct user_namespace *user_ns = current_user_ns();
+ struct user_namespace *user_ns = seq_user_ns(p);
struct rb_node *n = key_serial_tree.rb_node;
struct key *minkey = NULL;
@@ -132,7 +132,7 @@ static struct key *find_ge_key(key_serial_t id)
return NULL;
for (;;) {
- if (minkey->user->user_ns == user_ns)
+ if (kuid_has_mapping(user_ns, minkey->user->uid))
return minkey;
n = rb_next(&minkey->serial_node);
if (!n)
@@ -151,7 +151,7 @@ static void *proc_keys_start(struct seq_file *p, loff_t *_pos)
if (*_pos > INT_MAX)
return NULL;
- key = find_ge_key(pos);
+ key = find_ge_key(p, pos);
if (!key)
return NULL;
*_pos = key->serial;
@@ -168,7 +168,7 @@ static void *proc_keys_next(struct seq_file *p, void *v, loff_t *_pos)
{
struct rb_node *n;
- n = key_serial_next(v);
+ n = key_serial_next(p, v);
if (n)
*_pos = key_node_serial(n);
return n;
@@ -182,7 +182,6 @@ static void proc_keys_stop(struct seq_file *p, void *v)
static int proc_keys_show(struct seq_file *m, void *v)
{
- const struct cred *cred = current_cred();
struct rb_node *_p = v;
struct key *key = rb_entry(_p, struct key, serial_node);
struct timespec now;
@@ -191,15 +190,23 @@ static int proc_keys_show(struct seq_file *m, void *v)
char xbuf[12];
int rc;
+ struct keyring_search_context ctx = {
+ .index_key.type = key->type,
+ .index_key.description = key->description,
+ .cred = current_cred(),
+ .match = lookup_user_key_possessed,
+ .match_data = key,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_LOOKUP_DIRECT),
+ };
+
key_ref = make_key_ref(key, 0);
/* determine if the key is possessed by this process (a test we can
* skip if the key does not indicate the possessor can view it
*/
if (key->perm & KEY_POS_VIEW) {
- skey_ref = search_my_process_keyrings(key->type, key,
- lookup_user_key_possessed,
- true, cred);
+ skey_ref = search_my_process_keyrings(&ctx);
if (!IS_ERR(skey_ref)) {
key_ref_put(skey_ref);
key_ref = make_key_ref(key, 1);
@@ -211,7 +218,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
* - the caller holds a spinlock, and thus the RCU read lock, making our
* access to __current_cred() safe
*/
- rc = key_task_permission(key_ref, cred, KEY_VIEW);
+ rc = key_task_permission(key_ref, ctx.cred, KEY_NEED_VIEW);
if (rc < 0)
return 0;
@@ -242,7 +249,7 @@ static int proc_keys_show(struct seq_file *m, void *v)
#define showflag(KEY, LETTER, FLAG) \
(test_bit(FLAG, &(KEY)->flags) ? LETTER : '-')
- seq_printf(m, "%08x %c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
+ seq_printf(m, "%08x %c%c%c%c%c%c%c %5d %4s %08x %5d %5d %-9.9s ",
key->serial,
showflag(key, 'I', KEY_FLAG_INSTANTIATED),
showflag(key, 'R', KEY_FLAG_REVOKED),
@@ -250,11 +257,12 @@ static int proc_keys_show(struct seq_file *m, void *v)
showflag(key, 'Q', KEY_FLAG_IN_QUOTA),
showflag(key, 'U', KEY_FLAG_USER_CONSTRUCT),
showflag(key, 'N', KEY_FLAG_NEGATIVE),
+ showflag(key, 'i', KEY_FLAG_INVALIDATED),
atomic_read(&key->usage),
xbuf,
key->perm,
- key->uid,
- key->gid,
+ from_kuid_munged(seq_user_ns(m), key->uid),
+ from_kgid_munged(seq_user_ns(m), key->gid),
key->type->name);
#undef showflag
@@ -269,26 +277,26 @@ static int proc_keys_show(struct seq_file *m, void *v)
#endif /* CONFIG_KEYS_DEBUG_PROC_KEYS */
-static struct rb_node *__key_user_next(struct rb_node *n)
+static struct rb_node *__key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
while (n) {
struct key_user *user = rb_entry(n, struct key_user, node);
- if (user->user_ns == current_user_ns())
+ if (kuid_has_mapping(user_ns, user->uid))
break;
n = rb_next(n);
}
return n;
}
-static struct rb_node *key_user_next(struct rb_node *n)
+static struct rb_node *key_user_next(struct user_namespace *user_ns, struct rb_node *n)
{
- return __key_user_next(rb_next(n));
+ return __key_user_next(user_ns, rb_next(n));
}
-static struct rb_node *key_user_first(struct rb_root *r)
+static struct rb_node *key_user_first(struct user_namespace *user_ns, struct rb_root *r)
{
struct rb_node *n = rb_first(r);
- return __key_user_next(n);
+ return __key_user_next(user_ns, n);
}
/*
@@ -308,10 +316,10 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
spin_lock(&key_user_lock);
- _p = key_user_first(&key_user_tree);
+ _p = key_user_first(seq_user_ns(p), &key_user_tree);
while (pos > 0 && _p) {
pos--;
- _p = key_user_next(_p);
+ _p = key_user_next(seq_user_ns(p), _p);
}
return _p;
@@ -320,7 +328,7 @@ static void *proc_key_users_start(struct seq_file *p, loff_t *_pos)
static void *proc_key_users_next(struct seq_file *p, void *v, loff_t *_pos)
{
(*_pos)++;
- return key_user_next((struct rb_node *)v);
+ return key_user_next(seq_user_ns(p), (struct rb_node *)v);
}
static void proc_key_users_stop(struct seq_file *p, void *v)
@@ -333,13 +341,13 @@ static int proc_key_users_show(struct seq_file *m, void *v)
{
struct rb_node *_p = v;
struct key_user *user = rb_entry(_p, struct key_user, node);
- unsigned maxkeys = (user->uid == 0) ?
+ unsigned maxkeys = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxkeys : key_quota_maxkeys;
- unsigned maxbytes = (user->uid == 0) ?
+ unsigned maxbytes = uid_eq(user->uid, GLOBAL_ROOT_UID) ?
key_quota_root_maxbytes : key_quota_maxbytes;
seq_printf(m, "%5u: %5d %d/%d %d/%d %d/%d\n",
- user->uid,
+ from_kuid_munged(seq_user_ns(m), user->uid),
atomic_read(&user->usage),
atomic_read(&user->nkeys),
atomic_read(&user->nikeys),
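The proc.c hunks above share one pattern: entries are filtered with kuid_has_mapping() against the reader's namespace obtained from seq_user_ns(), and IDs are printed through from_kuid_munged(). A minimal kernel-style sketch of that pattern, with an illustrative record type that is not part of the patch:

    #include <linux/seq_file.h>
    #include <linux/uidgid.h>
    #include <linux/user_namespace.h>

    /* Illustrative record; stands in for struct key / struct key_user. */
    struct demo_rec {
        kuid_t owner;
        int    serial;
    };

    static int demo_show(struct seq_file *m, struct demo_rec *rec)
    {
        struct user_namespace *user_ns = seq_user_ns(m);

        /* skip entries whose owner has no mapping in the reader's namespace */
        if (!kuid_has_mapping(user_ns, rec->owner))
            return 0;

        /* translate the kuid for display in the reader's namespace */
        seq_printf(m, "%08x %5u\n", rec->serial,
                   from_kuid_munged(user_ns, rec->owner));
        return 0;
    }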
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index 1068cb1939b..0cf8a130a26 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -34,8 +34,7 @@ struct key_user root_key_user = {
.lock = __SPIN_LOCK_UNLOCKED(root_key_user.lock),
.nkeys = ATOMIC_INIT(2),
.nikeys = ATOMIC_INIT(2),
- .uid = 0,
- .user_ns = &init_user_ns,
+ .uid = GLOBAL_ROOT_UID,
};
/*
@@ -46,15 +45,19 @@ int install_user_keyrings(void)
struct user_struct *user;
const struct cred *cred;
struct key *uid_keyring, *session_keyring;
+ key_perm_t user_keyring_perm;
char buf[20];
int ret;
+ uid_t uid;
+ user_keyring_perm = (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_ALL;
cred = current_cred();
user = cred->user;
+ uid = from_kuid(cred->user_ns, user->uid);
- kenter("%p{%u}", user, user->uid);
+ kenter("%p{%u}", user, uid);
- if (user->uid_keyring) {
+ if (user->uid_keyring && user->session_keyring) {
kleave(" = 0 [exist]");
return 0;
}
@@ -67,13 +70,13 @@ int install_user_keyrings(void)
* - there may be one in existence already as it may have been
* pinned by a session, but the user_struct pointing to it
* may have been destroyed by setuid */
- sprintf(buf, "_uid.%u", user->uid);
+ sprintf(buf, "_uid.%u", uid);
uid_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(uid_keyring)) {
- uid_keyring = keyring_alloc(buf, user->uid, (gid_t) -1,
- cred, KEY_ALLOC_IN_QUOTA,
- NULL);
+ uid_keyring = keyring_alloc(buf, user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(uid_keyring)) {
ret = PTR_ERR(uid_keyring);
goto error;
@@ -82,13 +85,14 @@ int install_user_keyrings(void)
/* get a default session keyring (which might also exist
* already) */
- sprintf(buf, "_uid_ses.%u", user->uid);
+ sprintf(buf, "_uid_ses.%u", uid);
session_keyring = find_keyring_by_name(buf, true);
if (IS_ERR(session_keyring)) {
session_keyring =
- keyring_alloc(buf, user->uid, (gid_t) -1,
- cred, KEY_ALLOC_IN_QUOTA, NULL);
+ keyring_alloc(buf, user->uid, INVALID_GID,
+ cred, user_keyring_perm,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(session_keyring)) {
ret = PTR_ERR(session_keyring);
goto error_release;
@@ -129,6 +133,7 @@ int install_thread_keyring_to_cred(struct cred *new)
struct key *keyring;
keyring = keyring_alloc("_tid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
@@ -169,27 +174,18 @@ static int install_thread_keyring(void)
int install_process_keyring_to_cred(struct cred *new)
{
struct key *keyring;
- int ret;
- if (new->tgcred->process_keyring)
+ if (new->process_keyring)
return -EEXIST;
- keyring = keyring_alloc("_pid", new->uid, new->gid,
- new, KEY_ALLOC_QUOTA_OVERRUN, NULL);
+ keyring = keyring_alloc("_pid", new->uid, new->gid, new,
+ KEY_POS_ALL | KEY_USR_VIEW,
+ KEY_ALLOC_QUOTA_OVERRUN, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
- spin_lock_irq(&new->tgcred->lock);
- if (!new->tgcred->process_keyring) {
- new->tgcred->process_keyring = keyring;
- keyring = NULL;
- ret = 0;
- } else {
- ret = -EEXIST;
- }
- spin_unlock_irq(&new->tgcred->lock);
- key_put(keyring);
- return ret;
+ new->process_keyring = keyring;
+ return 0;
}
/*
@@ -230,29 +226,24 @@ int install_session_keyring_to_cred(struct cred *cred, struct key *keyring)
/* create an empty session keyring */
if (!keyring) {
flags = KEY_ALLOC_QUOTA_OVERRUN;
- if (cred->tgcred->session_keyring)
+ if (cred->session_keyring)
flags = KEY_ALLOC_IN_QUOTA;
- keyring = keyring_alloc("_ses", cred->uid, cred->gid,
- cred, flags, NULL);
+ keyring = keyring_alloc("_ses", cred->uid, cred->gid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
+ flags, NULL);
if (IS_ERR(keyring))
return PTR_ERR(keyring);
} else {
- atomic_inc(&keyring->usage);
+ __key_get(keyring);
}
/* install the keyring */
- spin_lock_irq(&cred->tgcred->lock);
- old = cred->tgcred->session_keyring;
- rcu_assign_pointer(cred->tgcred->session_keyring, keyring);
- spin_unlock_irq(&cred->tgcred->lock);
-
- /* we're using RCU on the pointer, but there's no point synchronising
- * on it if it didn't previously point to anything */
- if (old) {
- synchronize_rcu();
+ old = cred->session_keyring;
+ rcu_assign_pointer(cred->session_keyring, keyring);
+
+ if (old)
key_put(old);
- }
return 0;
}
@@ -328,11 +319,7 @@ void key_fsgid_changed(struct task_struct *tsk)
* In the case of a successful return, the possession attribute is set on the
* returned key reference.
*/
-key_ref_t search_my_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- bool no_state_check,
- const struct cred *cred)
+key_ref_t search_my_process_keyrings(struct keyring_search_context *ctx)
{
key_ref_t key_ref, ret, err;
@@ -348,17 +335,14 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
err = ERR_PTR(-EAGAIN);
/* search the thread keyring first */
- if (cred->thread_keyring) {
+ if (ctx->cred->thread_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->thread_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->thread_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
switch (PTR_ERR(key_ref)) {
case -EAGAIN: /* no key */
- if (ret)
- break;
case -ENOKEY: /* negative key */
ret = key_ref;
break;
@@ -369,10 +353,9 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
/* search the process keyring second */
- if (cred->tgcred->process_keyring) {
+ if (ctx->cred->process_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->tgcred->process_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->process_keyring, 1), ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -390,13 +373,11 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
/* search the session keyring */
- if (cred->tgcred->session_keyring) {
+ if (ctx->cred->session_keyring) {
rcu_read_lock();
key_ref = keyring_search_aux(
- make_key_ref(rcu_dereference(
- cred->tgcred->session_keyring),
- 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(rcu_dereference(ctx->cred->session_keyring), 1),
+ ctx);
rcu_read_unlock();
if (!IS_ERR(key_ref))
@@ -415,10 +396,10 @@ key_ref_t search_my_process_keyrings(struct key_type *type,
}
}
/* or search the user-session keyring */
- else if (cred->user->session_keyring) {
+ else if (ctx->cred->user->session_keyring) {
key_ref = keyring_search_aux(
- make_key_ref(cred->user->session_keyring, 1),
- cred, type, description, match, no_state_check);
+ make_key_ref(ctx->cred->user->session_keyring, 1),
+ ctx);
if (!IS_ERR(key_ref))
goto found;
@@ -450,18 +431,14 @@ found:
*
* Return same as search_my_process_keyrings().
*/
-key_ref_t search_process_keyrings(struct key_type *type,
- const void *description,
- key_match_func_t match,
- const struct cred *cred)
+key_ref_t search_process_keyrings(struct keyring_search_context *ctx)
{
struct request_key_auth *rka;
key_ref_t key_ref, ret = ERR_PTR(-EACCES), err;
might_sleep();
- key_ref = search_my_process_keyrings(type, description, match,
- false, cred);
+ key_ref = search_my_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto found;
err = key_ref;
@@ -470,18 +447,21 @@ key_ref_t search_process_keyrings(struct key_type *type,
* search the keyrings of the process mentioned there
* - we don't permit access to request_key auth keys via this method
*/
- if (cred->request_key_auth &&
- cred == current_cred() &&
- type != &key_type_request_key_auth
+ if (ctx->cred->request_key_auth &&
+ ctx->cred == current_cred() &&
+ ctx->index_key.type != &key_type_request_key_auth
) {
+ const struct cred *cred = ctx->cred;
+
/* defend against the auth key being revoked */
down_read(&cred->request_key_auth->sem);
- if (key_validate(cred->request_key_auth) == 0) {
- rka = cred->request_key_auth->payload.data;
+ if (key_validate(ctx->cred->request_key_auth) == 0) {
+ rka = ctx->cred->request_key_auth->payload.data;
- key_ref = search_process_keyrings(type, description,
- match, rka->cred);
+ ctx->cred = rka->cred;
+ key_ref = search_process_keyrings(ctx);
+ ctx->cred = cred;
up_read(&cred->request_key_auth->sem);
@@ -535,19 +515,23 @@ int lookup_user_key_possessed(const struct key *key, const void *target)
key_ref_t lookup_user_key(key_serial_t id, unsigned long lflags,
key_perm_t perm)
{
+ struct keyring_search_context ctx = {
+ .match = lookup_user_key_possessed,
+ .flags = (KEYRING_SEARCH_NO_STATE_CHECK |
+ KEYRING_SEARCH_LOOKUP_DIRECT),
+ };
struct request_key_auth *rka;
- const struct cred *cred;
struct key *key;
key_ref_t key_ref, skey_ref;
int ret;
try_again:
- cred = get_current_cred();
+ ctx.cred = get_current_cred();
key_ref = ERR_PTR(-ENOKEY);
switch (id) {
case KEY_SPEC_THREAD_KEYRING:
- if (!cred->thread_keyring) {
+ if (!ctx.cred->thread_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
@@ -559,13 +543,13 @@ try_again:
goto reget_creds;
}
- key = cred->thread_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->thread_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_PROCESS_KEYRING:
- if (!cred->tgcred->process_keyring) {
+ if (!ctx.cred->process_keyring) {
if (!(lflags & KEY_LOOKUP_CREATE))
goto error;
@@ -577,13 +561,13 @@ try_again:
goto reget_creds;
}
- key = cred->tgcred->process_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->process_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_SESSION_KEYRING:
- if (!cred->tgcred->session_keyring) {
+ if (!ctx.cred->session_keyring) {
/* always install a session keyring upon access if one
* doesn't exist yet */
ret = install_user_keyrings();
@@ -593,13 +577,13 @@ try_again:
ret = join_session_keyring(NULL);
else
ret = install_session_keyring(
- cred->user->session_keyring);
+ ctx.cred->user->session_keyring);
if (ret < 0)
goto error;
goto reget_creds;
- } else if (cred->tgcred->session_keyring ==
- cred->user->session_keyring &&
+ } else if (ctx.cred->session_keyring ==
+ ctx.cred->user->session_keyring &&
lflags & KEY_LOOKUP_CREATE) {
ret = join_session_keyring(NULL);
if (ret < 0)
@@ -608,33 +592,33 @@ try_again:
}
rcu_read_lock();
- key = rcu_dereference(cred->tgcred->session_keyring);
- atomic_inc(&key->usage);
+ key = rcu_dereference(ctx.cred->session_keyring);
+ __key_get(key);
rcu_read_unlock();
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_KEYRING:
- if (!cred->user->uid_keyring) {
+ if (!ctx.cred->user->uid_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
- key = cred->user->uid_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->user->uid_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_USER_SESSION_KEYRING:
- if (!cred->user->session_keyring) {
+ if (!ctx.cred->user->session_keyring) {
ret = install_user_keyrings();
if (ret < 0)
goto error;
}
- key = cred->user->session_keyring;
- atomic_inc(&key->usage);
+ key = ctx.cred->user->session_keyring;
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
@@ -644,28 +628,29 @@ try_again:
goto error;
case KEY_SPEC_REQKEY_AUTH_KEY:
- key = cred->request_key_auth;
+ key = ctx.cred->request_key_auth;
if (!key)
goto error;
- atomic_inc(&key->usage);
+ __key_get(key);
key_ref = make_key_ref(key, 1);
break;
case KEY_SPEC_REQUESTOR_KEYRING:
- if (!cred->request_key_auth)
+ if (!ctx.cred->request_key_auth)
goto error;
- down_read(&cred->request_key_auth->sem);
- if (cred->request_key_auth->flags & KEY_FLAG_REVOKED) {
+ down_read(&ctx.cred->request_key_auth->sem);
+ if (test_bit(KEY_FLAG_REVOKED,
+ &ctx.cred->request_key_auth->flags)) {
key_ref = ERR_PTR(-EKEYREVOKED);
key = NULL;
} else {
- rka = cred->request_key_auth->payload.data;
+ rka = ctx.cred->request_key_auth->payload.data;
key = rka->dest_keyring;
- atomic_inc(&key->usage);
+ __key_get(key);
}
- up_read(&cred->request_key_auth->sem);
+ up_read(&ctx.cred->request_key_auth->sem);
if (!key)
goto error;
key_ref = make_key_ref(key, 1);
@@ -685,9 +670,13 @@ try_again:
key_ref = make_key_ref(key, 0);
/* check to see if we possess the key */
- skey_ref = search_process_keyrings(key->type, key,
- lookup_user_key_possessed,
- cred);
+ ctx.index_key.type = key->type;
+ ctx.index_key.description = key->description;
+ ctx.index_key.desc_len = strlen(key->description);
+ ctx.match_data = key;
+ kdebug("check possessed");
+ skey_ref = search_process_keyrings(&ctx);
+ kdebug("possessed=%p", skey_ref);
if (!IS_ERR(skey_ref)) {
key_put(key);
@@ -727,12 +716,14 @@ try_again:
goto invalid_key;
/* check the permissions */
- ret = key_task_permission(key_ref, cred, perm);
+ ret = key_task_permission(key_ref, ctx.cred, perm);
if (ret < 0)
goto invalid_key;
+ key->last_used_at = current_kernel_time().tv_sec;
+
error:
- put_cred(cred);
+ put_cred(ctx.cred);
return key_ref;
invalid_key:
@@ -743,7 +734,7 @@ invalid_key:
/* if we attempted to install a keyring, then it may have caused new
* creds to be installed */
reget_creds:
- put_cred(cred);
+ put_cred(ctx.cred);
goto try_again;
}
@@ -765,12 +756,6 @@ long join_session_keyring(const char *name)
struct key *keyring;
long ret, serial;
- /* only permit this if there's a single thread in the thread group -
- * this avoids us having to adjust the creds on all threads and risking
- * ENOMEM */
- if (!current_is_single_threaded())
- return -EMLINK;
-
new = prepare_creds();
if (!new)
return -ENOMEM;
@@ -782,7 +767,7 @@ long join_session_keyring(const char *name)
if (ret < 0)
goto error;
- serial = new->tgcred->session_keyring->serial;
+ serial = new->session_keyring->serial;
ret = commit_creds(new);
if (ret == 0)
ret = serial;
@@ -796,8 +781,10 @@ long join_session_keyring(const char *name)
keyring = find_keyring_by_name(name, false);
if (PTR_ERR(keyring) == -ENOKEY) {
/* not found - try and create a new one */
- keyring = keyring_alloc(name, old->uid, old->gid, old,
- KEY_ALLOC_IN_QUOTA, NULL);
+ keyring = keyring_alloc(
+ name, old->uid, old->gid, old,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ | KEY_USR_LINK,
+ KEY_ALLOC_IN_QUOTA, NULL);
if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
@@ -805,6 +792,9 @@ long join_session_keyring(const char *name)
} else if (IS_ERR(keyring)) {
ret = PTR_ERR(keyring);
goto error2;
+ } else if (keyring == new->session_keyring) {
+ ret = 0;
+ goto error2;
}
/* we've got a keyring - now to install it */
@@ -831,23 +821,16 @@ error:
* Replace a process's session keyring on behalf of one of its children when
* the target process is about to resume userspace execution.
*/
-void key_replace_session_keyring(void)
+void key_change_session_keyring(struct callback_head *twork)
{
- const struct cred *old;
- struct cred *new;
+ const struct cred *old = current_cred();
+ struct cred *new = container_of(twork, struct cred, rcu);
- if (!current->replacement_session_keyring)
- return;
-
- write_lock_irq(&tasklist_lock);
- new = current->replacement_session_keyring;
- current->replacement_session_keyring = NULL;
- write_unlock_irq(&tasklist_lock);
-
- if (!new)
+ if (unlikely(current->flags & PF_EXITING)) {
+ put_cred(new);
return;
+ }
- old = current_cred();
new->uid = old->uid;
new->euid = old->euid;
new->suid = old->suid;
@@ -857,7 +840,7 @@ void key_replace_session_keyring(void)
new->sgid = old->sgid;
new->fsgid = old->fsgid;
new->user = get_uid(old->user);
- new->user_ns = new->user->user_ns;
+ new->user_ns = get_user_ns(old->user_ns);
new->group_info = get_group_info(old->group_info);
new->securebits = old->securebits;
@@ -868,10 +851,19 @@ void key_replace_session_keyring(void)
new->jit_keyring = old->jit_keyring;
new->thread_keyring = key_get(old->thread_keyring);
- new->tgcred->tgid = old->tgcred->tgid;
- new->tgcred->process_keyring = key_get(old->tgcred->process_keyring);
+ new->process_keyring = key_get(old->process_keyring);
security_transfer_creds(new, old);
commit_creds(new);
}
+
+/*
+ * Make sure that root's user and user-session keyrings exist.
+ */
+static int __init init_root_keyring(void)
+{
+ return install_user_keyrings();
+}
+
+late_initcall(init_root_keyring);
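The search paths above now take a single struct keyring_search_context instead of separate type/description/match/cred arguments. A hedged sketch of how a caller fills one in, using only fields that appear in this patch (the wrapper function itself is illustrative):

    #include <linux/cred.h>
    #include <linux/key-type.h>
    #include "internal.h"    /* struct keyring_search_context, per this series */

    /* Illustrative: search the current task's keyrings for a key of the
     * given type and description, mirroring request_key_and_link() below. */
    static key_ref_t demo_search(struct key_type *type, const char *desc)
    {
        struct keyring_search_context ctx = {
            .index_key.type        = type,
            .index_key.description = desc,
            .cred                  = current_cred(),
            .match                 = type->match,
            .match_data            = desc,
            .flags                 = KEYRING_SEARCH_LOOKUP_DIRECT,
        };

        return search_process_keyrings(&ctx);
    }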
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index 82465328c39..381411941cc 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -91,17 +91,17 @@ static void umh_keys_cleanup(struct subprocess_info *info)
* Call a usermode helper with a specific session keyring.
*/
static int call_usermodehelper_keys(char *path, char **argv, char **envp,
- struct key *session_keyring, enum umh_wait wait)
+ struct key *session_keyring, int wait)
{
- gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
- struct subprocess_info *info =
- call_usermodehelper_setup(path, argv, envp, gfp_mask);
+ struct subprocess_info *info;
+ info = call_usermodehelper_setup(path, argv, envp, GFP_KERNEL,
+ umh_keys_init, umh_keys_cleanup,
+ session_keyring);
if (!info)
return -ENOMEM;
- call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
- key_get(session_keyring));
+ key_get(session_keyring);
return call_usermodehelper_exec(info, wait);
}
@@ -133,6 +133,7 @@ static int call_sbin_request_key(struct key_construction *cons,
cred = get_current_cred();
keyring = keyring_alloc(desc, cred->fsuid, cred->fsgid, cred,
+ KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
KEY_ALLOC_QUOTA_OVERRUN, NULL);
put_cred(cred);
if (IS_ERR(keyring)) {
@@ -146,8 +147,8 @@ static int call_sbin_request_key(struct key_construction *cons,
goto error_link;
/* record the UID and GID */
- sprintf(uid_str, "%d", cred->fsuid);
- sprintf(gid_str, "%d", cred->fsgid);
+ sprintf(uid_str, "%d", from_kuid(&init_user_ns, cred->fsuid));
+ sprintf(gid_str, "%d", from_kgid(&init_user_ns, cred->fsgid));
/* we say which key is under construction */
sprintf(key_str, "%d", key->serial);
@@ -157,12 +158,12 @@ static int call_sbin_request_key(struct key_construction *cons,
cred->thread_keyring ? cred->thread_keyring->serial : 0);
prkey = 0;
- if (cred->tgcred->process_keyring)
- prkey = cred->tgcred->process_keyring->serial;
+ if (cred->process_keyring)
+ prkey = cred->process_keyring->serial;
sprintf(keyring_str[1], "%d", prkey);
rcu_read_lock();
- session = rcu_dereference(cred->tgcred->session_keyring);
+ session = rcu_dereference(cred->session_keyring);
if (!session)
session = cred->user->session_keyring;
sskey = session->serial;
@@ -304,14 +305,14 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
break;
case KEY_REQKEY_DEFL_PROCESS_KEYRING:
- dest_keyring = key_get(cred->tgcred->process_keyring);
+ dest_keyring = key_get(cred->process_keyring);
if (dest_keyring)
break;
case KEY_REQKEY_DEFL_SESSION_KEYRING:
rcu_read_lock();
dest_keyring = key_get(
- rcu_dereference(cred->tgcred->session_keyring));
+ rcu_dereference(cred->session_keyring));
rcu_read_unlock();
if (dest_keyring)
@@ -344,34 +345,42 @@ static void construct_get_dest_keyring(struct key **_dest_keyring)
* May return a key that's already under construction instead if there was a
* race between two thread calling request_key().
*/
-static int construct_alloc_key(struct key_type *type,
- const char *description,
+static int construct_alloc_key(struct keyring_search_context *ctx,
struct key *dest_keyring,
unsigned long flags,
struct key_user *user,
struct key **_key)
{
- const struct cred *cred = current_cred();
- unsigned long prealloc;
+ struct assoc_array_edit *edit;
struct key *key;
+ key_perm_t perm;
key_ref_t key_ref;
int ret;
- kenter("%s,%s,,,", type->name, description);
+ kenter("%s,%s,,,",
+ ctx->index_key.type->name, ctx->index_key.description);
*_key = NULL;
mutex_lock(&user->cons_lock);
- key = key_alloc(type, description, cred->fsuid, cred->fsgid, cred,
- KEY_POS_ALL, flags);
+ perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
+ perm |= KEY_USR_VIEW;
+ if (ctx->index_key.type->read)
+ perm |= KEY_POS_READ;
+ if (ctx->index_key.type == &key_type_keyring ||
+ ctx->index_key.type->update)
+ perm |= KEY_POS_WRITE;
+
+ key = key_alloc(ctx->index_key.type, ctx->index_key.description,
+ ctx->cred->fsuid, ctx->cred->fsgid, ctx->cred,
+ perm, flags);
if (IS_ERR(key))
goto alloc_failed;
set_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags);
if (dest_keyring) {
- ret = __key_link_begin(dest_keyring, type, description,
- &prealloc);
+ ret = __key_link_begin(dest_keyring, &ctx->index_key, &edit);
if (ret < 0)
goto link_prealloc_failed;
}
@@ -381,16 +390,16 @@ static int construct_alloc_key(struct key_type *type,
* waited for locks */
mutex_lock(&key_construction_mutex);
- key_ref = search_process_keyrings(type, description, type->match, cred);
+ key_ref = search_process_keyrings(ctx);
if (!IS_ERR(key_ref))
goto key_already_present;
if (dest_keyring)
- __key_link(dest_keyring, key, &prealloc);
+ __key_link(key, &edit);
mutex_unlock(&key_construction_mutex);
if (dest_keyring)
- __key_link_end(dest_keyring, type, prealloc);
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
mutex_unlock(&user->cons_lock);
*_key = key;
kleave(" = 0 [%d]", key_serial(key));
@@ -405,8 +414,8 @@ key_already_present:
if (dest_keyring) {
ret = __key_link_check_live_key(dest_keyring, key);
if (ret == 0)
- __key_link(dest_keyring, key, &prealloc);
- __key_link_end(dest_keyring, type, prealloc);
+ __key_link(key, &edit);
+ __key_link_end(dest_keyring, &ctx->index_key, edit);
if (ret < 0)
goto link_check_failed;
}
@@ -435,8 +444,7 @@ alloc_failed:
/*
* Commence key construction.
*/
-static struct key *construct_key_and_link(struct key_type *type,
- const char *description,
+static struct key *construct_key_and_link(struct keyring_search_context *ctx,
const char *callout_info,
size_t callout_len,
void *aux,
@@ -449,14 +457,13 @@ static struct key *construct_key_and_link(struct key_type *type,
kenter("");
- user = key_user_lookup(current_fsuid(), current_user_ns());
+ user = key_user_lookup(current_fsuid());
if (!user)
return ERR_PTR(-ENOMEM);
construct_get_dest_keyring(&dest_keyring);
- ret = construct_alloc_key(type, description, dest_keyring, flags, user,
- &key);
+ ret = construct_alloc_key(ctx, dest_keyring, flags, user, &key);
key_user_put(user);
if (ret == 0) {
@@ -520,17 +527,24 @@ struct key *request_key_and_link(struct key_type *type,
struct key *dest_keyring,
unsigned long flags)
{
- const struct cred *cred = current_cred();
+ struct keyring_search_context ctx = {
+ .index_key.type = type,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = type->match,
+ .match_data = description,
+ .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ };
struct key *key;
key_ref_t key_ref;
int ret;
kenter("%s,%s,%p,%zu,%p,%p,%lx",
- type->name, description, callout_info, callout_len, aux,
- dest_keyring, flags);
+ ctx.index_key.type->name, ctx.index_key.description,
+ callout_info, callout_len, aux, dest_keyring, flags);
/* search all the process keyrings for a key */
- key_ref = search_process_keyrings(type, description, type->match, cred);
+ key_ref = search_process_keyrings(&ctx);
if (!IS_ERR(key_ref)) {
key = key_ref_to_ptr(key_ref);
@@ -553,9 +567,8 @@ struct key *request_key_and_link(struct key_type *type,
if (!callout_info)
goto error;
- key = construct_key_and_link(type, description, callout_info,
- callout_len, aux, dest_keyring,
- flags);
+ key = construct_key_and_link(&ctx, callout_info, callout_len,
+ aux, dest_keyring, flags);
}
error:
@@ -583,8 +596,10 @@ int wait_for_key_construction(struct key *key, bool intr)
intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
if (ret < 0)
return ret;
- if (test_bit(KEY_FLAG_NEGATIVE, &key->flags))
+ if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
+ smp_rmb();
return key->type_data.reject_error;
+ }
return key_validate(key);
}
EXPORT_SYMBOL(wait_for_key_construction);
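The smp_rmb() added to wait_for_key_construction() orders the KEY_FLAG_NEGATIVE test before the load of the rejection error, pairing with a write barrier on the instantiation side that publishes the error before setting the flag. A sketch of the two sides of that ordering (the writer-side sequence is paraphrased, not quoted from this patch):

    #include <linux/bitops.h>
    #include <linux/key.h>

    /* Writer (rejection path), paraphrased:
     *     key->type_data.reject_error = -error;
     *     smp_wmb();                      // publish the error first
     *     set_bit(KEY_FLAG_NEGATIVE, &key->flags);
     *
     * Reader, as in wait_for_key_construction() above: */
    static int demo_reject_error(struct key *key)
    {
        if (test_bit(KEY_FLAG_NEGATIVE, &key->flags)) {
            smp_rmb();    /* flag observed => error store is visible */
            return key->type_data.reject_error;
        }
        return 0;
    }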
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 60d4e3f5e4b..7495a93b4b9 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -18,8 +18,10 @@
#include <linux/slab.h>
#include <asm/uaccess.h>
#include "internal.h"
+#include <keys/user-type.h>
-static int request_key_auth_instantiate(struct key *, const void *, size_t);
+static int request_key_auth_instantiate(struct key *,
+ struct key_preparsed_payload *);
static void request_key_auth_describe(const struct key *, struct seq_file *);
static void request_key_auth_revoke(struct key *);
static void request_key_auth_destroy(struct key *);
@@ -42,10 +44,9 @@ struct key_type key_type_request_key_auth = {
* Instantiate a request-key authorisation key.
*/
static int request_key_auth_instantiate(struct key *key,
- const void *data,
- size_t datalen)
+ struct key_preparsed_payload *prep)
{
- key->payload.data = (struct request_key_auth *) data;
+ key->payload.data = (struct request_key_auth *)prep->data;
return 0;
}
@@ -222,32 +223,26 @@ error_alloc:
}
/*
- * See if an authorisation key is associated with a particular key.
- */
-static int key_get_instantiation_authkey_match(const struct key *key,
- const void *_id)
-{
- struct request_key_auth *rka = key->payload.data;
- key_serial_t id = (key_serial_t)(unsigned long) _id;
-
- return rka->target_key->serial == id;
-}
-
-/*
* Search the current process's keyrings for the authorisation key for
* instantiation of a key.
*/
struct key *key_get_instantiation_authkey(key_serial_t target_id)
{
- const struct cred *cred = current_cred();
+ char description[16];
+ struct keyring_search_context ctx = {
+ .index_key.type = &key_type_request_key_auth,
+ .index_key.description = description,
+ .cred = current_cred(),
+ .match = user_match,
+ .match_data = description,
+ .flags = KEYRING_SEARCH_LOOKUP_DIRECT,
+ };
struct key *authkey;
key_ref_t authkey_ref;
- authkey_ref = search_process_keyrings(
- &key_type_request_key_auth,
- (void *) (unsigned long) target_id,
- key_get_instantiation_authkey_match,
- cred);
+ sprintf(description, "%x", target_id);
+
+ authkey_ref = search_process_keyrings(&ctx);
if (IS_ERR(authkey_ref)) {
authkey = ERR_CAST(authkey_ref);
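The auth-key lookup above replaces the custom match function with a search by description: a request_key_auth key is described by the target key's serial printed in hex, so the 16-byte buffer covers the at most 8 hex digits of a 32-bit key_serial_t plus the NUL. A standalone sketch of that description format:

    #include <stdint.h>
    #include <stdio.h>

    typedef int32_t key_serial_t;

    int main(void)
    {
        key_serial_t target_id = 0x12345678;    /* example serial */
        char description[16];

        /* same "%x" format used by key_get_instantiation_authkey() above */
        snprintf(description, sizeof(description), "%x",
                 (unsigned int)target_id);
        printf("auth key description: \"%s\"\n", description);
        return 0;
    }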
diff --git a/security/keys/sysctl.c b/security/keys/sysctl.c
index ee32d181764..b68faa1a5cf 100644
--- a/security/keys/sysctl.c
+++ b/security/keys/sysctl.c
@@ -15,7 +15,7 @@
static const int zero, one = 1, max = INT_MAX;
-ctl_table key_sysctls[] = {
+struct ctl_table key_sysctls[] = {
{
.procname = "maxkeys",
.data = &key_quota_maxkeys,
@@ -61,5 +61,16 @@ ctl_table key_sysctls[] = {
.extra1 = (void *) &zero,
.extra2 = (void *) &max,
},
+#ifdef CONFIG_PERSISTENT_KEYRINGS
+ {
+ .procname = "persistent_keyring_expiry",
+ .data = &persistent_keyring_expiry,
+ .maxlen = sizeof(unsigned),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = (void *) &zero,
+ .extra2 = (void *) &max,
+ },
+#endif
{ }
};
diff --git a/security/keys/trusted.c b/security/keys/trusted.c
index 2d5d041f204..6b804aa4529 100644
--- a/security/keys/trusted.c
+++ b/security/keys/trusted.c
@@ -369,38 +369,6 @@ static int trusted_tpm_send(const u32 chip_num, unsigned char *cmd,
}
/*
- * get a random value from TPM
- */
-static int tpm_get_random(struct tpm_buf *tb, unsigned char *buf, uint32_t len)
-{
- int ret;
-
- INIT_BUF(tb);
- store16(tb, TPM_TAG_RQU_COMMAND);
- store32(tb, TPM_GETRANDOM_SIZE);
- store32(tb, TPM_ORD_GETRANDOM);
- store32(tb, len);
- ret = trusted_tpm_send(TPM_ANY_NUM, tb->data, sizeof tb->data);
- if (!ret)
- memcpy(buf, tb->data + TPM_GETRANDOM_SIZE, len);
- return ret;
-}
-
-static int my_get_random(unsigned char *buf, int len)
-{
- struct tpm_buf *tb;
- int ret;
-
- tb = kmalloc(sizeof *tb, GFP_KERNEL);
- if (!tb)
- return -ENOMEM;
- ret = tpm_get_random(tb, buf, len);
-
- kfree(tb);
- return ret;
-}
-
-/*
* Lock a trusted key, by extending a selected PCR.
*
* Prevents a trusted key that is sealed to PCRs from being accessed.
@@ -413,8 +381,8 @@ static int pcrlock(const int pcrnum)
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
- ret = my_get_random(hash, SHA1_DIGEST_SIZE);
- if (ret < 0)
+ ret = tpm_get_random(TPM_ANY_NUM, hash, SHA1_DIGEST_SIZE);
+ if (ret != SHA1_DIGEST_SIZE)
return ret;
return tpm_pcr_extend(TPM_ANY_NUM, pcrnum, hash) ? -EINVAL : 0;
}
@@ -429,8 +397,8 @@ static int osap(struct tpm_buf *tb, struct osapsess *s,
unsigned char ononce[TPM_NONCE_SIZE];
int ret;
- ret = tpm_get_random(tb, ononce, TPM_NONCE_SIZE);
- if (ret < 0)
+ ret = tpm_get_random(TPM_ANY_NUM, ononce, TPM_NONCE_SIZE);
+ if (ret != TPM_NONCE_SIZE)
return ret;
INIT_BUF(tb);
@@ -524,8 +492,8 @@ static int tpm_seal(struct tpm_buf *tb, uint16_t keytype,
if (ret < 0)
goto out;
- ret = tpm_get_random(tb, td->nonceodd, TPM_NONCE_SIZE);
- if (ret < 0)
+ ret = tpm_get_random(TPM_ANY_NUM, td->nonceodd, TPM_NONCE_SIZE);
+ if (ret != TPM_NONCE_SIZE)
goto out;
ordinal = htonl(TPM_ORD_SEAL);
datsize = htonl(datalen);
@@ -634,8 +602,8 @@ static int tpm_unseal(struct tpm_buf *tb,
ordinal = htonl(TPM_ORD_UNSEAL);
keyhndl = htonl(SRKHANDLE);
- ret = tpm_get_random(tb, nonceodd, TPM_NONCE_SIZE);
- if (ret < 0) {
+ ret = tpm_get_random(TPM_ANY_NUM, nonceodd, TPM_NONCE_SIZE);
+ if (ret != TPM_NONCE_SIZE) {
pr_info("trusted_key: tpm_get_random failed (%d)\n", ret);
return ret;
}
@@ -785,7 +753,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
return -EINVAL;
break;
case Opt_keyhandle:
- res = strict_strtoul(args[0].from, 16, &handle);
+ res = kstrtoul(args[0].from, 16, &handle);
if (res < 0)
return -EINVAL;
opt->keytype = SEAL_keytype;
@@ -814,7 +782,7 @@ static int getoptions(char *c, struct trusted_key_payload *pay,
return -EINVAL;
break;
case Opt_pcrlock:
- res = strict_strtoul(args[0].from, 10, &lock);
+ res = kstrtoul(args[0].from, 10, &lock);
if (res < 0)
return -EINVAL;
opt->pcrlock = lock;
@@ -852,7 +820,7 @@ static int datablob_parse(char *datablob, struct trusted_key_payload *p,
c = strsep(&datablob, " \t");
if (!c)
return -EINVAL;
- ret = strict_strtol(c, 10, &keylen);
+ ret = kstrtol(c, 10, &keylen);
if (ret < 0 || keylen < MIN_KEY_SIZE || keylen > MAX_KEY_SIZE)
return -EINVAL;
p->key_len = keylen;
@@ -927,22 +895,24 @@ static struct trusted_key_payload *trusted_payload_alloc(struct key *key)
*
* On success, return 0. Otherwise return errno.
*/
-static int trusted_instantiate(struct key *key, const void *data,
- size_t datalen)
+static int trusted_instantiate(struct key *key,
+ struct key_preparsed_payload *prep)
{
struct trusted_key_payload *payload = NULL;
struct trusted_key_options *options = NULL;
+ size_t datalen = prep->datalen;
char *datablob;
int ret = 0;
int key_cmd;
+ size_t key_len;
- if (datalen <= 0 || datalen > 32767 || !data)
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
datablob = kmalloc(datalen + 1, GFP_KERNEL);
if (!datablob)
return -ENOMEM;
- memcpy(datablob, data, datalen);
+ memcpy(datablob, prep->data, datalen);
datablob[datalen] = '\0';
options = trusted_options_alloc();
@@ -974,8 +944,9 @@ static int trusted_instantiate(struct key *key, const void *data,
pr_info("trusted_key: key_unseal failed (%d)\n", ret);
break;
case Opt_new:
- ret = my_get_random(payload->key, payload->key_len);
- if (ret < 0) {
+ key_len = payload->key_len;
+ ret = tpm_get_random(TPM_ANY_NUM, payload->key, key_len);
+ if (ret != key_len) {
pr_info("trusted_key: key_create failed (%d)\n", ret);
goto out;
}
@@ -1011,17 +982,18 @@ static void trusted_rcu_free(struct rcu_head *rcu)
/*
* trusted_update - reseal an existing key with new PCR values
*/
-static int trusted_update(struct key *key, const void *data, size_t datalen)
+static int trusted_update(struct key *key, struct key_preparsed_payload *prep)
{
struct trusted_key_payload *p = key->payload.data;
struct trusted_key_payload *new_p;
struct trusted_key_options *new_o;
+ size_t datalen = prep->datalen;
char *datablob;
int ret = 0;
if (!p->migratable)
return -EPERM;
- if (datalen <= 0 || datalen > 32767 || !data)
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
return -EINVAL;
datablob = kmalloc(datalen + 1, GFP_KERNEL);
@@ -1038,7 +1010,7 @@ static int trusted_update(struct key *key, const void *data, size_t datalen)
goto out;
}
- memcpy(datablob, data, datalen);
+ memcpy(datablob, prep->data, datalen);
datablob[datalen] = '\0';
ret = datablob_parse(datablob, new_p, new_o);
if (ret != Opt_update) {
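The trusted.c hunks switch from the local my_get_random()/tpm_get_random(tb, ...) helpers to the TPM driver's exported tpm_get_random(), which returns the number of bytes obtained rather than 0 on success, hence the comparisons against the requested length. A hedged wrapper illustrating the convention (the wrapper is not part of the patch):

    #include <linux/errno.h>
    #include <linux/tpm.h>

    /* Illustrative: fold tpm_get_random()'s byte-count return into the
     * 0-or-negative-errno convention used elsewhere in trusted.c. */
    static int demo_get_random_exact(unsigned char *buf, size_t len)
    {
        int ret = tpm_get_random(TPM_ANY_NUM, buf, len);

        if (ret < 0)
            return ret;        /* driver reported an error */
        if ((size_t)ret != len)
            return -EIO;       /* short read: treat as failure */
        return 0;
    }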
diff --git a/security/keys/user_defined.c b/security/keys/user_defined.c
index c7660a25a3e..faa2caeb593 100644
--- a/security/keys/user_defined.c
+++ b/security/keys/user_defined.c
@@ -25,14 +25,15 @@ static int logon_vet_description(const char *desc);
* arbitrary blob of data as the payload
*/
struct key_type key_type_user = {
- .name = "user",
- .instantiate = user_instantiate,
- .update = user_update,
- .match = user_match,
- .revoke = user_revoke,
- .destroy = user_destroy,
- .describe = user_describe,
- .read = user_read,
+ .name = "user",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
+ .instantiate = user_instantiate,
+ .update = user_update,
+ .match = user_match,
+ .revoke = user_revoke,
+ .destroy = user_destroy,
+ .describe = user_describe,
+ .read = user_read,
};
EXPORT_SYMBOL_GPL(key_type_user);
@@ -45,6 +46,7 @@ EXPORT_SYMBOL_GPL(key_type_user);
*/
struct key_type key_type_logon = {
.name = "logon",
+ .def_lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
.instantiate = user_instantiate,
.update = user_update,
.match = user_match,
@@ -58,13 +60,14 @@ EXPORT_SYMBOL_GPL(key_type_logon);
/*
* instantiate a user defined key
*/
-int user_instantiate(struct key *key, const void *data, size_t datalen)
+int user_instantiate(struct key *key, struct key_preparsed_payload *prep)
{
struct user_key_payload *upayload;
+ size_t datalen = prep->datalen;
int ret;
ret = -EINVAL;
- if (datalen <= 0 || datalen > 32767 || !data)
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
goto error;
ret = key_payload_reserve(key, datalen);
@@ -78,7 +81,7 @@ int user_instantiate(struct key *key, const void *data, size_t datalen)
/* attach the data */
upayload->datalen = datalen;
- memcpy(upayload->data, data, datalen);
+ memcpy(upayload->data, prep->data, datalen);
rcu_assign_keypointer(key, upayload);
ret = 0;
@@ -92,13 +95,14 @@ EXPORT_SYMBOL_GPL(user_instantiate);
* update a user defined key
* - the key's semaphore is write-locked
*/
-int user_update(struct key *key, const void *data, size_t datalen)
+int user_update(struct key *key, struct key_preparsed_payload *prep)
{
struct user_key_payload *upayload, *zap;
+ size_t datalen = prep->datalen;
int ret;
ret = -EINVAL;
- if (datalen <= 0 || datalen > 32767 || !data)
+ if (datalen <= 0 || datalen > 32767 || !prep->data)
goto error;
/* construct a replacement payload */
@@ -108,7 +112,7 @@ int user_update(struct key *key, const void *data, size_t datalen)
goto error;
upayload->datalen = datalen;
- memcpy(upayload->data, data, datalen);
+ memcpy(upayload->data, prep->data, datalen);
/* check the quota and attach the new data */
zap = upayload;
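The user_defined.c hunks show the new key_type instantiate/update signature: the payload arrives pre-copied in a struct key_preparsed_payload rather than as a raw (data, datalen) pair. A minimal sketch of an instantiate hook in the new style (the key type and its payload struct are hypothetical):

    #include <linux/err.h>
    #include <linux/key-type.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Hypothetical payload for a demo key type. */
    struct demo_payload {
        unsigned short datalen;
        char           data[];
    };

    static int demo_instantiate(struct key *key,
                                struct key_preparsed_payload *prep)
    {
        struct demo_payload *p;
        size_t datalen = prep->datalen;
        int ret;

        if (datalen == 0 || datalen > 32767 || !prep->data)
            return -EINVAL;

        ret = key_payload_reserve(key, datalen);
        if (ret < 0)
            return ret;

        p = kmalloc(sizeof(*p) + datalen, GFP_KERNEL);
        if (!p)
            return -ENOMEM;

        p->datalen = datalen;
        memcpy(p->data, prep->data, datalen);
        key->payload.data = p;
        return 0;
    }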
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index 293b8c45b1d..69fdf3bc765 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -49,8 +49,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (ih == NULL)
return -EINVAL;
- ad->u.net.v4info.saddr = ih->saddr;
- ad->u.net.v4info.daddr = ih->daddr;
+ ad->u.net->v4info.saddr = ih->saddr;
+ ad->u.net->v4info.daddr = ih->daddr;
if (proto)
*proto = ih->protocol;
@@ -64,8 +64,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
@@ -73,8 +73,8 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
@@ -82,16 +82,16 @@ int ipv4_skb_to_auditdata(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
case IPPROTO_SCTP: {
struct sctphdr *sh = sctp_hdr(skb);
if (sh == NULL)
break;
- ad->u.net.sport = sh->source;
- ad->u.net.dport = sh->dest;
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
break;
}
default:
@@ -119,8 +119,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
ip6 = ipv6_hdr(skb);
if (ip6 == NULL)
return -EINVAL;
- ad->u.net.v6info.saddr = ip6->saddr;
- ad->u.net.v6info.daddr = ip6->daddr;
+ ad->u.net->v6info.saddr = ip6->saddr;
+ ad->u.net->v6info.daddr = ip6->daddr;
ret = 0;
/* IPv6 can have several extension header before the Transport header
* skip them */
@@ -140,8 +140,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
case IPPROTO_UDP: {
@@ -151,8 +151,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
case IPPROTO_DCCP: {
@@ -162,8 +162,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
case IPPROTO_SCTP: {
@@ -172,8 +172,8 @@ int ipv6_skb_to_auditdata(struct sk_buff *skb,
sh = skb_header_pointer(skb, offset, sizeof(_sctph), &_sctph);
if (sh == NULL)
break;
- ad->u.net.sport = sh->source;
- ad->u.net.dport = sh->dest;
+ ad->u.net->sport = sh->source;
+ ad->u.net->dport = sh->dest;
break;
}
default:
@@ -213,12 +213,15 @@ static void dump_common_audit_data(struct audit_buffer *ab,
{
struct task_struct *tsk = current;
- if (a->tsk)
- tsk = a->tsk;
- if (tsk && tsk->pid) {
- audit_log_format(ab, " pid=%d comm=", tsk->pid);
- audit_log_untrustedstring(ab, tsk->comm);
- }
+ /*
+ * To keep stack sizes in check, force programmers to notice if they
+ * start making this union too large! See struct lsm_network_audit
+ * as an example of how to deal with large data.
+ */
+ BUILD_BUG_ON(sizeof(a->u) > sizeof(void *)*2);
+
+ audit_log_format(ab, " pid=%d comm=", task_pid_nr(tsk));
+ audit_log_untrustedstring(ab, tsk->comm);
switch (a->type) {
case LSM_AUDIT_DATA_NONE:
@@ -275,14 +278,17 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
case LSM_AUDIT_DATA_TASK:
tsk = a->u.tsk;
- if (tsk && tsk->pid) {
- audit_log_format(ab, " pid=%d comm=", tsk->pid);
- audit_log_untrustedstring(ab, tsk->comm);
+ if (tsk) {
+ pid_t pid = task_pid_nr(tsk);
+ if (pid) {
+ audit_log_format(ab, " pid=%d comm=", pid);
+ audit_log_untrustedstring(ab, tsk->comm);
+ }
}
break;
case LSM_AUDIT_DATA_NET:
- if (a->u.net.sk) {
- struct sock *sk = a->u.net.sk;
+ if (a->u.net->sk) {
+ struct sock *sk = a->u.net->sk;
struct unix_sock *u;
int len = 0;
char *p = NULL;
@@ -299,26 +305,23 @@ static void dump_common_audit_data(struct audit_buffer *ab,
"faddr", "fport");
break;
}
+#if IS_ENABLED(CONFIG_IPV6)
case AF_INET6: {
struct inet_sock *inet = inet_sk(sk);
- struct ipv6_pinfo *inet6 = inet6_sk(sk);
- print_ipv6_addr(ab, &inet6->rcv_saddr,
+ print_ipv6_addr(ab, &sk->sk_v6_rcv_saddr,
inet->inet_sport,
"laddr", "lport");
- print_ipv6_addr(ab, &inet6->daddr,
+ print_ipv6_addr(ab, &sk->sk_v6_daddr,
inet->inet_dport,
"faddr", "fport");
break;
}
+#endif
case AF_UNIX:
u = unix_sk(sk);
- if (u->dentry) {
- struct path path = {
- .dentry = u->dentry,
- .mnt = u->mnt
- };
- audit_log_d_path(ab, " path=", &path);
+ if (u->path.dentry) {
+ audit_log_d_path(ab, " path=", &u->path);
break;
}
if (!u->addr)
@@ -334,29 +337,29 @@ static void dump_common_audit_data(struct audit_buffer *ab,
}
}
- switch (a->u.net.family) {
+ switch (a->u.net->family) {
case AF_INET:
- print_ipv4_addr(ab, a->u.net.v4info.saddr,
- a->u.net.sport,
+ print_ipv4_addr(ab, a->u.net->v4info.saddr,
+ a->u.net->sport,
"saddr", "src");
- print_ipv4_addr(ab, a->u.net.v4info.daddr,
- a->u.net.dport,
+ print_ipv4_addr(ab, a->u.net->v4info.daddr,
+ a->u.net->dport,
"daddr", "dest");
break;
case AF_INET6:
- print_ipv6_addr(ab, &a->u.net.v6info.saddr,
- a->u.net.sport,
+ print_ipv6_addr(ab, &a->u.net->v6info.saddr,
+ a->u.net->sport,
"saddr", "src");
- print_ipv6_addr(ab, &a->u.net.v6info.daddr,
- a->u.net.dport,
+ print_ipv6_addr(ab, &a->u.net->v6info.daddr,
+ a->u.net->dport,
"daddr", "dest");
break;
}
- if (a->u.net.netif > 0) {
+ if (a->u.net->netif > 0) {
struct net_device *dev;
/* NOTE: we always use init's namespace */
- dev = dev_get_by_index(&init_net, a->u.net.netif);
+ dev = dev_get_by_index(&init_net, a->u.net->netif);
if (dev) {
audit_log_format(ab, " netif=%s", dev->name);
dev_put(dev);
@@ -382,29 +385,34 @@ static void dump_common_audit_data(struct audit_buffer *ab,
/**
* common_lsm_audit - generic LSM auditing function
* @a: auxiliary audit data
+ * @pre_audit: lsm-specific pre-audit callback
+ * @post_audit: lsm-specific post-audit callback
*
* setup the audit buffer for common security information
* uses callback to print LSM specific information
*/
-void common_lsm_audit(struct common_audit_data *a)
+void common_lsm_audit(struct common_audit_data *a,
+ void (*pre_audit)(struct audit_buffer *, void *),
+ void (*post_audit)(struct audit_buffer *, void *))
{
struct audit_buffer *ab;
if (a == NULL)
return;
/* we use GFP_ATOMIC so we won't sleep */
- ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_AVC);
+ ab = audit_log_start(current->audit_context, GFP_ATOMIC | __GFP_NOWARN,
+ AUDIT_AVC);
if (ab == NULL)
return;
- if (a->lsm_pre_audit)
- a->lsm_pre_audit(ab, a);
+ if (pre_audit)
+ pre_audit(ab, a);
dump_common_audit_data(ab, a);
- if (a->lsm_post_audit)
- a->lsm_post_audit(ab, a);
+ if (post_audit)
+ post_audit(ab, a);
audit_log_end(ab);
}
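With u.net now a pointer and the pre/post printers passed explicitly, an LSM stacks a struct lsm_network_audit next to the common data and hands its callbacks straight to common_lsm_audit(). A hedged sketch of such a caller (the callback bodies and names are illustrative):

    #include <linux/audit.h>
    #include <linux/lsm_audit.h>
    #include <linux/socket.h>
    #include <net/sock.h>

    static void demo_pre_audit(struct audit_buffer *ab, void *a)
    {
        audit_log_format(ab, "demo_lsm:");
    }

    static void demo_post_audit(struct audit_buffer *ab, void *a)
    {
        /* LSM-specific trailer would be printed here */
    }

    static void demo_audit_sock(struct sock *sk)
    {
        struct lsm_network_audit net = { .sk = sk, .family = AF_UNSPEC };
        struct common_audit_data ad;

        ad.type  = LSM_AUDIT_DATA_NET;
        ad.u.net = &net;    /* u.net is a pointer after this patch */

        common_lsm_audit(&ad, demo_pre_audit, demo_post_audit);
    }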
diff --git a/security/security.c b/security/security.c
index d7542493454..31614e9e96e 100644
--- a/security/security.c
+++ b/security/security.c
@@ -12,6 +12,7 @@
*/
#include <linux/capability.h>
+#include <linux/dcache.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
@@ -19,6 +20,12 @@
#include <linux/integrity.h>
#include <linux/ima.h>
#include <linux/evm.h>
+#include <linux/fsnotify.h>
+#include <linux/mman.h>
+#include <linux/mount.h>
+#include <linux/personality.h>
+#include <linux/backing-dev.h>
+#include <net/flow.h>
#define MAX_LSM_EVM_XATTR 2
@@ -130,11 +137,23 @@ int __init register_security(struct security_operations *ops)
int security_ptrace_access_check(struct task_struct *child, unsigned int mode)
{
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+ int rc;
+ rc = yama_ptrace_access_check(child, mode);
+ if (rc)
+ return rc;
+#endif
return security_ops->ptrace_access_check(child, mode);
}
int security_ptrace_traceme(struct task_struct *parent)
{
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+ int rc;
+ rc = yama_ptrace_traceme(parent);
+ if (rc)
+ return rc;
+#endif
return security_ops->ptrace_traceme(parent);
}
@@ -187,25 +206,11 @@ int security_settime(const struct timespec *ts, const struct timezone *tz)
return security_ops->settime(ts, tz);
}
-int security_vm_enough_memory(long pages)
-{
- WARN_ON(current->mm == NULL);
- return security_ops->vm_enough_memory(current->mm, pages);
-}
-
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages)
{
- WARN_ON(mm == NULL);
return security_ops->vm_enough_memory(mm, pages);
}
-int security_vm_enough_memory_kern(long pages)
-{
- /* If current->mm is a kernel thread then we will pass NULL,
- for this specific case that is fine */
- return security_ops->vm_enough_memory(current->mm, pages);
-}
-
int security_bprm_set_creds(struct linux_binprm *bprm)
{
return security_ops->bprm_set_creds(bprm);
@@ -272,8 +277,8 @@ int security_sb_statfs(struct dentry *dentry)
return security_ops->sb_statfs(dentry);
}
-int security_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data)
+int security_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
return security_ops->sb_mount(dev_name, path, type, flags, data);
}
@@ -289,16 +294,19 @@ int security_sb_pivotroot(struct path *old_path, struct path *new_path)
}
int security_sb_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts)
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
{
- return security_ops->sb_set_mnt_opts(sb, opts);
+ return security_ops->sb_set_mnt_opts(sb, opts, kern_flags,
+ set_kern_flags);
}
EXPORT_SYMBOL(security_sb_set_mnt_opts);
-void security_sb_clone_mnt_opts(const struct super_block *oldsb,
+int security_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{
- security_ops->sb_clone_mnt_opts(oldsb, newsb);
+ return security_ops->sb_clone_mnt_opts(oldsb, newsb);
}
EXPORT_SYMBOL(security_sb_clone_mnt_opts);
@@ -320,6 +328,15 @@ void security_inode_free(struct inode *inode)
security_ops->inode_free_security(inode);
}
+int security_dentry_init_security(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen)
+{
+ return security_ops->dentry_init_security(dentry, mode, name,
+ ctx, ctxlen);
+}
+EXPORT_SYMBOL(security_dentry_init_security);
+
int security_inode_init_security(struct inode *inode, struct inode *dir,
const struct qstr *qstr,
const initxattrs initxattrs, void *fs_data)
@@ -331,10 +348,10 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
if (unlikely(IS_PRIVATE(inode)))
return 0;
- memset(new_xattrs, 0, sizeof new_xattrs);
if (!initxattrs)
return security_ops->inode_init_security(inode, dir, qstr,
NULL, NULL, NULL);
+ memset(new_xattrs, 0, sizeof(new_xattrs));
lsm_xattr = new_xattrs;
ret = security_ops->inode_init_security(inode, dir, qstr,
&lsm_xattr->name,
@@ -349,16 +366,14 @@ int security_inode_init_security(struct inode *inode, struct inode *dir,
goto out;
ret = initxattrs(inode, new_xattrs, fs_data);
out:
- for (xattr = new_xattrs; xattr->name != NULL; xattr++) {
- kfree(xattr->name);
+ for (xattr = new_xattrs; xattr->value != NULL; xattr++)
kfree(xattr->value);
- }
return (ret == -EOPNOTSUPP) ? 0 : ret;
}
EXPORT_SYMBOL(security_inode_init_security);
int security_old_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
+ const struct qstr *qstr, const char **name,
void **value, size_t *len)
{
if (unlikely(IS_PRIVATE(inode)))
@@ -418,11 +433,20 @@ int security_path_link(struct dentry *old_dentry, struct path *new_dir,
}
int security_path_rename(struct path *old_dir, struct dentry *old_dentry,
- struct path *new_dir, struct dentry *new_dentry)
+ struct path *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
(new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
return 0;
+
+ if (flags & RENAME_EXCHANGE) {
+ int err = security_ops->path_rename(new_dir, new_dentry,
+ old_dir, old_dentry);
+ if (err)
+ return err;
+ }
+
return security_ops->path_rename(old_dir, old_dentry, new_dir,
new_dentry);
}
@@ -442,7 +466,7 @@ int security_path_chmod(struct path *path, umode_t mode)
return security_ops->path_chmod(path, mode);
}
-int security_path_chown(struct path *path, uid_t uid, gid_t gid)
+int security_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
if (unlikely(IS_PRIVATE(path->dentry->d_inode)))
return 0;
@@ -509,11 +533,20 @@ int security_inode_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
}
int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+ struct inode *new_dir, struct dentry *new_dentry,
+ unsigned int flags)
{
if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
(new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
return 0;
+
+ if (flags & RENAME_EXCHANGE) {
+ int err = security_ops->inode_rename(new_dir, new_dentry,
+ old_dir, old_dentry);
+ if (err)
+ return err;
+ }
+
return security_ops->inode_rename(old_dir, old_dentry,
new_dir, new_dentry);
}
@@ -569,6 +602,9 @@ int security_inode_setxattr(struct dentry *dentry, const char *name,
ret = security_ops->inode_setxattr(dentry, name, value, size, flags);
if (ret)
return ret;
+ ret = ima_inode_setxattr(dentry, name, value, size);
+ if (ret)
+ return ret;
return evm_inode_setxattr(dentry, name, value, size);
}
@@ -604,6 +640,9 @@ int security_inode_removexattr(struct dentry *dentry, const char *name)
ret = security_ops->inode_removexattr(dentry, name);
if (ret)
return ret;
+ ret = ima_inode_removexattr(dentry, name);
+ if (ret)
+ return ret;
return evm_inode_removexattr(dentry, name);
}
@@ -637,6 +676,7 @@ int security_inode_listsecurity(struct inode *inode, char *buffer, size_t buffer
return 0;
return security_ops->inode_listsecurity(inode, buffer, buffer_size);
}
+EXPORT_SYMBOL(security_inode_listsecurity);
void security_inode_getsecid(const struct inode *inode, u32 *secid)
{
@@ -669,18 +709,56 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return security_ops->file_ioctl(file, cmd, arg);
}
-int security_file_mmap(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags,
- unsigned long addr, unsigned long addr_only)
+static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
{
- int ret;
+ /*
+ * Do we have PROT_READ and does the application expect
+ * it to imply PROT_EXEC? If not, nothing to talk about...
+ */
+ if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
+ return prot;
+ if (!(current->personality & READ_IMPLIES_EXEC))
+ return prot;
+ /*
+ * if that's an anonymous mapping, let it.
+ */
+ if (!file)
+ return prot | PROT_EXEC;
+ /*
+ * ditto if it's not on noexec mount, except that on !MMU we need
+ * BDI_CAP_EXEC_MAP (== VM_MAYEXEC) in this case
+ */
+ if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
+#ifndef CONFIG_MMU
+ unsigned long caps = 0;
+ struct address_space *mapping = file->f_mapping;
+ if (mapping && mapping->backing_dev_info)
+ caps = mapping->backing_dev_info->capabilities;
+ if (!(caps & BDI_CAP_EXEC_MAP))
+ return prot;
+#endif
+ return prot | PROT_EXEC;
+ }
+ /* anything on noexec mount won't get PROT_EXEC */
+ return prot;
+}
- ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only);
+int security_mmap_file(struct file *file, unsigned long prot,
+ unsigned long flags)
+{
+ int ret;
+ ret = security_ops->mmap_file(file, prot,
+ mmap_prot(file, prot), flags);
if (ret)
return ret;
return ima_file_mmap(file, prot);
}
+int security_mmap_addr(unsigned long addr)
+{
+ return security_ops->mmap_addr(addr);
+}
+
int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
unsigned long prot)
{
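
Aside: a small userspace illustration of the personality handling that the new mmap_prot() helper above reconstructs for the security check. This is a hedged sketch, not part of the patch; whether the mapping actually becomes executable depends on the architecture honouring READ_IMPLIES_EXEC.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <sys/personality.h>

    int main(void)
    {
        void *p;

        personality(READ_IMPLIES_EXEC);
        p = mmap(NULL, 4096, PROT_READ,
                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        /* Where READ_IMPLIES_EXEC is honoured, /proc/self/maps shows
         * this region as r-xp rather than r--p, i.e. the LSM hook sees
         * PROT_READ|PROT_EXEC even though only PROT_READ was asked for. */
        printf("mapped at %p\n", p);
        return 0;
    }
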
@@ -713,11 +791,11 @@ int security_file_receive(struct file *file)
return security_ops->file_receive(file);
}
-int security_dentry_open(struct file *file, const struct cred *cred)
+int security_file_open(struct file *file, const struct cred *cred)
{
int ret;
- ret = security_ops->dentry_open(file, cred);
+ ret = security_ops->file_open(file, cred);
if (ret)
return ret;
@@ -729,6 +807,14 @@ int security_task_create(unsigned long clone_flags)
return security_ops->task_create(clone_flags);
}
+void security_task_free(struct task_struct *task)
+{
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+ yama_task_free(task);
+#endif
+ security_ops->task_free(task);
+}
+
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp)
{
return security_ops->cred_alloc_blank(cred, gfp);
@@ -764,6 +850,16 @@ int security_kernel_module_request(char *kmod_name)
return security_ops->kernel_module_request(kmod_name);
}
+int security_kernel_module_from_file(struct file *file)
+{
+ int ret;
+
+ ret = security_ops->kernel_module_from_file(file);
+ if (ret)
+ return ret;
+ return ima_module_check(file);
+}
+
int security_task_fix_setuid(struct cred *new, const struct cred *old,
int flags)
{
@@ -841,6 +937,12 @@ int security_task_wait(struct task_struct *p)
int security_task_prctl(int option, unsigned long arg2, unsigned long arg3,
unsigned long arg4, unsigned long arg5)
{
+#ifdef CONFIG_SECURITY_YAMA_STACKED
+ int rc;
+ rc = yama_task_prctl(option, arg2, arg3, arg4, arg5);
+ if (rc != -ENOSYS)
+ return rc;
+#endif
return security_ops->task_prctl(option, arg2, arg3, arg4, arg5);
}
@@ -975,6 +1077,12 @@ int security_netlink_send(struct sock *sk, struct sk_buff *skb)
return security_ops->netlink_send(sk, skb);
}
+int security_ismaclabel(const char *name)
+{
+ return security_ops->ismaclabel(name);
+}
+EXPORT_SYMBOL(security_ismaclabel);
+
int security_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return security_ops->secid_to_secctx(secid, secdata, seclen);
@@ -1182,31 +1290,56 @@ void security_secmark_refcount_dec(void)
}
EXPORT_SYMBOL(security_secmark_refcount_dec);
+int security_tun_dev_alloc_security(void **security)
+{
+ return security_ops->tun_dev_alloc_security(security);
+}
+EXPORT_SYMBOL(security_tun_dev_alloc_security);
+
+void security_tun_dev_free_security(void *security)
+{
+ security_ops->tun_dev_free_security(security);
+}
+EXPORT_SYMBOL(security_tun_dev_free_security);
+
int security_tun_dev_create(void)
{
return security_ops->tun_dev_create();
}
EXPORT_SYMBOL(security_tun_dev_create);
-void security_tun_dev_post_create(struct sock *sk)
+int security_tun_dev_attach_queue(void *security)
{
- return security_ops->tun_dev_post_create(sk);
+ return security_ops->tun_dev_attach_queue(security);
}
-EXPORT_SYMBOL(security_tun_dev_post_create);
+EXPORT_SYMBOL(security_tun_dev_attach_queue);
-int security_tun_dev_attach(struct sock *sk)
+int security_tun_dev_attach(struct sock *sk, void *security)
{
- return security_ops->tun_dev_attach(sk);
+ return security_ops->tun_dev_attach(sk, security);
}
EXPORT_SYMBOL(security_tun_dev_attach);
+int security_tun_dev_open(void *security)
+{
+ return security_ops->tun_dev_open(security);
+}
+EXPORT_SYMBOL(security_tun_dev_open);
+
+void security_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+ security_ops->skb_owned_by(skb, sk);
+}
+
#endif /* CONFIG_SECURITY_NETWORK */
#ifdef CONFIG_SECURITY_NETWORK_XFRM
-int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *sec_ctx,
+ gfp_t gfp)
{
- return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx);
+ return security_ops->xfrm_policy_alloc_security(ctxp, sec_ctx, gfp);
}
EXPORT_SYMBOL(security_xfrm_policy_alloc);
@@ -1227,22 +1360,17 @@ int security_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
return security_ops->xfrm_policy_delete_security(ctx);
}
-int security_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *sec_ctx)
+int security_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *sec_ctx)
{
- return security_ops->xfrm_state_alloc_security(x, sec_ctx, 0);
+ return security_ops->xfrm_state_alloc(x, sec_ctx);
}
EXPORT_SYMBOL(security_xfrm_state_alloc);
int security_xfrm_state_alloc_acquire(struct xfrm_state *x,
struct xfrm_sec_ctx *polsec, u32 secid)
{
- if (!polsec)
- return 0;
- /*
- * We want the context to be taken from secid which is usually
- * from the sock.
- */
- return security_ops->xfrm_state_alloc_security(x, NULL, secid);
+ return security_ops->xfrm_state_alloc_acquire(x, polsec, secid);
}
int security_xfrm_state_delete(struct xfrm_state *x)
@@ -1297,7 +1425,7 @@ void security_key_free(struct key *key)
}
int security_key_permission(key_ref_t key_ref,
- const struct cred *cred, key_perm_t perm)
+ const struct cred *cred, unsigned perm)
{
return security_ops->key_permission(key_ref, cred, perm);
}
diff --git a/security/selinux/avc.c b/security/selinux/avc.c
index dca1c22d927..a18f1fa6440 100644
--- a/security/selinux/avc.c
+++ b/security/selinux/avc.c
@@ -65,14 +65,8 @@ struct avc_cache {
};
struct avc_callback_node {
- int (*callback) (u32 event, u32 ssid, u32 tsid,
- u16 tclass, u32 perms,
- u32 *out_retained);
+ int (*callback) (u32 event);
u32 events;
- u32 ssid;
- u32 tsid;
- u16 tclass;
- u32 perms;
struct avc_callback_node *next;
};
@@ -194,11 +188,9 @@ int avc_get_hash_stats(char *page)
for (i = 0; i < AVC_CACHE_SLOTS; i++) {
head = &avc_cache.slots[i];
if (!hlist_empty(head)) {
- struct hlist_node *next;
-
slots_used++;
chain_len = 0;
- hlist_for_each_entry_rcu(node, next, head, list)
+ hlist_for_each_entry_rcu(node, head, list)
chain_len++;
if (chain_len > max_chain_len)
max_chain_len = chain_len;
@@ -247,7 +239,6 @@ static inline int avc_reclaim_node(void)
int hvalue, try, ecx;
unsigned long flags;
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
for (try = 0, ecx = 0; try < AVC_CACHE_SLOTS; try++) {
@@ -259,7 +250,7 @@ static inline int avc_reclaim_node(void)
continue;
rcu_read_lock();
- hlist_for_each_entry(node, next, head, list) {
+ hlist_for_each_entry(node, head, list) {
avc_node_delete(node);
avc_cache_stats_incr(reclaims);
ecx++;
@@ -280,7 +271,7 @@ static struct avc_node *avc_alloc_node(void)
{
struct avc_node *node;
- node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC);
+ node = kmem_cache_zalloc(avc_node_cachep, GFP_ATOMIC|__GFP_NOMEMALLOC);
if (!node)
goto out;
@@ -307,11 +298,10 @@ static inline struct avc_node *avc_search_node(u32 ssid, u32 tsid, u16 tclass)
struct avc_node *node, *ret = NULL;
int hvalue;
struct hlist_head *head;
- struct hlist_node *next;
hvalue = avc_hash(ssid, tsid, tclass);
head = &avc_cache.slots[hvalue];
- hlist_for_each_entry_rcu(node, next, head, list) {
+ hlist_for_each_entry_rcu(node, head, list) {
if (ssid == node->ae.ssid &&
tclass == node->ae.tclass &&
tsid == node->ae.tsid) {
@@ -400,7 +390,6 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
node = avc_alloc_node();
if (node) {
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
hvalue = avc_hash(ssid, tsid, tclass);
@@ -410,7 +399,7 @@ static struct avc_node *avc_insert(u32 ssid, u32 tsid, u16 tclass, struct av_dec
lock = &avc_cache.slots_lock[hvalue];
spin_lock_irqsave(lock, flag);
- hlist_for_each_entry(pos, next, head, list) {
+ hlist_for_each_entry(pos, head, list) {
if (pos->ae.ssid == ssid &&
pos->ae.tsid == tsid &&
pos->ae.tclass == tclass) {
@@ -436,9 +425,9 @@ static void avc_audit_pre_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, "avc: %s ",
- ad->selinux_audit_data.denied ? "denied" : "granted");
- avc_dump_av(ab, ad->selinux_audit_data.tclass,
- ad->selinux_audit_data.audited);
+ ad->selinux_audit_data->denied ? "denied" : "granted");
+ avc_dump_av(ab, ad->selinux_audit_data->tclass,
+ ad->selinux_audit_data->audited);
audit_log_format(ab, " for ");
}
@@ -452,71 +441,27 @@ static void avc_audit_post_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
audit_log_format(ab, " ");
- avc_dump_query(ab, ad->selinux_audit_data.ssid,
- ad->selinux_audit_data.tsid,
- ad->selinux_audit_data.tclass);
+ avc_dump_query(ab, ad->selinux_audit_data->ssid,
+ ad->selinux_audit_data->tsid,
+ ad->selinux_audit_data->tclass);
+ if (ad->selinux_audit_data->denied) {
+ audit_log_format(ab, " permissive=%u",
+ ad->selinux_audit_data->result ? 0 : 1);
+ }
}
-/**
- * avc_audit - Audit the granting or denial of permissions.
- * @ssid: source security identifier
- * @tsid: target security identifier
- * @tclass: target security class
- * @requested: requested permissions
- * @avd: access vector decisions
- * @result: result from avc_has_perm_noaudit
- * @a: auxiliary audit data
- * @flags: VFS walk flags
- *
- * Audit the granting or denial of permissions in accordance
- * with the policy. This function is typically called by
- * avc_has_perm() after a permission check, but can also be
- * called directly by callers who use avc_has_perm_noaudit()
- * in order to separate the permission check from the auditing.
- * For example, this separation is useful when the permission check must
- * be performed under a lock, to allow the lock to be released
- * before calling the auditing code.
- */
-int avc_audit(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd, int result, struct common_audit_data *a,
- unsigned flags)
+/* This is the slow part of avc audit with a big stack footprint */
+noinline int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+ struct common_audit_data *a,
+ unsigned flags)
{
struct common_audit_data stack_data;
- u32 denied, audited;
- denied = requested & ~avd->allowed;
- if (denied) {
- audited = denied & avd->auditdeny;
- /*
- * a->selinux_audit_data.auditdeny is TRICKY! Setting a bit in
- * this field means that ANY denials should NOT be audited if
- * the policy contains an explicit dontaudit rule for that
- * permission. Take notice that this is unrelated to the
- * actual permissions that were denied. As an example lets
- * assume:
- *
- * denied == READ
- * avd.auditdeny & ACCESS == 0 (not set means explicit rule)
- * selinux_audit_data.auditdeny & ACCESS == 1
- *
- * We will NOT audit the denial even though the denied
- * permission was READ and the auditdeny checks were for
- * ACCESS
- */
- if (a &&
- a->selinux_audit_data.auditdeny &&
- !(a->selinux_audit_data.auditdeny & avd->auditdeny))
- audited = 0;
- } else if (result)
- audited = denied = requested;
- else
- audited = requested & avd->auditallow;
- if (!audited)
- return 0;
+ struct selinux_audit_data sad;
if (!a) {
a = &stack_data;
- COMMON_AUDIT_DATA_INIT(a, NONE);
+ a->type = LSM_AUDIT_DATA_NONE;
}
/*
@@ -530,15 +475,17 @@ int avc_audit(u32 ssid, u32 tsid,
(flags & MAY_NOT_BLOCK))
return -ECHILD;
- a->selinux_audit_data.tclass = tclass;
- a->selinux_audit_data.requested = requested;
- a->selinux_audit_data.ssid = ssid;
- a->selinux_audit_data.tsid = tsid;
- a->selinux_audit_data.audited = audited;
- a->selinux_audit_data.denied = denied;
- a->lsm_pre_audit = avc_audit_pre_callback;
- a->lsm_post_audit = avc_audit_post_callback;
- common_lsm_audit(a);
+ sad.tclass = tclass;
+ sad.requested = requested;
+ sad.ssid = ssid;
+ sad.tsid = tsid;
+ sad.audited = audited;
+ sad.denied = denied;
+ sad.result = result;
+
+ a->selinux_audit_data = &sad;
+
+ common_lsm_audit(a, avc_audit_pre_callback, avc_audit_post_callback);
return 0;
}
@@ -546,27 +493,17 @@ int avc_audit(u32 ssid, u32 tsid,
* avc_add_callback - Register a callback for security events.
* @callback: callback function
* @events: security events
- * @ssid: source security identifier or %SECSID_WILD
- * @tsid: target security identifier or %SECSID_WILD
- * @tclass: target security class
- * @perms: permissions
*
- * Register a callback function for events in the set @events
- * related to the SID pair (@ssid, @tsid)
- * and the permissions @perms, interpreting
- * @perms based on @tclass. Returns %0 on success or
- * -%ENOMEM if insufficient memory exists to add the callback.
+ * Register a callback function for events in the set @events.
+ * Returns %0 on success or -%ENOMEM if insufficient memory
+ * exists to add the callback.
*/
-int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
- u16 tclass, u32 perms,
- u32 *out_retained),
- u32 events, u32 ssid, u32 tsid,
- u16 tclass, u32 perms)
+int __init avc_add_callback(int (*callback)(u32 event), u32 events)
{
struct avc_callback_node *c;
int rc = 0;
- c = kmalloc(sizeof(*c), GFP_ATOMIC);
+ c = kmalloc(sizeof(*c), GFP_KERNEL);
if (!c) {
rc = -ENOMEM;
goto out;
@@ -574,9 +511,6 @@ int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
c->callback = callback;
c->events = events;
- c->ssid = ssid;
- c->tsid = tsid;
- c->perms = perms;
c->next = avc_callbacks;
avc_callbacks = c;
out:
@@ -607,7 +541,6 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
unsigned long flag;
struct avc_node *pos, *node, *orig = NULL;
struct hlist_head *head;
- struct hlist_node *next;
spinlock_t *lock;
node = avc_alloc_node();
@@ -624,7 +557,7 @@ static int avc_update_node(u32 event, u32 perms, u32 ssid, u32 tsid, u16 tclass,
spin_lock_irqsave(lock, flag);
- hlist_for_each_entry(pos, next, head, list) {
+ hlist_for_each_entry(pos, head, list) {
if (ssid == pos->ae.ssid &&
tsid == pos->ae.tsid &&
tclass == pos->ae.tclass &&
@@ -680,7 +613,6 @@ out:
static void avc_flush(void)
{
struct hlist_head *head;
- struct hlist_node *next;
struct avc_node *node;
spinlock_t *lock;
unsigned long flag;
@@ -696,7 +628,7 @@ static void avc_flush(void)
* prevent RCU grace periods from ending.
*/
rcu_read_lock();
- hlist_for_each_entry(node, next, head, list)
+ hlist_for_each_entry(node, head, list)
avc_node_delete(node);
rcu_read_unlock();
spin_unlock_irqrestore(lock, flag);
@@ -716,8 +648,7 @@ int avc_ss_reset(u32 seqno)
for (c = avc_callbacks; c; c = c->next) {
if (c->events & AVC_CALLBACK_RESET) {
- tmprc = c->callback(AVC_CALLBACK_RESET,
- 0, 0, 0, 0, NULL);
+ tmprc = c->callback(AVC_CALLBACK_RESET);
/* save the first error encountered for the return
value and continue processing the callbacks */
if (!rc)
@@ -729,6 +660,41 @@ int avc_ss_reset(u32 seqno)
return rc;
}
+/*
+ * Slow-path helper function for avc_has_perm_noaudit,
+ * when the avc_node lookup fails. We get called with
+ * the RCU read lock held, and need to return with it
+ * still held, but drop it for the security compute.
+ *
+ * Don't inline this, since it's the slow-path and just
+ * results in a bigger stack frame.
+ */
+static noinline struct avc_node *avc_compute_av(u32 ssid, u32 tsid,
+ u16 tclass, struct av_decision *avd)
+{
+ rcu_read_unlock();
+ security_compute_av(ssid, tsid, tclass, avd);
+ rcu_read_lock();
+ return avc_insert(ssid, tsid, tclass, avd);
+}
+
+static noinline int avc_denied(u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ unsigned flags,
+ struct av_decision *avd)
+{
+ if (flags & AVC_STRICT)
+ return -EACCES;
+
+ if (selinux_enforcing && !(avd->flags & AVD_FLAGS_PERMISSIVE))
+ return -EACCES;
+
+ avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
+ tsid, tclass, avd->seqno);
+ return 0;
+}
+
+
/**
* avc_has_perm_noaudit - Check permissions but perform no auditing.
* @ssid: source security identifier
@@ -749,7 +715,7 @@ int avc_ss_reset(u32 seqno)
* auditing, e.g. in cases where a lock must be held for the check but
* should be released for the auditing.
*/
-int avc_has_perm_noaudit(u32 ssid, u32 tsid,
+inline int avc_has_perm_noaudit(u32 ssid, u32 tsid,
u16 tclass, u32 requested,
unsigned flags,
struct av_decision *avd)
@@ -764,26 +730,15 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
node = avc_lookup(ssid, tsid, tclass);
if (unlikely(!node)) {
- rcu_read_unlock();
- security_compute_av(ssid, tsid, tclass, avd);
- rcu_read_lock();
- node = avc_insert(ssid, tsid, tclass, avd);
+ node = avc_compute_av(ssid, tsid, tclass, avd);
} else {
memcpy(avd, &node->ae.avd, sizeof(*avd));
avd = &node->ae.avd;
}
denied = requested & ~(avd->allowed);
-
- if (denied) {
- if (flags & AVC_STRICT)
- rc = -EACCES;
- else if (!selinux_enforcing || (avd->flags & AVD_FLAGS_PERMISSIVE))
- avc_update_node(AVC_CALLBACK_GRANT, requested, ssid,
- tsid, tclass, avd->seqno);
- else
- rc = -EACCES;
- }
+ if (unlikely(denied))
+ rc = avc_denied(ssid, tsid, tclass, requested, flags, avd);
rcu_read_unlock();
return rc;
@@ -796,7 +751,6 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
* @tclass: target security class
* @requested: requested permissions, interpreted based on @tclass
* @auditdata: auxiliary audit data
- * @flags: VFS walk flags
*
* Check the AVC to determine whether the @requested permissions are granted
* for the SID pair (@ssid, @tsid), interpreting the permissions
@@ -806,17 +760,15 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
* permissions are granted, -%EACCES if any permissions are denied, or
* another -errno upon other errors.
*/
-int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
- u32 requested, struct common_audit_data *auditdata,
- unsigned flags)
+int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, struct common_audit_data *auditdata)
{
struct av_decision avd;
int rc, rc2;
rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
- rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata,
- flags);
+ rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
if (rc2)
return rc2;
return rc;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 6a3683e2842..83d06db34d0 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -28,7 +28,6 @@
#include <linux/kernel.h>
#include <linux/tracehook.h>
#include <linux/errno.h>
-#include <linux/ext2_fs.h>
#include <linux/sched.h>
#include <linux/security.h>
#include <linux/xattr.h>
@@ -52,7 +51,9 @@
#include <linux/tty.h>
#include <net/icmp.h>
#include <net/ip.h> /* for local_port_range[] */
+#include <net/sock.h>
#include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
+#include <net/inet_connection_sock.h>
#include <net/net_namespace.h>
#include <net/netlabel.h>
#include <linux/uaccess.h>
@@ -61,7 +62,7 @@
#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h> /* for network interface checks */
-#include <linux/netlink.h>
+#include <net/netlink.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/dccp.h>
@@ -81,6 +82,8 @@
#include <linux/syslog.h>
#include <linux/user_namespace.h>
#include <linux/export.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
#include "avc.h"
#include "objsec.h"
@@ -92,8 +95,6 @@
#include "audit.h"
#include "avc_ss.h"
-#define NUM_SEL_MNT_OPTS 5
-
extern struct security_operations *security_ops;
/* SECMARK reference count */
@@ -105,7 +106,7 @@ int selinux_enforcing;
static int __init enforcing_setup(char *str)
{
unsigned long enforcing;
- if (!strict_strtoul(str, 0, &enforcing))
+ if (!kstrtoul(str, 0, &enforcing))
selinux_enforcing = enforcing ? 1 : 0;
return 1;
}
@@ -118,7 +119,7 @@ int selinux_enabled = CONFIG_SECURITY_SELINUX_BOOTPARAM_VALUE;
static int __init selinux_enabled_setup(char *str)
{
unsigned long enabled;
- if (!strict_strtoul(str, 0, &enabled))
+ if (!kstrtoul(str, 0, &enabled))
selinux_enabled = enabled ? 1 : 0;
return 1;
}
@@ -136,12 +137,28 @@ static struct kmem_cache *sel_inode_cache;
* This function checks the SECMARK reference counter to see if any SECMARK
* targets are currently configured, if the reference counter is greater than
* zero SECMARK is considered to be enabled. Returns true (1) if SECMARK is
- * enabled, false (0) if SECMARK is disabled.
+ * enabled, false (0) if SECMARK is disabled. If the always_check_network
+ * policy capability is enabled, SECMARK is always considered enabled.
*
*/
static int selinux_secmark_enabled(void)
{
- return (atomic_read(&selinux_secmark_refcount) > 0);
+ return (selinux_policycap_alwaysnetwork || atomic_read(&selinux_secmark_refcount));
+}
+
+/**
+ * selinux_peerlbl_enabled - Check to see if peer labeling is currently enabled
+ *
+ * Description:
+ * This function checks if NetLabel or labeled IPSEC is enabled. Returns true
+ * (1) if any are enabled or false (0) if neither are enabled. If the
+ * always_check_network policy capability is enabled, peer labeling
+ * is always considered enabled.
+ *
+ */
+static int selinux_peerlbl_enabled(void)
+{
+ return (selinux_policycap_alwaysnetwork || netlbl_enabled() || selinux_xfrm_enabled());
}
/*
@@ -216,6 +233,14 @@ static int inode_alloc_security(struct inode *inode)
return 0;
}
+static void inode_free_rcu(struct rcu_head *head)
+{
+ struct inode_security_struct *isec;
+
+ isec = container_of(head, struct inode_security_struct, rcu);
+ kmem_cache_free(sel_inode_cache, isec);
+}
+
static void inode_free_security(struct inode *inode)
{
struct inode_security_struct *isec = inode->i_security;
@@ -226,8 +251,16 @@ static void inode_free_security(struct inode *inode)
list_del_init(&isec->list);
spin_unlock(&sbsec->isec_lock);
- inode->i_security = NULL;
- kmem_cache_free(sel_inode_cache, isec);
+ /*
+ * The inode may still be referenced in a path walk and
+ * a call to selinux_inode_permission() can be made
+ * after inode_free_security() is called. Ideally, the VFS
+ * wouldn't do this, but fixing that is a much harder
+ * job. For now, simply free the i_security via RCU, and
+ * leave the current inode->i_security pointer intact.
+ * The inode will be freed after the RCU grace period too.
+ */
+ call_rcu(&isec->rcu, inode_free_rcu);
}
static int file_alloc_security(struct file *file)
@@ -282,13 +315,14 @@ static void superblock_free_security(struct super_block *sb)
/* The file system's label must be initialized prior to use. */
-static const char *labeling_behaviors[6] = {
+static const char *labeling_behaviors[7] = {
"uses xattr",
"uses transition SIDs",
"uses task SIDs",
"uses genfs_contexts",
"not configured for labeling",
"uses mountpoint labeling",
+ "uses native labeling",
};
static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dentry);
@@ -305,8 +339,11 @@ enum {
Opt_defcontext = 3,
Opt_rootcontext = 4,
Opt_labelsupport = 5,
+ Opt_nextmntopt = 6,
};
+#define NUM_SEL_MNT_OPTS (Opt_nextmntopt - 1)
+
static const match_table_t tokens = {
{Opt_context, CONTEXT_STR "%s"},
{Opt_fscontext, FSCONTEXT_STR "%s"},
@@ -351,6 +388,29 @@ static int may_context_mount_inode_relabel(u32 sid,
return rc;
}
+static int selinux_is_sblabel_mnt(struct super_block *sb)
+{
+ struct superblock_security_struct *sbsec = sb->s_security;
+
+ if (sbsec->behavior == SECURITY_FS_USE_XATTR ||
+ sbsec->behavior == SECURITY_FS_USE_TRANS ||
+ sbsec->behavior == SECURITY_FS_USE_TASK)
+ return 1;
+
+ /* Special handling for sysfs. Is genfs but also has a setxattr handler */
+ if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
+ return 1;
+
+ /*
+ * Special handling for rootfs. Is genfs but supports
+ * setting SELinux context on in-core inodes.
+ */
+ if (strncmp(sb->s_type->name, "rootfs", sizeof("rootfs")) == 0)
+ return 1;
+
+ return 0;
+}
+
static int sb_finish_set_opts(struct super_block *sb)
{
struct superblock_security_struct *sbsec = sb->s_security;
@@ -384,8 +444,6 @@ static int sb_finish_set_opts(struct super_block *sb)
}
}
- sbsec->flags |= (SE_SBINITIALIZED | SE_SBLABELSUPP);
-
if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n",
sb->s_id, sb->s_type->name);
@@ -394,15 +452,9 @@ static int sb_finish_set_opts(struct super_block *sb)
sb->s_id, sb->s_type->name,
labeling_behaviors[sbsec->behavior-1]);
- if (sbsec->behavior == SECURITY_FS_USE_GENFS ||
- sbsec->behavior == SECURITY_FS_USE_MNTPOINT ||
- sbsec->behavior == SECURITY_FS_USE_NONE ||
- sbsec->behavior > ARRAY_SIZE(labeling_behaviors))
- sbsec->flags &= ~SE_SBLABELSUPP;
-
- /* Special handling for sysfs. Is genfs but also has setxattr handler*/
- if (strncmp(sb->s_type->name, "sysfs", sizeof("sysfs")) == 0)
- sbsec->flags |= SE_SBLABELSUPP;
+ sbsec->flags |= SE_SBINITIALIZED;
+ if (selinux_is_sblabel_mnt(sb))
+ sbsec->flags |= SBLABEL_MNT;
/* Initialize the root inode. */
rc = inode_doinit_with_dentry(root_inode, root);
@@ -456,15 +508,18 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
if (!ss_initialized)
return -EINVAL;
+ /* make sure we always check enough bits to cover the mask */
+ BUILD_BUG_ON(SE_MNTMASK >= (1 << NUM_SEL_MNT_OPTS));
+
tmp = sbsec->flags & SE_MNTMASK;
/* count the number of mount options for this sb */
- for (i = 0; i < 8; i++) {
+ for (i = 0; i < NUM_SEL_MNT_OPTS; i++) {
if (tmp & 0x01)
opts->num_mnt_opts++;
tmp >>= 1;
}
/* Check if the Label support flag is set */
- if (sbsec->flags & SE_SBLABELSUPP)
+ if (sbsec->flags & SBLABEL_MNT)
opts->num_mnt_opts++;
opts->mnt_opts = kcalloc(opts->num_mnt_opts, sizeof(char *), GFP_ATOMIC);
@@ -511,9 +566,9 @@ static int selinux_get_mnt_opts(const struct super_block *sb,
opts->mnt_opts[i] = context;
opts->mnt_opts_flags[i++] = ROOTCONTEXT_MNT;
}
- if (sbsec->flags & SE_SBLABELSUPP) {
+ if (sbsec->flags & SBLABEL_MNT) {
opts->mnt_opts[i] = NULL;
- opts->mnt_opts_flags[i++] = SE_SBLABELSUPP;
+ opts->mnt_opts_flags[i++] = SBLABEL_MNT;
}
BUG_ON(i != opts->num_mnt_opts);
@@ -550,7 +605,9 @@ static int bad_option(struct superblock_security_struct *sbsec, char flag,
* labeling information.
*/
static int selinux_set_mnt_opts(struct super_block *sb,
- struct security_mnt_opts *opts)
+ struct security_mnt_opts *opts,
+ unsigned long kern_flags,
+ unsigned long *set_kern_flags)
{
const struct cred *cred = current_cred();
int rc = 0, i;
@@ -578,6 +635,12 @@ static int selinux_set_mnt_opts(struct super_block *sb,
"before the security server is initialized\n");
goto out;
}
+ if (kern_flags && !set_kern_flags) {
+ /* Specifying internal flags without providing a place to
+ * put the results is not allowed */
+ rc = -EINVAL;
+ goto out;
+ }
/*
* Binary mount data FS will come through this function twice. Once
@@ -602,10 +665,10 @@ static int selinux_set_mnt_opts(struct super_block *sb,
for (i = 0; i < num_opts; i++) {
u32 sid;
- if (flags[i] == SE_SBLABELSUPP)
+ if (flags[i] == SBLABEL_MNT)
continue;
rc = security_context_to_sid(mount_options[i],
- strlen(mount_options[i]), &sid);
+ strlen(mount_options[i]), &sid, GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -668,14 +731,19 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (strcmp(sb->s_type->name, "proc") == 0)
sbsec->flags |= SE_SBPROC;
- /* Determine the labeling behavior to use for this filesystem type. */
- rc = security_fs_use((sbsec->flags & SE_SBPROC) ? "proc" : sb->s_type->name, &sbsec->behavior, &sbsec->sid);
- if (rc) {
- printk(KERN_WARNING "%s: security_fs_use(%s) returned %d\n",
- __func__, sb->s_type->name, rc);
- goto out;
+ if (!sbsec->behavior) {
+ /*
+ * Determine the labeling behavior to use for this
+ * filesystem type.
+ */
+ rc = security_fs_use(sb);
+ if (rc) {
+ printk(KERN_WARNING
+ "%s: security_fs_use(%s) returned %d\n",
+ __func__, sb->s_type->name, rc);
+ goto out;
+ }
}
-
/* sets the context of the superblock for the fs being mounted. */
if (fscontext_sid) {
rc = may_context_mount_sb_relabel(fscontext_sid, sbsec, cred);
@@ -690,6 +758,11 @@ static int selinux_set_mnt_opts(struct super_block *sb,
* sets the label used on all file below the mountpoint, and will set
* the superblock context if not already set.
*/
+ if (kern_flags & SECURITY_LSM_NATIVE_LABELS && !context_sid) {
+ sbsec->behavior = SECURITY_FS_USE_NATIVE;
+ *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS;
+ }
+
if (context_sid) {
if (!fscontext_sid) {
rc = may_context_mount_sb_relabel(context_sid, sbsec,
@@ -721,7 +794,8 @@ static int selinux_set_mnt_opts(struct super_block *sb,
}
if (defcontext_sid) {
- if (sbsec->behavior != SECURITY_FS_USE_XATTR) {
+ if (sbsec->behavior != SECURITY_FS_USE_XATTR &&
+ sbsec->behavior != SECURITY_FS_USE_NATIVE) {
rc = -EINVAL;
printk(KERN_WARNING "SELinux: defcontext option is "
"invalid for this filesystem type\n");
@@ -749,7 +823,37 @@ out_double_mount:
goto out;
}
-static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
+static int selinux_cmp_sb_context(const struct super_block *oldsb,
+ const struct super_block *newsb)
+{
+ struct superblock_security_struct *old = oldsb->s_security;
+ struct superblock_security_struct *new = newsb->s_security;
+ char oldflags = old->flags & SE_MNTMASK;
+ char newflags = new->flags & SE_MNTMASK;
+
+ if (oldflags != newflags)
+ goto mismatch;
+ if ((oldflags & FSCONTEXT_MNT) && old->sid != new->sid)
+ goto mismatch;
+ if ((oldflags & CONTEXT_MNT) && old->mntpoint_sid != new->mntpoint_sid)
+ goto mismatch;
+ if ((oldflags & DEFCONTEXT_MNT) && old->def_sid != new->def_sid)
+ goto mismatch;
+ if (oldflags & ROOTCONTEXT_MNT) {
+ struct inode_security_struct *oldroot = oldsb->s_root->d_inode->i_security;
+ struct inode_security_struct *newroot = newsb->s_root->d_inode->i_security;
+ if (oldroot->sid != newroot->sid)
+ goto mismatch;
+ }
+ return 0;
+mismatch:
+ printk(KERN_WARNING "SELinux: mount invalid. Same superblock, "
+ "different security settings for (dev %s, "
+ "type %s)\n", newsb->s_id, newsb->s_type->name);
+ return -EBUSY;
+}
+
+static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
struct super_block *newsb)
{
const struct superblock_security_struct *oldsbsec = oldsb->s_security;
@@ -764,14 +868,14 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
* mount options. thus we can safely deal with this superblock later
*/
if (!ss_initialized)
- return;
+ return 0;
/* how can we clone if the old one wasn't set up?? */
BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED));
- /* if fs is reusing a sb, just let its options stand... */
+ /* if fs is reusing a sb, make sure that the contexts match */
if (newsbsec->flags & SE_SBINITIALIZED)
- return;
+ return selinux_cmp_sb_context(oldsb, newsb);
mutex_lock(&newsbsec->lock);
@@ -804,6 +908,7 @@ static void selinux_sb_clone_mnt_opts(const struct super_block *oldsb,
sb_finish_set_opts(newsb);
mutex_unlock(&newsbsec->lock);
+ return 0;
}
static int selinux_parse_opts_str(char *options,
@@ -947,7 +1052,7 @@ static int superblock_doinit(struct super_block *sb, void *data)
goto out_err;
out:
- rc = selinux_set_mnt_opts(sb, &opts);
+ rc = selinux_set_mnt_opts(sb, &opts, 0, NULL);
out_err:
security_free_mnt_opts(&opts);
@@ -981,7 +1086,7 @@ static void selinux_write_opts(struct seq_file *m,
case DEFCONTEXT_MNT:
prefix = DEFCONTEXT_STR;
break;
- case SE_SBLABELSUPP:
+ case SBLABEL_MNT:
seq_putc(m, ',');
seq_puts(m, LABELSUPP_STR);
continue;
@@ -1189,6 +1294,8 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
}
switch (sbsec->behavior) {
+ case SECURITY_FS_USE_NATIVE:
+ break;
case SECURITY_FS_USE_XATTR:
if (!inode->i_op->getxattr) {
isec->sid = sbsec->def_sid;
@@ -1311,15 +1418,33 @@ static int inode_doinit_with_dentry(struct inode *inode, struct dentry *opt_dent
isec->sid = sbsec->sid;
if ((sbsec->flags & SE_SBPROC) && !S_ISLNK(inode->i_mode)) {
- if (opt_dentry) {
- isec->sclass = inode_mode_to_security_class(inode->i_mode);
- rc = selinux_proc_get_sid(opt_dentry,
- isec->sclass,
- &sid);
- if (rc)
- goto out_unlock;
- isec->sid = sid;
- }
+ /* We must have a dentry to determine the label on
+ * procfs inodes */
+ if (opt_dentry)
+ /* Called from d_instantiate or
+ * d_splice_alias. */
+ dentry = dget(opt_dentry);
+ else
+ /* Called from selinux_complete_init, try to
+ * find a dentry. */
+ dentry = d_find_alias(inode);
+ /*
+ * This can be hit on boot when a file is accessed
+ * before the policy is loaded. When we load policy we
+ * may find inodes that have no dentry on the
+ * sbsec->isec_head list. No reason to complain as
+ * these will get fixed up the next time we go through
+ * inode_doinit() with a dentry, before these inodes
+ * could be used again by userspace.
+ */
+ if (!dentry)
+ goto out_unlock;
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
+ rc = selinux_proc_get_sid(dentry, isec->sclass, &sid);
+ dput(dentry);
+ if (rc)
+ goto out_unlock;
+ isec->sid = sid;
}
break;
}
@@ -1425,8 +1550,7 @@ static int cred_has_capability(const struct cred *cred,
u32 av = CAP_TO_MASK(cap);
int rc;
- COMMON_AUDIT_DATA_INIT(&ad, CAP);
- ad.tsk = current;
+ ad.type = LSM_AUDIT_DATA_CAP;
ad.u.cap = cap;
switch (CAP_TO_INDEX(cap)) {
@@ -1445,7 +1569,7 @@ static int cred_has_capability(const struct cred *cred,
rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
if (audit == SECURITY_CAP_AUDIT) {
- int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0);
+ int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
if (rc2)
return rc2;
}
@@ -1468,8 +1592,7 @@ static int task_has_system(struct task_struct *tsk,
static int inode_has_perm(const struct cred *cred,
struct inode *inode,
u32 perms,
- struct common_audit_data *adp,
- unsigned flags)
+ struct common_audit_data *adp)
{
struct inode_security_struct *isec;
u32 sid;
@@ -1482,19 +1605,7 @@ static int inode_has_perm(const struct cred *cred,
sid = cred_sid(cred);
isec = inode->i_security;
- return avc_has_perm_flags(sid, isec->sid, isec->sclass, perms, adp, flags);
-}
-
-static int inode_has_perm_noadp(const struct cred *cred,
- struct inode *inode,
- u32 perms,
- unsigned flags)
-{
- struct common_audit_data ad;
-
- COMMON_AUDIT_DATA_INIT(&ad, INODE);
- ad.u.inode = inode;
- return inode_has_perm(cred, inode, perms, &ad, flags);
+ return avc_has_perm(sid, isec->sid, isec->sclass, perms, adp);
}
/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1507,9 +1618,9 @@ static inline int dentry_has_perm(const struct cred *cred,
struct inode *inode = dentry->d_inode;
struct common_audit_data ad;
- COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
- return inode_has_perm(cred, inode, av, &ad, 0);
+ return inode_has_perm(cred, inode, av, &ad);
}
/* Same as inode_has_perm, but pass explicit audit data containing
@@ -1522,9 +1633,21 @@ static inline int path_has_perm(const struct cred *cred,
struct inode *inode = path->dentry->d_inode;
struct common_audit_data ad;
- COMMON_AUDIT_DATA_INIT(&ad, PATH);
+ ad.type = LSM_AUDIT_DATA_PATH;
ad.u.path = *path;
- return inode_has_perm(cred, inode, av, &ad, 0);
+ return inode_has_perm(cred, inode, av, &ad);
+}
+
+/* Same as path_has_perm, but uses the inode from the file struct. */
+static inline int file_path_has_perm(const struct cred *cred,
+ struct file *file,
+ u32 av)
+{
+ struct common_audit_data ad;
+
+ ad.type = LSM_AUDIT_DATA_PATH;
+ ad.u.path = file->f_path;
+ return inode_has_perm(cred, file_inode(file), av, &ad);
}
/* Check whether a task can use an open file descriptor to
@@ -1540,12 +1663,12 @@ static int file_has_perm(const struct cred *cred,
u32 av)
{
struct file_security_struct *fsec = file->f_security;
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct common_audit_data ad;
u32 sid = cred_sid(cred);
int rc;
- COMMON_AUDIT_DATA_INIT(&ad, PATH);
+ ad.type = LSM_AUDIT_DATA_PATH;
ad.u.path = file->f_path;
if (sid != fsec->sid) {
@@ -1560,7 +1683,7 @@ static int file_has_perm(const struct cred *cred,
/* av is zero if only checking access to the descriptor. */
rc = 0;
if (av)
- rc = inode_has_perm(cred, inode, av, &ad, 0);
+ rc = inode_has_perm(cred, inode, av, &ad);
out:
return rc;
@@ -1584,7 +1707,7 @@ static int may_create(struct inode *dir,
sid = tsec->sid;
newsid = tsec->create_sid;
- COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
rc = avc_has_perm(sid, dsec->sid, SECCLASS_DIR,
@@ -1593,7 +1716,7 @@ static int may_create(struct inode *dir,
if (rc)
return rc;
- if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+ if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
rc = security_transition_sid(sid, dsec->sid, tclass,
&dentry->d_name, &newsid);
if (rc)
@@ -1637,7 +1760,7 @@ static int may_link(struct inode *dir,
dsec = dir->i_security;
isec = dentry->d_inode->i_security;
- COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
av = DIR__SEARCH;
@@ -1683,7 +1806,7 @@ static inline int may_rename(struct inode *old_dir,
old_is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
new_dsec = new_dir->i_security;
- COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = old_dentry;
rc = avc_has_perm(sid, old_dsec->sid, SECCLASS_DIR,
@@ -1969,7 +2092,7 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
struct task_security_struct *new_tsec;
struct inode_security_struct *isec;
struct common_audit_data ad;
- struct inode *inode = bprm->file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(bprm->file);
int rc;
rc = cap_bprm_set_creds(bprm);
@@ -1998,6 +2121,15 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
new_tsec->sid = old_tsec->exec_sid;
/* Reset exec SID on execve. */
new_tsec->exec_sid = 0;
+
+ /*
+ * Minimize confusion: if no_new_privs or nosuid and a
+ * transition is explicitly requested, then fail the exec.
+ */
+ if (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS)
+ return -EPERM;
+ if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
+ return -EACCES;
} else {
/* Check for a default transition on this program. */
rc = security_transition_sid(old_tsec->sid, isec->sid,
@@ -2007,10 +2139,11 @@ static int selinux_bprm_set_creds(struct linux_binprm *bprm)
return rc;
}
- COMMON_AUDIT_DATA_INIT(&ad, PATH);
+ ad.type = LSM_AUDIT_DATA_PATH;
ad.u.path = bprm->file->f_path;
- if (bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID)
+ if ((bprm->file->f_path.mnt->mnt_flags & MNT_NOSUID) ||
+ (bprm->unsafe & LSM_UNSAFE_NO_NEW_PRIVS))
new_tsec->sid = old_tsec->sid;
if (new_tsec->sid == old_tsec->sid) {
@@ -2092,37 +2225,36 @@ static int selinux_bprm_secureexec(struct linux_binprm *bprm)
return (atsecure || cap_bprm_secureexec(bprm));
}
+static int match_file(const void *p, struct file *file, unsigned fd)
+{
+ return file_has_perm(p, file, file_to_av(file)) ? fd + 1 : 0;
+}
+
/* Derived from fs/exec.c:flush_old_files. */
static inline void flush_unauthorized_files(const struct cred *cred,
struct files_struct *files)
{
- struct common_audit_data ad;
struct file *file, *devnull = NULL;
struct tty_struct *tty;
- struct fdtable *fdt;
- long j = -1;
int drop_tty = 0;
+ unsigned n;
tty = get_current_tty();
if (tty) {
spin_lock(&tty_files_lock);
if (!list_empty(&tty->tty_files)) {
struct tty_file_private *file_priv;
- struct inode *inode;
/* Revalidate access to controlling tty.
- Use inode_has_perm on the tty inode directly rather
- than using file_has_perm, as this particular open
- file may belong to another process and we are only
- interested in the inode-based check here. */
+ Use file_path_has_perm on the tty path directly
+ rather than using file_has_perm, as this particular
+ open file may belong to another process and we are
+ only interested in the inode-based check here. */
file_priv = list_first_entry(&tty->tty_files,
struct tty_file_private, list);
file = file_priv->file;
- inode = file->f_path.dentry->d_inode;
- if (inode_has_perm_noadp(cred, inode,
- FILE__READ | FILE__WRITE, 0)) {
+ if (file_path_has_perm(cred, file, FILE__READ | FILE__WRITE))
drop_tty = 1;
- }
}
spin_unlock(&tty_files_lock);
tty_kref_put(tty);
@@ -2132,62 +2264,19 @@ static inline void flush_unauthorized_files(const struct cred *cred,
no_tty();
/* Revalidate access to inherited open files. */
+ n = iterate_fd(files, 0, match_file, cred);
+ if (!n) /* none found? */
+ return;
- COMMON_AUDIT_DATA_INIT(&ad, INODE);
-
- spin_lock(&files->file_lock);
- for (;;) {
- unsigned long set, i;
- int fd;
-
- j++;
- i = j * __NFDBITS;
- fdt = files_fdtable(files);
- if (i >= fdt->max_fds)
- break;
- set = fdt->open_fds->fds_bits[j];
- if (!set)
- continue;
- spin_unlock(&files->file_lock);
- for ( ; set ; i++, set >>= 1) {
- if (set & 1) {
- file = fget(i);
- if (!file)
- continue;
- if (file_has_perm(cred,
- file,
- file_to_av(file))) {
- sys_close(i);
- fd = get_unused_fd();
- if (fd != i) {
- if (fd >= 0)
- put_unused_fd(fd);
- fput(file);
- continue;
- }
- if (devnull) {
- get_file(devnull);
- } else {
- devnull = dentry_open(
- dget(selinux_null),
- mntget(selinuxfs_mount),
- O_RDWR, cred);
- if (IS_ERR(devnull)) {
- devnull = NULL;
- put_unused_fd(fd);
- fput(file);
- continue;
- }
- }
- fd_install(fd, devnull);
- }
- fput(file);
- }
- }
- spin_lock(&files->file_lock);
-
- }
- spin_unlock(&files->file_lock);
+ devnull = dentry_open(&selinux_null, O_RDWR, cred);
+ if (IS_ERR(devnull))
+ devnull = NULL;
+ /* replace all the matching ones with this */
+ do {
+ replace_fd(n - 1, devnull, 0);
+ } while ((n = iterate_fd(files, n, match_file, cred)) != 0);
+ if (devnull)
+ fput(devnull);
}
/*
@@ -2417,10 +2506,11 @@ static int selinux_sb_remount(struct super_block *sb, void *data)
u32 sid;
size_t len;
- if (flags[i] == SE_SBLABELSUPP)
+ if (flags[i] == SBLABEL_MNT)
continue;
len = strlen(mount_options[i]);
- rc = security_context_to_sid(mount_options[i], len, &sid);
+ rc = security_context_to_sid(mount_options[i], len, &sid,
+ GFP_KERNEL);
if (rc) {
printk(KERN_WARNING "SELinux: security_context_to_sid"
"(%s) failed for (dev %s, type %s) errno=%d\n",
@@ -2481,7 +2571,7 @@ static int selinux_sb_kern_mount(struct super_block *sb, int flags, void *data)
if (flags & MS_KERNMOUNT)
return 0;
- COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = sb->s_root;
return superblock_has_perm(cred, sb, FILESYSTEM__MOUNT, &ad);
}
@@ -2491,14 +2581,14 @@ static int selinux_sb_statfs(struct dentry *dentry)
const struct cred *cred = current_cred();
struct common_audit_data ad;
- COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry->d_sb->s_root;
return superblock_has_perm(cred, dentry->d_sb, FILESYSTEM__GETATTR, &ad);
}
-static int selinux_mount(char *dev_name,
+static int selinux_mount(const char *dev_name,
struct path *path,
- char *type,
+ const char *type,
unsigned long flags,
void *data)
{
@@ -2531,8 +2621,43 @@ static void selinux_inode_free_security(struct inode *inode)
inode_free_security(inode);
}
+static int selinux_dentry_init_security(struct dentry *dentry, int mode,
+ struct qstr *name, void **ctx,
+ u32 *ctxlen)
+{
+ const struct cred *cred = current_cred();
+ struct task_security_struct *tsec;
+ struct inode_security_struct *dsec;
+ struct superblock_security_struct *sbsec;
+ struct inode *dir = dentry->d_parent->d_inode;
+ u32 newsid;
+ int rc;
+
+ tsec = cred->security;
+ dsec = dir->i_security;
+ sbsec = dir->i_sb->s_security;
+
+ if (tsec->create_sid && sbsec->behavior != SECURITY_FS_USE_MNTPOINT) {
+ newsid = tsec->create_sid;
+ } else {
+ rc = security_transition_sid(tsec->sid, dsec->sid,
+ inode_mode_to_security_class(mode),
+ name,
+ &newsid);
+ if (rc) {
+ printk(KERN_WARNING
+ "%s: security_transition_sid failed, rc=%d\n",
+ __func__, -rc);
+ return rc;
+ }
+ }
+
+ return security_sid_to_context(newsid, (char **)ctx, ctxlen);
+}
+
static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
+ const struct qstr *qstr,
+ const char **name,
void **value, size_t *len)
{
const struct task_security_struct *tsec = current_security();
@@ -2540,7 +2665,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
struct superblock_security_struct *sbsec;
u32 sid, newsid, clen;
int rc;
- char *namep = NULL, *context;
+ char *context;
dsec = dir->i_security;
sbsec = dir->i_sb->s_security;
@@ -2551,7 +2676,7 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
if ((sbsec->flags & SE_SBINITIALIZED) &&
(sbsec->behavior == SECURITY_FS_USE_MNTPOINT))
newsid = sbsec->mntpoint_sid;
- else if (!newsid || !(sbsec->flags & SE_SBLABELSUPP)) {
+ else if (!newsid || !(sbsec->flags & SBLABEL_MNT)) {
rc = security_transition_sid(sid, dsec->sid,
inode_mode_to_security_class(inode->i_mode),
qstr, &newsid);
@@ -2573,22 +2698,16 @@ static int selinux_inode_init_security(struct inode *inode, struct inode *dir,
isec->initialized = 1;
}
- if (!ss_initialized || !(sbsec->flags & SE_SBLABELSUPP))
+ if (!ss_initialized || !(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
- if (name) {
- namep = kstrdup(XATTR_SELINUX_SUFFIX, GFP_NOFS);
- if (!namep)
- return -ENOMEM;
- *name = namep;
- }
+ if (name)
+ *name = XATTR_SELINUX_SUFFIX;
if (value && len) {
rc = security_sid_to_context_force(newsid, &context, &clen);
- if (rc) {
- kfree(namep);
+ if (rc)
return rc;
- }
*value = context;
*len = clen;
}
@@ -2651,13 +2770,36 @@ static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *na
return dentry_has_perm(cred, dentry, FILE__READ);
}
+static noinline int audit_inode_permission(struct inode *inode,
+ u32 perms, u32 audited, u32 denied,
+ int result,
+ unsigned flags)
+{
+ struct common_audit_data ad;
+ struct inode_security_struct *isec = inode->i_security;
+ int rc;
+
+ ad.type = LSM_AUDIT_DATA_INODE;
+ ad.u.inode = inode;
+
+ rc = slow_avc_audit(current_sid(), isec->sid, isec->sclass, perms,
+ audited, denied, result, &ad, flags);
+ if (rc)
+ return rc;
+ return 0;
+}
+
static int selinux_inode_permission(struct inode *inode, int mask)
{
const struct cred *cred = current_cred();
- struct common_audit_data ad;
u32 perms;
bool from_access;
unsigned flags = mask & MAY_NOT_BLOCK;
+ struct inode_security_struct *isec;
+ u32 sid;
+ struct av_decision avd;
+ int rc, rc2;
+ u32 audited, denied;
from_access = mask & MAY_ACCESS;
mask &= (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND);
@@ -2666,21 +2808,34 @@ static int selinux_inode_permission(struct inode *inode, int mask)
if (!mask)
return 0;
- COMMON_AUDIT_DATA_INIT(&ad, INODE);
- ad.u.inode = inode;
+ validate_creds(cred);
- if (from_access)
- ad.selinux_audit_data.auditdeny |= FILE__AUDIT_ACCESS;
+ if (unlikely(IS_PRIVATE(inode)))
+ return 0;
perms = file_mask_to_av(inode->i_mode, mask);
- return inode_has_perm(cred, inode, perms, &ad, flags);
+ sid = cred_sid(cred);
+ isec = inode->i_security;
+
+ rc = avc_has_perm_noaudit(sid, isec->sid, isec->sclass, perms, 0, &avd);
+ audited = avc_audit_required(perms, &avd, rc,
+ from_access ? FILE__AUDIT_ACCESS : 0,
+ &denied);
+ if (likely(!audited))
+ return rc;
+
+ rc2 = audit_inode_permission(inode, perms, audited, denied, rc, flags);
+ if (rc2)
+ return rc2;
+ return rc;
}
static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
{
const struct cred *cred = current_cred();
unsigned int ia_valid = iattr->ia_valid;
+ __u32 av = FILE__WRITE;
/* ATTR_FORCE is just used for ATTR_KILL_S[UG]ID. */
if (ia_valid & ATTR_FORCE) {
@@ -2694,7 +2849,10 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET))
return dentry_has_perm(cred, dentry, FILE__SETATTR);
- return dentry_has_perm(cred, dentry, FILE__WRITE);
+ if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE))
+ av |= FILE__OPEN;
+
+ return dentry_has_perm(cred, dentry, av);
}
static int selinux_inode_getattr(struct vfsmount *mnt, struct dentry *dentry)
@@ -2743,13 +2901,13 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
return selinux_inode_setotherxattr(dentry, name);
sbsec = inode->i_sb->s_security;
- if (!(sbsec->flags & SE_SBLABELSUPP))
+ if (!(sbsec->flags & SBLABEL_MNT))
return -EOPNOTSUPP;
if (!inode_owner_or_capable(inode))
return -EPERM;
- COMMON_AUDIT_DATA_INIT(&ad, DENTRY);
+ ad.type = LSM_AUDIT_DATA_DENTRY;
ad.u.dentry = dentry;
rc = avc_has_perm(sid, isec->sid, isec->sclass,
@@ -2757,10 +2915,32 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
if (rc)
return rc;
- rc = security_context_to_sid(value, size, &newsid);
+ rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL);
if (rc == -EINVAL) {
- if (!capable(CAP_MAC_ADMIN))
+ if (!capable(CAP_MAC_ADMIN)) {
+ struct audit_buffer *ab;
+ size_t audit_size;
+ const char *str;
+
+ /* We strip a nul only if it is at the end, otherwise the
+ * context contains a nul and we should audit that */
+ if (value) {
+ str = value;
+ if (str[size - 1] == '\0')
+ audit_size = size - 1;
+ else
+ audit_size = size;
+ } else {
+ str = "";
+ audit_size = 0;
+ }
+ ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ audit_log_format(ab, "op=setxattr invalid_context=");
+ audit_log_n_untrustedstring(ab, value, audit_size);
+ audit_log_end(ab);
+
return rc;
+ }
rc = security_context_to_sid_force(value, size, &newsid);
}
if (rc)
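
Aside: the invalid-context audit path above is reached when userspace writes the security.selinux xattr. A minimal sketch follows, not part of the patch; it assumes an SELinux-enabled system, sufficient privilege (relabel permission or CAP_MAC_ADMIN), and a context string known to the loaded policy. The path and context below are placeholders.

    #include <stdio.h>
    #include <string.h>
    #include <sys/xattr.h>

    int main(void)
    {
        const char ctx[] = "system_u:object_r:etc_t:s0";

        /* size may or may not include a trailing NUL; the kernel strips
         * a NUL only when it is the last byte, as the audit code notes. */
        if (setxattr("/tmp/testfile", "security.selinux",
                     ctx, strlen(ctx), 0) == -1) {
            perror("setxattr");
            return 1;
        }
        return 0;
    }
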
@@ -2805,7 +2985,10 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
return;
}
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
+ isec->initialized = 1;
+
return;
}
@@ -2889,10 +3072,11 @@ static int selinux_inode_setsecurity(struct inode *inode, const char *name,
if (!value || !size)
return -EACCES;
- rc = security_context_to_sid((void *)value, size, &newsid);
+ rc = security_context_to_sid((void *)value, size, &newsid, GFP_KERNEL);
if (rc)
return rc;
+ isec->sclass = inode_mode_to_security_class(inode->i_mode);
isec->sid = newsid;
isec->initialized = 1;
return 0;
@@ -2917,7 +3101,7 @@ static void selinux_inode_getsecid(const struct inode *inode, u32 *secid)
static int selinux_revalidate_file_permission(struct file *file, int mask)
{
const struct cred *cred = current_cred();
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
/* file_mask_to_av won't add FILE__WRITE if MAY_APPEND is set */
if ((file->f_flags & O_APPEND) && (mask & MAY_WRITE))
@@ -2929,7 +3113,7 @@ static int selinux_revalidate_file_permission(struct file *file, int mask)
static int selinux_file_permission(struct file *file, int mask)
{
- struct inode *inode = file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(file);
struct file_security_struct *fsec = file->f_security;
struct inode_security_struct *isec = inode->i_security;
u32 sid = current_sid();
@@ -2940,7 +3124,7 @@ static int selinux_file_permission(struct file *file, int mask)
if (sid == fsec->sid && fsec->isid == isec->sid &&
fsec->pseqno == avc_policy_seqno())
- /* No change since dentry_open check. */
+ /* No change since file_open check. */
return 0;
return selinux_revalidate_file_permission(file, mask);
@@ -2969,15 +3153,15 @@ static int selinux_file_ioctl(struct file *file, unsigned int cmd,
/* fall through */
case FIGETBSZ:
/* fall through */
- case EXT2_IOC_GETFLAGS:
+ case FS_IOC_GETFLAGS:
/* fall through */
- case EXT2_IOC_GETVERSION:
+ case FS_IOC_GETVERSION:
error = file_has_perm(cred, file, FILE__GETATTR);
break;
- case EXT2_IOC_SETFLAGS:
+ case FS_IOC_SETFLAGS:
/* fall through */
- case EXT2_IOC_SETVERSION:
+ case FS_IOC_SETVERSION:
error = file_has_perm(cred, file, FILE__SETATTR);
break;
@@ -3040,31 +3224,27 @@ error:
return rc;
}
-static int selinux_file_mmap(struct file *file, unsigned long reqprot,
- unsigned long prot, unsigned long flags,
- unsigned long addr, unsigned long addr_only)
+static int selinux_mmap_addr(unsigned long addr)
{
- int rc = 0;
- u32 sid = current_sid();
+ int rc;
+
+ /* do DAC check on address space usage */
+ rc = cap_mmap_addr(addr);
+ if (rc)
+ return rc;
- /*
- * notice that we are intentionally putting the SELinux check before
- * the secondary cap_file_mmap check. This is such a likely attempt
- * at bad behaviour/exploit that we always want to get the AVC, even
- * if DAC would have also denied the operation.
- */
if (addr < CONFIG_LSM_MMAP_MIN_ADDR) {
+ u32 sid = current_sid();
rc = avc_has_perm(sid, sid, SECCLASS_MEMPROTECT,
MEMPROTECT__MMAP_ZERO, NULL);
- if (rc)
- return rc;
}
- /* do DAC check on address space usage */
- rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
- if (rc || addr_only)
- return rc;
+ return rc;
+}
+static int selinux_mmap_file(struct file *file, unsigned long reqprot,
+ unsigned long prot, unsigned long flags)
+{
if (selinux_checkreqprot)
prot = reqprot;
@@ -3123,11 +3303,6 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
switch (cmd) {
case F_SETFL:
- if (!file->f_path.dentry || !file->f_path.dentry->d_inode) {
- err = -EINVAL;
- break;
- }
-
if ((file->f_flags & O_APPEND) && !(arg & O_APPEND)) {
err = file_has_perm(cred, file, FILE__WRITE);
break;
@@ -3138,21 +3313,21 @@ static int selinux_file_fcntl(struct file *file, unsigned int cmd,
case F_GETFL:
case F_GETOWN:
case F_GETSIG:
+ case F_GETOWNER_UIDS:
/* Just check FD__USE permission */
err = file_has_perm(cred, file, 0);
break;
case F_GETLK:
case F_SETLK:
case F_SETLKW:
+ case F_OFD_GETLK:
+ case F_OFD_SETLK:
+ case F_OFD_SETLKW:
#if BITS_PER_LONG == 32
case F_GETLK64:
case F_SETLK64:
case F_SETLKW64:
#endif
- if (!file->f_path.dentry || !file->f_path.dentry->d_inode) {
- err = -EINVAL;
- break;
- }
err = file_has_perm(cred, file, FILE__LOCK);
break;
}
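
Aside: the new F_OFD_* cases above are open file description locks, checked with the same FILE__LOCK permission as classic POSIX locks. A minimal userspace sketch follows, not part of the patch; it assumes Linux 3.15 or later and a glibc exposing F_OFD_SETLK under _GNU_SOURCE, and the path is a placeholder.

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>

    int main(void)
    {
        struct flock fl = {
            .l_type   = F_WRLCK,
            .l_whence = SEEK_SET,
            .l_start  = 0,
            .l_len    = 0,   /* whole file */
            .l_pid    = 0,   /* must be 0 for OFD locks */
        };
        int fd = open("/tmp/lockfile", O_RDWR | O_CREAT, 0600);

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (fcntl(fd, F_OFD_SETLK, &fl) == -1) {
            perror("F_OFD_SETLK");
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }
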
@@ -3199,15 +3374,13 @@ static int selinux_file_receive(struct file *file)
return file_has_perm(cred, file, file_to_av(file));
}
-static int selinux_dentry_open(struct file *file, const struct cred *cred)
+static int selinux_file_open(struct file *file, const struct cred *cred)
{
struct file_security_struct *fsec;
- struct inode *inode;
struct inode_security_struct *isec;
- inode = file->f_path.dentry->d_inode;
fsec = file->f_security;
- isec = inode->i_security;
+ isec = file_inode(file)->i_security;
/*
* Save inode label and policy sequence number
* at open-time so that selinux_file_permission
@@ -3225,7 +3398,7 @@ static int selinux_dentry_open(struct file *file, const struct cred *cred)
* new inode label or new policy.
* This check is not redundant - do not remove.
*/
- return inode_has_perm_noadp(cred, inode, open_file_to_av(file), 0);
+ return file_path_has_perm(cred, file, open_file_to_av(file));
}
/* task security operations */
@@ -3347,7 +3520,7 @@ static int selinux_kernel_module_request(char *kmod_name)
sid = task_sid(current);
- COMMON_AUDIT_DATA_INIT(&ad, KMOD);
+ ad.type = LSM_AUDIT_DATA_KMOD;
ad.u.kmod_name = kmod_name;
return avc_has_perm(sid, SECINITSID_KERNEL, SECCLASS_SYSTEM,
@@ -3486,8 +3659,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (ihlen < sizeof(_iph))
goto out;
- ad->u.net.v4info.saddr = ih->saddr;
- ad->u.net.v4info.daddr = ih->daddr;
+ ad->u.net->v4info.saddr = ih->saddr;
+ ad->u.net->v4info.daddr = ih->daddr;
ret = 0;
if (proto)
@@ -3505,8 +3678,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
@@ -3521,8 +3694,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
@@ -3537,8 +3710,8 @@ static int selinux_parse_skb_ipv4(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
@@ -3565,8 +3738,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (ip6 == NULL)
goto out;
- ad->u.net.v6info.saddr = ip6->saddr;
- ad->u.net.v6info.daddr = ip6->daddr;
+ ad->u.net->v6info.saddr = ip6->saddr;
+ ad->u.net->v6info.daddr = ip6->daddr;
ret = 0;
nexthdr = ip6->nexthdr;
@@ -3586,8 +3759,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (th == NULL)
break;
- ad->u.net.sport = th->source;
- ad->u.net.dport = th->dest;
+ ad->u.net->sport = th->source;
+ ad->u.net->dport = th->dest;
break;
}
@@ -3598,8 +3771,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (uh == NULL)
break;
- ad->u.net.sport = uh->source;
- ad->u.net.dport = uh->dest;
+ ad->u.net->sport = uh->source;
+ ad->u.net->dport = uh->dest;
break;
}
@@ -3610,8 +3783,8 @@ static int selinux_parse_skb_ipv6(struct sk_buff *skb,
if (dh == NULL)
break;
- ad->u.net.sport = dh->dccph_sport;
- ad->u.net.dport = dh->dccph_dport;
+ ad->u.net->sport = dh->dccph_sport;
+ ad->u.net->dport = dh->dccph_dport;
break;
}
@@ -3631,13 +3804,13 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
char *addrp;
int ret;
- switch (ad->u.net.family) {
+ switch (ad->u.net->family) {
case PF_INET:
ret = selinux_parse_skb_ipv4(skb, ad, proto);
if (ret)
goto parse_error;
- addrp = (char *)(src ? &ad->u.net.v4info.saddr :
- &ad->u.net.v4info.daddr);
+ addrp = (char *)(src ? &ad->u.net->v4info.saddr :
+ &ad->u.net->v4info.daddr);
goto okay;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -3645,8 +3818,8 @@ static int selinux_parse_skb(struct sk_buff *skb, struct common_audit_data *ad,
ret = selinux_parse_skb_ipv6(skb, ad, proto);
if (ret)
goto parse_error;
- addrp = (char *)(src ? &ad->u.net.v6info.saddr :
- &ad->u.net.v6info.daddr);
+ addrp = (char *)(src ? &ad->u.net->v6info.saddr :
+ &ad->u.net->v6info.daddr);
goto okay;
#endif /* IPV6 */
default:
@@ -3688,8 +3861,12 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
u32 nlbl_sid;
u32 nlbl_type;
- selinux_skb_xfrm_sid(skb, &xfrm_sid);
- selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ err = selinux_xfrm_skb_sid(skb, &xfrm_sid);
+ if (unlikely(err))
+ return -EACCES;
+ err = selinux_netlbl_skbuff_getsid(skb, family, &nlbl_type, &nlbl_sid);
+ if (unlikely(err))
+ return -EACCES;
err = security_net_peersid_resolve(nlbl_sid, nlbl_type, xfrm_sid, sid);
if (unlikely(err)) {
@@ -3702,6 +3879,30 @@ static int selinux_skb_peerlbl_sid(struct sk_buff *skb, u16 family, u32 *sid)
return 0;
}
+/**
+ * selinux_conn_sid - Determine the child socket label for a connection
+ * @sk_sid: the parent socket's SID
+ * @skb_sid: the packet's SID
+ * @conn_sid: the resulting connection SID
+ *
+ * If @skb_sid is valid then the user:role:type information from @sk_sid is
+ * combined with the MLS information from @skb_sid in order to create
+ * @conn_sid. If @skb_sid is not valid then @conn_sid is simply a copy
+ * of @sk_sid. Returns zero on success, negative values on failure.
+ *
+ */
+static int selinux_conn_sid(u32 sk_sid, u32 skb_sid, u32 *conn_sid)
+{
+ int err = 0;
+
+ if (skb_sid != SECSID_NULL)
+ err = security_sid_mls_copy(sk_sid, skb_sid, conn_sid);
+ else
+ *conn_sid = sk_sid;
+
+ return err;
+}
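+/* [Editor's sketch, not part of the patch] A minimal illustration of how
+ * selinux_conn_sid() is meant to be used when labeling a new connection,
+ * mirroring the selinux_inet_conn_request() hunk further down. The wrapper
+ * example_label_request() and its calling context are hypothetical. */
+static int example_label_request(struct sock *sk, struct sk_buff *skb,
+				 struct request_sock *req, u16 family)
+{
+	struct sk_security_struct *sksec = sk->sk_security;
+	u32 peer_sid, conn_sid;
+	int err;
+
+	/* peer label taken from the packet; may come back as SECSID_NULL */
+	err = selinux_skb_peerlbl_sid(skb, family, &peer_sid);
+	if (err)
+		return err;
+	/* child label: parent's user:role:type plus the packet's MLS range */
+	err = selinux_conn_sid(sksec->sid, peer_sid, &conn_sid);
+	if (err)
+		return err;
+	req->secid = conn_sid;
+	req->peer_secid = peer_sid;
+	return 0;
+}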
+
/* socket security operations */
static int socket_sockcreate_sid(const struct task_security_struct *tsec,
@@ -3720,13 +3921,15 @@ static int sock_has_perm(struct task_struct *task, struct sock *sk, u32 perms)
{
struct sk_security_struct *sksec = sk->sk_security;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
u32 tsid = task_sid(task);
if (sksec->sid == SECINITSID_KERNEL)
return 0;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = sk;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = sk;
return avc_has_perm(tsid, sksec->sid, sksec->sclass, perms, &ad);
}
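/* [Editor's sketch, not part of the patch] The audit-data idiom this patch
 * converts every network hook to: the network details now live in an
 * on-stack struct lsm_network_audit that common_audit_data merely points
 * at, replacing the old COMMON_AUDIT_DATA_INIT(&ad, NET) embedded union.
 * example_sock_check() is a hypothetical caller. */
static int example_sock_check(u32 ssid, struct sock *sk, u32 perms)
{
	struct sk_security_struct *sksec = sk->sk_security;
	struct common_audit_data ad;
	struct lsm_network_audit net = {0,};

	ad.type = LSM_AUDIT_DATA_NET;	/* what kind of audit data follows */
	ad.u.net = &net;		/* point at the caller-owned storage */
	ad.u.net->sk = sk;		/* fill in whichever fields apply */

	return avc_has_perm(ssid, sksec->sid, sksec->sclass, perms, &ad);
}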
@@ -3804,6 +4007,7 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
char *addrp;
struct sk_security_struct *sksec = sk->sk_security;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
@@ -3822,16 +4026,17 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (snum) {
int low, high;
- inet_get_local_port_range(&low, &high);
+ inet_get_local_port_range(sock_net(sk), &low, &high);
if (snum < max(PROT_SOCK, low) || snum > high) {
err = sel_netport_sid(sk->sk_protocol,
snum, &sid);
if (err)
goto out;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sport = htons(snum);
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sport = htons(snum);
+ ad.u.net->family = family;
err = avc_has_perm(sksec->sid, sid,
sksec->sclass,
SOCKET__NAME_BIND, &ad);
@@ -3862,14 +4067,15 @@ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, in
if (err)
goto out;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sport = htons(snum);
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sport = htons(snum);
+ ad.u.net->family = family;
if (family == PF_INET)
- ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
+ ad.u.net->v4info.saddr = addr4->sin_addr.s_addr;
else
- ad.u.net.v6info.saddr = addr6->sin6_addr;
+ ad.u.net->v6info.saddr = addr6->sin6_addr;
err = avc_has_perm(sksec->sid, sid,
sksec->sclass, node_perm, &ad);
@@ -3896,6 +4102,7 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
if (sksec->sclass == SECCLASS_TCP_SOCKET ||
sksec->sclass == SECCLASS_DCCP_SOCKET) {
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
struct sockaddr_in *addr4 = NULL;
struct sockaddr_in6 *addr6 = NULL;
unsigned short snum;
@@ -3920,9 +4127,10 @@ static int selinux_socket_connect(struct socket *sock, struct sockaddr *address,
perm = (sksec->sclass == SECCLASS_TCP_SOCKET) ?
TCP_SOCKET__NAME_CONNECT : DCCP_SOCKET__NAME_CONNECT;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.dport = htons(snum);
- ad.u.net.family = sk->sk_family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->dport = htons(snum);
+ ad.u.net->family = sk->sk_family;
err = avc_has_perm(sksec->sid, sid, sksec->sclass, perm, &ad);
if (err)
goto out;
@@ -4011,10 +4219,12 @@ static int selinux_socket_unix_stream_connect(struct sock *sock,
struct sk_security_struct *sksec_other = other->sk_security;
struct sk_security_struct *sksec_new = newsk->sk_security;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
int err;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = other;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = other;
err = avc_has_perm(sksec_sock->sid, sksec_other->sid,
sksec_other->sclass,
@@ -4041,9 +4251,11 @@ static int selinux_socket_unix_may_send(struct socket *sock,
struct sk_security_struct *ssec = sock->sk->sk_security;
struct sk_security_struct *osec = other->sk->sk_security;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.sk = other->sk;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->sk = other->sk;
return avc_has_perm(ssec->sid, osec->sid, osec->sclass, SOCKET__SENDTO,
&ad);
@@ -4079,11 +4291,13 @@ static int selinux_sock_rcv_skb_compat(struct sock *sk, struct sk_buff *skb,
struct sk_security_struct *sksec = sk->sk_security;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
char *addrp;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = skb->skb_iif;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = skb->skb_iif;
+ ad.u.net->family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
if (err)
return err;
@@ -4110,6 +4324,7 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
u16 family = sk->sk_family;
u32 sk_sid = sksec->sid;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
@@ -4129,13 +4344,14 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
return selinux_sock_rcv_skb_compat(sk, skb, family);
secmark_active = selinux_secmark_enabled();
- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return 0;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = skb->skb_iif;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = skb->skb_iif;
+ ad.u.net->family = family;
err = selinux_parse_skb(skb, &ad, &addrp, 1, NULL);
if (err)
return err;
@@ -4154,8 +4370,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
}
err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER,
PEER__RECV, &ad);
- if (err)
+ if (err) {
selinux_netlbl_err(skb, err, 0);
+ return err;
+ }
}
if (secmark_active) {
@@ -4292,27 +4510,18 @@ static int selinux_inet_conn_request(struct sock *sk, struct sk_buff *skb,
{
struct sk_security_struct *sksec = sk->sk_security;
int err;
- u16 family = sk->sk_family;
- u32 newsid;
+ u16 family = req->rsk_ops->family;
+ u32 connsid;
u32 peersid;
- /* handle mapped IPv4 packets arriving via IPv6 sockets */
- if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
- family = PF_INET;
-
err = selinux_skb_peerlbl_sid(skb, family, &peersid);
if (err)
return err;
- if (peersid == SECSID_NULL) {
- req->secid = sksec->sid;
- req->peer_secid = SECSID_NULL;
- } else {
- err = security_sid_mls_copy(sksec->sid, peersid, &newsid);
- if (err)
- return err;
- req->secid = newsid;
- req->peer_secid = peersid;
- }
+ err = selinux_conn_sid(sksec->sid, peersid, &connsid);
+ if (err)
+ return err;
+ req->secid = connsid;
+ req->peer_secid = peersid;
return selinux_netlbl_inet_conn_request(req, family);
}
@@ -4346,6 +4555,11 @@ static void selinux_inet_conn_established(struct sock *sk, struct sk_buff *skb)
selinux_skb_peerlbl_sid(skb, family, &sksec->peer_sid);
}
+static void selinux_skb_owned_by(struct sk_buff *skb, struct sock *sk)
+{
+ skb_set_owner_w(skb, sk);
+}
+
static int selinux_secmark_relabel_packet(u32 sid)
{
const struct task_security_struct *__tsec;
@@ -4373,6 +4587,24 @@ static void selinux_req_classify_flow(const struct request_sock *req,
fl->flowi_secid = req->secid;
}
+static int selinux_tun_dev_alloc_security(void **security)
+{
+ struct tun_security_struct *tunsec;
+
+ tunsec = kzalloc(sizeof(*tunsec), GFP_KERNEL);
+ if (!tunsec)
+ return -ENOMEM;
+ tunsec->sid = current_sid();
+
+ *security = tunsec;
+ return 0;
+}
+
+static void selinux_tun_dev_free_security(void *security)
+{
+ kfree(security);
+}
+
static int selinux_tun_dev_create(void)
{
u32 sid = current_sid();
@@ -4388,8 +4620,17 @@ static int selinux_tun_dev_create(void)
NULL);
}
-static void selinux_tun_dev_post_create(struct sock *sk)
+static int selinux_tun_dev_attach_queue(void *security)
{
+ struct tun_security_struct *tunsec = security;
+
+ return avc_has_perm(current_sid(), tunsec->sid, SECCLASS_TUN_SOCKET,
+ TUN_SOCKET__ATTACH_QUEUE, NULL);
+}
+
+static int selinux_tun_dev_attach(struct sock *sk, void *security)
+{
+ struct tun_security_struct *tunsec = security;
struct sk_security_struct *sksec = sk->sk_security;
/* we don't currently perform any NetLabel based labeling here and it
@@ -4399,20 +4640,19 @@ static void selinux_tun_dev_post_create(struct sock *sk)
* cause confusion to the TUN user that had no idea network labeling
* protocols were being used */
- /* see the comments in selinux_tun_dev_create() about why we don't use
- * the sockcreate SID here */
-
- sksec->sid = current_sid();
+ sksec->sid = tunsec->sid;
sksec->sclass = SECCLASS_TUN_SOCKET;
+
+ return 0;
}
-static int selinux_tun_dev_attach(struct sock *sk)
+static int selinux_tun_dev_open(void *security)
{
- struct sk_security_struct *sksec = sk->sk_security;
+ struct tun_security_struct *tunsec = security;
u32 sid = current_sid();
int err;
- err = avc_has_perm(sid, sksec->sid, SECCLASS_TUN_SOCKET,
+ err = avc_has_perm(sid, tunsec->sid, SECCLASS_TUN_SOCKET,
TUN_SOCKET__RELABELFROM, NULL);
if (err)
return err;
@@ -4420,8 +4660,7 @@ static int selinux_tun_dev_attach(struct sock *sk)
TUN_SOCKET__RELABELTO, NULL);
if (err)
return err;
-
- sksec->sid = sid;
+ tunsec->sid = sid;
return 0;
}
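/* [Editor's sketch, not part of the patch] How the four TUN hooks above are
 * expected to fit together, assuming the TUN driver calls them in roughly
 * this order (alloc on device creation, attach_queue/attach when a queue is
 * connected, open when reattaching to a persistent device). The wrapper
 * example_tun_lifecycle() is hypothetical. */
static int example_tun_lifecycle(struct sock *queue_sk)
{
	void *sec;
	int err;

	err = selinux_tun_dev_alloc_security(&sec);	/* blob labeled with current_sid() */
	if (err)
		return err;
	err = selinux_tun_dev_create();			/* device creation check */
	if (!err)
		err = selinux_tun_dev_attach_queue(sec);	/* tun_socket:attach_queue */
	if (!err)
		err = selinux_tun_dev_attach(queue_sk, sec);	/* copy blob SID to socket */
	if (err)
		selinux_tun_dev_free_security(sec);
	return err;
}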
@@ -4433,7 +4672,7 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
struct nlmsghdr *nlh;
struct sk_security_struct *sksec = sk->sk_security;
- if (skb->len < NLMSG_SPACE(0)) {
+ if (skb->len < NLMSG_HDRLEN) {
err = -EINVAL;
goto out;
}
@@ -4470,6 +4709,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
char *addrp;
u32 peer_sid;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
u8 secmark_active;
u8 netlbl_active;
u8 peerlbl_active;
@@ -4479,16 +4719,17 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
secmark_active = selinux_secmark_enabled();
netlbl_active = netlbl_enabled();
- peerlbl_active = netlbl_active || selinux_xfrm_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
if (!secmark_active && !peerlbl_active)
return NF_ACCEPT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid) != 0)
return NF_DROP;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 1, NULL) != 0)
return NF_DROP;
@@ -4517,7 +4758,7 @@ static unsigned int selinux_ip_forward(struct sk_buff *skb, int ifindex,
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_forward(unsigned int hooknum,
+static unsigned int selinux_ipv4_forward(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4527,7 +4768,7 @@ static unsigned int selinux_ipv4_forward(unsigned int hooknum,
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_forward(unsigned int hooknum,
+static unsigned int selinux_ipv6_forward(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4540,6 +4781,7 @@ static unsigned int selinux_ipv6_forward(unsigned int hooknum,
static unsigned int selinux_ip_output(struct sk_buff *skb,
u16 family)
{
+ struct sock *sk;
u32 sid;
if (!netlbl_enabled())
@@ -4548,8 +4790,27 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
/* we do this in the LOCAL_OUT path and not the POST_ROUTING path
* because we want to make sure we apply the necessary labeling
* before IPsec is applied so we can leverage AH protection */
- if (skb->sk) {
- struct sk_security_struct *sksec = skb->sk->sk_security;
+ sk = skb->sk;
+ if (sk) {
+ struct sk_security_struct *sksec;
+
+ if (sk->sk_state == TCP_LISTEN)
+ /* if the socket is the listening state then this
+ * packet is a SYN-ACK packet which means it needs to
+ * be labeled based on the connection/request_sock and
+ * not the parent socket. unfortunately, we can't
+ * lookup the request_sock yet as it isn't queued on
+ * the parent socket until after the SYN-ACK is sent.
+ * the "solution" is to simply pass the packet as-is
+ * as any IP option based labeling should be copied
+ * from the initial connection request (in the IP
+ * layer). it is far from ideal, but until we get a
+ * security label in the packet itself this is the
+ * best we can do. */
+ return NF_ACCEPT;
+
+ /* standard practice, label using the parent socket */
+ sksec = sk->sk_security;
sid = sksec->sid;
} else
sid = SECINITSID_KERNEL;
@@ -4559,7 +4820,7 @@ static unsigned int selinux_ip_output(struct sk_buff *skb,
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_output(unsigned int hooknum,
+static unsigned int selinux_ipv4_output(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4575,6 +4836,7 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
struct sock *sk = skb->sk;
struct sk_security_struct *sksec;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 proto;
@@ -4582,9 +4844,10 @@ static unsigned int selinux_ip_postroute_compat(struct sk_buff *skb,
return NF_ACCEPT;
sksec = sk->sk_security;
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, &proto))
return NF_DROP;
@@ -4606,6 +4869,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
u32 peer_sid;
struct sock *sk;
struct common_audit_data ad;
+ struct lsm_network_audit net = {0,};
char *addrp;
u8 secmark_active;
u8 peerlbl_active;
@@ -4616,27 +4880,36 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
* as fast and as clean as possible. */
if (!selinux_policycap_netpeer)
return selinux_ip_postroute_compat(skb, ifindex, family);
+
+ secmark_active = selinux_secmark_enabled();
+ peerlbl_active = selinux_peerlbl_enabled();
+ if (!secmark_active && !peerlbl_active)
+ return NF_ACCEPT;
+
+ sk = skb->sk;
+
#ifdef CONFIG_XFRM
/* If skb->dst->xfrm is non-NULL then the packet is undergoing an IPsec
* packet transformation so allow the packet to pass without any checks
* since we'll have another chance to perform access control checks
* when the packet is on its final way out.
* NOTE: there appear to be some IPv6 multicast cases where skb->dst
- * is NULL, in this case go ahead and apply access control. */
- if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL)
+ * is NULL, in this case go ahead and apply access control.
+ * NOTE: if this is a local socket (skb->sk != NULL) that is in the
+ * TCP listening state we cannot wait until the XFRM processing
+ * is done as we will miss out on the SA label if we do;
+ * unfortunately, this means more work, but it is only once per
+ * connection. */
+ if (skb_dst(skb) != NULL && skb_dst(skb)->xfrm != NULL &&
+ !(sk != NULL && sk->sk_state == TCP_LISTEN))
return NF_ACCEPT;
#endif
- secmark_active = selinux_secmark_enabled();
- peerlbl_active = netlbl_enabled() || selinux_xfrm_enabled();
- if (!secmark_active && !peerlbl_active)
- return NF_ACCEPT;
- /* if the packet is being forwarded then get the peer label from the
- * packet itself; otherwise check to see if it is from a local
- * application or the kernel, if from an application get the peer label
- * from the sending socket, otherwise use the kernel's sid */
- sk = skb->sk;
if (sk == NULL) {
+ /* Without an associated socket the packet is either coming
+ * from the kernel or it is being forwarded; check the packet
+ * to determine which and if the packet is being forwarded
+ * query the packet directly to determine the security label. */
if (skb->skb_iif) {
secmark_perm = PACKET__FORWARD_OUT;
if (selinux_skb_peerlbl_sid(skb, family, &peer_sid))
@@ -4645,15 +4918,54 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
secmark_perm = PACKET__SEND;
peer_sid = SECINITSID_KERNEL;
}
+ } else if (sk->sk_state == TCP_LISTEN) {
+ /* Locally generated packet but the associated socket is in the
+ * listening state which means this is a SYN-ACK packet. In
+ * this particular case the correct security label is assigned
+ * to the connection/request_sock but unfortunately we can't
+ * query the request_sock as it isn't queued on the parent
+ * socket until after the SYN-ACK packet is sent; the only
+ * viable choice is to regenerate the label like we do in
+ * selinux_inet_conn_request(). See also selinux_ip_output()
+ * for similar problems. */
+ u32 skb_sid;
+ struct sk_security_struct *sksec = sk->sk_security;
+ if (selinux_skb_peerlbl_sid(skb, family, &skb_sid))
+ return NF_DROP;
+ /* At this point, if the returned skb peerlbl is SECSID_NULL
+ * and the packet has been through at least one XFRM
+ * transformation then we must be dealing with the "final"
+ * form of labeled IPsec packet; since we've already applied
+ * all of our access controls on this packet we can safely
+ * pass the packet. */
+ if (skb_sid == SECSID_NULL) {
+ switch (family) {
+ case PF_INET:
+ if (IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ break;
+ case PF_INET6:
+ if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED)
+ return NF_ACCEPT;
+ default:
+ return NF_DROP_ERR(-ECONNREFUSED);
+ }
+ }
+ if (selinux_conn_sid(sksec->sid, skb_sid, &peer_sid))
+ return NF_DROP;
+ secmark_perm = PACKET__SEND;
} else {
+ /* Locally generated packet, fetch the security label from the
+ * associated socket. */
struct sk_security_struct *sksec = sk->sk_security;
peer_sid = sksec->sid;
secmark_perm = PACKET__SEND;
}
- COMMON_AUDIT_DATA_INIT(&ad, NET);
- ad.u.net.netif = ifindex;
- ad.u.net.family = family;
+ ad.type = LSM_AUDIT_DATA_NET;
+ ad.u.net = &net;
+ ad.u.net->netif = ifindex;
+ ad.u.net->family = family;
if (selinux_parse_skb(skb, &ad, &addrp, 0, NULL))
return NF_DROP;
@@ -4682,7 +4994,7 @@ static unsigned int selinux_ip_postroute(struct sk_buff *skb, int ifindex,
return NF_ACCEPT;
}
-static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv4_postroute(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4692,7 +5004,7 @@ static unsigned int selinux_ipv4_postroute(unsigned int hooknum,
}
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
-static unsigned int selinux_ipv6_postroute(unsigned int hooknum,
+static unsigned int selinux_ipv6_postroute(const struct nf_hook_ops *ops,
struct sk_buff *skb,
const struct net_device *in,
const struct net_device *out,
@@ -4772,7 +5084,7 @@ static int ipc_has_perm(struct kern_ipc_perm *ipc_perms,
isec = ipc_perms->security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = ipc_perms->key;
return avc_has_perm(sid, isec->sid, isec->sclass, perms, &ad);
@@ -4802,7 +5114,7 @@ static int selinux_msg_queue_alloc_security(struct msg_queue *msq)
isec = msq->q_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4827,7 +5139,7 @@ static int selinux_msg_queue_associate(struct msg_queue *msq, int msqflg)
isec = msq->q_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_MSGQ,
@@ -4887,7 +5199,7 @@ static int selinux_msg_queue_msgsnd(struct msg_queue *msq, struct msg_msg *msg,
return rc;
}
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
/* Can this process write to the queue? */
@@ -4918,7 +5230,7 @@ static int selinux_msg_queue_msgrcv(struct msg_queue *msq, struct msg_msg *msg,
isec = msq->q_perm.security;
msec = msg->security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = msq->q_perm.key;
rc = avc_has_perm(sid, isec->sid,
@@ -4943,7 +5255,7 @@ static int selinux_shm_alloc_security(struct shmid_kernel *shp)
isec = shp->shm_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = shp->shm_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -4968,7 +5280,7 @@ static int selinux_shm_associate(struct shmid_kernel *shp, int shmflg)
isec = shp->shm_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = shp->shm_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SHM,
@@ -5035,7 +5347,7 @@ static int selinux_sem_alloc_security(struct sem_array *sma)
isec = sma->sem_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = sma->sem_perm.key;
rc = avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -5060,7 +5372,7 @@ static int selinux_sem_associate(struct sem_array *sma, int semflg)
isec = sma->sem_perm.security;
- COMMON_AUDIT_DATA_INIT(&ad, IPC);
+ ad.type = LSM_AUDIT_DATA_IPC;
ad.u.ipc_id = sma->sem_perm.key;
return avc_has_perm(sid, isec->sid, SECCLASS_SEM,
@@ -5238,10 +5550,25 @@ static int selinux_setprocattr(struct task_struct *p,
str[size-1] = 0;
size--;
}
- error = security_context_to_sid(value, size, &sid);
+ error = security_context_to_sid(value, size, &sid, GFP_KERNEL);
if (error == -EINVAL && !strcmp(name, "fscreate")) {
- if (!capable(CAP_MAC_ADMIN))
+ if (!capable(CAP_MAC_ADMIN)) {
+ struct audit_buffer *ab;
+ size_t audit_size;
+
+ /* We strip a nul only if it is at the end, otherwise the
+ * context contains a nul and we should audit that */
+ if (str[size - 1] == '\0')
+ audit_size = size - 1;
+ else
+ audit_size = size;
+ ab = audit_log_start(current->audit_context, GFP_ATOMIC, AUDIT_SELINUX_ERR);
+ audit_log_format(ab, "op=fscreate invalid_context=");
+ audit_log_n_untrustedstring(ab, value, audit_size);
+ audit_log_end(ab);
+
return error;
+ }
error = security_context_to_sid_force(value, size,
&sid);
}
@@ -5293,11 +5620,11 @@ static int selinux_setprocattr(struct task_struct *p,
/* Check for ptracing, and update the task SID if ok.
Otherwise, leave SID unchanged and fail. */
ptsid = 0;
- task_lock(p);
+ rcu_read_lock();
tracer = ptrace_parent(p);
if (tracer)
ptsid = task_sid(tracer);
- task_unlock(p);
+ rcu_read_unlock();
if (tracer) {
error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS,
@@ -5320,6 +5647,11 @@ abort_change:
return error;
}
+static int selinux_ismaclabel(const char *name)
+{
+ return (strcmp(name, XATTR_SELINUX_SUFFIX) == 0);
+}
+
static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
return security_sid_to_context(secid, secdata, seclen);
@@ -5327,7 +5659,7 @@ static int selinux_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
static int selinux_secctx_to_secid(const char *secdata, u32 seclen, u32 *secid)
{
- return security_context_to_sid(secdata, seclen, secid);
+ return security_context_to_sid(secdata, seclen, secid, GFP_KERNEL);
}
static void selinux_release_secctx(char *secdata, u32 seclen)
@@ -5393,7 +5725,7 @@ static void selinux_key_free(struct key *k)
static int selinux_key_permission(key_ref_t key_ref,
const struct cred *cred,
- key_perm_t perm)
+ unsigned perm)
{
struct key *key;
struct key_security_struct *ksec;
@@ -5462,6 +5794,7 @@ static struct security_operations selinux_ops = {
.sb_clone_mnt_opts = selinux_sb_clone_mnt_opts,
.sb_parse_opts_str = selinux_parse_opts_str,
+ .dentry_init_security = selinux_dentry_init_security,
.inode_alloc_security = selinux_inode_alloc_security,
.inode_free_security = selinux_inode_free_security,
@@ -5493,7 +5826,8 @@ static struct security_operations selinux_ops = {
.file_alloc_security = selinux_file_alloc_security,
.file_free_security = selinux_file_free_security,
.file_ioctl = selinux_file_ioctl,
- .file_mmap = selinux_file_mmap,
+ .mmap_file = selinux_mmap_file,
+ .mmap_addr = selinux_mmap_addr,
.file_mprotect = selinux_file_mprotect,
.file_lock = selinux_file_lock,
.file_fcntl = selinux_file_fcntl,
@@ -5501,7 +5835,7 @@ static struct security_operations selinux_ops = {
.file_send_sigiotask = selinux_file_send_sigiotask,
.file_receive = selinux_file_receive,
- .dentry_open = selinux_dentry_open,
+ .file_open = selinux_file_open,
.task_create = selinux_task_create,
.cred_alloc_blank = selinux_cred_alloc_blank,
@@ -5556,6 +5890,7 @@ static struct security_operations selinux_ops = {
.getprocattr = selinux_getprocattr,
.setprocattr = selinux_setprocattr,
+ .ismaclabel = selinux_ismaclabel,
.secid_to_secctx = selinux_secid_to_secctx,
.secctx_to_secid = selinux_secctx_to_secid,
.release_secctx = selinux_release_secctx,
@@ -5594,16 +5929,21 @@ static struct security_operations selinux_ops = {
.secmark_refcount_inc = selinux_secmark_refcount_inc,
.secmark_refcount_dec = selinux_secmark_refcount_dec,
.req_classify_flow = selinux_req_classify_flow,
+ .tun_dev_alloc_security = selinux_tun_dev_alloc_security,
+ .tun_dev_free_security = selinux_tun_dev_free_security,
.tun_dev_create = selinux_tun_dev_create,
- .tun_dev_post_create = selinux_tun_dev_post_create,
+ .tun_dev_attach_queue = selinux_tun_dev_attach_queue,
.tun_dev_attach = selinux_tun_dev_attach,
+ .tun_dev_open = selinux_tun_dev_open,
+ .skb_owned_by = selinux_skb_owned_by,
#ifdef CONFIG_SECURITY_NETWORK_XFRM
.xfrm_policy_alloc_security = selinux_xfrm_policy_alloc,
.xfrm_policy_clone_security = selinux_xfrm_policy_clone,
.xfrm_policy_free_security = selinux_xfrm_policy_free,
.xfrm_policy_delete_security = selinux_xfrm_policy_delete,
- .xfrm_state_alloc_security = selinux_xfrm_state_alloc,
+ .xfrm_state_alloc = selinux_xfrm_state_alloc,
+ .xfrm_state_alloc_acquire = selinux_xfrm_state_alloc_acquire,
.xfrm_state_free_security = selinux_xfrm_state_free,
.xfrm_state_delete_security = selinux_xfrm_state_delete,
.xfrm_policy_lookup = selinux_xfrm_policy_lookup,
@@ -5685,21 +6025,21 @@ static struct nf_hook_ops selinux_ipv4_ops[] = {
{
.hook = selinux_ipv4_postroute,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP_PRI_SELINUX_LAST,
},
{
.hook = selinux_ipv4_forward,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP_PRI_SELINUX_FIRST,
},
{
.hook = selinux_ipv4_output,
.owner = THIS_MODULE,
- .pf = PF_INET,
+ .pf = NFPROTO_IPV4,
.hooknum = NF_INET_LOCAL_OUT,
.priority = NF_IP_PRI_SELINUX_FIRST,
}
@@ -5711,14 +6051,14 @@ static struct nf_hook_ops selinux_ipv6_ops[] = {
{
.hook = selinux_ipv6_postroute,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_POST_ROUTING,
.priority = NF_IP6_PRI_SELINUX_LAST,
},
{
.hook = selinux_ipv6_forward,
.owner = THIS_MODULE,
- .pf = PF_INET6,
+ .pf = NFPROTO_IPV6,
.hooknum = NF_INET_FORWARD,
.priority = NF_IP6_PRI_SELINUX_FIRST,
}
diff --git a/security/selinux/include/avc.h b/security/selinux/include/avc.h
index 47fda963495..ddf8eec03f2 100644
--- a/security/selinux/include/avc.h
+++ b/security/selinux/include/avc.h
@@ -15,7 +15,6 @@
#include <linux/audit.h>
#include <linux/lsm_audit.h>
#include <linux/in6.h>
-#include <asm/system.h>
#include "flask.h"
#include "av_permissions.h"
#include "security.h"
@@ -48,16 +47,99 @@ struct avc_cache_stats {
};
/*
+ * We only need this data after we have decided to send an audit message.
+ */
+struct selinux_audit_data {
+ u32 ssid;
+ u32 tsid;
+ u16 tclass;
+ u32 requested;
+ u32 audited;
+ u32 denied;
+ int result;
+};
+
+/*
* AVC operations
*/
void __init avc_init(void);
-int avc_audit(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct av_decision *avd,
- int result,
- struct common_audit_data *a, unsigned flags);
+static inline u32 avc_audit_required(u32 requested,
+ struct av_decision *avd,
+ int result,
+ u32 auditdeny,
+ u32 *deniedp)
+{
+ u32 denied, audited;
+ denied = requested & ~avd->allowed;
+ if (unlikely(denied)) {
+ audited = denied & avd->auditdeny;
+ /*
+ * auditdeny is TRICKY! Setting a bit in
+ * this field means that ANY denials should NOT be audited if
+ * the policy contains an explicit dontaudit rule for that
+ * permission. Take notice that this is unrelated to the
+ * actual permissions that were denied. As an example lets
+ * assume:
+ *
+ * denied == READ
+ * avd.auditdeny & ACCESS == 0 (not set means explicit rule)
+ * auditdeny & ACCESS == 1
+ *
+ * We will NOT audit the denial even though the denied
+ * permission was READ and the auditdeny checks were for
+ * ACCESS
+ */
+ if (auditdeny && !(auditdeny & avd->auditdeny))
+ audited = 0;
+ } else if (result)
+ audited = denied = requested;
+ else
+ audited = requested & avd->auditallow;
+ *deniedp = denied;
+ return audited;
+}
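+/* [Editor's sketch, not part of the patch] A worked instance of the
+ * auditdeny rule described in the comment above, using the real FILE__READ
+ * and FILE__AUDIT_ACCESS permission bits; the surrounding function and the
+ * from_access flag are hypothetical. */
+static inline u32 example_audit_required(struct av_decision *avd,
+					 bool from_access)
+{
+	u32 denied, audited;
+
+	/* suppose FILE__READ was requested and denied (result == -EACCES);
+	 * if from_access is set and the policy carries an explicit dontaudit
+	 * for audit_access (that bit clear in avd->auditdeny), audited comes
+	 * back 0 even though the denied permission was read */
+	audited = avc_audit_required(FILE__READ, avd, -EACCES,
+				     from_access ? FILE__AUDIT_ACCESS : 0,
+				     &denied);
+	return audited;
+}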
+
+int slow_avc_audit(u32 ssid, u32 tsid, u16 tclass,
+ u32 requested, u32 audited, u32 denied, int result,
+ struct common_audit_data *a,
+ unsigned flags);
+
+/**
+ * avc_audit - Audit the granting or denial of permissions.
+ * @ssid: source security identifier
+ * @tsid: target security identifier
+ * @tclass: target security class
+ * @requested: requested permissions
+ * @avd: access vector decisions
+ * @result: result from avc_has_perm_noaudit
+ * @a: auxiliary audit data
+ * @flags: VFS walk flags
+ *
+ * Audit the granting or denial of permissions in accordance
+ * with the policy. This function is typically called by
+ * avc_has_perm() after a permission check, but can also be
+ * called directly by callers who use avc_has_perm_noaudit()
+ * in order to separate the permission check from the auditing.
+ * For example, this separation is useful when the permission check must
+ * be performed under a lock, to allow the lock to be released
+ * before calling the auditing code.
+ */
+static inline int avc_audit(u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct av_decision *avd,
+ int result,
+ struct common_audit_data *a)
+{
+ u32 audited, denied;
+ audited = avc_audit_required(requested, avd, result, 0, &denied);
+ if (likely(!audited))
+ return 0;
+ return slow_avc_audit(ssid, tsid, tclass,
+ requested, audited, denied, result,
+ a, 0);
+}
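+/* [Editor's sketch, not part of the patch] The check/audit split the
+ * kernel-doc above describes: run avc_has_perm_noaudit() while a lock is
+ * held, drop the lock, then call avc_audit() with the saved decision.
+ * example_locked_check() and its lock parameter are hypothetical. */
+static int example_locked_check(spinlock_t *lock, u32 ssid, u32 tsid,
+				u16 tclass, u32 requested,
+				struct common_audit_data *ad)
+{
+	struct av_decision avd;
+	int rc, rc2;
+
+	spin_lock(lock);
+	rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
+	spin_unlock(lock);
+
+	rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, ad);
+	if (rc2)
+		return rc2;
+	return rc;
+}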
#define AVC_STRICT 1 /* Ignore permissive mode. */
int avc_has_perm_noaudit(u32 ssid, u32 tsid,
@@ -65,17 +147,9 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
unsigned flags,
struct av_decision *avd);
-int avc_has_perm_flags(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct common_audit_data *auditdata,
- unsigned);
-
-static inline int avc_has_perm(u32 ssid, u32 tsid,
- u16 tclass, u32 requested,
- struct common_audit_data *auditdata)
-{
- return avc_has_perm_flags(ssid, tsid, tclass, requested, auditdata, 0);
-}
+int avc_has_perm(u32 ssid, u32 tsid,
+ u16 tclass, u32 requested,
+ struct common_audit_data *auditdata);
u32 avc_policy_seqno(void);
@@ -88,11 +162,7 @@ u32 avc_policy_seqno(void);
#define AVC_CALLBACK_AUDITDENY_ENABLE 64
#define AVC_CALLBACK_AUDITDENY_DISABLE 128
-int avc_add_callback(int (*callback)(u32 event, u32 ssid, u32 tsid,
- u16 tclass, u32 perms,
- u32 *out_retained),
- u32 events, u32 ssid, u32 tsid,
- u16 tclass, u32 perms);
+int avc_add_callback(int (*callback)(u32 event), u32 events);
/* Exported to selinuxfs */
int avc_get_hash_stats(char *page);
diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h
index b8c53723e09..be491a74c1e 100644
--- a/security/selinux/include/classmap.h
+++ b/security/selinux/include/classmap.h
@@ -145,9 +145,11 @@ struct security_class_mapping secclass_map[] = {
"node_bind", "name_connect", NULL } },
{ "memprotect", { "mmap_zero", NULL } },
{ "peer", { "recv", NULL } },
- { "capability2", { "mac_override", "mac_admin", "syslog", NULL } },
+ { "capability2",
+ { "mac_override", "mac_admin", "syslog", "wake_alarm", "block_suspend",
+ "audit_read", NULL } },
{ "kernel_service", { "use_as_override", "create_files_as", NULL } },
{ "tun_socket",
- { COMMON_SOCK_PERMS, NULL } },
+ { COMMON_SOCK_PERMS, "attach_queue", NULL } },
{ NULL }
};
diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h
index 26c7eee1c30..078e553f52f 100644
--- a/security/selinux/include/objsec.h
+++ b/security/selinux/include/objsec.h
@@ -38,7 +38,10 @@ struct task_security_struct {
struct inode_security_struct {
struct inode *inode; /* back pointer to inode object */
- struct list_head list; /* list of inode_security_struct */
+ union {
+ struct list_head list; /* list of inode_security_struct */
+ struct rcu_head rcu; /* for freeing the inode_security_struct */
+ };
u32 task_sid; /* SID of creating task */
u32 sid; /* SID of this object */
u16 sclass; /* security class of this object */
@@ -58,8 +61,8 @@ struct superblock_security_struct {
u32 sid; /* SID of file system superblock */
u32 def_sid; /* default SID for labeling */
u32 mntpoint_sid; /* SECURITY_FS_USE_MNTPOINT context for files */
- unsigned int behavior; /* labeling behavior */
- unsigned char flags; /* which mount options were specified */
+ unsigned short behavior; /* labeling behavior */
+ unsigned short flags; /* which mount options were specified */
struct mutex lock;
struct list_head isec_head;
spinlock_t isec_lock;
@@ -110,6 +113,10 @@ struct sk_security_struct {
u16 sclass; /* sock security class */
};
+struct tun_security_struct {
+ u32 sid; /* SID for the tun device sockets */
+};
+
struct key_security_struct {
u32 sid; /* SID of key */
};
diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h
index d871e8ad210..ce7852cf526 100644
--- a/security/selinux/include/security.h
+++ b/security/selinux/include/security.h
@@ -31,26 +31,30 @@
#define POLICYDB_VERSION_BOUNDARY 24
#define POLICYDB_VERSION_FILENAME_TRANS 25
#define POLICYDB_VERSION_ROLETRANS 26
+#define POLICYDB_VERSION_NEW_OBJECT_DEFAULTS 27
+#define POLICYDB_VERSION_DEFAULT_TYPE 28
+#define POLICYDB_VERSION_CONSTRAINT_NAMES 29
/* Range of policy versions we understand */
#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
#ifdef CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX
#define POLICYDB_VERSION_MAX CONFIG_SECURITY_SELINUX_POLICYDB_VERSION_MAX_VALUE
#else
-#define POLICYDB_VERSION_MAX POLICYDB_VERSION_ROLETRANS
+#define POLICYDB_VERSION_MAX POLICYDB_VERSION_CONSTRAINT_NAMES
#endif
/* Mask for just the mount related flags */
#define SE_MNTMASK 0x0f
/* Super block security struct flags for mount options */
+/* BE CAREFUL, these need to be the low order bits for selinux_get_mnt_opts */
#define CONTEXT_MNT 0x01
#define FSCONTEXT_MNT 0x02
#define ROOTCONTEXT_MNT 0x04
#define DEFCONTEXT_MNT 0x08
+#define SBLABEL_MNT 0x10
/* Non-mount related flags */
-#define SE_SBINITIALIZED 0x10
-#define SE_SBPROC 0x20
-#define SE_SBLABELSUPP 0x40
+#define SE_SBINITIALIZED 0x0100
+#define SE_SBPROC 0x0200
#define CONTEXT_STR "context="
#define FSCONTEXT_STR "fscontext="
@@ -66,12 +70,15 @@ extern int selinux_enabled;
enum {
POLICYDB_CAPABILITY_NETPEER,
POLICYDB_CAPABILITY_OPENPERM,
+ POLICYDB_CAPABILITY_REDHAT1,
+ POLICYDB_CAPABILITY_ALWAYSNETWORK,
__POLICYDB_CAPABILITY_MAX
};
#define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1)
extern int selinux_policycap_netpeer;
extern int selinux_policycap_openperm;
+extern int selinux_policycap_alwaysnetwork;
/*
* type_datum properties
@@ -127,7 +134,7 @@ int security_sid_to_context(u32 sid, char **scontext,
int security_sid_to_context_force(u32 sid, char **scontext, u32 *scontext_len);
int security_context_to_sid(const char *scontext, u32 scontext_len,
- u32 *out_sid);
+ u32 *out_sid, gfp_t gfp);
int security_context_to_sid_default(const char *scontext, u32 scontext_len,
u32 *out_sid, u32 def_sid, gfp_t gfp_flags);
@@ -167,9 +174,10 @@ int security_get_allow_unknown(void);
#define SECURITY_FS_USE_GENFS 4 /* use the genfs support */
#define SECURITY_FS_USE_NONE 5 /* no labeling support */
#define SECURITY_FS_USE_MNTPOINT 6 /* use mountpoint labeling */
+#define SECURITY_FS_USE_NATIVE 7 /* use native label support */
+#define SECURITY_FS_USE_MAX 7 /* Highest SECURITY_FS_USE_XXX */
-int security_fs_use(const char *fstype, unsigned int *behavior,
- u32 *sid);
+int security_fs_use(struct super_block *sb);
int security_genfs_sid(const char *fstype, char *name, u16 sclass,
u32 *sid);
@@ -219,7 +227,7 @@ extern void selinux_status_update_policyload(int seqno);
extern void selinux_complete_init(void);
extern int selinux_disable(void);
extern void exit_sel_fs(void);
-extern struct dentry *selinux_null;
+extern struct path selinux_null;
extern struct vfsmount *selinuxfs_mount;
extern void selnl_notify_setenforce(int val);
extern void selnl_notify_policyload(u32 seqno);
diff --git a/security/selinux/include/xfrm.h b/security/selinux/include/xfrm.h
index b43813c9e04..1450f85b946 100644
--- a/security/selinux/include/xfrm.h
+++ b/security/selinux/include/xfrm.h
@@ -7,30 +7,25 @@
#ifndef _SELINUX_XFRM_H_
#define _SELINUX_XFRM_H_
+#include <net/flow.h>
+
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *sec_ctx);
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp);
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp);
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx);
int selinux_xfrm_state_alloc(struct xfrm_state *x,
- struct xfrm_user_sec_ctx *sec_ctx, u32 secid);
+ struct xfrm_user_sec_ctx *uctx);
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid);
void selinux_xfrm_state_free(struct xfrm_state *x);
int selinux_xfrm_state_delete(struct xfrm_state *x);
int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir);
int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
- struct xfrm_policy *xp, const struct flowi *fl);
-
-/*
- * Extract the security blob from the sock (it's actually on the socket)
- */
-static inline struct inode_security_struct *get_sock_isec(struct sock *sk)
-{
- if (!sk->sk_socket)
- return NULL;
-
- return SOCK_INODE(sk->sk_socket)->i_security;
-}
+ struct xfrm_policy *xp,
+ const struct flowi *fl);
#ifdef CONFIG_SECURITY_NETWORK_XFRM
extern atomic_t selinux_xfrm_refcount;
@@ -40,15 +35,23 @@ static inline int selinux_xfrm_enabled(void)
return (atomic_read(&selinux_xfrm_refcount) > 0);
}
-int selinux_xfrm_sock_rcv_skb(u32 sid, struct sk_buff *skb,
- struct common_audit_data *ad);
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto);
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad);
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto);
int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid);
static inline void selinux_xfrm_notify_policyload(void)
{
- atomic_inc(&flow_cache_genid);
+ struct net *net;
+
+ rtnl_lock();
+ for_each_net(net) {
+ atomic_inc(&net->xfrm.flow_cache_genid);
+ rt_genid_bump_all(net);
+ }
+ rtnl_unlock();
}
#else
static inline int selinux_xfrm_enabled(void)
@@ -56,19 +59,21 @@ static inline int selinux_xfrm_enabled(void)
return 0;
}
-static inline int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad)
+static inline int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
{
return 0;
}
-static inline int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto)
+static inline int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad,
+ u8 proto)
{
return 0;
}
-static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid,
+ int ckall)
{
*sid = SECSID_NULL;
return 0;
@@ -77,12 +82,12 @@ static inline int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int
static inline void selinux_xfrm_notify_policyload(void)
{
}
-#endif
-static inline void selinux_skb_xfrm_sid(struct sk_buff *skb, u32 *sid)
+static inline int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
{
- int err = selinux_xfrm_decode_session(skb, sid, 0);
- BUG_ON(err);
+ *sid = SECSID_NULL;
+ return 0;
}
+#endif
#endif /* _SELINUX_XFRM_H_ */
diff --git a/security/selinux/netif.c b/security/selinux/netif.c
index 326f22cbe40..694e9e43855 100644
--- a/security/selinux/netif.c
+++ b/security/selinux/netif.c
@@ -252,8 +252,7 @@ static void sel_netif_flush(void)
spin_unlock_bh(&sel_netif_lock);
}
-static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
- u16 class, u32 perms, u32 *retained)
+static int sel_netif_avc_callback(u32 event)
{
if (event == AVC_CALLBACK_RESET) {
sel_netif_flush();
@@ -265,7 +264,7 @@ static int sel_netif_avc_callback(u32 event, u32 ssid, u32 tsid,
static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
unsigned long event, void *ptr)
{
- struct net_device *dev = ptr;
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
@@ -292,8 +291,7 @@ static __init int sel_netif_init(void)
register_netdevice_notifier(&sel_netif_netdev_notifier);
- err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET,
- SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+ err = avc_add_callback(sel_netif_avc_callback, AVC_CALLBACK_RESET);
if (err)
panic("avc_add_callback() failed, error %d\n", err);
diff --git a/security/selinux/netlabel.c b/security/selinux/netlabel.c
index da4b8b23328..0364120d1ec 100644
--- a/security/selinux/netlabel.c
+++ b/security/selinux/netlabel.c
@@ -101,6 +101,32 @@ static struct netlbl_lsm_secattr *selinux_netlbl_sock_genattr(struct sock *sk)
}
/**
+ * selinux_netlbl_sock_getattr - Get the cached NetLabel secattr
+ * @sk: the socket
+ * @sid: the SID
+ *
+ * Query the socket's cached secattr and if the SID matches the cached value
+ * return the cache, otherwise return NULL.
+ *
+ */
+static struct netlbl_lsm_secattr *selinux_netlbl_sock_getattr(
+ const struct sock *sk,
+ u32 sid)
+{
+ struct sk_security_struct *sksec = sk->sk_security;
+ struct netlbl_lsm_secattr *secattr = sksec->nlbl_secattr;
+
+ if (secattr == NULL)
+ return NULL;
+
+ if ((secattr->flags & NETLBL_SECATTR_SECID) &&
+ (secattr->attr.secid == sid))
+ return secattr;
+
+ return NULL;
+}
+
+/**
* selinux_netlbl_cache_invalidate - Invalidate the NetLabel cache
*
* Description:
@@ -224,7 +250,7 @@ int selinux_netlbl_skbuff_setsid(struct sk_buff *skb,
struct sk_security_struct *sksec = sk->sk_security;
if (sksec->nlbl_state != NLBL_REQSKB)
return 0;
- secattr = sksec->nlbl_secattr;
+ secattr = selinux_netlbl_sock_getattr(sk, sid);
}
if (secattr == NULL) {
secattr = &secattr_storage;
@@ -410,6 +436,9 @@ int selinux_netlbl_socket_setsockopt(struct socket *sock,
sksec->nlbl_state == NLBL_CONNLABELED)) {
netlbl_secattr_init(&secattr);
lock_sock(sk);
+ /* call the netlabel function directly as we want to see the
+ * on-the-wire label that is assigned via the socket's options
+ * and not the cached netlabel/lsm attributes */
rc = netlbl_sock_getattr(sk, &secattr);
release_sock(sk);
if (rc == 0)
@@ -442,8 +471,7 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state != NLBL_CONNLABELED)
return 0;
- local_bh_disable();
- bh_lock_sock_nested(sk);
+ lock_sock(sk);
/* connected sockets are allowed to disconnect when the address family
* is set to AF_UNSPEC, if that is what is happening we want to reset
@@ -464,7 +492,6 @@ int selinux_netlbl_socket_connect(struct sock *sk, struct sockaddr *addr)
sksec->nlbl_state = NLBL_CONNLABELED;
socket_connect_return:
- bh_unlock_sock(sk);
- local_bh_enable();
+ release_sock(sk);
return rc;
}
diff --git a/security/selinux/netlink.c b/security/selinux/netlink.c
index 161e01a6c7e..828fb6a4e94 100644
--- a/security/selinux/netlink.c
+++ b/security/selinux/netlink.c
@@ -16,9 +16,9 @@
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/skbuff.h>
-#include <linux/netlink.h>
#include <linux/selinux_netlink.h>
#include <net/net_namespace.h>
+#include <net/netlink.h>
#include "security.h"
@@ -47,7 +47,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
{
switch (msgtype) {
case SELNL_MSG_SETENFORCE: {
- struct selnl_msg_setenforce *msg = NLMSG_DATA(nlh);
+ struct selnl_msg_setenforce *msg = nlmsg_data(nlh);
memset(msg, 0, len);
msg->val = *((int *)data);
@@ -55,7 +55,7 @@ static void selnl_add_payload(struct nlmsghdr *nlh, int len, int msgtype, void *
}
case SELNL_MSG_POLICYLOAD: {
- struct selnl_msg_policyload *msg = NLMSG_DATA(nlh);
+ struct selnl_msg_policyload *msg = nlmsg_data(nlh);
memset(msg, 0, len);
msg->seqno = *((u32 *)data);
@@ -76,12 +76,14 @@ static void selnl_notify(int msgtype, void *data)
len = selnl_msglen(msgtype);
- skb = alloc_skb(NLMSG_SPACE(len), GFP_USER);
+ skb = nlmsg_new(len, GFP_USER);
if (!skb)
goto oom;
tmp = skb->tail;
- nlh = NLMSG_PUT(skb, 0, 0, msgtype, len);
+ nlh = nlmsg_put(skb, 0, 0, msgtype, len, 0);
+ if (!nlh)
+ goto out_kfree_skb;
selnl_add_payload(nlh, len, msgtype, data);
nlh->nlmsg_len = skb->tail - tmp;
NETLINK_CB(skb).dst_group = SELNLGRP_AVC;
@@ -89,7 +91,7 @@ static void selnl_notify(int msgtype, void *data)
out:
return;
-nlmsg_failure:
+out_kfree_skb:
kfree_skb(skb);
oom:
printk(KERN_ERR "SELinux: OOM in %s\n", __func__);
@@ -108,11 +110,14 @@ void selnl_notify_policyload(u32 seqno)
static int __init selnl_init(void)
{
- selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX,
- SELNLGRP_MAX, NULL, NULL, THIS_MODULE);
+ struct netlink_kernel_cfg cfg = {
+ .groups = SELNLGRP_MAX,
+ .flags = NL_CFG_F_NONROOT_RECV,
+ };
+
+ selnl = netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg);
if (selnl == NULL)
panic("SELinux: Cannot create netlink socket.");
- netlink_set_nonroot(NETLINK_SELINUX, NL_NONROOT_RECV);
return 0;
}
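/* [Editor's sketch, not part of the patch] The netlink_kernel_create()
 * calling convention the hunk above switches to: per-socket options travel
 * in a struct netlink_kernel_cfg, and NL_CFG_F_NONROOT_RECV replaces the
 * removed netlink_set_nonroot() call. example_create_selnl() is a
 * hypothetical wrapper around the same arguments. */
static struct sock *example_create_selnl(void)
{
	struct netlink_kernel_cfg cfg = {
		.groups	= SELNLGRP_MAX,
		.flags	= NL_CFG_F_NONROOT_RECV,
	};

	return netlink_kernel_create(&init_net, NETLINK_SELINUX, &cfg);
}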
diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c
index 86365857c08..03a72c32afd 100644
--- a/security/selinux/netnode.c
+++ b/security/selinux/netnode.c
@@ -166,6 +166,7 @@ static void sel_netnode_insert(struct sel_netnode *node)
break;
default:
BUG();
+ return;
}
/* we need to impose a limit on the growth of the hash table so check
@@ -174,7 +175,8 @@ static void sel_netnode_insert(struct sel_netnode *node)
if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
struct sel_netnode *tail;
tail = list_entry(
- rcu_dereference(sel_netnode_hash[idx].list.prev),
+ rcu_dereference_protected(sel_netnode_hash[idx].list.prev,
+ lockdep_is_held(&sel_netnode_lock)),
struct sel_netnode, list);
list_del_rcu(&tail->list);
kfree_rcu(tail, rcu);
@@ -224,6 +226,7 @@ static int sel_netnode_sid_slow(void *addr, u16 family, u32 *sid)
break;
default:
BUG();
+ ret = -EINVAL;
}
if (ret != 0)
goto out;
@@ -297,8 +300,7 @@ static void sel_netnode_flush(void)
spin_unlock_bh(&sel_netnode_lock);
}
-static int sel_netnode_avc_callback(u32 event, u32 ssid, u32 tsid,
- u16 class, u32 perms, u32 *retained)
+static int sel_netnode_avc_callback(u32 event)
{
if (event == AVC_CALLBACK_RESET) {
sel_netnode_flush();
@@ -320,8 +322,7 @@ static __init int sel_netnode_init(void)
sel_netnode_hash[iter].size = 0;
}
- ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET,
- SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+ ret = avc_add_callback(sel_netnode_avc_callback, AVC_CALLBACK_RESET);
if (ret != 0)
panic("avc_add_callback() failed, error %d\n", ret);
diff --git a/security/selinux/netport.c b/security/selinux/netport.c
index 7b9eb1faf68..d35379781c2 100644
--- a/security/selinux/netport.c
+++ b/security/selinux/netport.c
@@ -234,8 +234,7 @@ static void sel_netport_flush(void)
spin_unlock_bh(&sel_netport_lock);
}
-static int sel_netport_avc_callback(u32 event, u32 ssid, u32 tsid,
- u16 class, u32 perms, u32 *retained)
+static int sel_netport_avc_callback(u32 event)
{
if (event == AVC_CALLBACK_RESET) {
sel_netport_flush();
@@ -257,8 +256,7 @@ static __init int sel_netport_init(void)
sel_netport_hash[iter].size = 0;
}
- ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET,
- SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+ ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET);
if (ret != 0)
panic("avc_add_callback() failed, error %d\n", ret);
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 0920ea3bf59..2df7b900e25 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -14,10 +14,10 @@
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/if.h>
-#include <linux/netfilter_ipv4/ip_queue.h>
#include <linux/inet_diag.h>
#include <linux/xfrm.h>
#include <linux/audit.h>
+#include <linux/sock_diag.h>
#include "flask.h"
#include "av_permissions.h"
@@ -68,18 +68,18 @@ static struct nlmsg_perm nlmsg_route_perms[] =
{ RTM_GETADDRLABEL, NETLINK_ROUTE_SOCKET__NLMSG_READ },
{ RTM_GETDCB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
{ RTM_SETDCB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
-};
-
-static struct nlmsg_perm nlmsg_firewall_perms[] =
-{
- { IPQM_MODE, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
- { IPQM_VERDICT, NETLINK_FIREWALL_SOCKET__NLMSG_WRITE },
+ { RTM_NEWNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETNETCONF, NETLINK_ROUTE_SOCKET__NLMSG_READ },
+ { RTM_NEWMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_DELMDB, NETLINK_ROUTE_SOCKET__NLMSG_WRITE },
+ { RTM_GETMDB, NETLINK_ROUTE_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_tcpdiag_perms[] =
{
{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
};
static struct nlmsg_perm nlmsg_xfrm_perms[] =
@@ -118,6 +118,8 @@ static struct nlmsg_perm nlmsg_audit_perms[] =
{ AUDIT_MAKE_EQUIV, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
{ AUDIT_TTY_GET, NETLINK_AUDIT_SOCKET__NLMSG_READ },
{ AUDIT_TTY_SET, NETLINK_AUDIT_SOCKET__NLMSG_TTY_AUDIT },
+ { AUDIT_GET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_READ },
+ { AUDIT_SET_FEATURE, NETLINK_AUDIT_SOCKET__NLMSG_WRITE },
};
@@ -145,12 +147,6 @@ int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm)
sizeof(nlmsg_route_perms));
break;
- case SECCLASS_NETLINK_FIREWALL_SOCKET:
- case SECCLASS_NETLINK_IP6FW_SOCKET:
- err = nlmsg_perm(nlmsg_type, perm, nlmsg_firewall_perms,
- sizeof(nlmsg_firewall_perms));
- break;
-
case SECCLASS_NETLINK_TCPDIAG_SOCKET:
err = nlmsg_perm(nlmsg_type, perm, nlmsg_tcpdiag_perms,
sizeof(nlmsg_tcpdiag_perms));
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 48a7d0014b4..c71737f6d1c 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -44,7 +44,9 @@
/* Policy capability filenames */
static char *policycap_names[] = {
"network_peer_controls",
- "open_perms"
+ "open_perms",
+ "redhat1",
+ "always_check_network"
};
unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
@@ -52,7 +54,7 @@ unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE;
static int __init checkreqprot_setup(char *str)
{
unsigned long checkreqprot;
- if (!strict_strtoul(str, 0, &checkreqprot))
+ if (!kstrtoul(str, 0, &checkreqprot))
selinux_checkreqprot = checkreqprot ? 1 : 0;
return 1;
}
@@ -174,7 +176,7 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf,
audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
"enforcing=%d old_enforcing=%d auid=%u ses=%u",
new_value, selinux_enforcing,
- audit_get_loginuid(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
selinux_enforcing = new_value;
if (selinux_enforcing)
@@ -202,7 +204,7 @@ static ssize_t sel_read_handle_unknown(struct file *filp, char __user *buf,
{
char tmpbuf[TMPBUFLEN];
ssize_t length;
- ino_t ino = filp->f_path.dentry->d_inode->i_ino;
+ ino_t ino = file_inode(filp)->i_ino;
int handle_unknown = (ino == SEL_REJECT_UNKNOWN) ?
security_get_reject_unknown() : !security_get_allow_unknown();
@@ -305,7 +307,7 @@ static ssize_t sel_write_disable(struct file *file, const char __user *buf,
goto out;
audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_STATUS,
"selinux=0 auid=%u ses=%u",
- audit_get_loginuid(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
}
@@ -344,7 +346,7 @@ static int sel_make_classes(void);
static int sel_make_policycap(void);
/* declaration for sel_make_class_dirs */
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
unsigned long *ino);
static ssize_t sel_read_mls(struct file *filp, char __user *buf,
@@ -485,7 +487,7 @@ static int sel_mmap_policy(struct file *filp, struct vm_area_struct *vma)
return -EACCES;
}
- vma->vm_flags |= VM_RESERVED;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_ops = &sel_mmap_policy_ops;
return 0;
@@ -496,6 +498,7 @@ static const struct file_operations sel_policy_ops = {
.read = sel_read_policy,
.mmap = sel_mmap_policy,
.release = sel_release_policy,
+ .llseek = generic_file_llseek,
};
static ssize_t sel_write_load(struct file *file, const char __user *buf,
@@ -550,7 +553,7 @@ static ssize_t sel_write_load(struct file *file, const char __user *buf,
out1:
audit_log(current->audit_context, GFP_KERNEL, AUDIT_MAC_POLICY_LOAD,
"policy loaded auid=%u ses=%u",
- audit_get_loginuid(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
out:
mutex_unlock(&sel_mutex);
@@ -573,7 +576,7 @@ static ssize_t sel_write_context(struct file *file, char *buf, size_t size)
if (length)
goto out;
- length = security_context_to_sid(buf, size, &sid);
+ length = security_context_to_sid(buf, size, &sid, GFP_KERNEL);
if (length)
goto out;
@@ -670,7 +673,7 @@ static ssize_t (*write_op[])(struct file *, char *, size_t) = {
static ssize_t selinux_transaction_write(struct file *file, const char __user *buf, size_t size, loff_t *pos)
{
- ino_t ino = file->f_path.dentry->d_inode->i_ino;
+ ino_t ino = file_inode(file)->i_ino;
char *data;
ssize_t rv;
@@ -728,11 +731,13 @@ static ssize_t sel_write_access(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -814,11 +819,13 @@ static ssize_t sel_write_create(struct file *file, char *buf, size_t size)
objname = namebuf;
}
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -875,11 +882,13 @@ static ssize_t sel_write_relabel(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -931,7 +940,7 @@ static ssize_t sel_write_user(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s", con, user) != 2)
goto out;
- length = security_context_to_sid(con, strlen(con) + 1, &sid);
+ length = security_context_to_sid(con, strlen(con) + 1, &sid, GFP_KERNEL);
if (length)
goto out;
@@ -991,11 +1000,13 @@ static ssize_t sel_write_member(struct file *file, char *buf, size_t size)
if (sscanf(buf, "%s %s %hu", scon, tcon, &tclass) != 3)
goto out;
- length = security_context_to_sid(scon, strlen(scon) + 1, &ssid);
+ length = security_context_to_sid(scon, strlen(scon) + 1, &ssid,
+ GFP_KERNEL);
if (length)
goto out;
- length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid);
+ length = security_context_to_sid(tcon, strlen(tcon) + 1, &tsid,
+ GFP_KERNEL);
if (length)
goto out;
@@ -1041,8 +1052,7 @@ static ssize_t sel_read_bool(struct file *filep, char __user *buf,
ssize_t length;
ssize_t ret;
int cur_enforcing;
- struct inode *inode = filep->f_path.dentry->d_inode;
- unsigned index = inode->i_ino & SEL_INO_MASK;
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
mutex_lock(&sel_mutex);
@@ -1076,8 +1086,7 @@ static ssize_t sel_write_bool(struct file *filep, const char __user *buf,
char *page = NULL;
ssize_t length;
int new_value;
- struct inode *inode = filep->f_path.dentry->d_inode;
- unsigned index = inode->i_ino & SEL_INO_MASK;
+ unsigned index = file_inode(filep)->i_ino & SEL_INO_MASK;
const char *name = filep->f_path.dentry->d_name.name;
mutex_lock(&sel_mutex);
@@ -1232,6 +1241,7 @@ static int sel_make_bools(void)
kfree(bool_pending_names[i]);
kfree(bool_pending_names);
kfree(bool_pending_values);
+ bool_num = 0;
bool_pending_names = NULL;
bool_pending_values = NULL;
@@ -1257,12 +1267,8 @@ static int sel_make_bools(void)
if (!inode)
goto out;
- ret = -EINVAL;
- len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
- if (len < 0)
- goto out;
-
ret = -ENAMETOOLONG;
+ len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
if (len >= PAGE_SIZE)
goto out;
@@ -1299,7 +1305,7 @@ out:
#define NULL_FILE_NAME "null"
-struct dentry *selinux_null;
+struct path selinux_null;
static ssize_t sel_read_avc_cache_threshold(struct file *filp, char __user *buf,
size_t count, loff_t *ppos)
@@ -1488,13 +1494,11 @@ static int sel_make_avc_files(struct dentry *dir)
static ssize_t sel_read_initcon(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- struct inode *inode;
char *con;
u32 sid, len;
ssize_t ret;
- inode = file->f_path.dentry->d_inode;
- sid = inode->i_ino&SEL_INO_MASK;
+ sid = file_inode(file)->i_ino&SEL_INO_MASK;
ret = security_sid_to_context(sid, &con, &len);
if (ret)
return ret;
@@ -1532,11 +1536,6 @@ static int sel_make_initcon_files(struct dentry *dir)
return 0;
}
-static inline unsigned int sel_div(unsigned long a, unsigned long b)
-{
- return a / b - (a % b < 0);
-}
-
static inline unsigned long sel_class_to_ino(u16 class)
{
return (class * (SEL_VEC_MAX + 1)) | SEL_CLASS_INO_OFFSET;
@@ -1544,7 +1543,7 @@ static inline unsigned long sel_class_to_ino(u16 class)
static inline u16 sel_ino_to_class(unsigned long ino)
{
- return sel_div(ino & SEL_INO_MASK, SEL_VEC_MAX + 1);
+ return (ino & SEL_INO_MASK) / (SEL_VEC_MAX + 1);
}
static inline unsigned long sel_perm_to_ino(u16 class, u32 perm)
@@ -1560,19 +1559,10 @@ static inline u32 sel_ino_to_perm(unsigned long ino)
static ssize_t sel_read_class(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- ssize_t rc, len;
- char *page;
- unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
- page = (char *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino));
- rc = simple_read_from_buffer(buf, count, ppos, page, len);
- free_page((unsigned long)page);
-
- return rc;
+ unsigned long ino = file_inode(file)->i_ino;
+ char res[TMPBUFLEN];
+ ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
+ return simple_read_from_buffer(buf, count, ppos, res, len);
}
static const struct file_operations sel_class_ops = {
@@ -1583,19 +1573,10 @@ static const struct file_operations sel_class_ops = {
static ssize_t sel_read_perm(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
- ssize_t rc, len;
- char *page;
- unsigned long ino = file->f_path.dentry->d_inode->i_ino;
-
- page = (char *)__get_free_page(GFP_KERNEL);
- if (!page)
- return -ENOMEM;
-
- len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino));
- rc = simple_read_from_buffer(buf, count, ppos, page, len);
- free_page((unsigned long)page);
-
- return rc;
+ unsigned long ino = file_inode(file)->i_ino;
+ char res[TMPBUFLEN];
+ ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
+ return simple_read_from_buffer(buf, count, ppos, res, len);
}
static const struct file_operations sel_perm_ops = {
@@ -1609,7 +1590,7 @@ static ssize_t sel_read_policycap(struct file *file, char __user *buf,
int value;
char tmpbuf[TMPBUFLEN];
ssize_t length;
- unsigned long i_ino = file->f_path.dentry->d_inode->i_ino;
+ unsigned long i_ino = file_inode(file)->i_ino;
value = security_policycap_supported(i_ino & SEL_INO_MASK);
length = scnprintf(tmpbuf, TMPBUFLEN, "%d", value);
@@ -1678,13 +1659,9 @@ static int sel_make_class_dir_entries(char *classname, int index,
inode->i_ino = sel_class_to_ino(index);
d_add(dentry, inode);
- dentry = d_alloc_name(dir, "perms");
- if (!dentry)
- return -ENOMEM;
-
- rc = sel_make_dir(dir->d_inode, dentry, &last_class_ino);
- if (rc)
- return rc;
+ dentry = sel_make_dir(dir, "perms", &last_class_ino);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
rc = sel_make_perm_files(classname, index, dentry);
@@ -1733,15 +1710,12 @@ static int sel_make_classes(void)
for (i = 0; i < nclasses; i++) {
struct dentry *class_name_dir;
- rc = -ENOMEM;
- class_name_dir = d_alloc_name(class_dir, classes[i]);
- if (!class_name_dir)
- goto out;
-
- rc = sel_make_dir(class_dir->d_inode, class_name_dir,
+ class_name_dir = sel_make_dir(class_dir, classes[i],
&last_class_ino);
- if (rc)
+ if (IS_ERR(class_name_dir)) {
+ rc = PTR_ERR(class_name_dir);
goto out;
+ }
/* i+1 since class values are 1-indexed */
rc = sel_make_class_dir_entries(classes[i], i + 1,
@@ -1787,14 +1761,20 @@ static int sel_make_policycap(void)
return 0;
}
-static int sel_make_dir(struct inode *dir, struct dentry *dentry,
+static struct dentry *sel_make_dir(struct dentry *dir, const char *name,
unsigned long *ino)
{
+ struct dentry *dentry = d_alloc_name(dir, name);
struct inode *inode;
- inode = sel_make_inode(dir->i_sb, S_IFDIR | S_IRUGO | S_IXUGO);
- if (!inode)
- return -ENOMEM;
+ if (!dentry)
+ return ERR_PTR(-ENOMEM);
+
+ inode = sel_make_inode(dir->d_sb, S_IFDIR | S_IRUGO | S_IXUGO);
+ if (!inode) {
+ dput(dentry);
+ return ERR_PTR(-ENOMEM);
+ }
inode->i_op = &simple_dir_inode_operations;
inode->i_fop = &simple_dir_operations;
@@ -1803,16 +1783,16 @@ static int sel_make_dir(struct inode *dir, struct dentry *dentry,
inc_nlink(inode);
d_add(dentry, inode);
/* bump link count on parent directory, too */
- inc_nlink(dir);
+ inc_nlink(dir->d_inode);
- return 0;
+ return dentry;
}
static int sel_fill_super(struct super_block *sb, void *data, int silent)
{
int ret;
struct dentry *dentry;
- struct inode *inode, *root_inode;
+ struct inode *inode;
struct inode_security_struct *isec;
static struct tree_descr selinux_files[] = {
@@ -1832,25 +1812,19 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
[SEL_REJECT_UNKNOWN] = {"reject_unknown", &sel_handle_unknown_ops, S_IRUGO},
[SEL_DENY_UNKNOWN] = {"deny_unknown", &sel_handle_unknown_ops, S_IRUGO},
[SEL_STATUS] = {"status", &sel_handle_status_ops, S_IRUGO},
- [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUSR},
+ [SEL_POLICY] = {"policy", &sel_policy_ops, S_IRUGO},
/* last one */ {""}
};
ret = simple_fill_super(sb, SELINUX_MAGIC, selinux_files);
if (ret)
goto err;
- root_inode = sb->s_root->d_inode;
-
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, BOOL_DIR_NAME);
- if (!dentry)
- goto err;
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
+ bool_dir = sel_make_dir(sb->s_root, BOOL_DIR_NAME, &sel_last_ino);
+ if (IS_ERR(bool_dir)) {
+ ret = PTR_ERR(bool_dir);
+ bool_dir = NULL;
goto err;
-
- bool_dir = dentry;
+ }
ret = -ENOMEM;
dentry = d_alloc_name(sb->s_root, NULL_FILE_NAME);
@@ -1870,56 +1844,41 @@ static int sel_fill_super(struct super_block *sb, void *data, int silent)
init_special_inode(inode, S_IFCHR | S_IRUGO | S_IWUGO, MKDEV(MEM_MAJOR, 3));
d_add(dentry, inode);
- selinux_null = dentry;
-
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, "avc");
- if (!dentry)
- goto err;
+ selinux_null.dentry = dentry;
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
+ dentry = sel_make_dir(sb->s_root, "avc", &sel_last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto err;
+ }
ret = sel_make_avc_files(dentry);
if (ret)
goto err;
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, "initial_contexts");
- if (!dentry)
- goto err;
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
+ dentry = sel_make_dir(sb->s_root, "initial_contexts", &sel_last_ino);
+ if (IS_ERR(dentry)) {
+ ret = PTR_ERR(dentry);
goto err;
+ }
ret = sel_make_initcon_files(dentry);
if (ret)
goto err;
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, "class");
- if (!dentry)
- goto err;
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
+ class_dir = sel_make_dir(sb->s_root, "class", &sel_last_ino);
+ if (IS_ERR(class_dir)) {
+ ret = PTR_ERR(class_dir);
+ class_dir = NULL;
goto err;
+ }
- class_dir = dentry;
-
- ret = -ENOMEM;
- dentry = d_alloc_name(sb->s_root, "policy_capabilities");
- if (!dentry)
+ policycap_dir = sel_make_dir(sb->s_root, "policy_capabilities", &sel_last_ino);
+ if (IS_ERR(policycap_dir)) {
+ ret = PTR_ERR(policycap_dir);
+ policycap_dir = NULL;
goto err;
-
- ret = sel_make_dir(root_inode, dentry, &sel_last_ino);
- if (ret)
- goto err;
-
- policycap_dir = dentry;
-
+ }
return 0;
err:
printk(KERN_ERR "SELinux: %s: failed while creating inodes\n",
@@ -1959,7 +1918,7 @@ static int __init init_sel_fs(void)
return err;
}
- selinuxfs_mount = kern_mount(&sel_fs_type);
+ selinux_null.mnt = selinuxfs_mount = kern_mount(&sel_fs_type);
if (IS_ERR(selinuxfs_mount)) {
printk(KERN_ERR "selinuxfs: could not mount!\n");
err = PTR_ERR(selinuxfs_mount);
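The selinuxfs changes above fold dentry allocation into sel_make_dir() and report failure through the kernel's error-pointer convention, so callers test IS_ERR()/PTR_ERR() instead of a separate return code. A minimal user-space sketch of that convention (MAX_ERRNO and the helpers are simplified from <linux/err.h>; make_dir() is a hypothetical stand-in, not part of the patch):

#include <stdio.h>
#include <errno.h>
#include <stdint.h>

#define MAX_ERRNO 4095

static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Hypothetical stand-in for sel_make_dir(): returns a real pointer on
 * success and an encoded errno on failure, so one return value carries
 * both outcomes. */
static void *make_dir(const char *name)
{
	if (!name)
		return ERR_PTR(-ENOMEM);
	return (void *)name;
}

int main(void)
{
	void *d = make_dir(NULL);

	if (IS_ERR(d))
		printf("make_dir failed: %ld\n", PTR_ERR(d));
	return 0;
}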
diff --git a/security/selinux/ss/constraint.h b/security/selinux/ss/constraint.h
index 149dda731fd..96fd947c494 100644
--- a/security/selinux/ss/constraint.h
+++ b/security/selinux/ss/constraint.h
@@ -48,6 +48,7 @@ struct constraint_expr {
u32 op; /* operator */
struct ebitmap names; /* names */
+ struct type_set *type_names;
struct constraint_expr *next; /* next expression */
};
diff --git a/security/selinux/ss/context.h b/security/selinux/ss/context.h
index 45e8fb0515f..212e3479a0d 100644
--- a/security/selinux/ss/context.h
+++ b/security/selinux/ss/context.h
@@ -74,6 +74,26 @@ out:
return rc;
}
+/*
+ * Sets both levels in the MLS range of 'dst' to the high level of 'src'.
+ */
+static inline int mls_context_cpy_high(struct context *dst, struct context *src)
+{
+ int rc;
+
+ dst->range.level[0].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[0].cat, &src->range.level[1].cat);
+ if (rc)
+ goto out;
+
+ dst->range.level[1].sens = src->range.level[1].sens;
+ rc = ebitmap_cpy(&dst->range.level[1].cat, &src->range.level[1].cat);
+ if (rc)
+ ebitmap_destroy(&dst->range.level[0].cat);
+out:
+ return rc;
+}
+
static inline int mls_context_cmp(struct context *c1, struct context *c2)
{
return ((c1->range.level[0].sens == c2->range.level[0].sens) &&
diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c
index 30f119b1d1e..820313a04d4 100644
--- a/security/selinux/ss/ebitmap.c
+++ b/security/selinux/ss/ebitmap.c
@@ -213,7 +213,12 @@ netlbl_import_failure:
}
#endif /* CONFIG_NETLABEL */
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
+/*
+ * Check to see if all the bits set in e2 are also set in e1. Optionally,
+ * if last_e2bit is non-zero, the highest set bit in e2 cannot exceed
+ * last_e2bit.
+ */
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit)
{
struct ebitmap_node *n1, *n2;
int i;
@@ -223,14 +228,25 @@ int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2)
n1 = e1->node;
n2 = e2->node;
+
while (n1 && n2 && (n1->startbit <= n2->startbit)) {
if (n1->startbit < n2->startbit) {
n1 = n1->next;
continue;
}
- for (i = 0; i < EBITMAP_UNIT_NUMS; i++) {
+ for (i = EBITMAP_UNIT_NUMS - 1; (i >= 0) && !n2->maps[i]; )
+ i--; /* Skip trailing NULL map entries */
+ if (last_e2bit && (i >= 0)) {
+ u32 lastsetbit = n2->startbit + i * EBITMAP_UNIT_SIZE +
+ __fls(n2->maps[i]);
+ if (lastsetbit > last_e2bit)
+ return 0;
+ }
+
+ while (i >= 0) {
if ((n1->maps[i] & n2->maps[i]) != n2->maps[i])
return 0;
+ i--;
}
n1 = n1->next;
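The reworked ebitmap_contains() above both checks subset containment and, when last_e2bit is non-zero, rejects bitmaps whose highest set bit exceeds that bound. A flat-array sketch of the same check, assuming a GCC/Clang __builtin_clzl and ignoring the kernel's sparse node list:

#include <stdio.h>

#define WORD_BITS (sizeof(unsigned long) * 8)

/* Returns 1 iff every bit set in e2 is also set in e1 and, when last_e2bit
 * is non-zero, no bit set in e2 is numbered higher than last_e2bit
 * (bits are numbered from 0). */
static int bitmap_contains(const unsigned long *e1, const unsigned long *e2,
			   size_t nwords, unsigned long last_e2bit)
{
	size_t i;

	for (i = 0; i < nwords; i++) {
		if ((e1[i] & e2[i]) != e2[i])
			return 0;
		if (last_e2bit && e2[i]) {
			unsigned long hi = i * WORD_BITS + (WORD_BITS - 1) -
					   __builtin_clzl(e2[i]);
			if (hi > last_e2bit)
				return 0;
		}
	}
	return 1;
}

int main(void)
{
	unsigned long a[1] = { 0x0f }, b[1] = { 0x05 };	/* b sets bits 0 and 2 */

	printf("%d %d\n",
	       bitmap_contains(a, b, 1, 0),	/* 1: b is a subset of a */
	       bitmap_contains(a, b, 1, 1));	/* 0: bit 2 exceeds the bound 1 */
	return 0;
}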
diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h
index 922f8afa89d..712c8a7b8e8 100644
--- a/security/selinux/ss/ebitmap.h
+++ b/security/selinux/ss/ebitmap.h
@@ -16,7 +16,13 @@
#include <net/netlabel.h>
-#define EBITMAP_UNIT_NUMS ((32 - sizeof(void *) - sizeof(u32)) \
+#ifdef CONFIG_64BIT
+#define EBITMAP_NODE_SIZE 64
+#else
+#define EBITMAP_NODE_SIZE 32
+#endif
+
+#define EBITMAP_UNIT_NUMS ((EBITMAP_NODE_SIZE-sizeof(void *)-sizeof(u32))\
/ sizeof(unsigned long))
#define EBITMAP_UNIT_SIZE BITS_PER_LONG
#define EBITMAP_SIZE (EBITMAP_UNIT_NUMS * EBITMAP_UNIT_SIZE)
@@ -117,7 +123,7 @@ static inline void ebitmap_node_clr_bit(struct ebitmap_node *n,
int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2);
int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src);
-int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2);
+int ebitmap_contains(struct ebitmap *e1, struct ebitmap *e2, u32 last_e2bit);
int ebitmap_get_bit(struct ebitmap *e, unsigned long bit);
int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value);
void ebitmap_destroy(struct ebitmap *e);
diff --git a/security/selinux/ss/hashtab.c b/security/selinux/ss/hashtab.c
index 933e735bb18..2cc49614984 100644
--- a/security/selinux/ss/hashtab.c
+++ b/security/selinux/ss/hashtab.c
@@ -6,6 +6,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/errno.h>
+#include <linux/sched.h>
#include "hashtab.h"
struct hashtab *hashtab_create(u32 (*hash_value)(struct hashtab *h, const void *key),
@@ -40,6 +41,8 @@ int hashtab_insert(struct hashtab *h, void *key, void *datum)
u32 hvalue;
struct hashtab_node *prev, *cur, *newnode;
+ cond_resched();
+
if (!h || h->nel == HASHTAB_MAX_NODES)
return -EINVAL;
diff --git a/security/selinux/ss/mls.c b/security/selinux/ss/mls.c
index fbf9c5816c7..d307b37ddc2 100644
--- a/security/selinux/ss/mls.c
+++ b/security/selinux/ss/mls.c
@@ -160,8 +160,6 @@ void mls_sid_to_context(struct context *context,
int mls_level_isvalid(struct policydb *p, struct mls_level *l)
{
struct level_datum *levdatum;
- struct ebitmap_node *node;
- int i;
if (!l->sens || l->sens > p->p_levels.nprim)
return 0;
@@ -170,19 +168,13 @@ int mls_level_isvalid(struct policydb *p, struct mls_level *l)
if (!levdatum)
return 0;
- ebitmap_for_each_positive_bit(&l->cat, node, i) {
- if (i > p->p_cats.nprim)
- return 0;
- if (!ebitmap_get_bit(&levdatum->level->cat, i)) {
- /*
- * Category may not be associated with
- * sensitivity.
- */
- return 0;
- }
- }
-
- return 1;
+ /*
+	 * Return 1 iff all the bits set in l->cat are also set in
+ * levdatum->level->cat and no bit in l->cat is larger than
+ * p->p_cats.nprim.
+ */
+ return ebitmap_contains(&levdatum->level->cat, &l->cat,
+ p->p_cats.nprim);
}
int mls_range_isvalid(struct policydb *p, struct mls_range *r)
@@ -500,6 +492,8 @@ int mls_convert_context(struct policydb *oldp,
rc = ebitmap_set_bit(&bitmap, catdatum->value - 1, 1);
if (rc)
return rc;
+
+ cond_resched();
}
ebitmap_destroy(&c->range.level[l].cat);
c->range.level[l].cat = bitmap;
@@ -517,6 +511,8 @@ int mls_compute_sid(struct context *scontext,
{
struct range_trans rtr;
struct mls_range *r;
+ struct class_datum *cladatum;
+ int default_range = 0;
if (!policydb.mls_enabled)
return 0;
@@ -530,6 +526,28 @@ int mls_compute_sid(struct context *scontext,
r = hashtab_search(policydb.range_tr, &rtr);
if (r)
return mls_range_set(newcontext, r);
+
+ if (tclass && tclass <= policydb.p_classes.nprim) {
+ cladatum = policydb.class_val_to_struct[tclass - 1];
+ if (cladatum)
+ default_range = cladatum->default_range;
+ }
+
+ switch (default_range) {
+ case DEFAULT_SOURCE_LOW:
+ return mls_context_cpy_low(newcontext, scontext);
+ case DEFAULT_SOURCE_HIGH:
+ return mls_context_cpy_high(newcontext, scontext);
+ case DEFAULT_SOURCE_LOW_HIGH:
+ return mls_context_cpy(newcontext, scontext);
+ case DEFAULT_TARGET_LOW:
+ return mls_context_cpy_low(newcontext, tcontext);
+ case DEFAULT_TARGET_HIGH:
+ return mls_context_cpy_high(newcontext, tcontext);
+ case DEFAULT_TARGET_LOW_HIGH:
+ return mls_context_cpy(newcontext, tcontext);
+ }
+
/* Fallthrough */
case AVTAB_CHANGE:
if ((tclass == policydb.process_class) || (sock == true))
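The new default_range handling in mls_compute_sid() above selects which side (source or target) and which end of the range (low, high, or both) seeds the new context's MLS range. A toy model of that selection, reusing the DEFAULT_* values defined later in policydb.h; the real code also copies category bitmaps and can fail:

#include <stdio.h>

enum {
	DEFAULT_SOURCE_LOW = 1, DEFAULT_SOURCE_HIGH, DEFAULT_SOURCE_LOW_HIGH,
	DEFAULT_TARGET_LOW, DEFAULT_TARGET_HIGH, DEFAULT_TARGET_LOW_HIGH
};

struct level { int sens; };
struct range { struct level lo, hi; };

static struct range pick_range(int default_range, struct range src,
			       struct range tgt)
{
	struct range r = src;	/* unset: placeholder; the real code falls
				 * back to the existing transition logic */

	switch (default_range) {
	case DEFAULT_SOURCE_LOW:      r.lo = src.lo; r.hi = src.lo; break;
	case DEFAULT_SOURCE_HIGH:     r.lo = src.hi; r.hi = src.hi; break;
	case DEFAULT_SOURCE_LOW_HIGH: r = src;                      break;
	case DEFAULT_TARGET_LOW:      r.lo = tgt.lo; r.hi = tgt.lo; break;
	case DEFAULT_TARGET_HIGH:     r.lo = tgt.hi; r.hi = tgt.hi; break;
	case DEFAULT_TARGET_LOW_HIGH: r = tgt;                      break;
	}
	return r;
}

int main(void)
{
	struct range src = { {1}, {3} }, tgt = { {2}, {5} };
	struct range r = pick_range(DEFAULT_TARGET_HIGH, src, tgt);

	printf("s%d-s%d\n", r.lo.sens, r.hi.sens);	/* prints s5-s5 */
	return 0;
}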
diff --git a/security/selinux/ss/mls_types.h b/security/selinux/ss/mls_types.h
index 03bed52a805..e9364877413 100644
--- a/security/selinux/ss/mls_types.h
+++ b/security/selinux/ss/mls_types.h
@@ -35,7 +35,7 @@ static inline int mls_level_eq(struct mls_level *l1, struct mls_level *l2)
static inline int mls_level_dom(struct mls_level *l1, struct mls_level *l2)
{
return ((l1->sens >= l2->sens) &&
- ebitmap_contains(&l1->cat, &l2->cat));
+ ebitmap_contains(&l1->cat, &l2->cat, 0));
}
#define mls_level_incomp(l1, l2) \
diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c
index a7f61d52f05..9c5cdc2caae 100644
--- a/security/selinux/ss/policydb.c
+++ b/security/selinux/ss/policydb.c
@@ -133,6 +133,21 @@ static struct policydb_compat_info policydb_compat[] = {
.sym_num = SYM_NUM,
.ocon_num = OCON_NUM,
},
+ {
+ .version = POLICYDB_VERSION_NEW_OBJECT_DEFAULTS,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_DEFAULT_TYPE,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
+ {
+ .version = POLICYDB_VERSION_CONSTRAINT_NAMES,
+ .sym_num = SYM_NUM,
+ .ocon_num = OCON_NUM,
+ },
};
static struct policydb_compat_info *policydb_lookup_compat(int version)
@@ -603,6 +618,19 @@ static int common_destroy(void *key, void *datum, void *p)
return 0;
}
+static void constraint_expr_destroy(struct constraint_expr *expr)
+{
+ if (expr) {
+ ebitmap_destroy(&expr->names);
+ if (expr->type_names) {
+ ebitmap_destroy(&expr->type_names->types);
+ ebitmap_destroy(&expr->type_names->negset);
+ kfree(expr->type_names);
+ }
+ kfree(expr);
+ }
+}
+
static int cls_destroy(void *key, void *datum, void *p)
{
struct class_datum *cladatum;
@@ -618,10 +646,9 @@ static int cls_destroy(void *key, void *datum, void *p)
while (constraint) {
e = constraint->expr;
while (e) {
- ebitmap_destroy(&e->names);
etmp = e;
e = e->next;
- kfree(etmp);
+ constraint_expr_destroy(etmp);
}
ctemp = constraint;
constraint = constraint->next;
@@ -632,16 +659,14 @@ static int cls_destroy(void *key, void *datum, void *p)
while (constraint) {
e = constraint->expr;
while (e) {
- ebitmap_destroy(&e->names);
etmp = e;
e = e->next;
- kfree(etmp);
+ constraint_expr_destroy(etmp);
}
ctemp = constraint;
constraint = constraint->next;
kfree(ctemp);
}
-
kfree(cladatum->comkey);
}
kfree(datum);
@@ -1146,8 +1171,34 @@ bad:
return rc;
}
-static int read_cons_helper(struct constraint_node **nodep, int ncons,
- int allowxtarget, void *fp)
+static void type_set_init(struct type_set *t)
+{
+ ebitmap_init(&t->types);
+ ebitmap_init(&t->negset);
+}
+
+static int type_set_read(struct type_set *t, void *fp)
+{
+ __le32 buf[1];
+ int rc;
+
+ if (ebitmap_read(&t->types, fp))
+ return -EINVAL;
+ if (ebitmap_read(&t->negset, fp))
+ return -EINVAL;
+
+ rc = next_entry(buf, fp, sizeof(u32));
+ if (rc < 0)
+ return -EINVAL;
+ t->flags = le32_to_cpu(buf[0]);
+
+ return 0;
+}
+
+
+static int read_cons_helper(struct policydb *p,
+ struct constraint_node **nodep,
+ int ncons, int allowxtarget, void *fp)
{
struct constraint_node *c, *lc;
struct constraint_expr *e, *le;
@@ -1215,6 +1266,18 @@ static int read_cons_helper(struct constraint_node **nodep, int ncons,
rc = ebitmap_read(&e->names, fp);
if (rc)
return rc;
+ if (p->policyvers >=
+ POLICYDB_VERSION_CONSTRAINT_NAMES) {
+ e->type_names = kzalloc(sizeof
+ (*e->type_names),
+ GFP_KERNEL);
+ if (!e->type_names)
+ return -ENOMEM;
+ type_set_init(e->type_names);
+ rc = type_set_read(e->type_names, fp);
+ if (rc)
+ return rc;
+ }
break;
default:
return -EINVAL;
@@ -1291,7 +1354,7 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
goto bad;
}
- rc = read_cons_helper(&cladatum->constraints, ncons, 0, fp);
+ rc = read_cons_helper(p, &cladatum->constraints, ncons, 0, fp);
if (rc)
goto bad;
@@ -1301,11 +1364,29 @@ static int class_read(struct policydb *p, struct hashtab *h, void *fp)
if (rc)
goto bad;
ncons = le32_to_cpu(buf[0]);
- rc = read_cons_helper(&cladatum->validatetrans, ncons, 1, fp);
+ rc = read_cons_helper(p, &cladatum->validatetrans,
+ ncons, 1, fp);
if (rc)
goto bad;
}
+ if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+ rc = next_entry(buf, fp, sizeof(u32) * 3);
+ if (rc)
+ goto bad;
+
+ cladatum->default_user = le32_to_cpu(buf[0]);
+ cladatum->default_role = le32_to_cpu(buf[1]);
+ cladatum->default_range = le32_to_cpu(buf[2]);
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+ rc = next_entry(buf, fp, sizeof(u32) * 1);
+ if (rc)
+ goto bad;
+ cladatum->default_type = le32_to_cpu(buf[0]);
+ }
+
rc = hashtab_insert(h, key, cladatum);
if (rc)
goto bad;
@@ -1914,7 +1995,19 @@ static int filename_trans_read(struct policydb *p, void *fp)
if (rc)
goto out;
- hashtab_insert(p->filename_trans, ft, otype);
+ rc = hashtab_insert(p->filename_trans, ft, otype);
+ if (rc) {
+ /*
+ * Do not return -EEXIST to the caller, or the system
+ * will not boot.
+ */
+ if (rc != -EEXIST)
+ goto out;
+ /* But free the memory to avoid a memory leak. */
+ kfree(ft);
+ kfree(name);
+ kfree(otype);
+ }
}
hash_eval(p->filename_trans, "filenametr");
return 0;
@@ -2141,7 +2234,10 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info,
rc = -EINVAL;
c->v.behavior = le32_to_cpu(buf[0]);
- if (c->v.behavior > SECURITY_FS_USE_NONE)
+ /* Determined at runtime, not in policy DB. */
+ if (c->v.behavior == SECURITY_FS_USE_MNTPOINT)
+ goto out;
+ if (c->v.behavior > SECURITY_FS_USE_MAX)
goto out;
rc = -ENOMEM;
@@ -2723,6 +2819,24 @@ static int common_write(void *vkey, void *datum, void *ptr)
return 0;
}
+static int type_set_write(struct type_set *t, void *fp)
+{
+ int rc;
+ __le32 buf[1];
+
+ if (ebitmap_write(&t->types, fp))
+ return -EINVAL;
+ if (ebitmap_write(&t->negset, fp))
+ return -EINVAL;
+
+ buf[0] = cpu_to_le32(t->flags);
+ rc = put_entry(buf, sizeof(u32), 1, fp);
+ if (rc)
+ return -EINVAL;
+
+ return 0;
+}
+
static int write_cons_helper(struct policydb *p, struct constraint_node *node,
void *fp)
{
@@ -2754,6 +2868,12 @@ static int write_cons_helper(struct policydb *p, struct constraint_node *node,
rc = ebitmap_write(&e->names, fp);
if (rc)
return rc;
+ if (p->policyvers >=
+ POLICYDB_VERSION_CONSTRAINT_NAMES) {
+ rc = type_set_write(e->type_names, fp);
+ if (rc)
+ return rc;
+ }
break;
default:
break;
@@ -2832,6 +2952,23 @@ static int class_write(void *vkey, void *datum, void *ptr)
if (rc)
return rc;
+ if (p->policyvers >= POLICYDB_VERSION_NEW_OBJECT_DEFAULTS) {
+ buf[0] = cpu_to_le32(cladatum->default_user);
+ buf[1] = cpu_to_le32(cladatum->default_role);
+ buf[2] = cpu_to_le32(cladatum->default_range);
+
+ rc = put_entry(buf, sizeof(uint32_t), 3, fp);
+ if (rc)
+ return rc;
+ }
+
+ if (p->policyvers >= POLICYDB_VERSION_DEFAULT_TYPE) {
+ buf[0] = cpu_to_le32(cladatum->default_type);
+ rc = put_entry(buf, sizeof(uint32_t), 1, fp);
+ if (rc)
+ return rc;
+ }
+
return 0;
}
@@ -3156,9 +3293,8 @@ static int range_write_helper(void *key, void *data, void *ptr)
static int range_write(struct policydb *p, void *fp)
{
- size_t nel;
__le32 buf[1];
- int rc;
+ int rc, nel;
struct policy_data pd;
pd.p = p;
@@ -3202,10 +3338,10 @@ static int filename_write_helper(void *key, void *data, void *ptr)
if (rc)
return rc;
- buf[0] = ft->stype;
- buf[1] = ft->ttype;
- buf[2] = ft->tclass;
- buf[3] = otype->otype;
+ buf[0] = cpu_to_le32(ft->stype);
+ buf[1] = cpu_to_le32(ft->ttype);
+ buf[2] = cpu_to_le32(ft->tclass);
+ buf[3] = cpu_to_le32(otype->otype);
rc = put_entry(buf, sizeof(u32), 4, fp);
if (rc)
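class_read() above pulls the new per-class defaults as consecutive little-endian u32 values when the policy version is new enough. A small sketch of decoding such a triple from a byte buffer; the sample values (1 = source, 2 = target, 4 = target-low) are illustrative only:

#include <stdio.h>
#include <stdint.h>

/* Decode a little-endian u32, mirroring what le32_to_cpu() does on a
 * big-endian host; on-disk policy fields are always little-endian. */
static uint32_t le32_to_host(const unsigned char *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* default_user, default_role, default_range laid out as three
	 * consecutive u32s, as in the class_read() hunk above. */
	unsigned char buf[12] = { 1,0,0,0, 2,0,0,0, 4,0,0,0 };

	printf("user=%u role=%u range=%u\n",
	       le32_to_host(buf), le32_to_host(buf + 4), le32_to_host(buf + 8));
	return 0;
}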
diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h
index b846c038718..725d5945a97 100644
--- a/security/selinux/ss/policydb.h
+++ b/security/selinux/ss/policydb.h
@@ -60,6 +60,20 @@ struct class_datum {
struct symtab permissions; /* class-specific permission symbol table */
struct constraint_node *constraints; /* constraints on class permissions */
struct constraint_node *validatetrans; /* special transition rules */
+/* Options for how a new object's user, role, and type should be decided */
+#define DEFAULT_SOURCE 1
+#define DEFAULT_TARGET 2
+ char default_user;
+ char default_role;
+ char default_type;
+/* Options for how a new object's range should be decided */
+#define DEFAULT_SOURCE_LOW 1
+#define DEFAULT_SOURCE_HIGH 2
+#define DEFAULT_SOURCE_LOW_HIGH 3
+#define DEFAULT_TARGET_LOW 4
+#define DEFAULT_TARGET_HIGH 5
+#define DEFAULT_TARGET_LOW_HIGH 6
+ char default_range;
};
/* Role attributes */
@@ -140,6 +154,17 @@ struct cond_bool_datum {
struct cond_node;
/*
+ * A type set preserves the data needed to determine constraint info from
+ * the policy source. It is not used by the kernel policy itself but allows
+ * utilities such as audit2allow to determine constraint denials.
+ */
+struct type_set {
+ struct ebitmap types;
+ struct ebitmap negset;
+ u32 flags;
+};
+
+/*
* The configuration data includes security contexts for
* initial SIDs, unlabeled file systems, TCP and UDP port numbers,
* network interfaces, and nodes. This structure stores the
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c
index 185f849a26f..4bca49414a4 100644
--- a/security/selinux/ss/services.c
+++ b/security/selinux/ss/services.c
@@ -72,6 +72,7 @@
int selinux_policycap_netpeer;
int selinux_policycap_openperm;
+int selinux_policycap_alwaysnetwork;
static DEFINE_RWLOCK(policy_rwlock);
@@ -1018,9 +1019,11 @@ static int context_struct_to_string(struct context *context, char **scontext, u3
if (context->len) {
*scontext_len = context->len;
- *scontext = kstrdup(context->str, GFP_ATOMIC);
- if (!(*scontext))
- return -ENOMEM;
+ if (scontext) {
+ *scontext = kstrdup(context->str, GFP_ATOMIC);
+ if (!(*scontext))
+ return -ENOMEM;
+ }
return 0;
}
@@ -1229,6 +1232,10 @@ static int security_context_to_sid_core(const char *scontext, u32 scontext_len,
struct context context;
int rc = 0;
+ /* An empty security context is never valid. */
+ if (!scontext_len)
+ return -EINVAL;
+
if (!ss_initialized) {
int i;
@@ -1282,16 +1289,18 @@ out:
* @scontext: security context
* @scontext_len: length in bytes
* @sid: security identifier, SID
+ * @gfp: context for the allocation
*
* Obtains a SID associated with the security context that
* has the string representation specified by @scontext.
* Returns -%EINVAL if the context is invalid, -%ENOMEM if insufficient
* memory is available, or 0 on success.
*/
-int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid)
+int security_context_to_sid(const char *scontext, u32 scontext_len, u32 *sid,
+ gfp_t gfp)
{
return security_context_to_sid_core(scontext, scontext_len,
- sid, SECSID_NULL, GFP_KERNEL, 0);
+ sid, SECSID_NULL, gfp, 0);
}
/**
@@ -1389,6 +1398,7 @@ static int security_compute_sid(u32 ssid,
u32 *out_sid,
bool kern)
{
+ struct class_datum *cladatum = NULL;
struct context *scontext = NULL, *tcontext = NULL, newcontext;
struct role_trans *roletr = NULL;
struct avtab_key avkey;
@@ -1437,12 +1447,20 @@ static int security_compute_sid(u32 ssid,
goto out_unlock;
}
+ if (tclass && tclass <= policydb.p_classes.nprim)
+ cladatum = policydb.class_val_to_struct[tclass - 1];
+
/* Set the user identity. */
switch (specified) {
case AVTAB_TRANSITION:
case AVTAB_CHANGE:
- /* Use the process user identity. */
- newcontext.user = scontext->user;
+ if (cladatum && cladatum->default_user == DEFAULT_TARGET) {
+ newcontext.user = tcontext->user;
+ } else {
+ /* notice this gets both DEFAULT_SOURCE and unset */
+ /* Use the process user identity. */
+ newcontext.user = scontext->user;
+ }
break;
case AVTAB_MEMBER:
/* Use the related object owner. */
@@ -1450,16 +1468,31 @@ static int security_compute_sid(u32 ssid,
break;
}
- /* Set the role and type to default values. */
- if ((tclass == policydb.process_class) || (sock == true)) {
- /* Use the current role and type of process. */
+ /* Set the role to default values. */
+ if (cladatum && cladatum->default_role == DEFAULT_SOURCE) {
newcontext.role = scontext->role;
- newcontext.type = scontext->type;
+ } else if (cladatum && cladatum->default_role == DEFAULT_TARGET) {
+ newcontext.role = tcontext->role;
} else {
- /* Use the well-defined object role. */
- newcontext.role = OBJECT_R_VAL;
- /* Use the type of the related object. */
+ if ((tclass == policydb.process_class) || (sock == true))
+ newcontext.role = scontext->role;
+ else
+ newcontext.role = OBJECT_R_VAL;
+ }
+
+ /* Set the type to default values. */
+ if (cladatum && cladatum->default_type == DEFAULT_SOURCE) {
+ newcontext.type = scontext->type;
+ } else if (cladatum && cladatum->default_type == DEFAULT_TARGET) {
newcontext.type = tcontext->type;
+ } else {
+ if ((tclass == policydb.process_class) || (sock == true)) {
+ /* Use the type of process. */
+ newcontext.type = scontext->type;
+ } else {
+ /* Use the type of the related object. */
+ newcontext.type = tcontext->type;
+ }
}
/* Look for a type transition/member/change rule. */
@@ -1786,6 +1819,8 @@ static void security_load_policycaps(void)
POLICYDB_CAPABILITY_NETPEER);
selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps,
POLICYDB_CAPABILITY_OPENPERM);
+ selinux_policycap_alwaysnetwork = ebitmap_get_bit(&policydb.policycaps,
+ POLICYDB_CAPABILITY_ALWAYSNETWORK);
}
static int security_preserve_bools(struct policydb *p);
@@ -1802,7 +1837,7 @@ static int security_preserve_bools(struct policydb *p);
*/
int security_load_policy(void *data, size_t len)
{
- struct policydb oldpolicydb, newpolicydb;
+ struct policydb *oldpolicydb, *newpolicydb;
struct sidtab oldsidtab, newsidtab;
struct selinux_mapping *oldmap, *map = NULL;
struct convert_context_args args;
@@ -1811,12 +1846,19 @@ int security_load_policy(void *data, size_t len)
int rc = 0;
struct policy_file file = { data, len }, *fp = &file;
+ oldpolicydb = kzalloc(2 * sizeof(*oldpolicydb), GFP_KERNEL);
+ if (!oldpolicydb) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ newpolicydb = oldpolicydb + 1;
+
if (!ss_initialized) {
avtab_cache_init();
rc = policydb_read(&policydb, fp);
if (rc) {
avtab_cache_destroy();
- return rc;
+ goto out;
}
policydb.len = len;
@@ -1826,14 +1868,14 @@ int security_load_policy(void *data, size_t len)
if (rc) {
policydb_destroy(&policydb);
avtab_cache_destroy();
- return rc;
+ goto out;
}
rc = policydb_load_isids(&policydb, &sidtab);
if (rc) {
policydb_destroy(&policydb);
avtab_cache_destroy();
- return rc;
+ goto out;
}
security_load_policycaps();
@@ -1845,36 +1887,36 @@ int security_load_policy(void *data, size_t len)
selinux_status_update_policyload(seqno);
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
- return 0;
+ goto out;
}
#if 0
sidtab_hash_eval(&sidtab, "sids");
#endif
- rc = policydb_read(&newpolicydb, fp);
+ rc = policydb_read(newpolicydb, fp);
if (rc)
- return rc;
+ goto out;
- newpolicydb.len = len;
+ newpolicydb->len = len;
/* If switching between different policy types, log MLS status */
- if (policydb.mls_enabled && !newpolicydb.mls_enabled)
+ if (policydb.mls_enabled && !newpolicydb->mls_enabled)
printk(KERN_INFO "SELinux: Disabling MLS support...\n");
- else if (!policydb.mls_enabled && newpolicydb.mls_enabled)
+ else if (!policydb.mls_enabled && newpolicydb->mls_enabled)
printk(KERN_INFO "SELinux: Enabling MLS support...\n");
- rc = policydb_load_isids(&newpolicydb, &newsidtab);
+ rc = policydb_load_isids(newpolicydb, &newsidtab);
if (rc) {
printk(KERN_ERR "SELinux: unable to load the initial SIDs\n");
- policydb_destroy(&newpolicydb);
- return rc;
+ policydb_destroy(newpolicydb);
+ goto out;
}
- rc = selinux_set_mapping(&newpolicydb, secclass_map, &map, &map_size);
+ rc = selinux_set_mapping(newpolicydb, secclass_map, &map, &map_size);
if (rc)
goto err;
- rc = security_preserve_bools(&newpolicydb);
+ rc = security_preserve_bools(newpolicydb);
if (rc) {
printk(KERN_ERR "SELinux: unable to preserve booleans\n");
goto err;
@@ -1892,7 +1934,7 @@ int security_load_policy(void *data, size_t len)
* in the new SID table.
*/
args.oldp = &policydb;
- args.newp = &newpolicydb;
+ args.newp = newpolicydb;
rc = sidtab_map(&newsidtab, convert_context, &args);
if (rc) {
printk(KERN_ERR "SELinux: unable to convert the internal"
@@ -1902,12 +1944,12 @@ int security_load_policy(void *data, size_t len)
}
/* Save the old policydb and SID table to free later. */
- memcpy(&oldpolicydb, &policydb, sizeof policydb);
+ memcpy(oldpolicydb, &policydb, sizeof(policydb));
sidtab_set(&oldsidtab, &sidtab);
/* Install the new policydb and SID table. */
write_lock_irq(&policy_rwlock);
- memcpy(&policydb, &newpolicydb, sizeof policydb);
+ memcpy(&policydb, newpolicydb, sizeof(policydb));
sidtab_set(&sidtab, &newsidtab);
security_load_policycaps();
oldmap = current_mapping;
@@ -1917,7 +1959,7 @@ int security_load_policy(void *data, size_t len)
write_unlock_irq(&policy_rwlock);
/* Free the old policydb and SID table. */
- policydb_destroy(&oldpolicydb);
+ policydb_destroy(oldpolicydb);
sidtab_destroy(&oldsidtab);
kfree(oldmap);
@@ -1927,14 +1969,17 @@ int security_load_policy(void *data, size_t len)
selinux_netlbl_cache_invalidate();
selinux_xfrm_notify_policyload();
- return 0;
+ rc = 0;
+ goto out;
err:
kfree(map);
sidtab_destroy(&newsidtab);
- policydb_destroy(&newpolicydb);
- return rc;
+ policydb_destroy(newpolicydb);
+out:
+ kfree(oldpolicydb);
+ return rc;
}
size_t security_policydb_len(void)
@@ -2297,17 +2342,14 @@ out:
/**
* security_fs_use - Determine how to handle labeling for a filesystem.
- * @fstype: filesystem type
- * @behavior: labeling behavior
- * @sid: SID for filesystem (superblock)
+ * @sb: superblock in question
*/
-int security_fs_use(
- const char *fstype,
- unsigned int *behavior,
- u32 *sid)
+int security_fs_use(struct super_block *sb)
{
int rc = 0;
struct ocontext *c;
+ struct superblock_security_struct *sbsec = sb->s_security;
+ const char *fstype = sb->s_type->name;
read_lock(&policy_rwlock);
@@ -2319,21 +2361,21 @@ int security_fs_use(
}
if (c) {
- *behavior = c->v.behavior;
+ sbsec->behavior = c->v.behavior;
if (!c->sid[0]) {
rc = sidtab_context_to_sid(&sidtab, &c->context[0],
&c->sid[0]);
if (rc)
goto out;
}
- *sid = c->sid[0];
+ sbsec->sid = c->sid[0];
} else {
- rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, sid);
+ rc = security_genfs_sid(fstype, "/", SECCLASS_DIR, &sbsec->sid);
if (rc) {
- *behavior = SECURITY_FS_USE_NONE;
+ sbsec->behavior = SECURITY_FS_USE_NONE;
rc = 0;
} else {
- *behavior = SECURITY_FS_USE_GENFS;
+ sbsec->behavior = SECURITY_FS_USE_GENFS;
}
}
@@ -2414,7 +2456,7 @@ int security_set_bools(int len, int *values)
sym_name(&policydb, SYM_BOOLS, i),
!!values[i],
policydb.bool_val_to_struct[i]->state,
- audit_get_loginuid(current),
+ from_kuid(&init_user_ns, audit_get_loginuid(current)),
audit_get_sessionid(current));
}
if (values[i])
@@ -2912,25 +2954,21 @@ int selinux_audit_rule_match(u32 sid, u32 field, u32 op, void *vrule,
struct selinux_audit_rule *rule = vrule;
int match = 0;
- if (!rule) {
- audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "selinux_audit_rule_match: missing rule\n");
+ if (unlikely(!rule)) {
+ WARN_ONCE(1, "selinux_audit_rule_match: missing rule\n");
return -ENOENT;
}
read_lock(&policy_rwlock);
if (rule->au_seqno < latest_granting) {
- audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "selinux_audit_rule_match: stale rule\n");
match = -ESTALE;
goto out;
}
ctxt = sidtab_search(&sidtab, sid);
- if (!ctxt) {
- audit_log(actx, GFP_ATOMIC, AUDIT_SELINUX_ERR,
- "selinux_audit_rule_match: unrecognized SID %d\n",
+ if (unlikely(!ctxt)) {
+ WARN_ONCE(1, "selinux_audit_rule_match: unrecognized SID %d\n",
sid);
match = -ENOENT;
goto out;
@@ -3018,8 +3056,7 @@ out:
static int (*aurule_callback)(void) = audit_update_lsm_rules;
-static int aurule_avc_callback(u32 event, u32 ssid, u32 tsid,
- u16 class, u32 perms, u32 *retained)
+static int aurule_avc_callback(u32 event)
{
int err = 0;
@@ -3032,8 +3069,7 @@ static int __init aurule_init(void)
{
int err;
- err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET,
- SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0);
+ err = avc_add_callback(aurule_avc_callback, AVC_CALLBACK_RESET);
if (err)
panic("avc_add_callback() failed, error %d\n", err);
diff --git a/security/selinux/xfrm.c b/security/selinux/xfrm.c
index 48665ecd119..98b042630a9 100644
--- a/security/selinux/xfrm.c
+++ b/security/selinux/xfrm.c
@@ -56,7 +56,7 @@
atomic_t selinux_xfrm_refcount = ATOMIC_INIT(0);
/*
- * Returns true if an LSM/SELinux context
+ * Returns true if the context is an LSM/SELinux context.
*/
static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
{
@@ -66,7 +66,7 @@ static inline int selinux_authorizable_ctx(struct xfrm_sec_ctx *ctx)
}
/*
- * Returns true if the xfrm contains a security blob for SELinux
+ * Returns true if the xfrm contains a security blob for SELinux.
*/
static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
{
@@ -74,48 +74,112 @@ static inline int selinux_authorizable_xfrm(struct xfrm_state *x)
}
/*
- * LSM hook implementation that authorizes that a flow can use
- * a xfrm policy rule.
+ * Allocates an xfrm_sec_ctx and populates it using the supplied
+ * xfrm_user_sec_ctx security context.
*/
-int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+static int selinux_xfrm_alloc_user(struct xfrm_sec_ctx **ctxp,
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
{
int rc;
- u32 sel_sid;
+ const struct task_security_struct *tsec = current_security();
+ struct xfrm_sec_ctx *ctx = NULL;
+ u32 str_len;
- /* Context sid is either set to label or ANY_ASSOC */
- if (ctx) {
- if (!selinux_authorizable_ctx(ctx))
- return -EINVAL;
-
- sel_sid = ctx->ctx_sid;
- } else
- /*
- * All flows should be treated as polmatch'ing an
- * otherwise applicable "non-labeled" policy. This
- * would prevent inadvertent "leaks".
- */
- return 0;
+ if (ctxp == NULL || uctx == NULL ||
+ uctx->ctx_doi != XFRM_SC_DOI_LSM ||
+ uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
+ return -EINVAL;
+
+ str_len = uctx->ctx_len;
+ if (str_len >= PAGE_SIZE)
+ return -ENOMEM;
+
+ ctx = kmalloc(sizeof(*ctx) + str_len + 1, gfp);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, &uctx[1], str_len);
+ ctx->ctx_str[str_len] = '\0';
+ rc = security_context_to_sid(ctx->ctx_str, str_len, &ctx->ctx_sid, gfp);
+ if (rc)
+ goto err;
- rc = avc_has_perm(fl_secid, sel_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__POLMATCH,
- NULL);
+ rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT, NULL);
+ if (rc)
+ goto err;
- if (rc == -EACCES)
- return -ESRCH;
+ *ctxp = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+ return 0;
+err:
+ kfree(ctx);
return rc;
}
/*
+ * Free the xfrm_sec_ctx structure.
+ */
+static void selinux_xfrm_free(struct xfrm_sec_ctx *ctx)
+{
+ if (!ctx)
+ return;
+
+ atomic_dec(&selinux_xfrm_refcount);
+ kfree(ctx);
+}
+
+/*
+ * Authorize the deletion of a labeled SA or policy rule.
+ */
+static int selinux_xfrm_delete(struct xfrm_sec_ctx *ctx)
+{
+ const struct task_security_struct *tsec = current_security();
+
+ if (!ctx)
+ return 0;
+
+ return avc_has_perm(tsec->sid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SETCONTEXT,
+ NULL);
+}
+
+/*
+ * LSM hook implementation that authorizes that a flow can use a xfrm policy
+ * rule.
+ */
+int selinux_xfrm_policy_lookup(struct xfrm_sec_ctx *ctx, u32 fl_secid, u8 dir)
+{
+ int rc;
+
+ /* All flows should be treated as polmatch'ing an otherwise applicable
+ * "non-labeled" policy. This would prevent inadvertent "leaks". */
+ if (!ctx)
+ return 0;
+
+ /* Context sid is either set to label or ANY_ASSOC */
+ if (!selinux_authorizable_ctx(ctx))
+ return -EINVAL;
+
+ rc = avc_has_perm(fl_secid, ctx->ctx_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__POLMATCH, NULL);
+ return (rc == -EACCES ? -ESRCH : rc);
+}
+
+/*
* LSM hook implementation that authorizes that a state matches
* the given policy, flow combo.
*/
-
-int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *xp,
- const struct flowi *fl)
+int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x,
+ struct xfrm_policy *xp,
+ const struct flowi *fl)
{
u32 state_sid;
- int rc;
if (!xp->security)
if (x->security)
@@ -138,186 +202,112 @@ int selinux_xfrm_state_pol_flow_match(struct xfrm_state *x, struct xfrm_policy *
if (fl->flowi_secid != state_sid)
return 0;
- rc = avc_has_perm(fl->flowi_secid, state_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__SENDTO,
- NULL)? 0:1;
-
- /*
- * We don't need a separate SA Vs. policy polmatch check
- * since the SA is now of the same label as the flow and
- * a flow Vs. policy polmatch check had already happened
- * in selinux_xfrm_policy_lookup() above.
- */
-
- return rc;
+ /* We don't need a separate SA Vs. policy polmatch check since the SA
+ * is now of the same label as the flow and a flow Vs. policy polmatch
+ * check had already happened in selinux_xfrm_policy_lookup() above. */
+ return (avc_has_perm(fl->flowi_secid, state_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO,
+ NULL) ? 0 : 1);
}
-/*
- * LSM hook implementation that checks and/or returns the xfrm sid for the
- * incoming packet.
- */
-
-int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
+static u32 selinux_xfrm_skb_sid_egress(struct sk_buff *skb)
{
- struct sec_path *sp;
+ struct dst_entry *dst = skb_dst(skb);
+ struct xfrm_state *x;
- *sid = SECSID_NULL;
+ if (dst == NULL)
+ return SECSID_NULL;
+ x = dst->xfrm;
+ if (x == NULL || !selinux_authorizable_xfrm(x))
+ return SECSID_NULL;
- if (skb == NULL)
- return 0;
+ return x->security->ctx_sid;
+}
+
+static int selinux_xfrm_skb_sid_ingress(struct sk_buff *skb,
+ u32 *sid, int ckall)
+{
+ u32 sid_session = SECSID_NULL;
+ struct sec_path *sp = skb->sp;
- sp = skb->sp;
if (sp) {
- int i, sid_set = 0;
+ int i;
- for (i = sp->len-1; i >= 0; i--) {
+ for (i = sp->len - 1; i >= 0; i--) {
struct xfrm_state *x = sp->xvec[i];
if (selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
- if (!sid_set) {
- *sid = ctx->ctx_sid;
- sid_set = 1;
-
+ if (sid_session == SECSID_NULL) {
+ sid_session = ctx->ctx_sid;
if (!ckall)
- break;
- } else if (*sid != ctx->ctx_sid)
+ goto out;
+ } else if (sid_session != ctx->ctx_sid) {
+ *sid = SECSID_NULL;
return -EINVAL;
+ }
}
}
}
+out:
+ *sid = sid_session;
return 0;
}
/*
- * Security blob allocation for xfrm_policy and xfrm_state
- * CTX does not have a meaningful value on input
+ * LSM hook implementation that checks and/or returns the xfrm sid for the
+ * incoming packet.
*/
-static int selinux_xfrm_sec_ctx_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx, u32 sid)
+int selinux_xfrm_decode_session(struct sk_buff *skb, u32 *sid, int ckall)
{
- int rc = 0;
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = NULL;
- char *ctx_str = NULL;
- u32 str_len;
-
- BUG_ON(uctx && sid);
-
- if (!uctx)
- goto not_from_user;
-
- if (uctx->ctx_alg != XFRM_SC_ALG_SELINUX)
- return -EINVAL;
-
- str_len = uctx->ctx_len;
- if (str_len >= PAGE_SIZE)
- return -ENOMEM;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len + 1,
- GFP_KERNEL);
-
- if (!ctx)
- return -ENOMEM;
-
- ctx->ctx_doi = uctx->ctx_doi;
- ctx->ctx_len = str_len;
- ctx->ctx_alg = uctx->ctx_alg;
-
- memcpy(ctx->ctx_str,
- uctx+1,
- str_len);
- ctx->ctx_str[str_len] = 0;
- rc = security_context_to_sid(ctx->ctx_str,
- str_len,
- &ctx->ctx_sid);
-
- if (rc)
- goto out;
-
- /*
- * Does the subject have permission to set security context?
- */
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc)
- goto out;
-
- return rc;
-
-not_from_user:
- rc = security_sid_to_context(sid, &ctx_str, &str_len);
- if (rc)
- goto out;
-
- *ctxp = ctx = kmalloc(sizeof(*ctx) +
- str_len,
- GFP_ATOMIC);
-
- if (!ctx) {
- rc = -ENOMEM;
- goto out;
+ if (skb == NULL) {
+ *sid = SECSID_NULL;
+ return 0;
}
+ return selinux_xfrm_skb_sid_ingress(skb, sid, ckall);
+}
- ctx->ctx_doi = XFRM_SC_DOI_LSM;
- ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
- ctx->ctx_sid = sid;
- ctx->ctx_len = str_len;
- memcpy(ctx->ctx_str,
- ctx_str,
- str_len);
+int selinux_xfrm_skb_sid(struct sk_buff *skb, u32 *sid)
+{
+ int rc;
- goto out2;
+ rc = selinux_xfrm_skb_sid_ingress(skb, sid, 0);
+ if (rc == 0 && *sid == SECSID_NULL)
+ *sid = selinux_xfrm_skb_sid_egress(skb);
-out:
- *ctxp = NULL;
- kfree(ctx);
-out2:
- kfree(ctx_str);
return rc;
}
/*
- * LSM hook implementation that allocs and transfers uctx spec to
- * xfrm_policy.
+ * LSM hook implementation that allocs and transfers uctx spec to xfrm_policy.
*/
int selinux_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp,
- struct xfrm_user_sec_ctx *uctx)
+ struct xfrm_user_sec_ctx *uctx,
+ gfp_t gfp)
{
- int err;
-
- BUG_ON(!uctx);
-
- err = selinux_xfrm_sec_ctx_alloc(ctxp, uctx, 0);
- if (err == 0)
- atomic_inc(&selinux_xfrm_refcount);
-
- return err;
+ return selinux_xfrm_alloc_user(ctxp, uctx, gfp);
}
-
/*
- * LSM hook implementation that copies security data structure from old to
- * new for policy cloning.
+ * LSM hook implementation that copies security data structure from old to new
+ * for policy cloning.
*/
int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
struct xfrm_sec_ctx **new_ctxp)
{
struct xfrm_sec_ctx *new_ctx;
- if (old_ctx) {
- new_ctx = kmalloc(sizeof(*old_ctx) + old_ctx->ctx_len,
- GFP_KERNEL);
- if (!new_ctx)
- return -ENOMEM;
+ if (!old_ctx)
+ return 0;
+
+ new_ctx = kmemdup(old_ctx, sizeof(*old_ctx) + old_ctx->ctx_len,
+ GFP_ATOMIC);
+ if (!new_ctx)
+ return -ENOMEM;
+ atomic_inc(&selinux_xfrm_refcount);
+ *new_ctxp = new_ctx;
- memcpy(new_ctx, old_ctx, sizeof(*new_ctx));
- memcpy(new_ctx->ctx_str, old_ctx->ctx_str, new_ctx->ctx_len);
- *new_ctxp = new_ctx;
- }
return 0;
}
@@ -326,7 +316,7 @@ int selinux_xfrm_policy_clone(struct xfrm_sec_ctx *old_ctx,
*/
void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
{
- kfree(ctx);
+ selinux_xfrm_free(ctx);
}
/*
@@ -334,35 +324,58 @@ void selinux_xfrm_policy_free(struct xfrm_sec_ctx *ctx)
*/
int selinux_xfrm_policy_delete(struct xfrm_sec_ctx *ctx)
{
- const struct task_security_struct *tsec = current_security();
- int rc = 0;
-
- if (ctx) {
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc == 0)
- atomic_dec(&selinux_xfrm_refcount);
- }
+ return selinux_xfrm_delete(ctx);
+}
- return rc;
+/*
+ * LSM hook implementation that allocates an xfrm_sec_ctx, populates it using
+ * the supplied security context, and assigns it to the xfrm_state.
+ */
+int selinux_xfrm_state_alloc(struct xfrm_state *x,
+ struct xfrm_user_sec_ctx *uctx)
+{
+ return selinux_xfrm_alloc_user(&x->security, uctx, GFP_KERNEL);
}
/*
- * LSM hook implementation that allocs and transfers sec_ctx spec to
- * xfrm_state.
+ * LSM hook implementation that allocates an xfrm_sec_ctx and populates it
+ * based on a secid.
*/
-int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uctx,
- u32 secid)
+int selinux_xfrm_state_alloc_acquire(struct xfrm_state *x,
+ struct xfrm_sec_ctx *polsec, u32 secid)
{
- int err;
+ int rc;
+ struct xfrm_sec_ctx *ctx;
+ char *ctx_str = NULL;
+ int str_len;
+
+ if (!polsec)
+ return 0;
- BUG_ON(!x);
+ if (secid == 0)
+ return -EINVAL;
+
+ rc = security_sid_to_context(secid, &ctx_str, &str_len);
+ if (rc)
+ return rc;
+
+ ctx = kmalloc(sizeof(*ctx) + str_len, GFP_ATOMIC);
+ if (!ctx) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ ctx->ctx_doi = XFRM_SC_DOI_LSM;
+ ctx->ctx_alg = XFRM_SC_ALG_SELINUX;
+ ctx->ctx_sid = secid;
+ ctx->ctx_len = str_len;
+ memcpy(ctx->ctx_str, ctx_str, str_len);
- err = selinux_xfrm_sec_ctx_alloc(&x->security, uctx, secid);
- if (err == 0)
- atomic_inc(&selinux_xfrm_refcount);
- return err;
+ x->security = ctx;
+ atomic_inc(&selinux_xfrm_refcount);
+out:
+ kfree(ctx_str);
+ return rc;
}
/*
@@ -370,28 +383,15 @@ int selinux_xfrm_state_alloc(struct xfrm_state *x, struct xfrm_user_sec_ctx *uct
*/
void selinux_xfrm_state_free(struct xfrm_state *x)
{
- struct xfrm_sec_ctx *ctx = x->security;
- kfree(ctx);
+ selinux_xfrm_free(x->security);
}
- /*
- * LSM hook implementation that authorizes deletion of labeled SAs.
- */
+/*
+ * LSM hook implementation that authorizes deletion of labeled SAs.
+ */
int selinux_xfrm_state_delete(struct xfrm_state *x)
{
- const struct task_security_struct *tsec = current_security();
- struct xfrm_sec_ctx *ctx = x->security;
- int rc = 0;
-
- if (ctx) {
- rc = avc_has_perm(tsec->sid, ctx->ctx_sid,
- SECCLASS_ASSOCIATION,
- ASSOCIATION__SETCONTEXT, NULL);
- if (rc == 0)
- atomic_dec(&selinux_xfrm_refcount);
- }
-
- return rc;
+ return selinux_xfrm_delete(x->security);
}
/*
@@ -401,14 +401,12 @@ int selinux_xfrm_state_delete(struct xfrm_state *x)
* we need to check for unlabelled access since this may not have
* gone thru the IPSec process.
*/
-int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad)
+int selinux_xfrm_sock_rcv_skb(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad)
{
- int i, rc = 0;
- struct sec_path *sp;
- u32 sel_sid = SECINITSID_UNLABELED;
-
- sp = skb->sp;
+ int i;
+ struct sec_path *sp = skb->sp;
+ u32 peer_sid = SECINITSID_UNLABELED;
if (sp) {
for (i = 0; i < sp->len; i++) {
@@ -416,23 +414,17 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
if (x && selinux_authorizable_xfrm(x)) {
struct xfrm_sec_ctx *ctx = x->security;
- sel_sid = ctx->ctx_sid;
+ peer_sid = ctx->ctx_sid;
break;
}
}
}
- /*
- * This check even when there's no association involved is
- * intended, according to Trent Jaeger, to make sure a
- * process can't engage in non-ipsec communication unless
- * explicitly allowed by policy.
- */
-
- rc = avc_has_perm(isec_sid, sel_sid, SECCLASS_ASSOCIATION,
- ASSOCIATION__RECVFROM, ad);
-
- return rc;
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(sk_sid, peer_sid,
+ SECCLASS_ASSOCIATION, ASSOCIATION__RECVFROM, ad);
}
/*
@@ -442,49 +434,38 @@ int selinux_xfrm_sock_rcv_skb(u32 isec_sid, struct sk_buff *skb,
* If we do have a authorizable security association, then it has already been
* checked in the selinux_xfrm_state_pol_flow_match hook above.
*/
-int selinux_xfrm_postroute_last(u32 isec_sid, struct sk_buff *skb,
- struct common_audit_data *ad, u8 proto)
+int selinux_xfrm_postroute_last(u32 sk_sid, struct sk_buff *skb,
+ struct common_audit_data *ad, u8 proto)
{
struct dst_entry *dst;
- int rc = 0;
-
- dst = skb_dst(skb);
-
- if (dst) {
- struct dst_entry *dst_test;
-
- for (dst_test = dst; dst_test != NULL;
- dst_test = dst_test->child) {
- struct xfrm_state *x = dst_test->xfrm;
-
- if (x && selinux_authorizable_xfrm(x))
- goto out;
- }
- }
switch (proto) {
case IPPROTO_AH:
case IPPROTO_ESP:
case IPPROTO_COMP:
- /*
- * We should have already seen this packet once before
- * it underwent xfrm(s). No need to subject it to the
- * unlabeled check.
- */
- goto out;
+ /* We should have already seen this packet once before it
+ * underwent xfrm(s). No need to subject it to the unlabeled
+ * check. */
+ return 0;
default:
break;
}
- /*
- * This check even when there's no association involved is
- * intended, according to Trent Jaeger, to make sure a
- * process can't engage in non-ipsec communication unless
- * explicitly allowed by policy.
- */
+ dst = skb_dst(skb);
+ if (dst) {
+ struct dst_entry *iter;
+
+ for (iter = dst; iter != NULL; iter = iter->child) {
+ struct xfrm_state *x = iter->xfrm;
- rc = avc_has_perm(isec_sid, SECINITSID_UNLABELED, SECCLASS_ASSOCIATION,
- ASSOCIATION__SENDTO, ad);
-out:
- return rc;
+ if (x && selinux_authorizable_xfrm(x))
+ return 0;
+ }
+ }
+
+ /* This check even when there's no association involved is intended,
+ * according to Trent Jaeger, to make sure a process can't engage in
+ * non-IPsec communication unless explicitly allowed by policy. */
+ return avc_has_perm(sk_sid, SECINITSID_UNLABELED,
+ SECCLASS_ASSOCIATION, ASSOCIATION__SENDTO, ad);
}
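selinux_xfrm_alloc_user() introduced above sizes one allocation to hold both the xfrm_sec_ctx header and the NUL-terminated context string. A user-space sketch of that flexible-array layout; field names, constants, and the sample context string are simplified stand-ins:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct sec_ctx {
	unsigned char doi;
	unsigned char alg;
	unsigned short len;
	char str[];		/* flexible array member */
};

/* One malloc covers the header and the string, as in the hunk above. */
static struct sec_ctx *alloc_ctx(const char *s)
{
	size_t len = strlen(s);
	struct sec_ctx *ctx = malloc(sizeof(*ctx) + len + 1);

	if (!ctx)
		return NULL;
	ctx->doi = 1;			/* stands in for XFRM_SC_DOI_LSM */
	ctx->alg = 1;			/* stands in for XFRM_SC_ALG_SELINUX */
	ctx->len = (unsigned short)len;
	memcpy(ctx->str, s, len);
	ctx->str[len] = '\0';
	return ctx;
}

int main(void)
{
	struct sec_ctx *ctx = alloc_ctx("system_u:object_r:ipsec_spd_t:s0");

	if (!ctx)
		return 1;
	printf("len=%u str=%s\n", (unsigned)ctx->len, ctx->str);
	free(ctx);
	return 0;
}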
diff --git a/security/smack/Kconfig b/security/smack/Kconfig
index 603b0878434..e69de9c642b 100644
--- a/security/smack/Kconfig
+++ b/security/smack/Kconfig
@@ -1,6 +1,10 @@
config SECURITY_SMACK
bool "Simplified Mandatory Access Control Kernel Support"
- depends on NETLABEL && SECURITY_NETWORK
+ depends on NET
+ depends on INET
+ depends on SECURITY
+ select NETLABEL
+ select SECURITY_NETWORK
default n
help
This selects the Simplified Mandatory Access Control Kernel.
diff --git a/security/smack/smack.h b/security/smack/smack.h
index 2ad00657b80..020307ef097 100644
--- a/security/smack/smack.h
+++ b/security/smack/smack.h
@@ -23,13 +23,52 @@
#include <linux/lsm_audit.h>
/*
+ * Smack labels were limited to 23 characters for a long time.
+ */
+#define SMK_LABELLEN 24
+#define SMK_LONGLABEL 256
+
+/*
+ * This is the repository for labels seen so that it is
+ * not necessary to keep allocating tiny chunks of memory
+ * and so that they can be shared.
+ *
+ * Labels are never modified in place. Anytime a label
+ * is imported (e.g. xattrset on a file) the list is checked
+ * for it and it is added if it doesn't exist. The address
+ * is passed out in either case. Entries are added, but
+ * never deleted.
+ *
+ * Since labels are hanging around anyway it doesn't
+ * hurt to maintain a secid for those awkward situations
+ * where kernel components that ought to use LSM independent
+ * interfaces don't. The secid should go away when all of
+ * these components have been repaired.
+ *
+ * The cipso value associated with the label gets stored here, too.
+ *
+ * Keep the access rules for this subject label here so that
+ * the entire set of rules does not need to be examined every
+ * time.
+ */
+struct smack_known {
+ struct list_head list;
+ struct hlist_node smk_hashed;
+ char *smk_known;
+ u32 smk_secid;
+ struct netlbl_lsm_secattr smk_netlabel; /* on wire labels */
+ struct list_head smk_rules; /* access rules */
+ struct mutex smk_rules_lock; /* lock for rules */
+};
+
+/*
+ * Maximum number of bytes for the levels in a CIPSO IP option.
* Why 23? CIPSO is constrained to 30, so a 32 byte buffer is
* bigger than can be used, and 24 is the next lower multiple
* of 8, and there are too many issues if there isn't space set
* aside for the terminating null byte.
*/
-#define SMK_MAXLEN 23
-#define SMK_LABELLEN (SMK_MAXLEN+1)
+#define SMK_CIPSOLEN 24
struct superblock_smack {
char *smk_root;
@@ -37,56 +76,47 @@ struct superblock_smack {
char *smk_hat;
char *smk_default;
int smk_initialized;
- spinlock_t smk_sblock; /* for initialization */
};
struct socket_smack {
- char *smk_out; /* outbound label */
- char *smk_in; /* inbound label */
- char *smk_packet; /* TCP peer label */
+ struct smack_known *smk_out; /* outbound label */
+ struct smack_known *smk_in; /* inbound label */
+ struct smack_known *smk_packet; /* TCP peer label */
};
/*
* Inode smack data
*/
struct inode_smack {
- char *smk_inode; /* label of the fso */
- char *smk_task; /* label of the task */
- char *smk_mmap; /* label of the mmap domain */
- struct mutex smk_lock; /* initialization lock */
- int smk_flags; /* smack inode flags */
+ char *smk_inode; /* label of the fso */
+ struct smack_known *smk_task; /* label of the task */
+ struct smack_known *smk_mmap; /* label of the mmap domain */
+ struct mutex smk_lock; /* initialization lock */
+ int smk_flags; /* smack inode flags */
};
struct task_smack {
- char *smk_task; /* label for access control */
- char *smk_forked; /* label when forked */
+ struct smack_known *smk_task; /* label for access control */
+ struct smack_known *smk_forked; /* label when forked */
struct list_head smk_rules; /* per task access rules */
struct mutex smk_rules_lock; /* lock for the rules */
};
#define SMK_INODE_INSTANT 0x01 /* inode is instantiated */
#define SMK_INODE_TRANSMUTE 0x02 /* directory is transmuting */
+#define SMK_INODE_CHANGED 0x04 /* smack was transmuted */
/*
* A label access rule.
*/
struct smack_rule {
struct list_head list;
- char *smk_subject;
+ struct smack_known *smk_subject;
char *smk_object;
int smk_access;
};
/*
- * An entry in the table mapping smack values to
- * CIPSO level/category-set values.
- */
-struct smack_cipso {
- int smk_level;
- char smk_catset[SMK_LABELLEN];
-};
-
-/*
* An entry in the table identifying hosts.
*/
struct smk_netlbladdr {
@@ -97,38 +127,14 @@ struct smk_netlbladdr {
};
/*
- * This is the repository for labels seen so that it is
- * not necessary to keep allocating tiny chuncks of memory
- * and so that they can be shared.
- *
- * Labels are never modified in place. Anytime a label
- * is imported (e.g. xattrset on a file) the list is checked
- * for it and it is added if it doesn't exist. The address
- * is passed out in either case. Entries are added, but
- * never deleted.
- *
- * Since labels are hanging around anyway it doesn't
- * hurt to maintain a secid for those awkward situations
- * where kernel components that ought to use LSM independent
- * interfaces don't. The secid should go away when all of
- * these components have been repaired.
- *
- * If there is a cipso value associated with the label it
- * gets stored here, too. This will most likely be rare as
- * the cipso direct mapping in used internally.
- *
- * Keep the access rules for this subject label here so that
- * the entire set of rules does not need to be examined every
- * time.
+ * An entry in the table identifying ports.
*/
-struct smack_known {
+struct smk_port_label {
struct list_head list;
- char smk_known[SMK_LABELLEN];
- u32 smk_secid;
- struct smack_cipso *smk_cipso;
- spinlock_t smk_cipsolock; /* for changing cipso map */
- struct list_head smk_rules; /* access rules */
- struct mutex smk_rules_lock; /* lock for the rules */
+ struct sock *smk_sock; /* socket initialized on */
+ unsigned short smk_port; /* the port number */
+ struct smack_known *smk_in; /* inbound label */
+ struct smack_known *smk_out; /* outgoing label */
};
/*
@@ -138,6 +144,7 @@ struct smack_known {
#define SMK_FSFLOOR "smackfsfloor="
#define SMK_FSHAT "smackfshat="
#define SMK_FSROOT "smackfsroot="
+#define SMK_FSTRANS "smackfstransmute="
#define SMACK_CIPSO_OPTION "-CIPSO"
@@ -155,24 +162,36 @@ struct smack_known {
#define SMACK_CIPSO_SOCKET 1
/*
- * smackfs magic number
- */
-#define SMACK_MAGIC 0x43415d53 /* "SMAC" */
-
-/*
* CIPSO defaults.
*/
#define SMACK_CIPSO_DOI_DEFAULT 3 /* Historical */
#define SMACK_CIPSO_DOI_INVALID -1 /* Not a DOI */
#define SMACK_CIPSO_DIRECT_DEFAULT 250 /* Arbitrary */
-#define SMACK_CIPSO_MAXCATVAL 63 /* Bigger gets harder */
+#define SMACK_CIPSO_MAPPED_DEFAULT 251 /* Also arbitrary */
#define SMACK_CIPSO_MAXLEVEL 255 /* CIPSO 2.2 standard */
-#define SMACK_CIPSO_MAXCATNUM 239 /* CIPSO 2.2 standard */
+/*
+ * CIPSO 2.2 standard is 239, but Smack wants to use the
+ * categories in a structured way that limits the value to
+ * the bits in 23 bytes, hence the unusual number.
+ */
+#define SMACK_CIPSO_MAXCATNUM 184 /* 23 * 8 */
/*
- * Flag for transmute access
+ * Ptrace rules
*/
-#define MAY_TRANSMUTE 64
+#define SMACK_PTRACE_DEFAULT 0
+#define SMACK_PTRACE_EXACT 1
+#define SMACK_PTRACE_DRACONIAN 2
+#define SMACK_PTRACE_MAX SMACK_PTRACE_DRACONIAN
+
+/*
+ * Flags for untraditional access modes.
+ * It shouldn't be necessary to avoid conflicts with definitions
+ * in fs.h, but do so anyway.
+ */
+#define MAY_TRANSMUTE 0x00001000 /* Controls directory labeling */
+#define MAY_LOCK 0x00002000 /* Locks should be writes, but ... */
+
/*
* Just to make the common cases easier to deal with
*/
@@ -181,9 +200,18 @@ struct smack_known {
#define MAY_NOT 0
/*
- * Number of access types used by Smack (rwxat)
+ * Number of access types used by Smack (rwxatl)
*/
-#define SMK_NUM_ACCESS_TYPE 5
+#define SMK_NUM_ACCESS_TYPE 6
+
+/* SMACK data */
+struct smack_audit_data {
+ const char *function;
+ char *subject;
+ char *object;
+ char *request;
+ int result;
+};
/*
* Smack audit data; is empty if CONFIG_AUDIT not set
@@ -192,6 +220,7 @@ struct smack_known {
struct smk_audit_info {
#ifdef CONFIG_AUDIT
struct common_audit_data a;
+ struct smack_audit_data sad;
#endif
};
/*
@@ -203,14 +232,15 @@ struct inode_smack *new_inode_smack(char *);
* These functions are in smack_access.c
*/
int smk_access_entry(char *, char *, struct list_head *);
-int smk_access(char *, char *, int, struct smk_audit_info *);
+int smk_access(struct smack_known *, char *, int, struct smk_audit_info *);
+int smk_tskacc(struct task_smack *, char *, u32, struct smk_audit_info *);
int smk_curacc(char *, u32, struct smk_audit_info *);
-int smack_to_cipso(const char *, struct smack_cipso *);
-char *smack_from_cipso(u32, char *);
-char *smack_from_secid(const u32);
-void smk_parse_smack(const char *string, int len, char *smack);
+struct smack_known *smack_from_secid(const u32);
+char *smk_parse_smack(const char *string, int len);
+int smk_netlbl_mls(int, char *, struct netlbl_lsm_secattr *, int);
char *smk_import(const char *, int);
struct smack_known *smk_import_entry(const char *, int);
+void smk_insert_entry(struct smack_known *skp);
struct smack_known *smk_find_entry(const char *);
u32 smack_to_secid(const char *);
@@ -218,9 +248,12 @@ u32 smack_to_secid(const char *);
* Shared data.
*/
extern int smack_cipso_direct;
-extern char *smack_net_ambient;
-extern char *smack_onlycap;
+extern int smack_cipso_mapped;
+extern struct smack_known *smack_net_ambient;
+extern struct smack_known *smack_onlycap;
+extern struct smack_known *smack_syslog_label;
extern const char *smack_cipso_option;
+extern int smack_ptrace_rule;
extern struct smack_known smack_known_floor;
extern struct smack_known smack_known_hat;
@@ -229,22 +262,14 @@ extern struct smack_known smack_known_invalid;
extern struct smack_known smack_known_star;
extern struct smack_known smack_known_web;
+extern struct mutex smack_known_lock;
extern struct list_head smack_known_list;
extern struct list_head smk_netlbladdr_list;
extern struct security_operations smack_ops;
-/*
- * Stricly for CIPSO level manipulation.
- * Set the category bit number in a smack label sized buffer.
- */
-static inline void smack_catset_bit(int cat, char *catsetp)
-{
- if (cat > SMK_LABELLEN * 8)
- return;
-
- catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
-}
+#define SMACK_HASH_SLOTS 16
+extern struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
/*
* Is the directory transmuting?
@@ -265,17 +290,17 @@ static inline char *smk_of_inode(const struct inode *isp)
}
/*
- * Present a pointer to the smack label in an task blob.
+ * Present a pointer to the smack label entry in a task blob.
*/
-static inline char *smk_of_task(const struct task_smack *tsp)
+static inline struct smack_known *smk_of_task(const struct task_smack *tsp)
{
return tsp->smk_task;
}
/*
- * Present a pointer to the forked smack label in an task blob.
+ * Present a pointer to the forked smack label entry in a task blob.
*/
-static inline char *smk_of_forked(const struct task_smack *tsp)
+static inline struct smack_known *smk_of_forked(const struct task_smack *tsp)
{
return tsp->smk_forked;
}
@@ -283,12 +308,27 @@ static inline char *smk_of_forked(const struct task_smack *tsp)
/*
* Present a pointer to the smack label in the current task blob.
*/
-static inline char *smk_of_current(void)
+static inline struct smack_known *smk_of_current(void)
{
return smk_of_task(current_security());
}
/*
+ * Is the task privileged, and is it allowed to exercise
+ * that privilege under the onlycap rule?
+ */
+static inline int smack_privileged(int cap)
+{
+ struct smack_known *skp = smk_of_current();
+
+ if (!capable(cap))
+ return 0;
+ if (smack_onlycap == NULL || smack_onlycap == skp)
+ return 1;
+ return 0;
+}
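The helper above gates privilege on both a capability and, when one is configured, the onlycap label. A minimal userspace sketch of that decision, with capable() and the onlycap pointer replaced by stand-ins; this is illustrative, not kernel code.

#include <stddef.h>

struct label { const char *name; };

static struct label *onlycap;		/* NULL: no onlycap restriction */

static int has_capability(int cap)	/* stand-in: pretend the capability is held */
{
	(void)cap;
	return 1;
}

int privileged(int cap, struct label *current_label)
{
	if (!has_capability(cap))
		return 0;		/* no capability, no privilege */
	if (onlycap == NULL || onlycap == current_label)
		return 1;		/* unrestricted, or the designated label */
	return 0;			/* onlycap names a different label */
}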
+
+/*
* logging functions
*/
#define SMACK_AUDIT_DENIED 0x1
@@ -309,9 +349,18 @@ void smack_log(char *subject_label, char *object_label,
static inline void smk_ad_init(struct smk_audit_info *a, const char *func,
char type)
{
- memset(a, 0, sizeof(*a));
+ memset(&a->sad, 0, sizeof(a->sad));
a->a.type = type;
- a->a.smack_audit_data.function = func;
+ a->a.smack_audit_data = &a->sad;
+ a->a.smack_audit_data->function = func;
+}
+
+static inline void smk_ad_init_net(struct smk_audit_info *a, const char *func,
+ char type, struct lsm_network_audit *net)
+{
+ smk_ad_init(a, func, type);
+ memset(net, 0, sizeof(*net));
+ a->a.u.net = net;
}
static inline void smk_ad_setfield_u_tsk(struct smk_audit_info *a,
@@ -337,7 +386,7 @@ static inline void smk_ad_setfield_u_fs_path(struct smk_audit_info *a,
static inline void smk_ad_setfield_u_net_sk(struct smk_audit_info *a,
struct sock *sk)
{
- a->a.u.net.sk = sk;
+ a->a.u.net->sk = sk;
}
#else /* no AUDIT */
diff --git a/security/smack/smack_access.c b/security/smack/smack_access.c
index cc7cb6edba0..c062e9467b6 100644
--- a/security/smack/smack_access.c
+++ b/security/smack/smack_access.c
@@ -19,37 +19,31 @@
struct smack_known smack_known_huh = {
.smk_known = "?",
.smk_secid = 2,
- .smk_cipso = NULL,
};
struct smack_known smack_known_hat = {
.smk_known = "^",
.smk_secid = 3,
- .smk_cipso = NULL,
};
struct smack_known smack_known_star = {
.smk_known = "*",
.smk_secid = 4,
- .smk_cipso = NULL,
};
struct smack_known smack_known_floor = {
.smk_known = "_",
.smk_secid = 5,
- .smk_cipso = NULL,
};
struct smack_known smack_known_invalid = {
.smk_known = "",
.smk_secid = 6,
- .smk_cipso = NULL,
};
struct smack_known smack_known_web = {
.smk_known = "@",
.smk_secid = 7,
- .smk_cipso = NULL,
};
LIST_HEAD(smack_known_list);
@@ -90,6 +84,8 @@ int log_policy = SMACK_AUDIT_DENIED;
*
* Do the object check first because that is more
* likely to differ.
+ *
+ * Allowing write access implies allowing locking.
*/
int smk_access_entry(char *subject_label, char *object_label,
struct list_head *rule_list)
@@ -99,18 +95,23 @@ int smk_access_entry(char *subject_label, char *object_label,
list_for_each_entry_rcu(srp, rule_list, list) {
if (srp->smk_object == object_label &&
- srp->smk_subject == subject_label) {
+ srp->smk_subject->smk_known == subject_label) {
may = srp->smk_access;
break;
}
}
+ /*
+ * MAY_WRITE implies MAY_LOCK.
+ */
+ if ((may & MAY_WRITE) == MAY_WRITE)
+ may |= MAY_LOCK;
return may;
}
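The write-implies-lock addition above is a pure bit operation on the MAY_* mask. A tiny standalone illustration follows, using the MAY_WRITE value from fs.h and the MAY_LOCK value defined earlier in this patch; illustrative only.

#include <stdio.h>

#define MAY_WRITE	0x00000002	/* as in include/linux/fs.h */
#define MAY_LOCK	0x00002000	/* as defined in smack.h above */

int main(void)
{
	int may = MAY_WRITE;			/* rule grants write only */

	if ((may & MAY_WRITE) == MAY_WRITE)
		may |= MAY_LOCK;		/* write implies lock */

	printf("lock %s\n", (may & MAY_LOCK) ? "granted" : "denied");
	return 0;
}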
/**
* smk_access - determine if a subject has a specific access to an object
- * @subject_label: a pointer to the subject's Smack label
+ * @subject_known: a pointer to the subject's Smack label entry
* @object_label: a pointer to the object's Smack label
* @request: the access requested, in "MAY" format
* @a : a pointer to the audit data
@@ -121,10 +122,9 @@ int smk_access_entry(char *subject_label, char *object_label,
*
* Smack labels are shared on smack_list
*/
-int smk_access(char *subject_label, char *object_label, int request,
- struct smk_audit_info *a)
+int smk_access(struct smack_known *subject_known, char *object_label,
+ int request, struct smk_audit_info *a)
{
- struct smack_known *skp;
int may = MAY_NOT;
int rc = 0;
@@ -133,7 +133,7 @@ int smk_access(char *subject_label, char *object_label, int request,
*
* A star subject can't access any object.
*/
- if (subject_label == smack_known_star.smk_known) {
+ if (subject_known == &smack_known_star) {
rc = -EACCES;
goto out_audit;
}
@@ -143,7 +143,7 @@ int smk_access(char *subject_label, char *object_label, int request,
* An internet subject can access any object.
*/
if (object_label == smack_known_web.smk_known ||
- subject_label == smack_known_web.smk_known)
+ subject_known == &smack_known_web)
goto out_audit;
/*
* A star object can be accessed by any subject.
@@ -154,7 +154,7 @@ int smk_access(char *subject_label, char *object_label, int request,
* An object can be accessed in any way by a subject
* with the same label.
*/
- if (subject_label == object_label)
+ if (subject_known->smk_known == object_label)
goto out_audit;
/*
* A hat subject can read any object.
@@ -163,7 +163,7 @@ int smk_access(char *subject_label, char *object_label, int request,
if ((request & MAY_ANYREAD) == request) {
if (object_label == smack_known_floor.smk_known)
goto out_audit;
- if (subject_label == smack_known_hat.smk_known)
+ if (subject_known == &smack_known_hat)
goto out_audit;
}
/*
@@ -173,9 +173,9 @@ int smk_access(char *subject_label, char *object_label, int request,
* good. A negative response from smk_access_entry()
* indicates there is no entry for this pair.
*/
- skp = smk_find_entry(subject_label);
rcu_read_lock();
- may = smk_access_entry(subject_label, object_label, &skp->smk_rules);
+ may = smk_access_entry(subject_known->smk_known, object_label,
+ &subject_known->smk_rules);
rcu_read_unlock();
if (may > 0 && (request & may) == request)
@@ -185,39 +185,42 @@ int smk_access(char *subject_label, char *object_label, int request,
out_audit:
#ifdef CONFIG_AUDIT
if (a)
- smack_log(subject_label, object_label, request, rc, a);
+ smack_log(subject_known->smk_known, object_label, request,
+ rc, a);
#endif
return rc;
}
/**
- * smk_curacc - determine if current has a specific access to an object
+ * smk_tskacc - determine if a task has a specific access to an object
+ * @tsp: a pointer to the subject task
* @obj_label: a pointer to the object's Smack label
* @mode: the access requested, in "MAY" format
* @a : common audit data
*
- * This function checks the current subject label/object label pair
+ * This function checks the subject task's label/object label pair
* in the access rule list and returns 0 if the access is permitted,
- * non zero otherwise. It allows that current may have the capability
+ * non zero otherwise. It allows that the task may have the capability
* to override the rules.
*/
-int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
+int smk_tskacc(struct task_smack *subject, char *obj_label,
+ u32 mode, struct smk_audit_info *a)
{
- struct task_smack *tsp = current_security();
- char *sp = smk_of_task(tsp);
+ struct smack_known *skp = smk_of_task(subject);
int may;
int rc;
/*
* Check the global rule list
*/
- rc = smk_access(sp, obj_label, mode, NULL);
+ rc = smk_access(skp, obj_label, mode, NULL);
if (rc == 0) {
/*
* If there is an entry in the task's rule list
* it can further restrict access.
*/
- may = smk_access_entry(sp, obj_label, &tsp->smk_rules);
+ may = smk_access_entry(skp->smk_known, obj_label,
+ &subject->smk_rules);
if (may < 0)
goto out_audit;
if ((mode & may) == mode)
@@ -226,24 +229,37 @@ int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
}
/*
- * Return if a specific label has been designated as the
- * only one that gets privilege and current does not
- * have that label.
+ * Allow privileged tasks to override policy.
*/
- if (smack_onlycap != NULL && smack_onlycap != sp)
- goto out_audit;
-
- if (capable(CAP_MAC_OVERRIDE))
+ if (rc != 0 && smack_privileged(CAP_MAC_OVERRIDE))
rc = 0;
out_audit:
#ifdef CONFIG_AUDIT
if (a)
- smack_log(sp, obj_label, mode, rc, a);
+ smack_log(skp->smk_known, obj_label, mode, rc, a);
#endif
return rc;
}
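smk_tskacc() layers three decisions: the global rule list, an optional per-task rule that can only narrow the result, and a final privilege override. A compact sketch of that combination with the inputs reduced to an already-computed global result, a local rule mask, and a privilege flag; these are stand-ins, not the kernel data structures.

/* global_ok:  result of the global rule check (1 = allowed)
 * local_may:  per-task rule mask, or -1 when no local rule exists
 * privileged: stand-in for smack_privileged(CAP_MAC_OVERRIDE)
 */
int task_access_ok(int request, int global_ok, int local_may, int privileged)
{
	int ok = global_ok;

	if (ok && local_may >= 0)			/* a task-local rule exists */
		ok = (request & local_may) == request;	/* it can only restrict */
	if (!ok && privileged)
		ok = 1;					/* privilege overrides policy */
	return ok;
}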
+/**
+ * smk_curacc - determine if current has a specific access to an object
+ * @obj_label: a pointer to the object's Smack label
+ * @mode: the access requested, in "MAY" format
+ * @a : common audit data
+ *
+ * This function checks the current subject label/object label pair
+ * in the access rule list and returns 0 if the access is permitted,
+ * non zero otherwise. It allows that current may have the capability
+ * to override the rules.
+ */
+int smk_curacc(char *obj_label, u32 mode, struct smk_audit_info *a)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_tskacc(tsp, obj_label, mode, a);
+}
+
#ifdef CONFIG_AUDIT
/**
 * smack_str_from_perm : helper to translate an int to a
@@ -255,6 +271,7 @@ out_audit:
static inline void smack_str_from_perm(char *string, int access)
{
int i = 0;
+
if (access & MAY_READ)
string[i++] = 'r';
if (access & MAY_WRITE)
@@ -263,6 +280,10 @@ static inline void smack_str_from_perm(char *string, int access)
string[i++] = 'x';
if (access & MAY_APPEND)
string[i++] = 'a';
+ if (access & MAY_TRANSMUTE)
+ string[i++] = 't';
+ if (access & MAY_LOCK)
+ string[i++] = 'l';
string[i] = '\0';
}
/**
@@ -275,15 +296,18 @@ static inline void smack_str_from_perm(char *string, int access)
static void smack_log_callback(struct audit_buffer *ab, void *a)
{
struct common_audit_data *ad = a;
- struct smack_audit_data *sad = &ad->smack_audit_data;
+ struct smack_audit_data *sad = ad->smack_audit_data;
audit_log_format(ab, "lsm=SMACK fn=%s action=%s",
- ad->smack_audit_data.function,
+ ad->smack_audit_data->function,
sad->result ? "denied" : "granted");
audit_log_format(ab, " subject=");
audit_log_untrustedstring(ab, sad->subject);
audit_log_format(ab, " object=");
audit_log_untrustedstring(ab, sad->object);
- audit_log_format(ab, " requested=%s", sad->request);
+ if (sad->request[0] == '\0')
+ audit_log_format(ab, " labels_differ");
+ else
+ audit_log_format(ab, " requested=%s", sad->request);
}
/**
@@ -310,19 +334,19 @@ void smack_log(char *subject_label, char *object_label, int request,
if (result == 0 && (log_policy & SMACK_AUDIT_ACCEPT) == 0)
return;
- if (a->smack_audit_data.function == NULL)
- a->smack_audit_data.function = "unknown";
+ sad = a->smack_audit_data;
+
+ if (sad->function == NULL)
+ sad->function = "unknown";
/* end preparing the audit data */
- sad = &a->smack_audit_data;
smack_str_from_perm(request_buffer, request);
sad->subject = subject_label;
sad->object = object_label;
sad->request = request_buffer;
sad->result = result;
- a->lsm_pre_audit = smack_log_callback;
- common_lsm_audit(a);
+ common_lsm_audit(a, smack_log_callback, NULL);
}
#else /* #ifdef CONFIG_AUDIT */
void smack_log(char *subject_label, char *object_label, int request,
@@ -331,7 +355,26 @@ void smack_log(char *subject_label, char *object_label, int request,
}
#endif
-static DEFINE_MUTEX(smack_known_lock);
+DEFINE_MUTEX(smack_known_lock);
+
+struct hlist_head smack_known_hash[SMACK_HASH_SLOTS];
+
+/**
+ * smk_insert_entry - insert a smack label into the label hash
+ * @skp: the label entry to insert
+ *
+ * This function must be called with smack_known_lock held.
+ */
+void smk_insert_entry(struct smack_known *skp)
+{
+ unsigned int hash;
+ struct hlist_head *head;
+
+ hash = full_name_hash(skp->smk_known, strlen(skp->smk_known));
+ head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];
+
+ hlist_add_head_rcu(&skp->smk_hashed, head);
+ list_add_rcu(&skp->list, &smack_known_list);
+}
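smk_insert_entry() and smk_find_entry() agree on a bucket by hashing the label string and masking with SMACK_HASH_SLOTS - 1 (a power of two). The sketch below shows the bucket selection with djb2 standing in for the kernel's full_name_hash(); it is illustrative, not the kernel code.

#include <stdio.h>

#define SMACK_HASH_SLOTS 16

static unsigned int hash_label(const char *s)
{
	unsigned int h = 5381;			/* djb2, a stand-in hash */

	while (*s)
		h = h * 33 + (unsigned char)*s++;
	return h;
}

int main(void)
{
	const char *labels[] = { "_", "^", "*", "System", "User" };
	int i;

	for (i = 0; i < 5; i++)
		printf("%-8s -> slot %u\n", labels[i],
		       hash_label(labels[i]) & (SMACK_HASH_SLOTS - 1));
	return 0;
}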
/**
* smk_find_entry - find a label on the list, return the list entry
@@ -342,12 +385,16 @@ static DEFINE_MUTEX(smack_known_lock);
*/
struct smack_known *smk_find_entry(const char *string)
{
+ unsigned int hash;
+ struct hlist_head *head;
struct smack_known *skp;
- list_for_each_entry_rcu(skp, &smack_known_list, list) {
- if (strncmp(skp->smk_known, string, SMK_MAXLEN) == 0)
+ hash = full_name_hash(string, strlen(string));
+ head = &smack_known_hash[hash & (SMACK_HASH_SLOTS - 1)];
+
+ hlist_for_each_entry_rcu(skp, head, smk_hashed)
+ if (strcmp(skp->smk_known, string) == 0)
return skp;
- }
return NULL;
}
@@ -356,27 +403,78 @@ struct smack_known *smk_find_entry(const char *string)
* smk_parse_smack - parse smack label from a text string
* @string: a text string that might contain a Smack label
* @len: the maximum size, or zero if it is NULL terminated.
- * @smack: parsed smack label, or NULL if parse error
+ *
+ * Returns a pointer to the clean label, or NULL
*/
-void smk_parse_smack(const char *string, int len, char *smack)
+char *smk_parse_smack(const char *string, int len)
{
- int found;
+ char *smack;
int i;
- if (len <= 0 || len > SMK_MAXLEN)
- len = SMK_MAXLEN;
-
- for (i = 0, found = 0; i < SMK_LABELLEN; i++) {
- if (found)
- smack[i] = '\0';
- else if (i >= len || string[i] > '~' || string[i] <= ' ' ||
- string[i] == '/' || string[i] == '"' ||
- string[i] == '\\' || string[i] == '\'') {
- smack[i] = '\0';
- found = 1;
- } else
- smack[i] = string[i];
+ if (len <= 0)
+ len = strlen(string) + 1;
+
+ /*
+ * Reserve a leading '-' as an indicator that
+ * this isn't a label, but an option to interfaces
+ * including /smack/cipso and /smack/cipso2
+ */
+ if (string[0] == '-')
+ return NULL;
+
+ for (i = 0; i < len; i++)
+ if (string[i] > '~' || string[i] <= ' ' || string[i] == '/' ||
+ string[i] == '"' || string[i] == '\\' || string[i] == '\'')
+ break;
+
+ if (i == 0 || i >= SMK_LONGLABEL)
+ return NULL;
+
+ smack = kzalloc(i + 1, GFP_KERNEL);
+ if (smack != NULL) {
+ strncpy(smack, string, i + 1);
+ smack[i] = '\0';
}
+ return smack;
+}
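The new smk_parse_smack() accepts printable ASCII above space, rejects '/', '"', '\' and the single quote, treats a leading '-' as an interface option rather than a label, and truncates at the first invalid character. A userspace sketch of just that validation, where 256 stands in for SMK_LONGLABEL; this is not the kernel function.

#include <stdio.h>

static int label_char_ok(char c)
{
	return c > ' ' && c <= '~' &&
	       c != '/' && c != '"' && c != '\\' && c != '\'';
}

/* Returns the usable label length, or 0 if the string is not a label. */
static int label_length(const char *s)
{
	int i;

	if (s[0] == '-')			/* reserved for options, not a label */
		return 0;
	for (i = 0; s[i] != '\0'; i++)
		if (!label_char_ok(s[i]))
			break;
	return (i == 0 || i >= 256) ? 0 : i;	/* 256 stands in for SMK_LONGLABEL */
}

int main(void)
{
	printf("%d\n", label_length("System"));		/* 6 */
	printf("%d\n", label_length("-CIPSO"));		/* 0: option, not a label */
	printf("%d\n", label_length("has space"));	/* 3: stops at the space */
	return 0;
}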
+
+/**
+ * smk_netlbl_mls - convert a catset to netlabel mls categories
+ * @level: the Smack CIPSO level to use
+ * @catset: the Smack categories
+ * @sap: where to put the netlabel categories
+ * @len: number of bytes in the catset
+ *
+ * Allocates and fills attr.mls
+ * Returns 0 on success, error code on failure.
+ */
+int smk_netlbl_mls(int level, char *catset, struct netlbl_lsm_secattr *sap,
+ int len)
+{
+ unsigned char *cp;
+ unsigned char m;
+ int cat;
+ int rc;
+ int byte;
+
+ sap->flags |= NETLBL_SECATTR_MLS_CAT;
+ sap->attr.mls.lvl = level;
+ sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
+ if (!sap->attr.mls.cat)
+ return -ENOMEM;
+ sap->attr.mls.cat->startbit = 0;
+
+ for (cat = 1, cp = catset, byte = 0; byte < len; cp++, byte++)
+ for (m = 0x80; m != 0; m >>= 1, cat++) {
+ if ((m & *cp) == 0)
+ continue;
+ rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat,
+ cat, GFP_ATOMIC);
+ if (rc < 0) {
+ netlbl_secattr_catmap_free(sap->attr.mls.cat);
+ return rc;
+ }
+ }
+
+ return 0;
}
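smk_netlbl_mls() walks the category byte array MSB-first, numbering bits from 1, and sets one netlabel category per set bit. The standalone sketch below reproduces that walk with a printf in place of netlbl_secattr_catmap_setbit(); illustrative only.

#include <stdio.h>

static void walk_catset(const unsigned char *catset, int len)
{
	int cat = 1;
	int byte;
	unsigned char m;

	for (byte = 0; byte < len; byte++)
		for (m = 0x80; m != 0; m >>= 1, cat++)
			if (catset[byte] & m)
				printf("category %d is set\n", cat);
}

int main(void)
{
	/* 0xA0 sets bits for categories 1 and 3; the low bit of the
	 * second byte maps to category 16. */
	unsigned char catset[2] = { 0xA0, 0x01 };

	walk_catset(catset, 2);		/* prints categories 1, 3 and 16 */
	return 0;
}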
/**
@@ -390,33 +488,59 @@ void smk_parse_smack(const char *string, int len, char *smack)
struct smack_known *smk_import_entry(const char *string, int len)
{
struct smack_known *skp;
- char smack[SMK_LABELLEN];
+ char *smack;
+ int slen;
+ int rc;
- smk_parse_smack(string, len, smack);
- if (smack[0] == '\0')
+ smack = smk_parse_smack(string, len);
+ if (smack == NULL)
return NULL;
mutex_lock(&smack_known_lock);
skp = smk_find_entry(smack);
+ if (skp != NULL)
+ goto freeout;
- if (skp == NULL) {
- skp = kzalloc(sizeof(struct smack_known), GFP_KERNEL);
- if (skp != NULL) {
- strncpy(skp->smk_known, smack, SMK_MAXLEN);
- skp->smk_secid = smack_next_secid++;
- skp->smk_cipso = NULL;
- INIT_LIST_HEAD(&skp->smk_rules);
- spin_lock_init(&skp->smk_cipsolock);
- mutex_init(&skp->smk_rules_lock);
- /*
- * Make sure that the entry is actually
- * filled before putting it on the list.
- */
- list_add_rcu(&skp->list, &smack_known_list);
- }
- }
+ skp = kzalloc(sizeof(*skp), GFP_KERNEL);
+ if (skp == NULL)
+ goto freeout;
+ skp->smk_known = smack;
+ skp->smk_secid = smack_next_secid++;
+ skp->smk_netlabel.domain = skp->smk_known;
+ skp->smk_netlabel.flags =
+ NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
+ /*
+ * If direct labeling works use it.
+ * Otherwise use mapped labeling.
+ */
+ slen = strlen(smack);
+ if (slen < SMK_CIPSOLEN)
+ rc = smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
+ &skp->smk_netlabel, slen);
+ else
+ rc = smk_netlbl_mls(smack_cipso_mapped, (char *)&skp->smk_secid,
+ &skp->smk_netlabel, sizeof(skp->smk_secid));
+
+ if (rc >= 0) {
+ INIT_LIST_HEAD(&skp->smk_rules);
+ mutex_init(&skp->smk_rules_lock);
+ /*
+ * Make sure that the entry is actually
+ * filled before putting it on the list.
+ */
+ smk_insert_entry(skp);
+ goto unlockout;
+ }
+ /*
+ * smk_netlbl_mls failed.
+ */
+ kfree(skp);
+ skp = NULL;
+freeout:
+ kfree(smack);
+unlockout:
mutex_unlock(&smack_known_lock);
return skp;
@@ -447,10 +571,10 @@ char *smk_import(const char *string, int len)
* smack_from_secid - find the Smack label associated with a secid
* @secid: an integer that might be associated with a Smack label
*
- * Returns a pointer to the appropriate Smack label if there is one,
+ * Returns a pointer to the appropriate Smack label entry if there is one,
* otherwise a pointer to the invalid Smack label.
*/
-char *smack_from_secid(const u32 secid)
+struct smack_known *smack_from_secid(const u32 secid)
{
struct smack_known *skp;
@@ -458,7 +582,7 @@ char *smack_from_secid(const u32 secid)
list_for_each_entry_rcu(skp, &smack_known_list, list) {
if (skp->smk_secid == secid) {
rcu_read_unlock();
- return skp->smk_known;
+ return skp;
}
}
@@ -467,7 +591,7 @@ char *smack_from_secid(const u32 secid)
* of a secid that is not on the list.
*/
rcu_read_unlock();
- return smack_known_invalid.smk_known;
+ return &smack_known_invalid;
}
/**
@@ -479,79 +603,9 @@ char *smack_from_secid(const u32 secid)
*/
u32 smack_to_secid(const char *smack)
{
- struct smack_known *skp;
-
- rcu_read_lock();
- list_for_each_entry_rcu(skp, &smack_known_list, list) {
- if (strncmp(skp->smk_known, smack, SMK_MAXLEN) == 0) {
- rcu_read_unlock();
- return skp->smk_secid;
- }
- }
- rcu_read_unlock();
- return 0;
-}
-
-/**
- * smack_from_cipso - find the Smack label associated with a CIPSO option
- * @level: Bell & LaPadula level from the network
- * @cp: Bell & LaPadula categories from the network
- *
- * This is a simple lookup in the label table.
- *
- * Return the matching label from the label list or NULL.
- */
-char *smack_from_cipso(u32 level, char *cp)
-{
- struct smack_known *kp;
- char *final = NULL;
-
- rcu_read_lock();
- list_for_each_entry(kp, &smack_known_list, list) {
- if (kp->smk_cipso == NULL)
- continue;
-
- spin_lock_bh(&kp->smk_cipsolock);
-
- if (kp->smk_cipso->smk_level == level &&
- memcmp(kp->smk_cipso->smk_catset, cp, SMK_LABELLEN) == 0)
- final = kp->smk_known;
-
- spin_unlock_bh(&kp->smk_cipsolock);
-
- if (final != NULL)
- break;
- }
- rcu_read_unlock();
-
- return final;
-}
+ struct smack_known *skp = smk_find_entry(smack);
-/**
- * smack_to_cipso - find the CIPSO option to go with a Smack label
- * @smack: a pointer to the smack label in question
- * @cp: where to put the result
- *
- * Returns zero if a value is available, non-zero otherwise.
- */
-int smack_to_cipso(const char *smack, struct smack_cipso *cp)
-{
- struct smack_known *kp;
- int found = 0;
-
- rcu_read_lock();
- list_for_each_entry_rcu(kp, &smack_known_list, list) {
- if (kp->smk_known == smack ||
- strcmp(kp->smk_known, smack) == 0) {
- found = 1;
- break;
- }
- }
- rcu_read_unlock();
-
- if (found == 0 || kp->smk_cipso == NULL)
- return -ENOENT;
-
- memcpy(cp, kp->smk_cipso, sizeof(struct smack_cipso));
- return 0;
+ if (skp == NULL)
+ return 0;
+ return skp->smk_secid;
}
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index e8af5b0ba80..f2c30801ce4 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -27,15 +27,20 @@
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
+#include <linux/dccp.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/pipe_fs_i.h>
-#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
#include <linux/audit.h>
#include <linux/magic.h>
#include <linux/dcache.h>
#include <linux/personality.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/binfmts.h>
#include "smack.h"
#define task_security(task) (task_cred_xxx((task), security))
@@ -43,6 +48,12 @@
#define TRANS_TRUE "TRUE"
#define TRANS_TRUE_SIZE 4
+#define SMK_CONNECTING 0
+#define SMK_RECEIVING 1
+#define SMK_SENDING 2
+
+LIST_HEAD(smk_ipv6_port_list);
+
/**
* smk_fetch - Fetch the smack label from a file.
* @ip: a pointer to the inode
@@ -51,19 +62,27 @@
* Returns a pointer to the master list entry for the Smack label
* or NULL if there was no label to fetch.
*/
-static char *smk_fetch(const char *name, struct inode *ip, struct dentry *dp)
+static struct smack_known *smk_fetch(const char *name, struct inode *ip,
+ struct dentry *dp)
{
int rc;
- char in[SMK_LABELLEN];
+ char *buffer;
+ struct smack_known *skp = NULL;
if (ip->i_op->getxattr == NULL)
return NULL;
- rc = ip->i_op->getxattr(dp, name, in, SMK_LABELLEN);
- if (rc < 0)
+ buffer = kzalloc(SMK_LONGLABEL, GFP_KERNEL);
+ if (buffer == NULL)
return NULL;
- return smk_import(in, rc);
+ rc = ip->i_op->getxattr(dp, name, buffer, SMK_LONGLABEL);
+ if (rc > 0)
+ skp = smk_import_entry(buffer, rc);
+
+ kfree(buffer);
+
+ return skp;
}
/**
@@ -76,7 +95,7 @@ struct inode_smack *new_inode_smack(char *smack)
{
struct inode_smack *isp;
- isp = kzalloc(sizeof(struct inode_smack), GFP_KERNEL);
+ isp = kzalloc(sizeof(struct inode_smack), GFP_NOFS);
if (isp == NULL)
return NULL;
@@ -93,7 +112,8 @@ struct inode_smack *new_inode_smack(char *smack)
*
* Returns the new blob or NULL if there's no memory available
*/
-static struct task_smack *new_task_smack(char *task, char *forked, gfp_t gfp)
+static struct task_smack *new_task_smack(struct smack_known *task,
+ struct smack_known *forked, gfp_t gfp)
{
struct task_smack *tsp;
@@ -137,6 +157,74 @@ static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
return rc;
}
+/**
+ * smk_ptrace_mode - helper function for converting PTRACE_MODE_* into MAY_*
+ * @mode: input mode in the form of PTRACE_MODE_*
+ *
+ * Returns a converted MAY_* mode usable by smack rules
+ */
+static inline unsigned int smk_ptrace_mode(unsigned int mode)
+{
+ switch (mode) {
+ case PTRACE_MODE_READ:
+ return MAY_READ;
+ case PTRACE_MODE_ATTACH:
+ return MAY_READWRITE;
+ }
+
+ return 0;
+}
+
+/**
+ * smk_ptrace_rule_check - helper for ptrace access
+ * @tracer: tracer process
+ * @tracee_label: label of the process that's about to be traced,
+ * the pointer must originate from smack structures
+ * @mode: ptrace attachment mode (PTRACE_MODE_*)
+ * @func: name of the function that called us, used for audit
+ *
+ * Returns 0 on access granted, -error on error
+ */
+static int smk_ptrace_rule_check(struct task_struct *tracer, char *tracee_label,
+ unsigned int mode, const char *func)
+{
+ int rc;
+ struct smk_audit_info ad, *saip = NULL;
+ struct task_smack *tsp;
+ struct smack_known *skp;
+
+ if ((mode & PTRACE_MODE_NOAUDIT) == 0) {
+ smk_ad_init(&ad, func, LSM_AUDIT_DATA_TASK);
+ smk_ad_setfield_u_tsk(&ad, tracer);
+ saip = &ad;
+ }
+
+ tsp = task_security(tracer);
+ skp = smk_of_task(tsp);
+
+ if ((mode & PTRACE_MODE_ATTACH) &&
+ (smack_ptrace_rule == SMACK_PTRACE_EXACT ||
+ smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)) {
+ if (skp->smk_known == tracee_label)
+ rc = 0;
+ else if (smack_ptrace_rule == SMACK_PTRACE_DRACONIAN)
+ rc = -EACCES;
+ else if (capable(CAP_SYS_PTRACE))
+ rc = 0;
+ else
+ rc = -EACCES;
+
+ if (saip)
+ smack_log(skp->smk_known, tracee_label, 0, rc, saip);
+
+ return rc;
+ }
+
+ /* In case of rule==SMACK_PTRACE_DEFAULT or mode==PTRACE_MODE_READ */
+ rc = smk_tskacc(tsp, tracee_label, smk_ptrace_mode(mode), saip);
+ return rc;
+}
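For PTRACE_MODE_ATTACH the helper above implements a small decision table over smack_ptrace_rule. The sketch below restates that table with the label comparison and CAP_SYS_PTRACE reduced to booleans (stand-ins, not kernel calls); SMACK_PTRACE_DEFAULT defers to the ordinary smk_tskacc() check.

#include <stdio.h>

#define SMACK_PTRACE_DEFAULT	0
#define SMACK_PTRACE_EXACT	1
#define SMACK_PTRACE_DRACONIAN	2

const char *attach_decision(int rule, int labels_match, int has_cap_sys_ptrace)
{
	if (rule == SMACK_PTRACE_DEFAULT)
		return "defer to normal access check";
	if (labels_match)
		return "allow";		/* EXACT and DRACONIAN accept a label match */
	if (rule == SMACK_PTRACE_DRACONIAN)
		return "deny";		/* no capability override */
	return has_cap_sys_ptrace ? "allow" : "deny";	/* EXACT */
}

int main(void)
{
	printf("%s\n", attach_decision(SMACK_PTRACE_EXACT, 0, 1));	/* allow */
	printf("%s\n", attach_decision(SMACK_PTRACE_DRACONIAN, 0, 1));	/* deny */
	return 0;
}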
+
/*
* LSM hooks.
* We he, that is fun!
@@ -145,27 +233,24 @@ static int smk_copy_rules(struct list_head *nhead, struct list_head *ohead,
/**
* smack_ptrace_access_check - Smack approval on PTRACE_ATTACH
* @ctp: child task pointer
- * @mode: ptrace attachment mode
+ * @mode: ptrace attachment mode (PTRACE_MODE_*)
*
* Returns 0 if access is OK, an error code otherwise
*
- * Do the capability checks, and require read and write.
+ * Do the capability checks.
*/
static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
{
int rc;
- struct smk_audit_info ad;
- char *tsp;
+ struct smack_known *skp;
rc = cap_ptrace_access_check(ctp, mode);
if (rc != 0)
return rc;
- tsp = smk_of_task(task_security(ctp));
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
- smk_ad_setfield_u_tsk(&ad, ctp);
+ skp = smk_of_task(task_security(ctp));
- rc = smk_curacc(tsp, MAY_READWRITE, &ad);
+ rc = smk_ptrace_rule_check(current, skp->smk_known, mode, __func__);
return rc;
}
@@ -175,23 +260,21 @@ static int smack_ptrace_access_check(struct task_struct *ctp, unsigned int mode)
*
* Returns 0 if access is OK, an error code otherwise
*
- * Do the capability checks, and require read and write.
+ * Do the capability checks, and require PTRACE_MODE_ATTACH.
*/
static int smack_ptrace_traceme(struct task_struct *ptp)
{
int rc;
- struct smk_audit_info ad;
- char *tsp;
+ struct smack_known *skp;
rc = cap_ptrace_traceme(ptp);
if (rc != 0)
return rc;
- tsp = smk_of_task(task_security(ptp));
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
- smk_ad_setfield_u_tsk(&ad, ptp);
+ skp = smk_of_task(current_security());
- rc = smk_curacc(tsp, MAY_READWRITE, &ad);
+ rc = smk_ptrace_rule_check(ptp, skp->smk_known,
+ PTRACE_MODE_ATTACH, __func__);
return rc;
}
@@ -199,19 +282,17 @@ static int smack_ptrace_traceme(struct task_struct *ptp)
* smack_syslog - Smack approval on syslog
* @type: message type
*
- * Require that the task has the floor label
- *
* Returns 0 on success, error code otherwise.
*/
static int smack_syslog(int typefrom_file)
{
int rc = 0;
- char *sp = smk_of_current();
+ struct smack_known *skp = smk_of_current();
- if (capable(CAP_MAC_OVERRIDE))
+ if (smack_privileged(CAP_MAC_OVERRIDE))
return 0;
- if (sp != smack_known_floor.smk_known)
+ if (smack_syslog_label != NULL && smack_syslog_label != skp)
rc = -EACCES;
return rc;
@@ -241,9 +322,9 @@ static int smack_sb_alloc_security(struct super_block *sb)
sbsp->smk_default = smack_known_floor.smk_known;
sbsp->smk_floor = smack_known_floor.smk_known;
sbsp->smk_hat = smack_known_hat.smk_known;
- sbsp->smk_initialized = 0;
- spin_lock_init(&sbsp->smk_sblock);
-
+ /*
+ * smk_initialized will be zero from kzalloc.
+ */
sb->s_security = sbsp;
return 0;
@@ -287,6 +368,8 @@ static int smack_sb_copy_data(char *orig, char *smackopts)
dp = smackopts;
else if (strstr(cp, SMK_FSROOT) == cp)
dp = smackopts;
+ else if (strstr(cp, SMK_FSTRANS) == cp)
+ dp = smackopts;
else
dp = otheropts;
@@ -319,17 +402,17 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
struct inode *inode = root->d_inode;
struct superblock_smack *sp = sb->s_security;
struct inode_smack *isp;
+ struct smack_known *skp;
char *op;
char *commap;
char *nsp;
+ int transmute = 0;
+ int specified = 0;
- spin_lock(&sp->smk_sblock);
- if (sp->smk_initialized != 0) {
- spin_unlock(&sp->smk_sblock);
+ if (sp->smk_initialized)
return 0;
- }
+
sp->smk_initialized = 1;
- spin_unlock(&sp->smk_sblock);
for (op = data; op != NULL; op = commap) {
commap = strchr(op, ',');
@@ -339,36 +422,71 @@ static int smack_sb_kern_mount(struct super_block *sb, int flags, void *data)
if (strncmp(op, SMK_FSHAT, strlen(SMK_FSHAT)) == 0) {
op += strlen(SMK_FSHAT);
nsp = smk_import(op, 0);
- if (nsp != NULL)
+ if (nsp != NULL) {
sp->smk_hat = nsp;
+ specified = 1;
+ }
} else if (strncmp(op, SMK_FSFLOOR, strlen(SMK_FSFLOOR)) == 0) {
op += strlen(SMK_FSFLOOR);
nsp = smk_import(op, 0);
- if (nsp != NULL)
+ if (nsp != NULL) {
sp->smk_floor = nsp;
+ specified = 1;
+ }
} else if (strncmp(op, SMK_FSDEFAULT,
strlen(SMK_FSDEFAULT)) == 0) {
op += strlen(SMK_FSDEFAULT);
nsp = smk_import(op, 0);
- if (nsp != NULL)
+ if (nsp != NULL) {
sp->smk_default = nsp;
+ specified = 1;
+ }
} else if (strncmp(op, SMK_FSROOT, strlen(SMK_FSROOT)) == 0) {
op += strlen(SMK_FSROOT);
nsp = smk_import(op, 0);
- if (nsp != NULL)
+ if (nsp != NULL) {
+ sp->smk_root = nsp;
+ specified = 1;
+ }
+ } else if (strncmp(op, SMK_FSTRANS, strlen(SMK_FSTRANS)) == 0) {
+ op += strlen(SMK_FSTRANS);
+ nsp = smk_import(op, 0);
+ if (nsp != NULL) {
sp->smk_root = nsp;
+ transmute = 1;
+ specified = 1;
+ }
}
}
+ if (!smack_privileged(CAP_MAC_ADMIN)) {
+ /*
+ * Unprivileged mounts don't get to specify Smack values.
+ */
+ if (specified)
+ return -EPERM;
+ /*
+ * Unprivileged mounts get root and default from the caller.
+ */
+ skp = smk_of_current();
+ sp->smk_root = skp->smk_known;
+ sp->smk_default = skp->smk_known;
+ }
/*
* Initialize the root inode.
*/
isp = inode->i_security;
- if (isp == NULL)
- inode->i_security = new_inode_smack(sp->smk_root);
- else
+ if (isp == NULL) {
+ isp = new_inode_smack(sp->smk_root);
+ if (isp == NULL)
+ return -ENOMEM;
+ inode->i_security = isp;
+ } else
isp->smk_inode = sp->smk_root;
+ if (transmute)
+ isp->smk_flags |= SMK_INODE_TRANSMUTE;
+
return 0;
}
@@ -392,53 +510,6 @@ static int smack_sb_statfs(struct dentry *dentry)
return rc;
}
-/**
- * smack_sb_mount - Smack check for mounting
- * @dev_name: unused
- * @path: mount point
- * @type: unused
- * @flags: unused
- * @data: unused
- *
- * Returns 0 if current can write the floor of the filesystem
- * being mounted on, an error code otherwise.
- */
-static int smack_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data)
-{
- struct superblock_smack *sbp = path->dentry->d_sb->s_security;
- struct smk_audit_info ad;
-
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
- smk_ad_setfield_u_fs_path(&ad, *path);
-
- return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
-}
-
-/**
- * smack_sb_umount - Smack check for unmounting
- * @mnt: file system to unmount
- * @flags: unused
- *
- * Returns 0 if current can write the floor of the filesystem
- * being unmounted, an error code otherwise.
- */
-static int smack_sb_umount(struct vfsmount *mnt, int flags)
-{
- struct superblock_smack *sbp;
- struct smk_audit_info ad;
- struct path path;
-
- path.dentry = mnt->mnt_root;
- path.mnt = mnt;
-
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
- smk_ad_setfield_u_fs_path(&ad, path);
-
- sbp = path.dentry->d_sb->s_security;
- return smk_curacc(sbp->smk_floor, MAY_WRITE, &ad);
-}
-
/*
* BPRM hooks
*/
@@ -447,11 +518,11 @@ static int smack_sb_umount(struct vfsmount *mnt, int flags)
* smack_bprm_set_creds - set creds for exec
* @bprm: the exec information
*
- * Returns 0 if it gets a blob, -ENOMEM otherwise
+ * Returns 0 if it gets a blob, -EPERM if exec is forbidden, -ENOMEM otherwise
*/
static int smack_bprm_set_creds(struct linux_binprm *bprm)
{
- struct inode *inode = bprm->file->f_path.dentry->d_inode;
+ struct inode *inode = file_inode(bprm->file);
struct task_smack *bsp = bprm->cred->security;
struct inode_smack *isp;
int rc;
@@ -467,7 +538,22 @@ static int smack_bprm_set_creds(struct linux_binprm *bprm)
if (isp->smk_task == NULL || isp->smk_task == bsp->smk_task)
return 0;
- if (bprm->unsafe)
+ if (bprm->unsafe & (LSM_UNSAFE_PTRACE | LSM_UNSAFE_PTRACE_CAP)) {
+ struct task_struct *tracer;
+ rc = 0;
+
+ rcu_read_lock();
+ tracer = ptrace_parent(current);
+ if (likely(tracer != NULL))
+ rc = smk_ptrace_rule_check(tracer,
+ isp->smk_task->smk_known,
+ PTRACE_MODE_ATTACH,
+ __func__);
+ rcu_read_unlock();
+
+ if (rc != 0)
+ return rc;
+ } else if (bprm->unsafe)
return -EPERM;
bsp->smk_task = isp->smk_task;
@@ -519,7 +605,9 @@ static int smack_bprm_secureexec(struct linux_binprm *bprm)
*/
static int smack_inode_alloc_security(struct inode *inode)
{
- inode->i_security = new_inode_smack(smk_of_current());
+ struct smack_known *skp = smk_of_current();
+
+ inode->i_security = new_inode_smack(skp->smk_known);
if (inode->i_security == NULL)
return -ENOMEM;
return 0;
@@ -549,37 +637,36 @@ static void smack_inode_free_security(struct inode *inode)
* Returns 0 if it all works out, -ENOMEM if there's no memory
*/
static int smack_inode_init_security(struct inode *inode, struct inode *dir,
- const struct qstr *qstr, char **name,
+ const struct qstr *qstr, const char **name,
void **value, size_t *len)
{
- struct smack_known *skp;
- char *csp = smk_of_current();
+ struct inode_smack *issp = inode->i_security;
+ struct smack_known *skp = smk_of_current();
char *isp = smk_of_inode(inode);
char *dsp = smk_of_inode(dir);
int may;
- if (name) {
- *name = kstrdup(XATTR_SMACK_SUFFIX, GFP_KERNEL);
- if (*name == NULL)
- return -ENOMEM;
- }
+ if (name)
+ *name = XATTR_SMACK_SUFFIX;
if (value) {
- skp = smk_find_entry(csp);
rcu_read_lock();
- may = smk_access_entry(csp, dsp, &skp->smk_rules);
+ may = smk_access_entry(skp->smk_known, dsp, &skp->smk_rules);
rcu_read_unlock();
/*
* If the access rule allows transmutation and
* the directory requests transmutation then
* by all means transmute.
+ * Mark the inode as changed.
*/
if (may > 0 && ((may & MAY_TRANSMUTE) != 0) &&
- smk_inode_transmutable(dir))
+ smk_inode_transmutable(dir)) {
isp = dsp;
+ issp->smk_flags |= SMK_INODE_CHANGED;
+ }
- *value = kstrdup(isp, GFP_KERNEL);
+ *value = kstrdup(isp, GFP_NOFS);
if (*value == NULL)
return -ENOMEM;
}
@@ -645,7 +732,7 @@ static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
/*
* You also need write access to the containing directory
*/
- smk_ad_setfield_u_fs_path_dentry(&ad, NULL);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
smk_ad_setfield_u_fs_inode(&ad, dir);
rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
}
@@ -676,7 +763,7 @@ static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
/*
* You also need write access to the containing directory
*/
- smk_ad_setfield_u_fs_path_dentry(&ad, NULL);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_INODE);
smk_ad_setfield_u_fs_inode(&ad, dir);
rc = smk_curacc(smk_of_inode(dir), MAY_WRITE, &ad);
}
@@ -805,31 +892,43 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
struct smk_audit_info ad;
+ struct smack_known *skp;
+ int check_priv = 0;
+ int check_import = 0;
+ int check_star = 0;
int rc = 0;
+ /*
+ * Check label validity here so import won't fail in post_setxattr
+ */
if (strcmp(name, XATTR_NAME_SMACK) == 0 ||
strcmp(name, XATTR_NAME_SMACKIPIN) == 0 ||
- strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
- strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
- strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
- if (!capable(CAP_MAC_ADMIN))
- rc = -EPERM;
- /*
- * check label validity here so import wont fail on
- * post_setxattr
- */
- if (size == 0 || size >= SMK_LABELLEN ||
- smk_import(value, size) == NULL)
- rc = -EINVAL;
+ strcmp(name, XATTR_NAME_SMACKIPOUT) == 0) {
+ check_priv = 1;
+ check_import = 1;
+ } else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ check_priv = 1;
+ check_import = 1;
+ check_star = 1;
} else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
- if (!capable(CAP_MAC_ADMIN))
- rc = -EPERM;
+ check_priv = 1;
if (size != TRANS_TRUE_SIZE ||
strncmp(value, TRANS_TRUE, TRANS_TRUE_SIZE) != 0)
rc = -EINVAL;
} else
rc = cap_inode_setxattr(dentry, name, value, size, flags);
+ if (check_priv && !smack_privileged(CAP_MAC_ADMIN))
+ rc = -EPERM;
+
+ if (rc == 0 && check_import) {
+ skp = smk_import_entry(value, size);
+ if (skp == NULL || (check_star &&
+ (skp == &smack_known_star || skp == &smack_known_web)))
+ rc = -EINVAL;
+ }
+
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
@@ -853,29 +952,33 @@ static int smack_inode_setxattr(struct dentry *dentry, const char *name,
static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags)
{
- char *nsp;
+ struct smack_known *skp;
struct inode_smack *isp = dentry->d_inode->i_security;
+ if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0) {
+ isp->smk_flags |= SMK_INODE_TRANSMUTE;
+ return;
+ }
+
if (strcmp(name, XATTR_NAME_SMACK) == 0) {
- nsp = smk_import(value, size);
- if (nsp != NULL)
- isp->smk_inode = nsp;
+ skp = smk_import_entry(value, size);
+ if (skp != NULL)
+ isp->smk_inode = skp->smk_known;
else
isp->smk_inode = smack_known_invalid.smk_known;
} else if (strcmp(name, XATTR_NAME_SMACKEXEC) == 0) {
- nsp = smk_import(value, size);
- if (nsp != NULL)
- isp->smk_task = nsp;
+ skp = smk_import_entry(value, size);
+ if (skp != NULL)
+ isp->smk_task = skp;
else
- isp->smk_task = smack_known_invalid.smk_known;
+ isp->smk_task = &smack_known_invalid;
} else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
- nsp = smk_import(value, size);
- if (nsp != NULL)
- isp->smk_mmap = nsp;
+ skp = smk_import_entry(value, size);
+ if (skp != NULL)
+ isp->smk_mmap = skp;
else
- isp->smk_mmap = smack_known_invalid.smk_known;
- } else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
- isp->smk_flags |= SMK_INODE_TRANSMUTE;
+ isp->smk_mmap = &smack_known_invalid;
+ }
return;
}
@@ -917,24 +1020,37 @@ static int smack_inode_removexattr(struct dentry *dentry, const char *name)
strcmp(name, XATTR_NAME_SMACKIPOUT) == 0 ||
strcmp(name, XATTR_NAME_SMACKEXEC) == 0 ||
strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0 ||
- strcmp(name, XATTR_NAME_SMACKMMAP)) {
- if (!capable(CAP_MAC_ADMIN))
+ strcmp(name, XATTR_NAME_SMACKMMAP) == 0) {
+ if (!smack_privileged(CAP_MAC_ADMIN))
rc = -EPERM;
} else
rc = cap_inode_removexattr(dentry, name);
+ if (rc != 0)
+ return rc;
+
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_DENTRY);
smk_ad_setfield_u_fs_path_dentry(&ad, dentry);
- if (rc == 0)
- rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
- if (rc == 0) {
- isp = dentry->d_inode->i_security;
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE, &ad);
+ if (rc != 0)
+ return rc;
+
+ isp = dentry->d_inode->i_security;
+ /*
+ * Don't do anything special for these.
+ * XATTR_NAME_SMACKIPIN
+ * XATTR_NAME_SMACKIPOUT
+ * XATTR_NAME_SMACKEXEC
+ */
+ if (strcmp(name, XATTR_NAME_SMACK) == 0)
isp->smk_task = NULL;
+ else if (strcmp(name, XATTR_NAME_SMACKMMAP) == 0)
isp->smk_mmap = NULL;
- }
+ else if (strcmp(name, XATTR_NAME_SMACKTRANSMUTE) == 0)
+ isp->smk_flags &= ~SMK_INODE_TRANSMUTE;
- return rc;
+ return 0;
}
/**
@@ -979,9 +1095,9 @@ static int smack_inode_getsecurity(const struct inode *inode,
ssp = sock->sk->sk_security;
if (strcmp(name, XATTR_SMACK_IPIN) == 0)
- isp = ssp->smk_in;
+ isp = ssp->smk_in->smk_known;
else if (strcmp(name, XATTR_SMACK_IPOUT) == 0)
- isp = ssp->smk_out;
+ isp = ssp->smk_out->smk_known;
else
return -EOPNOTSUPP;
@@ -1061,7 +1177,9 @@ static int smack_file_permission(struct file *file, int mask)
*/
static int smack_file_alloc_security(struct file *file)
{
- file->f_security = smk_of_current();
+ struct smack_known *skp = smk_of_current();
+
+ file->f_security = skp->smk_known;
return 0;
}
@@ -1110,7 +1228,7 @@ static int smack_file_ioctl(struct file *file, unsigned int cmd,
* @file: the object
* @cmd: unused
*
- * Returns 0 if current has write access, error code otherwise
+ * Returns 0 if current has lock access, error code otherwise
*/
static int smack_file_lock(struct file *file, unsigned int cmd)
{
@@ -1118,7 +1236,7 @@ static int smack_file_lock(struct file *file, unsigned int cmd)
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
- return smk_curacc(file->f_security, MAY_WRITE, &ad);
+ return smk_curacc(file->f_security, MAY_LOCK, &ad);
}
/**
@@ -1142,8 +1260,13 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
switch (cmd) {
case F_GETLK:
+ break;
case F_SETLK:
case F_SETLKW:
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_curacc(file->f_security, MAY_LOCK, &ad);
+ break;
case F_SETOWN:
case F_SETSIG:
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
@@ -1158,7 +1281,7 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
}
/**
- * smack_file_mmap :
+ * smack_mmap_file :
* Check permissions for a mmap operation. The @file may be NULL, e.g.
* if mapping anonymous memory.
* @file contains the file structure for file to map (may be NULL).
@@ -1167,45 +1290,31 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
* @flags contains the operational flags.
* Return 0 if permission is granted.
*/
-static int smack_file_mmap(struct file *file,
+static int smack_mmap_file(struct file *file,
unsigned long reqprot, unsigned long prot,
- unsigned long flags, unsigned long addr,
- unsigned long addr_only)
+ unsigned long flags)
{
struct smack_known *skp;
+ struct smack_known *mkp;
struct smack_rule *srp;
struct task_smack *tsp;
- char *sp;
- char *msmack;
char *osmack;
struct inode_smack *isp;
- struct dentry *dp;
int may;
int mmay;
int tmay;
int rc;
- /* do DAC check on address space usage */
- rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
- if (rc || addr_only)
- return rc;
-
- if (file == NULL || file->f_dentry == NULL)
+ if (file == NULL)
return 0;
- dp = file->f_dentry;
-
- if (dp->d_inode == NULL)
- return 0;
-
- isp = dp->d_inode->i_security;
+ isp = file_inode(file)->i_security;
if (isp->smk_mmap == NULL)
return 0;
- msmack = isp->smk_mmap;
+ mkp = isp->smk_mmap;
tsp = current_security();
- sp = smk_of_current();
- skp = smk_find_entry(sp);
+ skp = smk_of_current();
rc = 0;
rcu_read_lock();
@@ -1219,13 +1328,13 @@ static int smack_file_mmap(struct file *file,
/*
* Matching labels always allows access.
*/
- if (msmack == osmack)
+ if (mkp->smk_known == osmack)
continue;
/*
* If there is a matching local rule take
* that into account as well.
*/
- may = smk_access_entry(srp->smk_subject, osmack,
+ may = smk_access_entry(srp->smk_subject->smk_known, osmack,
&tsp->smk_rules);
if (may == -ENOENT)
may = srp->smk_access;
@@ -1243,8 +1352,8 @@ static int smack_file_mmap(struct file *file,
* If there isn't one a SMACK64MMAP subject
* can't have as much access as current.
*/
- skp = smk_find_entry(msmack);
- mmay = smk_access_entry(msmack, osmack, &skp->smk_rules);
+ mmay = smk_access_entry(mkp->smk_known, osmack,
+ &mkp->smk_rules);
if (mmay == -ENOENT) {
rc = -EACCES;
break;
@@ -1253,7 +1362,8 @@ static int smack_file_mmap(struct file *file,
* If there is a local entry it modifies the
* potential access, too.
*/
- tmay = smk_access_entry(msmack, osmack, &tsp->smk_rules);
+ tmay = smk_access_entry(mkp->smk_known, osmack,
+ &tsp->smk_rules);
if (tmay != -ENOENT)
mmay &= tmay;
@@ -1282,7 +1392,9 @@ static int smack_file_mmap(struct file *file,
*/
static int smack_file_set_fowner(struct file *file)
{
- file->f_security = smk_of_current();
+ struct smack_known *skp = smk_of_current();
+
+ file->f_security = skp->smk_known;
return 0;
}
@@ -1300,9 +1412,10 @@ static int smack_file_set_fowner(struct file *file)
static int smack_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int signum)
{
+ struct smack_known *skp;
+ struct smack_known *tkp = smk_of_task(tsk->cred->security);
struct file *file;
int rc;
- char *tsp = smk_of_task(tsk->cred->security);
struct smk_audit_info ad;
/*
@@ -1311,13 +1424,14 @@ static int smack_file_send_sigiotask(struct task_struct *tsk,
file = container_of(fown, struct file, f_owner);
 /* we don't log here as rc can be overridden */
- rc = smk_access(file->f_security, tsp, MAY_WRITE, NULL);
+ skp = smk_find_entry(file->f_security);
+ rc = smk_access(skp, tkp->smk_known, MAY_WRITE, NULL);
if (rc != 0 && has_capability(tsk, CAP_MAC_OVERRIDE))
rc = 0;
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, tsk);
- smack_log(file->f_security, tsp, MAY_WRITE, rc, &ad);
+ smack_log(file->f_security, tkp->smk_known, MAY_WRITE, rc, &ad);
return rc;
}
@@ -1332,7 +1446,7 @@ static int smack_file_receive(struct file *file)
int may = 0;
struct smk_audit_info ad;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
smk_ad_setfield_u_fs_path(&ad, file->f_path);
/*
* This code relies on bitmasks.
@@ -1346,21 +1460,34 @@ static int smack_file_receive(struct file *file)
}
/**
- * smack_dentry_open - Smack dentry open processing
+ * smack_file_open - Smack dentry open processing
* @file: the object
- * @cred: unused
+ * @cred: task credential
*
* Set the security blob in the file structure.
+ * Allow the open only if the task has read access. There are
+ * many read operations (e.g. fstat) that you can do with an
+ * fd even if you have the file open write-only.
*
* Returns 0
*/
-static int smack_dentry_open(struct file *file, const struct cred *cred)
+static int smack_file_open(struct file *file, const struct cred *cred)
{
- struct inode_smack *isp = file->f_path.dentry->d_inode->i_security;
+ struct task_smack *tsp = cred->security;
+ struct inode_smack *isp = file_inode(file)->i_security;
+ struct smk_audit_info ad;
+ int rc;
- file->f_security = isp->smk_inode;
+ if (smack_privileged(CAP_MAC_OVERRIDE))
+ return 0;
- return 0;
+ smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_PATH);
+ smk_ad_setfield_u_fs_path(&ad, file->f_path);
+ rc = smk_access(tsp->smk_task, isp->smk_inode, MAY_READ, &ad);
+ if (rc == 0)
+ file->f_security = isp->smk_inode;
+
+ return rc;
}
/*
@@ -1472,12 +1599,12 @@ static void smack_cred_transfer(struct cred *new, const struct cred *old)
static int smack_kernel_act_as(struct cred *new, u32 secid)
{
struct task_smack *new_tsp = new->security;
- char *smack = smack_from_secid(secid);
+ struct smack_known *skp = smack_from_secid(secid);
- if (smack == NULL)
+ if (skp == NULL)
return -EINVAL;
- new_tsp->smk_task = smack;
+ new_tsp->smk_task = skp;
return 0;
}
@@ -1495,8 +1622,8 @@ static int smack_kernel_create_files_as(struct cred *new,
struct inode_smack *isp = inode->i_security;
struct task_smack *tsp = new->security;
- tsp->smk_forked = isp->smk_inode;
- tsp->smk_task = isp->smk_inode;
+ tsp->smk_forked = smk_find_entry(isp->smk_inode);
+ tsp->smk_task = tsp->smk_forked;
return 0;
}
@@ -1512,10 +1639,11 @@ static int smk_curacc_on_task(struct task_struct *p, int access,
const char *caller)
{
struct smk_audit_info ad;
+ struct smack_known *skp = smk_of_task(task_security(p));
smk_ad_init(&ad, caller, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
- return smk_curacc(smk_of_task(task_security(p)), access, &ad);
+ return smk_curacc(skp->smk_known, access, &ad);
}
/**
@@ -1561,7 +1689,9 @@ static int smack_task_getsid(struct task_struct *p)
*/
static void smack_task_getsecid(struct task_struct *p, u32 *secid)
{
- *secid = smack_to_secid(smk_of_task(task_security(p)));
+ struct smack_known *skp = smk_of_task(task_security(p));
+
+ *secid = skp->smk_secid;
}
/**
@@ -1665,6 +1795,8 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
int sig, u32 secid)
{
struct smk_audit_info ad;
+ struct smack_known *skp;
+ struct smack_known *tkp = smk_of_task(task_security(p));
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
smk_ad_setfield_u_tsk(&ad, p);
@@ -1673,54 +1805,33 @@ static int smack_task_kill(struct task_struct *p, struct siginfo *info,
* can write the receiver.
*/
if (secid == 0)
- return smk_curacc(smk_of_task(task_security(p)), MAY_WRITE,
- &ad);
+ return smk_curacc(tkp->smk_known, MAY_WRITE, &ad);
/*
* If the secid isn't 0 we're dealing with some USB IO
* specific behavior. This is not clean. For one thing
* we can't take privilege into account.
*/
- return smk_access(smack_from_secid(secid),
- smk_of_task(task_security(p)), MAY_WRITE, &ad);
+ skp = smack_from_secid(secid);
+ return smk_access(skp, tkp->smk_known, MAY_WRITE, &ad);
}
/**
* smack_task_wait - Smack access check for waiting
* @p: task to wait for
*
- * Returns 0 if current can wait for p, error code otherwise
+ * Returns 0
*/
static int smack_task_wait(struct task_struct *p)
{
- struct smk_audit_info ad;
- char *sp = smk_of_current();
- char *tsp = smk_of_forked(task_security(p));
- int rc;
-
- /* we don't log here, we can be overriden */
- rc = smk_access(tsp, sp, MAY_WRITE, NULL);
- if (rc == 0)
- goto out_log;
-
/*
- * Allow the operation to succeed if either task
- * has privilege to perform operations that might
- * account for the smack labels having gotten to
- * be different in the first place.
- *
- * This breaks the strict subject/object access
- * control ideal, taking the object's privilege
- * state into account in the decision as well as
- * the smack value.
+ * Allow the operation to succeed.
+ * Zombies are bad.
+ * In userless environments (e.g. phones) programs
+ * get marked with SMACK64EXEC, and even if the parent
+ * and child shouldn't be talking, the parent still
+ * may expect to know when the child exits.
*/
- if (capable(CAP_MAC_OVERRIDE) || has_capability(p, CAP_MAC_OVERRIDE))
- rc = 0;
- /* we log only if we didn't get overriden */
- out_log:
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_TASK);
- smk_ad_setfield_u_tsk(&ad, p);
- smack_log(tsp, sp, MAY_WRITE, rc, &ad);
- return rc;
+ return 0;
}
/**
@@ -1733,7 +1844,9 @@ static int smack_task_wait(struct task_struct *p)
static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
{
struct inode_smack *isp = inode->i_security;
- isp->smk_inode = smk_of_task(task_security(p));
+ struct smack_known *skp = smk_of_task(task_security(p));
+
+ isp->smk_inode = skp->smk_known;
}
/*
@@ -1752,15 +1865,15 @@ static void smack_task_to_inode(struct task_struct *p, struct inode *inode)
*/
static int smack_sk_alloc_security(struct sock *sk, int family, gfp_t gfp_flags)
{
- char *csp = smk_of_current();
+ struct smack_known *skp = smk_of_current();
struct socket_smack *ssp;
ssp = kzalloc(sizeof(struct socket_smack), gfp_flags);
if (ssp == NULL)
return -ENOMEM;
- ssp->smk_in = csp;
- ssp->smk_out = csp;
+ ssp->smk_in = skp;
+ ssp->smk_out = skp;
ssp->smk_packet = NULL;
sk->sk_security = ssp;
@@ -1817,65 +1930,6 @@ static char *smack_host_label(struct sockaddr_in *sip)
}
/**
- * smack_set_catset - convert a capset to netlabel mls categories
- * @catset: the Smack categories
- * @sap: where to put the netlabel categories
- *
- * Allocates and fills attr.mls.cat
- */
-static void smack_set_catset(char *catset, struct netlbl_lsm_secattr *sap)
-{
- unsigned char *cp;
- unsigned char m;
- int cat;
- int rc;
- int byte;
-
- if (!catset)
- return;
-
- sap->flags |= NETLBL_SECATTR_MLS_CAT;
- sap->attr.mls.cat = netlbl_secattr_catmap_alloc(GFP_ATOMIC);
- sap->attr.mls.cat->startbit = 0;
-
- for (cat = 1, cp = catset, byte = 0; byte < SMK_LABELLEN; cp++, byte++)
- for (m = 0x80; m != 0; m >>= 1, cat++) {
- if ((m & *cp) == 0)
- continue;
- rc = netlbl_secattr_catmap_setbit(sap->attr.mls.cat,
- cat, GFP_ATOMIC);
- }
-}
-
-/**
- * smack_to_secattr - fill a secattr from a smack value
- * @smack: the smack value
- * @nlsp: where the result goes
- *
- * Casey says that CIPSO is good enough for now.
- * It can be used to effect.
- * It can also be abused to effect when necessary.
- * Apologies to the TSIG group in general and GW in particular.
- */
-static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp)
-{
- struct smack_cipso cipso;
- int rc;
-
- nlsp->domain = smack;
- nlsp->flags = NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
-
- rc = smack_to_cipso(smack, &cipso);
- if (rc == 0) {
- nlsp->attr.mls.lvl = cipso.smk_level;
- smack_set_catset(cipso.smk_catset, nlsp);
- } else {
- nlsp->attr.mls.lvl = smack_cipso_direct;
- smack_set_catset(smack, nlsp);
- }
-}
-
-/**
* smack_netlabel - Set the secattr on a socket
* @sk: the socket
* @labeled: socket label scheme
@@ -1887,8 +1941,8 @@ static void smack_to_secattr(char *smack, struct netlbl_lsm_secattr *nlsp)
*/
static int smack_netlabel(struct sock *sk, int labeled)
{
+ struct smack_known *skp;
struct socket_smack *ssp = sk->sk_security;
- struct netlbl_lsm_secattr secattr;
int rc = 0;
/*
@@ -1906,10 +1960,8 @@ static int smack_netlabel(struct sock *sk, int labeled)
labeled == SMACK_UNLABELED_SOCKET)
netlbl_sock_delattr(sk);
else {
- netlbl_secattr_init(&secattr);
- smack_to_secattr(ssp->smk_out, &secattr);
- rc = netlbl_sock_setattr(sk, sk->sk_family, &secattr);
- netlbl_secattr_destroy(&secattr);
+ skp = ssp->smk_out;
+ rc = netlbl_sock_setattr(sk, sk->sk_family, &skp->smk_netlabel);
}
bh_unlock_sock(sk);
@@ -1931,6 +1983,7 @@ static int smack_netlabel(struct sock *sk, int labeled)
*/
static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
{
+ struct smack_known *skp;
int rc;
int sk_lbl;
char *hostsp;
@@ -1940,14 +1993,17 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
rcu_read_lock();
hostsp = smack_host_label(sap);
if (hostsp != NULL) {
- sk_lbl = SMACK_UNLABELED_SOCKET;
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = sap->sin_family;
- ad.a.u.net.dport = sap->sin_port;
- ad.a.u.net.v4info.daddr = sap->sin_addr.s_addr;
+ struct lsm_network_audit net;
+
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sap->sin_family;
+ ad.a.u.net->dport = sap->sin_port;
+ ad.a.u.net->v4info.daddr = sap->sin_addr.s_addr;
#endif
- rc = smk_access(ssp->smk_out, hostsp, MAY_WRITE, &ad);
+ sk_lbl = SMACK_UNLABELED_SOCKET;
+ skp = ssp->smk_out;
+ rc = smk_access(skp, hostsp, MAY_WRITE, &ad);
} else {
sk_lbl = SMACK_CIPSO_SOCKET;
rc = 0;
@@ -1960,6 +2016,153 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
}
/**
+ * smk_ipv6_port_label - Smack port access table management
+ * @sock: socket
+ * @address: address
+ *
+ * Create or update the port list entry
+ */
+static void smk_ipv6_port_label(struct socket *sock, struct sockaddr *address)
+{
+ struct sock *sk = sock->sk;
+ struct sockaddr_in6 *addr6;
+ struct socket_smack *ssp = sock->sk->sk_security;
+ struct smk_port_label *spp;
+ unsigned short port = 0;
+
+ if (address == NULL) {
+ /*
+ * This operation is changing the Smack information
+ * on the bound socket. Take the changes to the port
+ * as well.
+ */
+ list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+ if (sk != spp->smk_sock)
+ continue;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+ return;
+ }
+ /*
+ * A NULL address is only used for updating existing
+ * bound entries. If there isn't one, it's OK.
+ */
+ return;
+ }
+
+ addr6 = (struct sockaddr_in6 *)address;
+ port = ntohs(addr6->sin6_port);
+ /*
+ * This is a special case that is safely ignored.
+ */
+ if (port == 0)
+ return;
+
+ /*
+ * Look for an existing port list entry.
+ * This is an indication that a port is getting reused.
+ */
+ list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+ if (spp->smk_port != port)
+ continue;
+ spp->smk_port = port;
+ spp->smk_sock = sk;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+ return;
+ }
+
+ /*
+ * A new port entry is required.
+ */
+ spp = kzalloc(sizeof(*spp), GFP_KERNEL);
+ if (spp == NULL)
+ return;
+
+ spp->smk_port = port;
+ spp->smk_sock = sk;
+ spp->smk_in = ssp->smk_in;
+ spp->smk_out = ssp->smk_out;
+
+ list_add(&spp->list, &smk_ipv6_port_list);
+ return;
+}
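
For reference, the port list entry manipulated above is presumably defined in smack.h (it is not part of this hunk); the sketch below is inferred from the field accesses in this function and is illustrative only.

struct smk_port_label {
	struct list_head	list;
	struct sock		*smk_sock;	/* socket the port was bound on */
	unsigned short		smk_port;	/* the port number */
	struct smack_known	*smk_in;	/* inbound label */
	struct smack_known	*smk_out;	/* outbound label */
};
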
+
+/**
+ * smk_ipv6_port_check - check Smack port access
+ * @sk: socket
+ * @address: address
+ * @act: the action being taken: SMK_RECEIVING, SMK_SENDING or SMK_CONNECTING
+ *
+ * Returns 0 if the access is permitted, an error code otherwise
+ */
+static int smk_ipv6_port_check(struct sock *sk, struct sockaddr_in6 *address,
+ int act)
+{
+ __be16 *bep;
+ __be32 *be32p;
+ struct smk_port_label *spp;
+ struct socket_smack *ssp = sk->sk_security;
+ struct smack_known *skp;
+ unsigned short port = 0;
+ char *object;
+ struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
+
+ if (act == SMK_RECEIVING) {
+ skp = smack_net_ambient;
+ object = ssp->smk_in->smk_known;
+ } else {
+ skp = ssp->smk_out;
+ object = smack_net_ambient->smk_known;
+ }
+
+ /*
+ * Get the IP address and port from the address.
+ */
+ port = ntohs(address->sin6_port);
+ bep = (__be16 *)(&address->sin6_addr);
+ be32p = (__be32 *)(&address->sin6_addr);
+
+ /*
+ * It's remote, so port lookup does no good.
+ */
+ if (be32p[0] || be32p[1] || be32p[2] || bep[6] || ntohs(bep[7]) != 1)
+ goto auditout;
+
+ /*
+ * It's local so the send check has to have passed.
+ */
+ if (act == SMK_RECEIVING) {
+ skp = &smack_known_web;
+ goto auditout;
+ }
+
+ list_for_each_entry(spp, &smk_ipv6_port_list, list) {
+ if (spp->smk_port != port)
+ continue;
+ object = spp->smk_in->smk_known;
+ if (act == SMK_CONNECTING)
+ ssp->smk_packet = spp->smk_out;
+ break;
+ }
+
+auditout:
+
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sk->sk_family;
+ ad.a.u.net->dport = port;
+ if (act == SMK_RECEIVING)
+ ad.a.u.net->v6info.saddr = address->sin6_addr;
+ else
+ ad.a.u.net->v6info.daddr = address->sin6_addr;
+#endif
+ return smk_access(skp, object, MAY_WRITE, &ad);
+}
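
The address test above is an open-coded check for the IPv6 loopback address: anything other than ::1 is treated as remote and skips the port table. A minimal userspace sketch of the same decomposition (illustrative only, not kernel code):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>
#include <netinet/in.h>

/* Same shape as the kernel test: twelve leading zero bytes, zero bytes 12-13,
 * and the final 16 bits equal to 1 in host order, i.e. the address is ::1. */
static int smack_style_is_loopback(const struct in6_addr *a)
{
	const uint16_t *bep = (const uint16_t *)a;
	const uint32_t *be32p = (const uint32_t *)a;

	return !(be32p[0] || be32p[1] || be32p[2] || bep[6] || ntohs(bep[7]) != 1);
}

int main(void)
{
	struct in6_addr a;

	inet_pton(AF_INET6, "::1", &a);
	printf("::1          -> %d\n", smack_style_is_loopback(&a));	/* 1 */
	inet_pton(AF_INET6, "2001:db8::1", &a);
	printf("2001:db8::1  -> %d\n", smack_style_is_loopback(&a));	/* 0 */
	return 0;
}
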
+
+/**
* smack_inode_setsecurity - set smack xattrs
* @inode: the object
* @name: attribute name
@@ -1974,21 +2177,21 @@ static int smack_netlabel_send(struct sock *sk, struct sockaddr_in *sap)
static int smack_inode_setsecurity(struct inode *inode, const char *name,
const void *value, size_t size, int flags)
{
- char *sp;
+ struct smack_known *skp;
struct inode_smack *nsp = inode->i_security;
struct socket_smack *ssp;
struct socket *sock;
int rc = 0;
- if (value == NULL || size > SMK_LABELLEN || size == 0)
- return -EACCES;
+ if (value == NULL || size > SMK_LONGLABEL || size == 0)
+ return -EINVAL;
- sp = smk_import(value, size);
- if (sp == NULL)
+ skp = smk_import_entry(value, size);
+ if (skp == NULL)
return -EINVAL;
if (strcmp(name, XATTR_SMACK_SUFFIX) == 0) {
- nsp->smk_inode = sp;
+ nsp->smk_inode = skp->smk_known;
nsp->smk_flags |= SMK_INODE_INSTANT;
return 0;
}
@@ -2005,10 +2208,10 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
ssp = sock->sk->sk_security;
if (strcmp(name, XATTR_SMACK_IPIN) == 0)
- ssp->smk_in = sp;
+ ssp->smk_in = skp;
else if (strcmp(name, XATTR_SMACK_IPOUT) == 0) {
- ssp->smk_out = sp;
- if (sock->sk->sk_family != PF_UNIX) {
+ ssp->smk_out = skp;
+ if (sock->sk->sk_family == PF_INET) {
rc = smack_netlabel(sock->sk, SMACK_CIPSO_SOCKET);
if (rc != 0)
printk(KERN_WARNING
@@ -2018,6 +2221,9 @@ static int smack_inode_setsecurity(struct inode *inode, const char *name,
} else
return -EOPNOTSUPP;
+ if (sock->sk->sk_family == PF_INET6)
+ smk_ipv6_port_label(sock, NULL);
+
return 0;
}
@@ -2045,6 +2251,25 @@ static int smack_socket_post_create(struct socket *sock, int family,
}
/**
+ * smack_socket_bind - record port binding information.
+ * @sock: the socket
+ * @address: the port address
+ * @addrlen: size of the address
+ *
+ * Records the label bound to a port.
+ *
+ * Returns 0
+ */
+static int smack_socket_bind(struct socket *sock, struct sockaddr *address,
+ int addrlen)
+{
+ if (sock->sk != NULL && sock->sk->sk_family == PF_INET6)
+ smk_ipv6_port_label(sock, address);
+
+ return 0;
+}
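
This hook runs on an ordinary bind(); the (illustrative) userspace fragment below is enough to get the socket's current labels recorded for its port via smk_ipv6_port_label(). The port number is arbitrary.

#include <netinet/in.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	struct sockaddr_in6 sin6;
	int fd = socket(AF_INET6, SOCK_STREAM, 0);

	if (fd < 0)
		return 1;

	memset(&sin6, 0, sizeof(sin6));
	sin6.sin6_family = AF_INET6;
	sin6.sin6_addr = in6addr_loopback;	/* ::1 */
	sin6.sin6_port = htons(4242);		/* example port, not significant */

	/* smack_socket_bind() -> smk_ipv6_port_label() runs here */
	if (bind(fd, (struct sockaddr *)&sin6, sizeof(sin6)) < 0) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}
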
+
+/**
* smack_socket_connect - connect access check
* @sock: the socket
* @sap: the other end
@@ -2057,12 +2282,25 @@ static int smack_socket_post_create(struct socket *sock, int family,
static int smack_socket_connect(struct socket *sock, struct sockaddr *sap,
int addrlen)
{
- if (sock->sk == NULL || sock->sk->sk_family != PF_INET)
+ int rc = 0;
+
+ if (sock->sk == NULL)
return 0;
- if (addrlen < sizeof(struct sockaddr_in))
- return -EINVAL;
- return smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
+ switch (sock->sk->sk_family) {
+ case PF_INET:
+ if (addrlen < sizeof(struct sockaddr_in))
+ return -EINVAL;
+ rc = smack_netlabel_send(sock->sk, (struct sockaddr_in *)sap);
+ break;
+ case PF_INET6:
+ if (addrlen < sizeof(struct sockaddr_in6))
+ return -EINVAL;
+ rc = smk_ipv6_port_check(sock->sk, (struct sockaddr_in6 *)sap,
+ SMK_CONNECTING);
+ break;
+ }
+ return rc;
}
/**
@@ -2093,7 +2331,9 @@ static int smack_flags_to_may(int flags)
*/
static int smack_msg_msg_alloc_security(struct msg_msg *msg)
{
- msg->security = smk_of_current();
+ struct smack_known *skp = smk_of_current();
+
+ msg->security = skp->smk_known;
return 0;
}
@@ -2128,8 +2368,9 @@ static char *smack_of_shm(struct shmid_kernel *shp)
static int smack_shm_alloc_security(struct shmid_kernel *shp)
{
struct kern_ipc_perm *isp = &shp->shm_perm;
+ struct smack_known *skp = smk_of_current();
- isp->security = smk_of_current();
+ isp->security = skp->smk_known;
return 0;
}
@@ -2251,8 +2492,9 @@ static char *smack_of_sem(struct sem_array *sma)
static int smack_sem_alloc_security(struct sem_array *sma)
{
struct kern_ipc_perm *isp = &sma->sem_perm;
+ struct smack_known *skp = smk_of_current();
- isp->security = smk_of_current();
+ isp->security = skp->smk_known;
return 0;
}
@@ -2369,8 +2611,9 @@ static int smack_sem_semop(struct sem_array *sma, struct sembuf *sops,
static int smack_msg_queue_alloc_security(struct msg_queue *msq)
{
struct kern_ipc_perm *kisp = &msq->q_perm;
+ struct smack_known *skp = smk_of_current();
- kisp->security = smk_of_current();
+ kisp->security = skp->smk_known;
return 0;
}
@@ -2542,11 +2785,12 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
struct super_block *sbp;
struct superblock_smack *sbsp;
struct inode_smack *isp;
- char *csp = smk_of_current();
- char *fetched;
+ struct smack_known *skp;
+ struct smack_known *ckp = smk_of_current();
char *final;
char trattr[TRANS_TRUE_SIZE];
int transflag = 0;
+ int rc;
struct dentry *dp;
if (inode == NULL)
@@ -2577,6 +2821,15 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* of the superblock.
*/
if (opt_dentry->d_parent == opt_dentry) {
+ if (sbp->s_magic == CGROUP_SUPER_MAGIC) {
+ /*
+ * The cgroup filesystem is never mounted,
+ * so there's no opportunity to set the mount
+ * options.
+ */
+ sbsp->smk_root = smack_known_star.smk_known;
+ sbsp->smk_default = smack_known_star.smk_known;
+ }
isp->smk_inode = sbsp->smk_root;
isp->smk_flags |= SMK_INODE_INSTANT;
goto unlockandout;
@@ -2590,16 +2843,20 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
*/
switch (sbp->s_magic) {
case SMACK_MAGIC:
+ case PIPEFS_MAGIC:
+ case SOCKFS_MAGIC:
+ case CGROUP_SUPER_MAGIC:
/*
* Casey says that it's a little embarrassing
* that the smack file system doesn't do
* extended attributes.
- */
- final = smack_known_star.smk_known;
- break;
- case PIPEFS_MAGIC:
- /*
+ *
* Casey says pipes are easy (?)
+ *
+ * Socket access is controlled by the socket
+ * structures associated with the task involved.
+ *
+ * Cgroupfs is special
*/
final = smack_known_star.smk_known;
break;
@@ -2609,14 +2866,7 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* Programs that change smack have to treat the
* pty with respect.
*/
- final = csp;
- break;
- case SOCKFS_MAGIC:
- /*
- * Socket access is controlled by the socket
- * structures associated with the task involved.
- */
- final = smack_known_star.smk_known;
+ final = ckp->smk_known;
break;
case PROC_SUPER_MAGIC:
/*
@@ -2664,28 +2914,58 @@ static void smack_d_instantiate(struct dentry *opt_dentry, struct inode *inode)
* Get the dentry for xattr.
*/
dp = dget(opt_dentry);
- fetched = smk_fetch(XATTR_NAME_SMACK, inode, dp);
- if (fetched != NULL) {
- final = fetched;
- if (S_ISDIR(inode->i_mode)) {
- trattr[0] = '\0';
- inode->i_op->getxattr(dp,
+ skp = smk_fetch(XATTR_NAME_SMACK, inode, dp);
+ if (skp != NULL)
+ final = skp->smk_known;
+
+ /*
+ * Transmuting directory
+ */
+ if (S_ISDIR(inode->i_mode)) {
+ /*
+ * If this is a new directory and the label was
+ * transmuted when the inode was initialized
+ * set the transmute attribute on the directory
+ * and mark the inode.
+ *
+ * If there is a transmute attribute on the
+ * directory mark the inode.
+ */
+ if (isp->smk_flags & SMK_INODE_CHANGED) {
+ isp->smk_flags &= ~SMK_INODE_CHANGED;
+ rc = inode->i_op->setxattr(dp,
XATTR_NAME_SMACKTRANSMUTE,
- trattr, TRANS_TRUE_SIZE);
- if (strncmp(trattr, TRANS_TRUE,
- TRANS_TRUE_SIZE) == 0)
- transflag = SMK_INODE_TRANSMUTE;
+ TRANS_TRUE, TRANS_TRUE_SIZE,
+ 0);
+ } else {
+ rc = inode->i_op->getxattr(dp,
+ XATTR_NAME_SMACKTRANSMUTE, trattr,
+ TRANS_TRUE_SIZE);
+ if (rc >= 0 && strncmp(trattr, TRANS_TRUE,
+ TRANS_TRUE_SIZE) != 0)
+ rc = -EINVAL;
}
+ if (rc >= 0)
+ transflag = SMK_INODE_TRANSMUTE;
}
- isp->smk_task = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp);
- isp->smk_mmap = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp);
+ /*
+ * Don't let the exec or mmap label be "*" or "@".
+ */
+ skp = smk_fetch(XATTR_NAME_SMACKEXEC, inode, dp);
+ if (skp == &smack_known_star || skp == &smack_known_web)
+ skp = NULL;
+ isp->smk_task = skp;
+ skp = smk_fetch(XATTR_NAME_SMACKMMAP, inode, dp);
+ if (skp == &smack_known_star || skp == &smack_known_web)
+ skp = NULL;
+ isp->smk_mmap = skp;
dput(dp);
break;
}
if (final == NULL)
- isp->smk_inode = csp;
+ isp->smk_inode = ckp->smk_known;
else
isp->smk_inode = final;
@@ -2708,13 +2988,14 @@ unlockandout:
*/
static int smack_getprocattr(struct task_struct *p, char *name, char **value)
{
+ struct smack_known *skp = smk_of_task(task_security(p));
char *cp;
int slen;
if (strcmp(name, "current") != 0)
return -EINVAL;
- cp = kstrdup(smk_of_task(task_security(p)), GFP_KERNEL);
+ cp = kstrdup(skp->smk_known, GFP_KERNEL);
if (cp == NULL)
return -ENOMEM;
@@ -2738,11 +3019,9 @@ static int smack_getprocattr(struct task_struct *p, char *name, char **value)
static int smack_setprocattr(struct task_struct *p, char *name,
void *value, size_t size)
{
- int rc;
struct task_smack *tsp;
- struct task_smack *oldtsp;
struct cred *new;
- char *newsmack;
+ struct smack_known *skp;
/*
* Changing another process' Smack value is too dangerous
@@ -2751,40 +3030,32 @@ static int smack_setprocattr(struct task_struct *p, char *name,
if (p != current)
return -EPERM;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
- if (value == NULL || size == 0 || size >= SMK_LABELLEN)
+ if (value == NULL || size == 0 || size >= SMK_LONGLABEL)
return -EINVAL;
if (strcmp(name, "current") != 0)
return -EINVAL;
- newsmack = smk_import(value, size);
- if (newsmack == NULL)
+ skp = smk_import_entry(value, size);
+ if (skp == NULL)
return -EINVAL;
/*
* No process is ever allowed the web ("@") label.
*/
- if (newsmack == smack_known_web.smk_known)
+ if (skp == &smack_known_web)
return -EPERM;
- oldtsp = p->cred->security;
new = prepare_creds();
if (new == NULL)
return -ENOMEM;
- tsp = new_task_smack(newsmack, oldtsp->smk_forked, GFP_KERNEL);
- if (tsp == NULL) {
- kfree(new);
- return -ENOMEM;
- }
- rc = smk_copy_rules(&tsp->smk_rules, &oldtsp->smk_rules, GFP_KERNEL);
- if (rc != 0)
- return rc;
+ tsp = new->security;
+ tsp->smk_task = skp;
- new->security = tsp;
commit_creds(new);
return size;
}
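
The rewritten smack_setprocattr() is reached by writing a label to /proc/self/attr/current. A minimal userspace sketch, assuming the caller is privileged (CAP_MAC_ADMIN, plus the onlycap label if one is configured); the label name here is made up.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_own_smack_label(const char *label)
{
	int fd = open("/proc/self/attr/current", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, label, strlen(label));	/* ends up in smack_setprocattr() */
	close(fd);
	return n == (ssize_t)strlen(label) ? 0 : -1;
}

int main(void)
{
	if (set_own_smack_label("Example") != 0)	/* "Example" is a made-up label */
		perror("relabel");
	return 0;
}
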
@@ -2801,17 +3072,28 @@ static int smack_setprocattr(struct task_struct *p, char *name,
static int smack_unix_stream_connect(struct sock *sock,
struct sock *other, struct sock *newsk)
{
+ struct smack_known *skp;
+ struct smack_known *okp;
struct socket_smack *ssp = sock->sk_security;
struct socket_smack *osp = other->sk_security;
struct socket_smack *nsp = newsk->sk_security;
struct smk_audit_info ad;
int rc = 0;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- smk_ad_setfield_u_net_sk(&ad, other);
-
- if (!capable(CAP_MAC_OVERRIDE))
- rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+ if (!smack_privileged(CAP_MAC_OVERRIDE)) {
+ skp = ssp->smk_out;
+ okp = osp->smk_out;
+#ifdef CONFIG_AUDIT
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ smk_ad_setfield_u_net_sk(&ad, other);
+#endif
+ rc = smk_access(skp, okp->smk_known, MAY_WRITE, &ad);
+ if (rc == 0)
+			rc = smk_access(okp, skp->smk_known, MAY_WRITE, NULL);
+ }
/*
* Cross reference the peer labels for SO_PEERSEC.
@@ -2836,16 +3118,21 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
{
struct socket_smack *ssp = sock->sk->sk_security;
struct socket_smack *osp = other->sk->sk_security;
+ struct smack_known *skp;
struct smk_audit_info ad;
- int rc = 0;
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
smk_ad_setfield_u_net_sk(&ad, other->sk);
+#endif
- if (!capable(CAP_MAC_OVERRIDE))
- rc = smk_access(ssp->smk_out, osp->smk_in, MAY_WRITE, &ad);
+ if (smack_privileged(CAP_MAC_OVERRIDE))
+ return 0;
- return rc;
+ skp = ssp->smk_out;
+ return smk_access(skp, osp->smk_in->smk_known, MAY_WRITE, &ad);
}
/**
@@ -2854,22 +3141,32 @@ static int smack_unix_may_send(struct socket *sock, struct socket *other)
* @msg: the message
* @size: the size of the message
*
- * Return 0 if the current subject can write to the destination
- * host. This is only a question if the destination is a single
- * label host.
+ * Return 0 if the current subject can write to the destination host.
+ * For IPv4 this is only a question if the destination is a single label host.
+ * For IPv6 this is a check against the label of the port.
*/
static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
int size)
{
struct sockaddr_in *sip = (struct sockaddr_in *) msg->msg_name;
+ struct sockaddr_in6 *sap = (struct sockaddr_in6 *) msg->msg_name;
+ int rc = 0;
/*
* Perfectly reasonable for this to be NULL
*/
- if (sip == NULL || sip->sin_family != AF_INET)
+ if (sip == NULL)
return 0;
- return smack_netlabel_send(sock->sk, sip);
+ switch (sip->sin_family) {
+ case AF_INET:
+ rc = smack_netlabel_send(sock->sk, sip);
+ break;
+ case AF_INET6:
+ rc = smk_ipv6_port_check(sock->sk, sap, SMK_SENDING);
+ break;
+ }
+ return rc;
}
/**
@@ -2877,15 +3174,15 @@ static int smack_socket_sendmsg(struct socket *sock, struct msghdr *msg,
* @sap: netlabel secattr
* @ssp: socket security information
*
- * Returns a pointer to a Smack label found on the label list.
+ * Returns a pointer to a Smack label entry found on the label list.
*/
-static char *smack_from_secattr(struct netlbl_lsm_secattr *sap,
- struct socket_smack *ssp)
+static struct smack_known *smack_from_secattr(struct netlbl_lsm_secattr *sap,
+ struct socket_smack *ssp)
{
struct smack_known *skp;
- char smack[SMK_LABELLEN];
- char *sp;
- int pcat;
+ int found = 0;
+ int acat;
+ int kcat;
if ((sap->flags & NETLBL_SECATTR_MLS_LVL) != 0) {
/*
@@ -2893,68 +3190,52 @@ static char *smack_from_secattr(struct netlbl_lsm_secattr *sap,
* If there are flags but no level netlabel isn't
* behaving the way we expect it to.
*
- * Get the categories, if any
+ * Look it up in the label table
* Without guidance regarding the smack value
* for the packet fall back on the network
* ambient value.
*/
- memset(smack, '\0', SMK_LABELLEN);
- if ((sap->flags & NETLBL_SECATTR_MLS_CAT) != 0)
- for (pcat = -1;;) {
- pcat = netlbl_secattr_catmap_walk(
- sap->attr.mls.cat, pcat + 1);
- if (pcat < 0)
- break;
- smack_catset_bit(pcat, smack);
- }
- /*
- * If it is CIPSO using smack direct mapping
- * we are already done. WeeHee.
- */
- if (sap->attr.mls.lvl == smack_cipso_direct) {
+ rcu_read_lock();
+ list_for_each_entry(skp, &smack_known_list, list) {
+ if (sap->attr.mls.lvl != skp->smk_netlabel.attr.mls.lvl)
+ continue;
/*
- * The label sent is usually on the label list.
- *
- * If it is not we may still want to allow the
- * delivery.
- *
- * If the recipient is accepting all packets
- * because it is using the star ("*") label
- * for SMACK64IPIN provide the web ("@") label
- * so that a directed response will succeed.
- * This is not very correct from a MAC point
- * of view, but gets around the problem that
- * locking prevents adding the newly discovered
- * label to the list.
- * The case where the recipient is not using
- * the star label should obviously fail.
- * The easy way to do this is to provide the
- * star label as the subject label.
+ * Compare the catsets. Use the netlbl APIs.
*/
- skp = smk_find_entry(smack);
- if (skp != NULL)
- return skp->smk_known;
- if (ssp != NULL &&
- ssp->smk_in == smack_known_star.smk_known)
- return smack_known_web.smk_known;
- return smack_known_star.smk_known;
+ if ((sap->flags & NETLBL_SECATTR_MLS_CAT) == 0) {
+ if ((skp->smk_netlabel.flags &
+ NETLBL_SECATTR_MLS_CAT) == 0)
+ found = 1;
+ break;
+ }
+ for (acat = -1, kcat = -1; acat == kcat; ) {
+ acat = netlbl_secattr_catmap_walk(
+ sap->attr.mls.cat, acat + 1);
+ kcat = netlbl_secattr_catmap_walk(
+ skp->smk_netlabel.attr.mls.cat,
+ kcat + 1);
+ if (acat < 0 || kcat < 0)
+ break;
+ }
+ if (acat == kcat) {
+ found = 1;
+ break;
+ }
}
- /*
- * Look it up in the supplied table if it is not
- * a direct mapping.
- */
- sp = smack_from_cipso(sap->attr.mls.lvl, smack);
- if (sp != NULL)
- return sp;
- if (ssp != NULL && ssp->smk_in == smack_known_star.smk_known)
- return smack_known_web.smk_known;
- return smack_known_star.smk_known;
+ rcu_read_unlock();
+
+ if (found)
+ return skp;
+
+ if (ssp != NULL && ssp->smk_in == &smack_known_star)
+ return &smack_known_web;
+ return &smack_known_star;
}
if ((sap->flags & NETLBL_SECATTR_SECID) != 0) {
/*
* Looks like a fallback, which gives us a secid.
*/
- sp = smack_from_secid(sap->attr.secid);
+ skp = smack_from_secid(sap->attr.secid);
/*
* This has got to be a bug because it is
* impossible to specify a fallback without
@@ -2962,8 +3243,8 @@ static char *smack_from_secattr(struct netlbl_lsm_secattr *sap,
* it has a secid, and the only way to get a
* secid is from a fallback.
*/
- BUG_ON(sp == NULL);
- return sp;
+ BUG_ON(skp == NULL);
+ return skp;
}
/*
* Without guidance regarding the smack value
@@ -2973,6 +3254,53 @@ static char *smack_from_secattr(struct netlbl_lsm_secattr *sap,
return smack_net_ambient;
}
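
The category comparison above walks both category maps in lockstep and declares a match only when the two walks run out at the same step. The same idea in a freestanding userspace form, with a plain 64-bit bitmap standing in for netlbl_secattr_catmap_walk():

#include <stdint.h>
#include <stdio.h>

/* stand-in for netlbl_secattr_catmap_walk(): next set bit >= offset, or -1 */
static int catmap_walk(uint64_t map, int offset)
{
	for (int i = offset; i < 64; i++)
		if (map & (1ULL << i))
			return i;
	return -1;
}

static int catsets_equal(uint64_t a, uint64_t k)
{
	int acat, kcat;

	for (acat = -1, kcat = -1; acat == kcat; ) {
		acat = catmap_walk(a, acat + 1);
		kcat = catmap_walk(k, kcat + 1);
		if (acat < 0 || kcat < 0)
			break;
	}
	return acat == kcat;	/* both walks exhausted at the same step */
}

int main(void)
{
	uint64_t cats_2_4 = (1ULL << 2) | (1ULL << 4);
	uint64_t cats_2_3_4 = cats_2_4 | (1ULL << 3);

	printf("%d\n", catsets_equal(cats_2_4, cats_2_4));	/* 1 */
	printf("%d\n", catsets_equal(cats_2_4, cats_2_3_4));	/* 0 */
	return 0;
}
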
+static int smk_skb_to_addr_ipv6(struct sk_buff *skb, struct sockaddr_in6 *sip)
+{
+ u8 nexthdr;
+ int offset;
+ int proto = -EINVAL;
+ struct ipv6hdr _ipv6h;
+ struct ipv6hdr *ip6;
+ __be16 frag_off;
+ struct tcphdr _tcph, *th;
+ struct udphdr _udph, *uh;
+ struct dccp_hdr _dccph, *dh;
+
+ sip->sin6_port = 0;
+
+ offset = skb_network_offset(skb);
+ ip6 = skb_header_pointer(skb, offset, sizeof(_ipv6h), &_ipv6h);
+ if (ip6 == NULL)
+ return -EINVAL;
+ sip->sin6_addr = ip6->saddr;
+
+ nexthdr = ip6->nexthdr;
+ offset += sizeof(_ipv6h);
+ offset = ipv6_skip_exthdr(skb, offset, &nexthdr, &frag_off);
+ if (offset < 0)
+ return -EINVAL;
+
+ proto = nexthdr;
+ switch (proto) {
+ case IPPROTO_TCP:
+ th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+ if (th != NULL)
+ sip->sin6_port = th->source;
+ break;
+ case IPPROTO_UDP:
+ uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+ if (uh != NULL)
+ sip->sin6_port = uh->source;
+ break;
+ case IPPROTO_DCCP:
+ dh = skb_header_pointer(skb, offset, sizeof(_dccph), &_dccph);
+ if (dh != NULL)
+ sip->sin6_port = dh->dccph_sport;
+ break;
+ }
+ return proto;
+}
+
/**
* smack_socket_sock_rcv_skb - Smack packet delivery access check
* @sk: socket
@@ -2984,40 +3312,52 @@ static int smack_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = sk->sk_security;
- char *csp;
- int rc;
+ struct smack_known *skp;
+ struct sockaddr_in6 sadd;
+ int rc = 0;
struct smk_audit_info ad;
- if (sk->sk_family != PF_INET && sk->sk_family != PF_INET6)
- return 0;
-
- /*
- * Translate what netlabel gave us.
- */
- netlbl_secattr_init(&secattr);
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
+ switch (sk->sk_family) {
+ case PF_INET:
+ /*
+ * Translate what netlabel gave us.
+ */
+ netlbl_secattr_init(&secattr);
- rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
- if (rc == 0)
- csp = smack_from_secattr(&secattr, ssp);
- else
- csp = smack_net_ambient;
+ rc = netlbl_skbuff_getattr(skb, sk->sk_family, &secattr);
+ if (rc == 0)
+ skp = smack_from_secattr(&secattr, ssp);
+ else
+ skp = smack_net_ambient;
- netlbl_secattr_destroy(&secattr);
+ netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = sk->sk_family;
- ad.a.u.net.netif = skb->skb_iif;
- ipv4_skb_to_auditdata(skb, &ad.a, NULL);
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = sk->sk_family;
+ ad.a.u.net->netif = skb->skb_iif;
+ ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
- /*
- * Receiving a packet requires that the other end
- * be able to write here. Read access is not required.
- * This is the simplist possible security model
- * for networking.
- */
- rc = smk_access(csp, ssp->smk_in, MAY_WRITE, &ad);
- if (rc != 0)
- netlbl_skbuff_err(skb, rc, 0);
+ /*
+ * Receiving a packet requires that the other end
+ * be able to write here. Read access is not required.
+	 * This is the simplest possible security model
+ * for networking.
+ */
+ rc = smk_access(skp, ssp->smk_in->smk_known, MAY_WRITE, &ad);
+ if (rc != 0)
+ netlbl_skbuff_err(skb, rc, 0);
+ break;
+ case PF_INET6:
+ rc = smk_skb_to_addr_ipv6(skb, &sadd);
+ if (rc == IPPROTO_UDP || rc == IPPROTO_TCP)
+ rc = smk_ipv6_port_check(sk, &sadd, SMK_RECEIVING);
+ else
+ rc = 0;
+ break;
+ }
return rc;
}
@@ -3041,7 +3381,7 @@ static int smack_socket_getpeersec_stream(struct socket *sock,
ssp = sock->sk->sk_security;
if (ssp->smk_packet != NULL) {
- rcp = ssp->smk_packet;
+ rcp = ssp->smk_packet->smk_known;
slen = strlen(rcp) + 1;
}
@@ -3071,7 +3411,7 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
{
struct netlbl_lsm_secattr secattr;
struct socket_smack *ssp = NULL;
- char *sp;
+ struct smack_known *skp;
int family = PF_UNSPEC;
u32 s = 0; /* 0 is the invalid secid */
int rc;
@@ -3087,7 +3427,7 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
if (family == PF_UNIX) {
ssp = sock->sk->sk_security;
- s = smack_to_secid(ssp->smk_out);
+ s = ssp->smk_out->smk_secid;
} else if (family == PF_INET || family == PF_INET6) {
/*
* Translate what netlabel gave us.
@@ -3097,8 +3437,8 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0) {
- sp = smack_from_secattr(&secattr, ssp);
- s = smack_to_secid(sp);
+ skp = smack_from_secattr(&secattr, ssp);
+ s = skp->smk_secid;
}
netlbl_secattr_destroy(&secattr);
}
@@ -3119,13 +3459,15 @@ static int smack_socket_getpeersec_dgram(struct socket *sock,
static void smack_sock_graft(struct sock *sk, struct socket *parent)
{
struct socket_smack *ssp;
+ struct smack_known *skp = smk_of_current();
if (sk == NULL ||
(sk->sk_family != PF_INET && sk->sk_family != PF_INET6))
return;
ssp = sk->sk_security;
- ssp->smk_in = ssp->smk_out = smk_of_current();
+ ssp->smk_in = skp;
+ ssp->smk_out = skp;
/* cssp->smk_packet is already set in smack_inet_csk_clone() */
}
@@ -3142,37 +3484,49 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
struct request_sock *req)
{
u16 family = sk->sk_family;
+ struct smack_known *skp;
struct socket_smack *ssp = sk->sk_security;
struct netlbl_lsm_secattr secattr;
struct sockaddr_in addr;
struct iphdr *hdr;
- char *sp;
+ char *hsp;
int rc;
struct smk_audit_info ad;
+#ifdef CONFIG_AUDIT
+ struct lsm_network_audit net;
+#endif
- /* handle mapped IPv4 packets arriving via IPv6 sockets */
- if (family == PF_INET6 && skb->protocol == htons(ETH_P_IP))
- family = PF_INET;
+ if (family == PF_INET6) {
+ /*
+ * Handle mapped IPv4 packets arriving
+ * via IPv6 sockets. Don't set up netlabel
+ * processing on IPv6.
+ */
+ if (skb->protocol == htons(ETH_P_IP))
+ family = PF_INET;
+ else
+ return 0;
+ }
netlbl_secattr_init(&secattr);
rc = netlbl_skbuff_getattr(skb, family, &secattr);
if (rc == 0)
- sp = smack_from_secattr(&secattr, ssp);
+ skp = smack_from_secattr(&secattr, ssp);
else
- sp = smack_known_huh.smk_known;
+ skp = &smack_known_huh;
netlbl_secattr_destroy(&secattr);
#ifdef CONFIG_AUDIT
- smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_NET);
- ad.a.u.net.family = family;
- ad.a.u.net.netif = skb->skb_iif;
+ smk_ad_init_net(&ad, __func__, LSM_AUDIT_DATA_NET, &net);
+ ad.a.u.net->family = family;
+ ad.a.u.net->netif = skb->skb_iif;
ipv4_skb_to_auditdata(skb, &ad.a, NULL);
#endif
/*
* Receiving a packet requires that the other end be able to write
* here. Read access is not required.
*/
- rc = smk_access(sp, ssp->smk_in, MAY_WRITE, &ad);
+ rc = smk_access(skp, ssp->smk_in->smk_known, MAY_WRITE, &ad);
if (rc != 0)
return rc;
@@ -3180,7 +3534,7 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
* Save the peer's label in the request_sock so we can later setup
* smk_packet in the child socket so that SO_PEERCRED can report it.
*/
- req->peer_secid = smack_to_secid(sp);
+ req->peer_secid = skp->smk_secid;
/*
* We need to decide if we want to label the incoming connection here
@@ -3190,16 +3544,13 @@ static int smack_inet_conn_request(struct sock *sk, struct sk_buff *skb,
hdr = ip_hdr(skb);
addr.sin_addr.s_addr = hdr->saddr;
rcu_read_lock();
- if (smack_host_label(&addr) == NULL) {
- rcu_read_unlock();
- netlbl_secattr_init(&secattr);
- smack_to_secattr(sp, &secattr);
- rc = netlbl_req_setattr(req, &secattr);
- netlbl_secattr_destroy(&secattr);
- } else {
- rcu_read_unlock();
+ hsp = smack_host_label(&addr);
+ rcu_read_unlock();
+
+ if (hsp == NULL)
+ rc = netlbl_req_setattr(req, &skp->smk_netlabel);
+ else
netlbl_req_delattr(req);
- }
return rc;
}
@@ -3215,10 +3566,12 @@ static void smack_inet_csk_clone(struct sock *sk,
const struct request_sock *req)
{
struct socket_smack *ssp = sk->sk_security;
+ struct smack_known *skp;
- if (req->peer_secid != 0)
- ssp->smk_packet = smack_from_secid(req->peer_secid);
- else
+ if (req->peer_secid != 0) {
+ skp = smack_from_secid(req->peer_secid);
+ ssp->smk_packet = skp;
+ } else
ssp->smk_packet = NULL;
}
@@ -3244,7 +3597,9 @@ static void smack_inet_csk_clone(struct sock *sk,
static int smack_key_alloc(struct key *key, const struct cred *cred,
unsigned long flags)
{
- key->security = smk_of_task(cred->security);
+ struct smack_known *skp = smk_of_task(cred->security);
+
+ key->security = skp->smk_known;
return 0;
}
@@ -3269,11 +3624,12 @@ static void smack_key_free(struct key *key)
* an error code otherwise
*/
static int smack_key_permission(key_ref_t key_ref,
- const struct cred *cred, key_perm_t perm)
+ const struct cred *cred, unsigned perm)
{
struct key *keyp;
struct smk_audit_info ad;
- char *tsp = smk_of_task(cred->security);
+ struct smack_known *tkp = smk_of_task(cred->security);
+ int request = 0;
keyp = key_ref_to_ptr(key_ref);
if (keyp == NULL)
@@ -3287,15 +3643,18 @@ static int smack_key_permission(key_ref_t key_ref,
/*
* This should not occur
*/
- if (tsp == NULL)
+ if (tkp == NULL)
return -EACCES;
#ifdef CONFIG_AUDIT
smk_ad_init(&ad, __func__, LSM_AUDIT_DATA_KEY);
ad.a.u.key_struct.key = keyp->serial;
ad.a.u.key_struct.key_desc = keyp->description;
#endif
- return smk_access(tsp, keyp->security,
- MAY_READWRITE, &ad);
+ if (perm & KEY_NEED_READ)
+ request = MAY_READ;
+ if (perm & (KEY_NEED_WRITE | KEY_NEED_LINK | KEY_NEED_SETATTR))
+ request = MAY_WRITE;
+ return smk_access(tkp, keyp->security, request, &ad);
}
#endif /* CONFIG_KEYS */
@@ -3377,19 +3736,18 @@ static int smack_audit_rule_known(struct audit_krule *krule)
static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule,
struct audit_context *actx)
{
- char *smack;
+ struct smack_known *skp;
char *rule = vrule;
- if (!rule) {
- audit_log(actx, GFP_KERNEL, AUDIT_SELINUX_ERR,
- "Smack: missing rule\n");
+ if (unlikely(!rule)) {
+ WARN_ONCE(1, "Smack: missing rule\n");
return -ENOENT;
}
if (field != AUDIT_SUBJ_USER && field != AUDIT_OBJ_USER)
return 0;
- smack = smack_from_secid(secid);
+ skp = smack_from_secid(secid);
/*
* No need to do string comparisons. If a match occurs,
@@ -3397,9 +3755,9 @@ static int smack_audit_rule_match(u32 secid, u32 field, u32 op, void *vrule,
* label.
*/
if (op == Audit_equal)
- return (rule == smack);
+ return (rule == skp->smk_known);
if (op == Audit_not_equal)
- return (rule != smack);
+ return (rule != skp->smk_known);
return 0;
}
@@ -3418,6 +3776,16 @@ static void smack_audit_rule_free(void *vrule)
#endif /* CONFIG_AUDIT */
/**
+ * smack_ismaclabel - check if xattr @name references a smack MAC label
+ * @name: Full xattr name to check.
+ */
+static int smack_ismaclabel(const char *name)
+{
+ return (strcmp(name, XATTR_SMACK_SUFFIX) == 0);
+}
+
+
+/**
* smack_secid_to_secctx - return the smack label for a secid
* @secid: incoming integer
* @secdata: destination
@@ -3427,11 +3795,11 @@ static void smack_audit_rule_free(void *vrule)
*/
static int smack_secid_to_secctx(u32 secid, char **secdata, u32 *seclen)
{
- char *sp = smack_from_secid(secid);
+ struct smack_known *skp = smack_from_secid(secid);
if (secdata)
- *secdata = sp;
- *seclen = strlen(sp);
+ *secdata = skp->smk_known;
+ *seclen = strlen(skp->smk_known);
return 0;
}
@@ -3493,8 +3861,6 @@ struct security_operations smack_ops = {
.sb_copy_data = smack_sb_copy_data,
.sb_kern_mount = smack_sb_kern_mount,
.sb_statfs = smack_sb_statfs,
- .sb_mount = smack_sb_mount,
- .sb_umount = smack_sb_umount,
.bprm_set_creds = smack_bprm_set_creds,
.bprm_committing_creds = smack_bprm_committing_creds,
@@ -3525,12 +3891,13 @@ struct security_operations smack_ops = {
.file_ioctl = smack_file_ioctl,
.file_lock = smack_file_lock,
.file_fcntl = smack_file_fcntl,
- .file_mmap = smack_file_mmap,
+ .mmap_file = smack_mmap_file,
+ .mmap_addr = cap_mmap_addr,
.file_set_fowner = smack_file_set_fowner,
.file_send_sigiotask = smack_file_send_sigiotask,
.file_receive = smack_file_receive,
- .dentry_open = smack_dentry_open,
+ .file_open = smack_file_open,
.cred_alloc_blank = smack_cred_alloc_blank,
.cred_free = smack_cred_free,
@@ -3586,6 +3953,7 @@ struct security_operations smack_ops = {
.unix_may_send = smack_unix_may_send,
.socket_post_create = smack_socket_post_create,
+ .socket_bind = smack_socket_bind,
.socket_connect = smack_socket_connect,
.socket_sendmsg = smack_socket_sendmsg,
.socket_sock_rcv_skb = smack_socket_sock_rcv_skb,
@@ -3612,6 +3980,7 @@ struct security_operations smack_ops = {
.audit_rule_free = smack_audit_rule_free,
#endif /* CONFIG_AUDIT */
+ .ismaclabel = smack_ismaclabel,
.secid_to_secctx = smack_secid_to_secctx,
.secctx_to_secid = smack_secctx_to_secid,
.release_secctx = smack_release_secctx,
@@ -3621,14 +3990,35 @@ struct security_operations smack_ops = {
};
-static __init void init_smack_know_list(void)
+static __init void init_smack_known_list(void)
{
- list_add(&smack_known_huh.list, &smack_known_list);
- list_add(&smack_known_hat.list, &smack_known_list);
- list_add(&smack_known_star.list, &smack_known_list);
- list_add(&smack_known_floor.list, &smack_known_list);
- list_add(&smack_known_invalid.list, &smack_known_list);
- list_add(&smack_known_web.list, &smack_known_list);
+ /*
+ * Initialize rule list locks
+ */
+ mutex_init(&smack_known_huh.smk_rules_lock);
+ mutex_init(&smack_known_hat.smk_rules_lock);
+ mutex_init(&smack_known_floor.smk_rules_lock);
+ mutex_init(&smack_known_star.smk_rules_lock);
+ mutex_init(&smack_known_invalid.smk_rules_lock);
+ mutex_init(&smack_known_web.smk_rules_lock);
+ /*
+ * Initialize rule lists
+ */
+ INIT_LIST_HEAD(&smack_known_huh.smk_rules);
+ INIT_LIST_HEAD(&smack_known_hat.smk_rules);
+ INIT_LIST_HEAD(&smack_known_star.smk_rules);
+ INIT_LIST_HEAD(&smack_known_floor.smk_rules);
+ INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
+ INIT_LIST_HEAD(&smack_known_web.smk_rules);
+ /*
+ * Create the known labels list
+ */
+ smk_insert_entry(&smack_known_huh);
+ smk_insert_entry(&smack_known_hat);
+ smk_insert_entry(&smack_known_star);
+ smk_insert_entry(&smack_known_floor);
+ smk_insert_entry(&smack_known_invalid);
+ smk_insert_entry(&smack_known_web);
}
/**
@@ -3644,8 +4034,8 @@ static __init int smack_init(void)
if (!security_module_enable(&smack_ops))
return 0;
- tsp = new_task_smack(smack_known_floor.smk_known,
- smack_known_floor.smk_known, GFP_KERNEL);
+ tsp = new_task_smack(&smack_known_floor, &smack_known_floor,
+ GFP_KERNEL);
if (tsp == NULL)
return -ENOMEM;
@@ -3657,16 +4047,8 @@ static __init int smack_init(void)
cred = (struct cred *) current->cred;
cred->security = tsp;
- /* initialize the smack_know_list */
- init_smack_know_list();
- /*
- * Initialize locks
- */
- spin_lock_init(&smack_known_huh.smk_cipsolock);
- spin_lock_init(&smack_known_hat.smk_cipsolock);
- spin_lock_init(&smack_known_star.smk_cipsolock);
- spin_lock_init(&smack_known_floor.smk_cipsolock);
- spin_lock_init(&smack_known_invalid.smk_cipsolock);
+ /* initialize the smack_known_list */
+ init_smack_known_list();
/*
* Register with LSM
diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
index 5c32f36ff70..32b24882084 100644
--- a/security/smack/smackfs.c
+++ b/security/smack/smackfs.c
@@ -22,11 +22,11 @@
#include <linux/mutex.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
-#include <net/netlabel.h>
#include <net/cipso_ipv4.h>
#include <linux/seq_file.h>
#include <linux/ctype.h>
#include <linux/audit.h>
+#include <linux/magic.h>
#include "smack.h"
/*
@@ -45,14 +45,23 @@ enum smk_inos {
SMK_LOGGING = 10, /* logging */
SMK_LOAD_SELF = 11, /* task specific rules */
SMK_ACCESSES = 12, /* access policy */
+ SMK_MAPPED = 13, /* CIPSO level indicating mapped label */
+ SMK_LOAD2 = 14, /* load policy with long labels */
+ SMK_LOAD_SELF2 = 15, /* load task specific rules with long labels */
+ SMK_ACCESS2 = 16, /* make an access check with long labels */
+ SMK_CIPSO2 = 17, /* load long label -> CIPSO mapping */
+ SMK_REVOKE_SUBJ = 18, /* set rules with subject label to '-' */
+ SMK_CHANGE_RULE = 19, /* change or add rules (long labels) */
+	SMK_SYSLOG	= 20,	/* change syslog label */
+ SMK_PTRACE = 21, /* set ptrace rule */
};
/*
* List locks
*/
-static DEFINE_MUTEX(smack_list_lock);
static DEFINE_MUTEX(smack_cipso_lock);
static DEFINE_MUTEX(smack_ambient_lock);
+static DEFINE_MUTEX(smack_syslog_lock);
static DEFINE_MUTEX(smk_netlbladdr_lock);
/*
@@ -60,7 +69,7 @@ static DEFINE_MUTEX(smk_netlbladdr_lock);
* If it isn't somehow marked, use this.
* It can be reset via smackfs/ambient
*/
-char *smack_net_ambient = smack_known_floor.smk_known;
+struct smack_known *smack_net_ambient;
/*
* This is the level in a CIPSO header that indicates a
@@ -70,6 +79,13 @@ char *smack_net_ambient = smack_known_floor.smk_known;
int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
/*
+ * This is the level in a CIPSO header that indicates a
+ * secid is contained directly in the category set.
+ * It can be reset via smackfs/mapped
+ */
+int smack_cipso_mapped = SMACK_CIPSO_MAPPED_DEFAULT;
+
+/*
* Unless a process is running with this label even
* having CAP_MAC_OVERRIDE isn't enough to grant
* privilege to violate MAC policy. If no label is
@@ -77,7 +93,22 @@ int smack_cipso_direct = SMACK_CIPSO_DIRECT_DEFAULT;
* everyone. It is expected that the hat (^) label
* will be used if any label is used.
*/
-char *smack_onlycap;
+struct smack_known *smack_onlycap;
+
+/*
+ * If this value is set restrict syslog use to the label specified.
+ * It can be reset via smackfs/syslog
+ */
+struct smack_known *smack_syslog_label;
+
+/*
+ * Ptrace current rule
+ * SMACK_PTRACE_DEFAULT regular smack ptrace rules (/proc based)
+ * SMACK_PTRACE_EXACT labels must match, but can be overridden with
+ * CAP_SYS_PTRACE
+ * SMACK_PTRACE_DRACONIAN labels must match, CAP_SYS_PTRACE has no effect
+ */
+int smack_ptrace_rule = SMACK_PTRACE_DEFAULT;
/*
* Certain IP addresses may be designated as single label hosts.
@@ -89,7 +120,7 @@ LIST_HEAD(smk_netlbladdr_list);
/*
* Rule lists are maintained for each label.
- * This master list is just for reading /smack/load.
+ * This master list is just for reading /smack/load and /smack/load2.
*/
struct smack_master_list {
struct list_head list;
@@ -98,6 +129,13 @@ struct smack_master_list {
LIST_HEAD(smack_rule_list);
+struct smack_parsed_rule {
+ struct smack_known *smk_subject;
+ char *smk_object;
+ int smk_access1;
+ int smk_access2;
+};
+
static int smk_cipso_doi_value = SMACK_CIPSO_DOI_DEFAULT;
const char *smack_cipso_option = SMACK_CIPSO_OPTION;
@@ -119,51 +157,66 @@ const char *smack_cipso_option = SMACK_CIPSO_OPTION;
* SMK_LOADLEN: Smack rule length
*/
#define SMK_OACCESS "rwxa"
-#define SMK_ACCESS "rwxat"
+#define SMK_ACCESS "rwxatl"
#define SMK_OACCESSLEN (sizeof(SMK_OACCESS) - 1)
#define SMK_ACCESSLEN (sizeof(SMK_ACCESS) - 1)
#define SMK_OLOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_OACCESSLEN)
#define SMK_LOADLEN (SMK_LABELLEN + SMK_LABELLEN + SMK_ACCESSLEN)
+/*
+ * Strictly for CIPSO level manipulation.
+ * Set the category bit number in a smack label sized buffer.
+ */
+static inline void smack_catset_bit(unsigned int cat, char *catsetp)
+{
+ if (cat == 0 || cat > (SMK_CIPSOLEN * 8))
+ return;
+
+ catsetp[(cat - 1) / 8] |= 0x80 >> ((cat - 1) % 8);
+}
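
A few worked values for the helper above; SMK_CIPSOLEN is defined elsewhere in smack.h.

/*
 * smack_catset_bit(1, catsetp)  -> catsetp[0] |= 0x80
 * smack_catset_bit(8, catsetp)  -> catsetp[0] |= 0x01
 * smack_catset_bit(9, catsetp)  -> catsetp[1] |= 0x80
 * Categories of 0 or above SMK_CIPSOLEN * 8 are silently ignored.
 */
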
+
/**
* smk_netlabel_audit_set - fill a netlbl_audit struct
* @nap: structure to fill
*/
static void smk_netlabel_audit_set(struct netlbl_audit *nap)
{
+ struct smack_known *skp = smk_of_current();
+
nap->loginuid = audit_get_loginuid(current);
nap->sessionid = audit_get_sessionid(current);
- nap->secid = smack_to_secid(smk_of_current());
+ nap->secid = skp->smk_secid;
}
/*
- * Values for parsing single label host rules
+ * Value for parsing single label host rules
* "1.2.3.4 X"
- * "192.168.138.129/32 abcdefghijklmnopqrstuvw"
*/
#define SMK_NETLBLADDRMIN 9
-#define SMK_NETLBLADDRMAX 42
/**
- * smk_set_access - add a rule to the rule list
- * @srp: the new rule to add
+ * smk_set_access - add a rule to the rule list or replace an old rule
+ * @srp: the rule to add or replace
* @rule_list: the list of rules
* @rule_lock: the rule list lock
+ * @global: if non-zero, indicates a global rule
*
* Looks through the current subject/object/access list for
* the subject/object pair and replaces the access that was
* there. If the pair isn't found add it with the specified
* access.
*
- * Returns 1 if a rule was found to exist already, 0 if it is new
* Returns 0 if nothing goes wrong or -ENOMEM if it fails
* during the allocation of the new pair to add.
*/
-static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
- struct mutex *rule_lock)
+static int smk_set_access(struct smack_parsed_rule *srp,
+ struct list_head *rule_list,
+ struct mutex *rule_lock, int global)
{
struct smack_rule *sp;
+ struct smack_master_list *smlp;
int found = 0;
+ int rc = 0;
mutex_lock(rule_lock);
@@ -175,139 +228,240 @@ static int smk_set_access(struct smack_rule *srp, struct list_head *rule_list,
if (sp->smk_object == srp->smk_object &&
sp->smk_subject == srp->smk_subject) {
found = 1;
- sp->smk_access = srp->smk_access;
+ sp->smk_access |= srp->smk_access1;
+ sp->smk_access &= ~srp->smk_access2;
break;
}
}
- if (found == 0)
- list_add_rcu(&srp->list, rule_list);
+ if (found == 0) {
+ sp = kzalloc(sizeof(*sp), GFP_KERNEL);
+ if (sp == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ sp->smk_subject = srp->smk_subject;
+ sp->smk_object = srp->smk_object;
+ sp->smk_access = srp->smk_access1 & ~srp->smk_access2;
+
+ list_add_rcu(&sp->list, rule_list);
+ /*
+ * If this is a global as opposed to self and a new rule
+ * it needs to get added for reporting.
+ */
+ if (global) {
+ smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
+ if (smlp != NULL) {
+ smlp->smk_rule = sp;
+ list_add_rcu(&smlp->list, &smack_rule_list);
+ } else
+ rc = -ENOMEM;
+ }
+ }
+
+out:
mutex_unlock(rule_lock);
+ return rc;
+}
+
+/**
+ * smk_perm_from_str - parse smack accesses from a text string
+ * @string: a text string that contains a Smack accesses code
+ *
+ * Returns an integer with respective bits set for specified accesses.
+ */
+static int smk_perm_from_str(const char *string)
+{
+ int perm = 0;
+ const char *cp;
- return found;
+ for (cp = string; ; cp++)
+ switch (*cp) {
+ case '-':
+ break;
+ case 'r':
+ case 'R':
+ perm |= MAY_READ;
+ break;
+ case 'w':
+ case 'W':
+ perm |= MAY_WRITE;
+ break;
+ case 'x':
+ case 'X':
+ perm |= MAY_EXEC;
+ break;
+ case 'a':
+ case 'A':
+ perm |= MAY_APPEND;
+ break;
+ case 't':
+ case 'T':
+ perm |= MAY_TRANSMUTE;
+ break;
+ case 'l':
+ case 'L':
+ perm |= MAY_LOCK;
+ break;
+ default:
+ return perm;
+ }
}
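
Some worked examples for the parser above; it stops at the first character it does not recognize (including the terminating NUL), so trailing newlines are harmless.

/*
 * smk_perm_from_str("rwx")   -> MAY_READ | MAY_WRITE | MAY_EXEC
 * smk_perm_from_str("rwxaT") -> MAY_READ | MAY_WRITE | MAY_EXEC |
 *                               MAY_APPEND | MAY_TRANSMUTE
 * smk_perm_from_str("l")     -> MAY_LOCK
 * smk_perm_from_str("-")     -> 0 (no access)
 * smk_perm_from_str("rw\n")  -> MAY_READ | MAY_WRITE
 */
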
/**
- * smk_parse_rule - parse Smack rule from load string
- * @data: string to be parsed whose size is SMK_LOADLEN
+ * smk_fill_rule - Fill Smack rule from strings
+ * @subject: subject label string
+ * @object: object label string
+ * @access1: access string
+ * @access2: string with permissions to be removed
* @rule: Smack rule
* @import: if non-zero, import labels
+ * @len: label length limit
+ *
+ * Returns 0 on success, -EINVAL on failure and -ENOENT when either subject
+ * or object is missing.
*/
-static int smk_parse_rule(const char *data, struct smack_rule *rule, int import)
+static int smk_fill_rule(const char *subject, const char *object,
+ const char *access1, const char *access2,
+ struct smack_parsed_rule *rule, int import,
+ int len)
{
- char smack[SMK_LABELLEN];
+ const char *cp;
struct smack_known *skp;
if (import) {
- rule->smk_subject = smk_import(data, 0);
+ rule->smk_subject = smk_import_entry(subject, len);
if (rule->smk_subject == NULL)
- return -1;
+ return -EINVAL;
- rule->smk_object = smk_import(data + SMK_LABELLEN, 0);
+ rule->smk_object = smk_import(object, len);
if (rule->smk_object == NULL)
- return -1;
+ return -EINVAL;
} else {
- smk_parse_smack(data, 0, smack);
- skp = smk_find_entry(smack);
+ cp = smk_parse_smack(subject, len);
+ if (cp == NULL)
+ return -EINVAL;
+ skp = smk_find_entry(cp);
+ kfree(cp);
if (skp == NULL)
- return -1;
- rule->smk_subject = skp->smk_known;
+ return -ENOENT;
+ rule->smk_subject = skp;
- smk_parse_smack(data + SMK_LABELLEN, 0, smack);
- skp = smk_find_entry(smack);
+ cp = smk_parse_smack(object, len);
+ if (cp == NULL)
+ return -EINVAL;
+ skp = smk_find_entry(cp);
+ kfree(cp);
if (skp == NULL)
- return -1;
+ return -ENOENT;
rule->smk_object = skp->smk_known;
}
- rule->smk_access = 0;
-
- switch (data[SMK_LABELLEN + SMK_LABELLEN]) {
- case '-':
- break;
- case 'r':
- case 'R':
- rule->smk_access |= MAY_READ;
- break;
- default:
- return -1;
- }
+ rule->smk_access1 = smk_perm_from_str(access1);
+ if (access2)
+ rule->smk_access2 = smk_perm_from_str(access2);
+ else
+ rule->smk_access2 = ~rule->smk_access1;
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 1]) {
- case '-':
- break;
- case 'w':
- case 'W':
- rule->smk_access |= MAY_WRITE;
- break;
- default:
- return -1;
- }
+ return 0;
+}
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 2]) {
- case '-':
- break;
- case 'x':
- case 'X':
- rule->smk_access |= MAY_EXEC;
- break;
- default:
- return -1;
- }
+/**
+ * smk_parse_rule - parse Smack rule from load string
+ * @data: string to be parsed whose size is SMK_LOADLEN
+ * @rule: Smack rule
+ * @import: if non-zero, import labels
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int smk_parse_rule(const char *data, struct smack_parsed_rule *rule,
+ int import)
+{
+ int rc;
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 3]) {
- case '-':
- break;
- case 'a':
- case 'A':
- rule->smk_access |= MAY_APPEND;
- break;
- default:
- return -1;
- }
+ rc = smk_fill_rule(data, data + SMK_LABELLEN,
+ data + SMK_LABELLEN + SMK_LABELLEN, NULL, rule,
+ import, SMK_LABELLEN);
+ return rc;
+}
+
+/**
+ * smk_parse_long_rule - parse Smack rule from rule string
+ * @data: string to be parsed, null terminated
+ * @rule: Will be filled with Smack parsed rule
+ * @import: if non-zero, import labels
+ * @tokens: number of substrings expected in data
+ *
+ * Returns the number of processed bytes on success, a negative value on failure.
+ */
+static ssize_t smk_parse_long_rule(char *data, struct smack_parsed_rule *rule,
+ int import, int tokens)
+{
+ ssize_t cnt = 0;
+ char *tok[4];
+ int rc;
+ int i;
+
+ /*
+ * Parsing the rule in-place, filling all white-spaces with '\0'
+ */
+ for (i = 0; i < tokens; ++i) {
+ while (isspace(data[cnt]))
+ data[cnt++] = '\0';
+
+ if (data[cnt] == '\0')
+ /* Unexpected end of data */
+ return -1;
+
+ tok[i] = data + cnt;
- switch (data[SMK_LABELLEN + SMK_LABELLEN + 4]) {
- case '-':
- break;
- case 't':
- case 'T':
- rule->smk_access |= MAY_TRANSMUTE;
- break;
- default:
- return -1;
+ while (data[cnt] && !isspace(data[cnt]))
+ ++cnt;
}
+ while (isspace(data[cnt]))
+ data[cnt++] = '\0';
- return 0;
+ while (i < 4)
+ tok[i++] = NULL;
+
+ rc = smk_fill_rule(tok[0], tok[1], tok[2], tok[3], rule, import, 0);
+ return rc == 0 ? cnt : rc;
}
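
A worked example of the in-place tokenization above, with made-up label names:

/*
 * Example (hypothetical labels), tokens == 3:
 *
 *   before:  "Alpha  Beta rwx\nGamma ..."
 *   after:   "Alpha\0\0Beta\0rwx\0Gamma ..."
 *            tok[0]="Alpha"  tok[1]="Beta"  tok[2]="rwx"  tok[3]=NULL
 *
 * The return value is the number of bytes consumed, up to and including the
 * trailing whitespace, so the caller can continue parsing the next rule at
 * data + cnt.
 */
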
+#define SMK_FIXED24_FMT 0 /* Fixed 24byte label format */
+#define SMK_LONG_FMT 1 /* Variable long label format */
+#define SMK_CHANGE_FMT 2 /* Rule modification format */
/**
- * smk_write_load_list - write() for any /smack/load
+ * smk_write_rules_list - write() for any /smack rule file
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start - must be 0
* @rule_list: the list of rules to write to
* @rule_lock: lock for the rule list
+ * @format: /smack/load or /smack/load2 or /smack/change-rule format.
*
* Get one smack access rule from above.
- * The format is exactly:
- * char subject[SMK_LABELLEN]
- * char object[SMK_LABELLEN]
- * char access[SMK_ACCESSLEN]
- *
- * writes must be SMK_LABELLEN+SMK_LABELLEN+SMK_ACCESSLEN bytes.
+ * The format for SMK_LONG_FMT is:
+ * "subject<whitespace>object<whitespace>access[<whitespace>...]"
+ * The format for SMK_FIXED24_FMT is exactly:
+ * "subject object rwxat"
+ * The format for SMK_CHANGE_FMT is:
+ * "subject<whitespace>object<whitespace>
+ * acc_enable<whitespace>acc_disable[<whitespace>...]"
*/
-static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos,
- struct list_head *rule_list,
- struct mutex *rule_lock)
+static ssize_t smk_write_rules_list(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos,
+ struct list_head *rule_list,
+ struct mutex *rule_lock, int format)
{
- struct smack_master_list *smlp;
- struct smack_known *skp;
- struct smack_rule *rule;
+ struct smack_parsed_rule rule;
char *data;
- int rc = -EINVAL;
- int load = 0;
+ int rc;
+ int trunc = 0;
+ int tokens;
+ ssize_t cnt = 0;
/*
* No partial writes.
@@ -315,13 +469,21 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
*/
if (*ppos != 0)
return -EINVAL;
- /*
- * Minor hack for backward compatibility
- */
- if (count < (SMK_OLOADLEN) || count > SMK_LOADLEN)
- return -EINVAL;
- data = kzalloc(SMK_LOADLEN, GFP_KERNEL);
+ if (format == SMK_FIXED24_FMT) {
+ /*
+ * Minor hack for backward compatibility
+ */
+ if (count < SMK_OLOADLEN || count > SMK_LOADLEN)
+ return -EINVAL;
+ } else {
+ if (count >= PAGE_SIZE) {
+ count = PAGE_SIZE - 1;
+ trunc = 1;
+ }
+ }
+
+ data = kmalloc(count + 1, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -331,46 +493,48 @@ static ssize_t smk_write_load_list(struct file *file, const char __user *buf,
}
/*
- * More on the minor hack for backward compatibility
+	 * If only part of the user buffer was copied in,
+	 * avoid leaving a partial rule at the end of the data buffer
*/
- if (count == (SMK_OLOADLEN))
- data[SMK_OLOADLEN] = '-';
-
- rule = kzalloc(sizeof(*rule), GFP_KERNEL);
- if (rule == NULL) {
- rc = -ENOMEM;
- goto out;
+ if (trunc) {
+ while (count > 0 && (data[count - 1] != '\n'))
+ --count;
+ if (count == 0) {
+ rc = -EINVAL;
+ goto out;
+ }
}
- if (smk_parse_rule(data, rule, 1))
- goto out_free_rule;
+ data[count] = '\0';
+ tokens = (format == SMK_CHANGE_FMT ? 4 : 3);
+ while (cnt < count) {
+ if (format == SMK_FIXED24_FMT) {
+ rc = smk_parse_rule(data, &rule, 1);
+ if (rc != 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ cnt = count;
+ } else {
+ rc = smk_parse_long_rule(data + cnt, &rule, 1, tokens);
+ if (rc <= 0) {
+ rc = -EINVAL;
+ goto out;
+ }
+ cnt += rc;
+ }
- if (rule_list == NULL) {
- load = 1;
- skp = smk_find_entry(rule->smk_subject);
- rule_list = &skp->smk_rules;
- rule_lock = &skp->smk_rules_lock;
- }
+ if (rule_list == NULL)
+ rc = smk_set_access(&rule, &rule.smk_subject->smk_rules,
+ &rule.smk_subject->smk_rules_lock, 1);
+ else
+ rc = smk_set_access(&rule, rule_list, rule_lock, 0);
- rc = count;
- /*
- * If this is "load" as opposed to "load-self" and a new rule
- * it needs to get added for reporting.
- * smk_set_access returns true if there was already a rule
- * for the subject/object pair, and false if it was new.
- */
- if (load && !smk_set_access(rule, rule_list, rule_lock)) {
- smlp = kzalloc(sizeof(*smlp), GFP_KERNEL);
- if (smlp != NULL) {
- smlp->smk_rule = rule;
- list_add_rcu(&smlp->list, &smack_rule_list);
- } else
- rc = -ENOMEM;
- goto out;
+ if (rc)
+ goto out;
}
-out_free_rule:
- kfree(rule);
+ rc = cnt;
out:
kfree(data);
return rc;
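
Taken together, the long-label interfaces accept plain text rules such as "subject object access". A userspace sketch follows; the label names are invented, CAP_MAC_ADMIN is required, smackfs is assumed to be mounted on /smack as the comments above do, and the change-rule file name comes from the SMK_CHANGE_RULE entry rather than from this hunk.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_rule(const char *path, const char *rule)
{
	int fd = open(path, O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, rule, strlen(rule));	/* lands in smk_write_rules_list() */
	close(fd);
	return n < 0 ? -1 : 0;
}

int main(void)
{
	/* SMK_LONG_FMT: subject, object and access separated by whitespace */
	if (write_rule("/smack/load2", "WebApp UserData rwxa\n") < 0)
		perror("load2");

	/* SMK_CHANGE_FMT: enable "a" and disable "x" for an existing pair */
	if (write_rule("/smack/change-rule", "WebApp UserData a x\n") < 0)
		perror("change-rule");
	return 0;
}
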
@@ -421,29 +585,22 @@ static void smk_seq_stop(struct seq_file *s, void *v)
/* No-op */
}
-/*
- * Seq_file read operations for /smack/load
- */
-
-static void *load_seq_start(struct seq_file *s, loff_t *pos)
-{
- return smk_seq_start(s, pos, &smack_rule_list);
-}
-
-static void *load_seq_next(struct seq_file *s, void *v, loff_t *pos)
+static void smk_rule_show(struct seq_file *s, struct smack_rule *srp, int max)
{
- return smk_seq_next(s, v, pos, &smack_rule_list);
-}
+ /*
+	 * Don't show any rules with label names too long for
+	 * the interface file (/smack/load or /smack/load2)
+ * because you should expect to be able to write
+ * anything you read back.
+ */
+ if (strlen(srp->smk_subject->smk_known) >= max ||
+ strlen(srp->smk_object) >= max)
+ return;
-static int load_seq_show(struct seq_file *s, void *v)
-{
- struct list_head *list = v;
- struct smack_master_list *smlp =
- list_entry(list, struct smack_master_list, list);
- struct smack_rule *srp = smlp->smk_rule;
+ if (srp->smk_access == 0)
+ return;
- seq_printf(s, "%s %s", (char *)srp->smk_subject,
- (char *)srp->smk_object);
+ seq_printf(s, "%s %s", srp->smk_subject->smk_known, srp->smk_object);
seq_putc(s, ' ');
@@ -457,17 +614,40 @@ static int load_seq_show(struct seq_file *s, void *v)
seq_putc(s, 'a');
if (srp->smk_access & MAY_TRANSMUTE)
seq_putc(s, 't');
- if (srp->smk_access == 0)
- seq_putc(s, '-');
+ if (srp->smk_access & MAY_LOCK)
+ seq_putc(s, 'l');
seq_putc(s, '\n');
+}
+
+/*
+ * Seq_file read operations for /smack/load
+ */
+
+static void *load2_seq_start(struct seq_file *s, loff_t *pos)
+{
+ return smk_seq_start(s, pos, &smack_rule_list);
+}
+
+static void *load2_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ return smk_seq_next(s, v, pos, &smack_rule_list);
+}
+
+static int load_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_master_list *smlp =
+ list_entry(list, struct smack_master_list, list);
+
+ smk_rule_show(s, smlp->smk_rule, SMK_LABELLEN);
return 0;
}
static const struct seq_operations load_seq_ops = {
- .start = load_seq_start,
- .next = load_seq_next,
+ .start = load2_seq_start,
+ .next = load2_seq_next,
.show = load_seq_show,
.stop = smk_seq_stop,
};
@@ -495,16 +675,16 @@ static int smk_open_load(struct inode *inode, struct file *file)
static ssize_t smk_write_load(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
-
/*
* Must have privilege.
* No partial writes.
* Enough data must be present.
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
- return smk_write_load_list(file, buf, count, ppos, NULL, NULL);
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_FIXED24_FMT);
}
static const struct file_operations smk_load_ops = {
@@ -574,8 +754,10 @@ static void smk_unlbl_ambient(char *oldambient)
printk(KERN_WARNING "%s:%d remove rc = %d\n",
__func__, __LINE__, rc);
}
+ if (smack_net_ambient == NULL)
+ smack_net_ambient = &smack_known_floor;
- rc = netlbl_cfg_unlbl_map_add(smack_net_ambient, PF_INET,
+ rc = netlbl_cfg_unlbl_map_add(smack_net_ambient->smk_known, PF_INET,
NULL, NULL, &nai);
if (rc != 0)
printk(KERN_WARNING "%s:%d add rc = %d\n",
@@ -605,27 +787,28 @@ static int cipso_seq_show(struct seq_file *s, void *v)
struct list_head *list = v;
struct smack_known *skp =
list_entry(list, struct smack_known, list);
- struct smack_cipso *scp = skp->smk_cipso;
- char *cbp;
+ struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
char sep = '/';
- int cat = 1;
int i;
- unsigned char m;
- if (scp == NULL)
+ /*
+ * Don't show a label that could not have been set using
+ * /smack/cipso. This is in support of the notion that
+ * anything read from /smack/cipso ought to be writeable
+ * to /smack/cipso.
+ *
+ * /smack/cipso2 should be used instead.
+ */
+ if (strlen(skp->smk_known) >= SMK_LABELLEN)
return 0;
- seq_printf(s, "%s %3d", (char *)&skp->smk_known, scp->smk_level);
+ seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);
- cbp = scp->smk_catset;
- for (i = 0; i < SMK_LABELLEN; i++)
- for (m = 0x80; m != 0; m >>= 1) {
- if (m & cbp[i]) {
- seq_printf(s, "%c%d", sep, cat);
- sep = ',';
- }
- cat++;
- }
+ for (i = netlbl_secattr_catmap_walk(cmp, 0); i >= 0;
+ i = netlbl_secattr_catmap_walk(cmp, i + 1)) {
+ seq_printf(s, "%c%d", sep, i);
+ sep = ',';
+ }
seq_putc(s, '\n');
@@ -653,23 +836,24 @@ static int smk_open_cipso(struct inode *inode, struct file *file)
}
/**
- * smk_write_cipso - write() for /smack/cipso
+ * smk_set_cipso - do the work for write() for cipso and cipso2
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
* @ppos: where to start
+ * @format: /smack/cipso or /smack/cipso2
*
* Accepts only one cipso rule per write call.
* Returns number of bytes written or error code, as appropriate
*/
-static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t smk_set_cipso(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, int format)
{
struct smack_known *skp;
- struct smack_cipso *scp = NULL;
- char mapcatset[SMK_LABELLEN];
+ struct netlbl_lsm_secattr ncats;
+ char mapcatset[SMK_CIPSOLEN];
int maplevel;
- int cat;
+ unsigned int cat;
int catlen;
ssize_t rc = -EINVAL;
char *data = NULL;
@@ -682,11 +866,12 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
* No partial writes.
* Enough data must be present.
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
- if (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX)
+ if (format == SMK_FIXED24_FMT &&
+ (count < SMK_CIPSOMIN || count > SMK_CIPSOMAX))
return -EINVAL;
data = kzalloc(count + 1, GFP_KERNEL);
@@ -698,11 +883,6 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
goto unlockedout;
}
- /* labels cannot begin with a '-' */
- if (data[0] == '-') {
- rc = -EINVAL;
- goto unlockedout;
- }
data[count] = '\0';
rule = data;
/*
@@ -715,7 +895,11 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
if (skp == NULL)
goto out;
- rule += SMK_LABELLEN;
+ if (format == SMK_FIXED24_FMT)
+ rule += SMK_LABELLEN;
+ else
+ rule += strlen(skp->smk_known) + 1;
+
ret = sscanf(rule, "%d", &maplevel);
if (ret != 1 || maplevel > SMACK_CIPSO_MAXLEVEL)
goto out;
@@ -725,41 +909,29 @@ static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
if (ret != 1 || catlen > SMACK_CIPSO_MAXCATNUM)
goto out;
- if (count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN))
+ if (format == SMK_FIXED24_FMT &&
+ count != (SMK_CIPSOMIN + catlen * SMK_DIGITLEN))
goto out;
memset(mapcatset, 0, sizeof(mapcatset));
for (i = 0; i < catlen; i++) {
rule += SMK_DIGITLEN;
- ret = sscanf(rule, "%d", &cat);
- if (ret != 1 || cat > SMACK_CIPSO_MAXCATVAL)
+ ret = sscanf(rule, "%u", &cat);
+ if (ret != 1 || cat > SMACK_CIPSO_MAXCATNUM)
goto out;
smack_catset_bit(cat, mapcatset);
}
- if (skp->smk_cipso == NULL) {
- scp = kzalloc(sizeof(struct smack_cipso), GFP_KERNEL);
- if (scp == NULL) {
- rc = -ENOMEM;
- goto out;
- }
+ rc = smk_netlbl_mls(maplevel, mapcatset, &ncats, SMK_CIPSOLEN);
+ if (rc >= 0) {
+ netlbl_secattr_catmap_free(skp->smk_netlabel.attr.mls.cat);
+ skp->smk_netlabel.attr.mls.cat = ncats.attr.mls.cat;
+ skp->smk_netlabel.attr.mls.lvl = ncats.attr.mls.lvl;
+ rc = count;
}
- spin_lock_bh(&skp->smk_cipsolock);
-
- if (scp == NULL)
- scp = skp->smk_cipso;
- else
- skp->smk_cipso = scp;
-
- scp->smk_level = maplevel;
- memcpy(scp->smk_catset, mapcatset, sizeof(mapcatset));
-
- spin_unlock_bh(&skp->smk_cipsolock);
-
- rc = count;
out:
mutex_unlock(&smack_cipso_lock);
unlockedout:
@@ -767,6 +939,22 @@ unlockedout:
return rc;
}
+/**
+ * smk_write_cipso - write() for /smack/cipso
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_set_cipso(file, buf, count, ppos, SMK_FIXED24_FMT);
+}
+
static const struct file_operations smk_cipso_ops = {
.open = smk_open_cipso,
.read = seq_read,
@@ -776,6 +964,80 @@ static const struct file_operations smk_cipso_ops = {
};
/*
+ * Seq_file read operations for /smack/cipso2
+ */
+
+/*
+ * Print cipso labels in format:
+ * label level[/cat[,cat]]
+ */
+static int cipso2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_known *skp =
+ list_entry(list, struct smack_known, list);
+ struct netlbl_lsm_secattr_catmap *cmp = skp->smk_netlabel.attr.mls.cat;
+ char sep = '/';
+ int i;
+
+ seq_printf(s, "%s %3d", skp->smk_known, skp->smk_netlabel.attr.mls.lvl);
+
+ for (i = netlbl_secattr_catmap_walk(cmp, 0); i >= 0;
+ i = netlbl_secattr_catmap_walk(cmp, i + 1)) {
+ seq_printf(s, "%c%d", sep, i);
+ sep = ',';
+ }
+
+ seq_putc(s, '\n');
+
+ return 0;
+}
+
+static const struct seq_operations cipso2_seq_ops = {
+ .start = cipso_seq_start,
+ .next = cipso_seq_next,
+ .show = cipso2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_cipso2 - open() for /smack/cipso2
+ * @inode: inode structure representing file
+ * @file: "cipso2" file pointer
+ *
+ * Connect our cipso_seq_* operations with /smack/cipso2
+ * file_operations
+ */
+static int smk_open_cipso2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cipso2_seq_ops);
+}
+
+/**
+ * smk_write_cipso2 - write() for /smack/cipso2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Accepts only one cipso rule per write call.
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_cipso2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_set_cipso(file, buf, count, ppos, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_cipso2_ops = {
+ .open = smk_open_cipso2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_cipso2,
+ .release = seq_release,
+};
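
As a quick illustration of the interface added above: cipso2_seq_show() emits one "label level[/cat[,cat]]" record per known label, so a userspace dump can be a sketch as small as the one below. The only assumption is that smackfs is mounted on /smack, the mount point the comments in this file use.

    #include <stdio.h>

    /* Dump /smack/cipso2, one "label level[/cat[,cat]]" record per line. */
    int main(void)
    {
        char line[512];
        FILE *fp = fopen("/smack/cipso2", "r");

        if (fp == NULL) {
            perror("fopen /smack/cipso2");
            return 1;
        }
        while (fgets(line, sizeof(line), fp))
            fputs(line, stdout);
        fclose(fp);
        return 0;
    }
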
+
+/*
* Seq_file read operations for /smack/netlabel
*/
@@ -887,9 +1149,9 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
{
struct smk_netlbladdr *skp;
struct sockaddr_in newname;
- char smack[SMK_LABELLEN];
+ char *smack;
char *sp;
- char data[SMK_NETLBLADDRMAX + 1];
+ char *data;
char *host = (char *)&newname.sin_addr.s_addr;
int rc;
struct netlbl_audit audit_info;
@@ -907,40 +1169,63 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
* "<addr/mask, as a.b.c.d/e><space><label>"
* "<addr, as a.b.c.d><space><label>"
*/
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (*ppos != 0)
return -EINVAL;
- if (count < SMK_NETLBLADDRMIN || count > SMK_NETLBLADDRMAX)
+ if (count < SMK_NETLBLADDRMIN)
return -EINVAL;
- if (copy_from_user(data, buf, count) != 0)
- return -EFAULT;
+
+ data = kzalloc(count + 1, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto free_data_out;
+ }
+
+ smack = kzalloc(count + 1, GFP_KERNEL);
+ if (smack == NULL) {
+ rc = -ENOMEM;
+ goto free_data_out;
+ }
data[count] = '\0';
- rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%d %s",
+ rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd/%u %s",
&host[0], &host[1], &host[2], &host[3], &m, smack);
if (rc != 6) {
rc = sscanf(data, "%hhd.%hhd.%hhd.%hhd %s",
&host[0], &host[1], &host[2], &host[3], smack);
- if (rc != 5)
- return -EINVAL;
+ if (rc != 5) {
+ rc = -EINVAL;
+ goto free_out;
+ }
m = BEBITS;
}
- if (m > BEBITS)
- return -EINVAL;
+ if (m > BEBITS) {
+ rc = -EINVAL;
+ goto free_out;
+ }
- /* if smack begins with '-', its an option, don't import it */
+ /*
+ * If smack begins with '-', it is an option, don't import it
+ */
if (smack[0] != '-') {
sp = smk_import(smack, 0);
- if (sp == NULL)
- return -EINVAL;
+ if (sp == NULL) {
+ rc = -EINVAL;
+ goto free_out;
+ }
} else {
/* check known options */
if (strcmp(smack, smack_cipso_option) == 0)
sp = (char *)smack_cipso_option;
- else
- return -EINVAL;
+ else {
+ rc = -EINVAL;
+ goto free_out;
+ }
}
for (temp_mask = 0; m > 0; m--) {
@@ -1006,6 +1291,11 @@ static ssize_t smk_write_netlbladdr(struct file *file, const char __user *buf,
mutex_unlock(&smk_netlbladdr_lock);
+free_out:
+ kfree(smack);
+free_data_out:
+ kfree(data);
+
return rc;
}
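
The comment at the top of smk_write_netlbladdr() fixes the accepted input as "<addr/mask, as a.b.c.d/e><space><label>" or, without a mask, "<addr, as a.b.c.d><space><label>". A hedged sketch of a single update follows; the subnet and the label are invented for the example, smackfs is assumed to be mounted on /smack, and CAP_MAC_ADMIN is required just as the code above checks.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Attach an (example) label to traffic from an (example) subnet. */
    int main(void)
    {
        const char rule[] = "192.168.1.0/24 Example-Host-Label\n";
        int fd = open("/smack/netlabel", O_WRONLY);

        if (fd < 0) {
            perror("open /smack/netlabel");
            return 1;
        }
        if (write(fd, rule, strlen(rule)) < 0)
            perror("write");
        close(fd);
        return 0;
    }
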
@@ -1056,7 +1346,7 @@ static ssize_t smk_write_doi(struct file *file, const char __user *buf,
char temp[80];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1119,10 +1409,11 @@ static ssize_t smk_read_direct(struct file *filp, char __user *buf,
static ssize_t smk_write_direct(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
+ struct smack_known *skp;
char temp[80];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1136,7 +1427,20 @@ static ssize_t smk_write_direct(struct file *file, const char __user *buf,
if (sscanf(temp, "%d", &i) != 1)
return -EINVAL;
- smack_cipso_direct = i;
+ /*
+ * Don't do anything if the value hasn't actually changed.
+	 * If it is changing, reset the level on entries that were
+ * set up to be direct when they were created.
+ */
+ if (smack_cipso_direct != i) {
+ mutex_lock(&smack_known_lock);
+ list_for_each_entry_rcu(skp, &smack_known_list, list)
+ if (skp->smk_netlabel.attr.mls.lvl ==
+ smack_cipso_direct)
+ skp->smk_netlabel.attr.mls.lvl = i;
+ smack_cipso_direct = i;
+ mutex_unlock(&smack_known_lock);
+ }
return count;
}
@@ -1148,6 +1452,84 @@ static const struct file_operations smk_direct_ops = {
};
/**
+ * smk_read_mapped - read() for /smack/mapped
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_mapped(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[80];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d", smack_cipso_mapped);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+
+ return rc;
+}
+
+/**
+ * smk_write_mapped - write() for /smack/mapped
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_mapped(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct smack_known *skp;
+ char temp[80];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+
+ /*
+ * Don't do anything if the value hasn't actually changed.
+	 * If it is changing, reset the level on entries that were
+ * set up to be mapped when they were created.
+ */
+ if (smack_cipso_mapped != i) {
+ mutex_lock(&smack_known_lock);
+ list_for_each_entry_rcu(skp, &smack_known_list, list)
+ if (skp->smk_netlabel.attr.mls.lvl ==
+ smack_cipso_mapped)
+ skp->smk_netlabel.attr.mls.lvl = i;
+ smack_cipso_mapped = i;
+ mutex_unlock(&smack_known_lock);
+ }
+
+ return count;
+}
+
+static const struct file_operations smk_mapped_ops = {
+ .read = smk_read_mapped,
+ .write = smk_write_mapped,
+ .llseek = default_llseek,
+};
+
+/**
* smk_read_ambient - read() for /smack/ambient
* @filp: file pointer, not actually used
* @buf: where to put the result
@@ -1170,11 +1552,12 @@ static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
*/
mutex_lock(&smack_ambient_lock);
- asize = strlen(smack_net_ambient) + 1;
+ asize = strlen(smack_net_ambient->smk_known) + 1;
if (cn >= asize)
rc = simple_read_from_buffer(buf, cn, ppos,
- smack_net_ambient, asize);
+ smack_net_ambient->smk_known,
+ asize);
else
rc = -EINVAL;
@@ -1195,32 +1578,40 @@ static ssize_t smk_read_ambient(struct file *filp, char __user *buf,
static ssize_t smk_write_ambient(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char in[SMK_LABELLEN];
+ struct smack_known *skp;
char *oldambient;
- char *smack;
+ char *data;
+ int rc = count;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
- if (count >= SMK_LABELLEN)
- return -EINVAL;
+ data = kzalloc(count + 1, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
- if (copy_from_user(in, buf, count) != 0)
- return -EFAULT;
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto out;
+ }
- smack = smk_import(in, count);
- if (smack == NULL)
- return -EINVAL;
+ skp = smk_import_entry(data, count);
+ if (skp == NULL) {
+ rc = -EINVAL;
+ goto out;
+ }
mutex_lock(&smack_ambient_lock);
- oldambient = smack_net_ambient;
- smack_net_ambient = smack;
+ oldambient = smack_net_ambient->smk_known;
+ smack_net_ambient = skp;
smk_unlbl_ambient(oldambient);
mutex_unlock(&smack_ambient_lock);
- return count;
+out:
+ kfree(data);
+ return rc;
}
static const struct file_operations smk_ambient_ops = {
@@ -1230,7 +1621,7 @@ static const struct file_operations smk_ambient_ops = {
};
/**
- * smk_read_onlycap - read() for /smack/onlycap
+ * smk_read_onlycap - read() for smackfs/onlycap
* @filp: file pointer, not actually used
* @buf: where to put the result
* @cn: maximum to send along
@@ -1249,7 +1640,7 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
return 0;
if (smack_onlycap != NULL)
- smack = smack_onlycap;
+ smack = smack_onlycap->smk_known;
asize = strlen(smack) + 1;
@@ -1260,7 +1651,7 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
}
/**
- * smk_write_onlycap - write() for /smack/onlycap
+ * smk_write_onlycap - write() for smackfs/onlycap
* @file: file pointer, not actually used
* @buf: where to get the data from
* @count: bytes sent
@@ -1271,10 +1662,11 @@ static ssize_t smk_read_onlycap(struct file *filp, char __user *buf,
static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- char in[SMK_LABELLEN];
- char *sp = smk_of_task(current->cred->security);
+ char *data;
+ struct smack_known *skp = smk_of_task(current->cred->security);
+ int rc = count;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
/*
@@ -1282,14 +1674,12 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
* explicitly for clarity. The smk_access() implementation
* would use smk_access(smack_onlycap, MAY_WRITE)
*/
- if (smack_onlycap != NULL && smack_onlycap != sp)
+ if (smack_onlycap != NULL && smack_onlycap != skp)
return -EPERM;
- if (count >= SMK_LABELLEN)
- return -EINVAL;
-
- if (copy_from_user(in, buf, count) != 0)
- return -EFAULT;
+ data = kzalloc(count, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
/*
* Should the null string be passed in unset the onlycap value.
@@ -1297,10 +1687,17 @@ static ssize_t smk_write_onlycap(struct file *file, const char __user *buf,
* smk_import only expects to return NULL for errors. It
* is usually the case that a nullstring or "\n" would be
* bad to pass to smk_import but in fact this is useful here.
+ *
+ * smk_import will also reject a label beginning with '-',
+ * so "-usecapabilities" will also work.
*/
- smack_onlycap = smk_import(in, count);
+ if (copy_from_user(data, buf, count) != 0)
+ rc = -EFAULT;
+ else
+ smack_onlycap = smk_import_entry(data, count);
- return count;
+ kfree(data);
+ return rc;
}
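
Per the comment block above, handing smk_import_entry() something it rejects (an empty string, a bare newline, or anything starting with '-') resets smack_onlycap to NULL. A minimal sketch that clears the restriction using the "-usecapabilities" convention mentioned there; it assumes the path /smack/onlycap from the original comment, and note that the writer needs CAP_MAC_ADMIN and, if onlycap is already set, must run with that label.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /*
     * "-usecapabilities" begins with '-', so smk_import_entry() rejects
     * it and smack_onlycap is reset to NULL, lifting the restriction.
     */
    int main(void)
    {
        const char msg[] = "-usecapabilities";
        int fd = open("/smack/onlycap", O_WRONLY);

        if (fd < 0) {
            perror("open /smack/onlycap");
            return 1;
        }
        if (write(fd, msg, strlen(msg)) < 0)
            perror("write");
        close(fd);
        return 0;
    }
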
static const struct file_operations smk_onlycap_ops = {
@@ -1347,7 +1744,7 @@ static ssize_t smk_write_logging(struct file *file, const char __user *buf,
char temp[32];
int i;
- if (!capable(CAP_MAC_ADMIN))
+ if (!smack_privileged(CAP_MAC_ADMIN))
return -EPERM;
if (count >= sizeof(temp) || count == 0)
@@ -1398,25 +1795,7 @@ static int load_self_seq_show(struct seq_file *s, void *v)
struct smack_rule *srp =
list_entry(list, struct smack_rule, list);
- seq_printf(s, "%s %s", (char *)srp->smk_subject,
- (char *)srp->smk_object);
-
- seq_putc(s, ' ');
-
- if (srp->smk_access & MAY_READ)
- seq_putc(s, 'r');
- if (srp->smk_access & MAY_WRITE)
- seq_putc(s, 'w');
- if (srp->smk_access & MAY_EXEC)
- seq_putc(s, 'x');
- if (srp->smk_access & MAY_APPEND)
- seq_putc(s, 'a');
- if (srp->smk_access & MAY_TRANSMUTE)
- seq_putc(s, 't');
- if (srp->smk_access == 0)
- seq_putc(s, '-');
-
- seq_putc(s, '\n');
+ smk_rule_show(s, srp, SMK_LABELLEN);
return 0;
}
@@ -1430,7 +1809,7 @@ static const struct seq_operations load_self_seq_ops = {
/**
- * smk_open_load_self - open() for /smack/load-self
+ * smk_open_load_self - open() for /smack/load-self2
* @inode: inode structure representing file
* @file: "load" file pointer
*
@@ -1454,8 +1833,8 @@ static ssize_t smk_write_load_self(struct file *file, const char __user *buf,
{
struct task_smack *tsp = current_security();
- return smk_write_load_list(file, buf, count, ppos, &tsp->smk_rules,
- &tsp->smk_rules_lock);
+ return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
+ &tsp->smk_rules_lock, SMK_FIXED24_FMT);
}
static const struct file_operations smk_load_self_ops = {
@@ -1467,16 +1846,16 @@ static const struct file_operations smk_load_self_ops = {
};
/**
- * smk_write_access - handle access check transaction
+ * smk_user_access - handle access check transaction
* @file: file pointer
* @buf: data from user space
* @count: bytes sent
* @ppos: where to start - must be 0
*/
-static ssize_t smk_write_access(struct file *file, const char __user *buf,
- size_t count, loff_t *ppos)
+static ssize_t smk_user_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos, int format)
{
- struct smack_rule rule;
+ struct smack_parsed_rule rule;
char *data;
int res;
@@ -1484,16 +1863,44 @@ static ssize_t smk_write_access(struct file *file, const char __user *buf,
if (IS_ERR(data))
return PTR_ERR(data);
- if (count < SMK_LOADLEN || smk_parse_rule(data, &rule, 0))
+ if (format == SMK_FIXED24_FMT) {
+ if (count < SMK_LOADLEN)
+ return -EINVAL;
+ res = smk_parse_rule(data, &rule, 0);
+ } else {
+ /*
+ * simple_transaction_get() returns null-terminated data
+ */
+ res = smk_parse_long_rule(data, &rule, 0, 3);
+ }
+
+ if (res >= 0)
+ res = smk_access(rule.smk_subject, rule.smk_object,
+ rule.smk_access1, NULL);
+ else if (res != -ENOENT)
return -EINVAL;
- res = smk_access(rule.smk_subject, rule.smk_object, rule.smk_access,
- NULL);
data[0] = res == 0 ? '1' : '0';
data[1] = '\0';
simple_transaction_set(file, 2);
- return SMK_LOADLEN;
+
+ if (format == SMK_FIXED24_FMT)
+ return SMK_LOADLEN;
+ return count;
+}
+
+/**
+ * smk_write_access - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_user_access(file, buf, count, ppos, SMK_FIXED24_FMT);
}
static const struct file_operations smk_access_ops = {
@@ -1503,13 +1910,418 @@ static const struct file_operations smk_access_ops = {
.llseek = generic_file_llseek,
};
+
+/*
+ * Seq_file read operations for /smack/load2
+ */
+
+static int load2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_master_list *smlp =
+ list_entry(list, struct smack_master_list, list);
+
+ smk_rule_show(s, smlp->smk_rule, SMK_LONGLABEL);
+
+ return 0;
+}
+
+static const struct seq_operations load2_seq_ops = {
+ .start = load2_seq_start,
+ .next = load2_seq_next,
+ .show = load2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_load2 - open() for /smack/load2
+ * @inode: inode structure representing file
+ * @file: "load2" file pointer
+ *
+ * For reading, use load2_seq_* seq_file reading operations.
+ */
+static int smk_open_load2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load2_seq_ops);
+}
+
+/**
+ * smk_write_load2 - write() for /smack/load2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /*
+ * Must have privilege.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_load2_ops = {
+ .open = smk_open_load2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load2,
+ .release = seq_release,
+};
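
A sketch of feeding the new long-format interface: each rule is "subject object access" terminated by a newline, and the parsing loop in smk_write_rules_list() above accepts several newline-terminated rules in one write. The two label names below are placeholders; the write requires CAP_MAC_ADMIN and assumes smackfs on /smack.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Load two long-label rules in a single write; labels are examples. */
    int main(void)
    {
        const char rules[] =
            "WebServer-Frontend Customer-Records r\n"
            "WebServer-Frontend Audit-Trail rwa\n";
        int fd = open("/smack/load2", O_WRONLY);

        if (fd < 0) {
            perror("open /smack/load2");
            return 1;
        }
        if (write(fd, rules, strlen(rules)) < 0)
            perror("write");
        close(fd);
        return 0;
    }
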
+
+/*
+ * Seq_file read operations for /smack/load-self2
+ */
+
+static void *load_self2_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_seq_start(s, pos, &tsp->smk_rules);
+}
+
+static void *load_self2_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_seq_next(s, v, pos, &tsp->smk_rules);
+}
+
+static int load_self2_seq_show(struct seq_file *s, void *v)
+{
+ struct list_head *list = v;
+ struct smack_rule *srp =
+ list_entry(list, struct smack_rule, list);
+
+ smk_rule_show(s, srp, SMK_LONGLABEL);
+
+ return 0;
+}
+
+static const struct seq_operations load_self2_seq_ops = {
+ .start = load_self2_seq_start,
+ .next = load_self2_seq_next,
+ .show = load_self2_seq_show,
+ .stop = smk_seq_stop,
+};
+
+/**
+ * smk_open_load_self2 - open() for /smack/load-self2
+ * @inode: inode structure representing file
+ * @file: "load-self2" file pointer
+ *
+ * For reading, use load_self2_seq_* seq_file reading operations.
+ */
+static int smk_open_load_self2(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &load_self2_seq_ops);
+}
+
+/**
+ * smk_write_load_self2 - write() for /smack/load-self2
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ *
+ */
+static ssize_t smk_write_load_self2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ struct task_smack *tsp = current_security();
+
+ return smk_write_rules_list(file, buf, count, ppos, &tsp->smk_rules,
+ &tsp->smk_rules_lock, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_load_self2_ops = {
+ .open = smk_open_load_self2,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .write = smk_write_load_self2,
+ .release = seq_release,
+};
+
+/**
+ * smk_write_access2 - handle access check transaction
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_access2(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ return smk_user_access(file, buf, count, ppos, SMK_LONG_FMT);
+}
+
+static const struct file_operations smk_access2_ops = {
+ .write = smk_write_access2,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
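
smk_user_access() turns the written rule into a single-character verdict, '1' when smk_access() grants the requested access and '0' otherwise, readable back on the same descriptor. Below is a sketch of a long-format query; both labels are placeholders, and unknown labels simply come back as '0' via the -ENOENT path above.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Ask whether an (example) subject may "rw" an (example) object. */
    int main(void)
    {
        const char query[] = "Subject-Label Object-Label rw";
        char answer = '0';
        int fd = open("/smack/access2", O_RDWR);

        if (fd < 0) {
            perror("open /smack/access2");
            return 1;
        }
        if (write(fd, query, strlen(query)) < 0 ||
            read(fd, &answer, 1) < 0) {
            perror("access2 transaction");
            close(fd);
            return 1;
        }
        printf("access %s\n", answer == '1' ? "granted" : "denied");
        close(fd);
        return 0;
    }
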
+
/**
- * smk_fill_super - fill the /smackfs superblock
+ * smk_write_revoke_subj - write() for /smack/revoke-subject
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_revoke_subj(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data = NULL;
+ const char *cp = NULL;
+ struct smack_known *skp;
+ struct smack_rule *sp;
+ struct list_head *rule_list;
+ struct mutex *rule_lock;
+ int rc = count;
+
+ if (*ppos != 0)
+ return -EINVAL;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (count == 0 || count > SMK_LONGLABEL)
+ return -EINVAL;
+
+ data = kzalloc(count, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0) {
+ rc = -EFAULT;
+ goto free_out;
+ }
+
+ cp = smk_parse_smack(data, count);
+ if (cp == NULL) {
+ rc = -EINVAL;
+ goto free_out;
+ }
+
+ skp = smk_find_entry(cp);
+ if (skp == NULL)
+ goto free_out;
+
+ rule_list = &skp->smk_rules;
+ rule_lock = &skp->smk_rules_lock;
+
+ mutex_lock(rule_lock);
+
+ list_for_each_entry_rcu(sp, rule_list, list)
+ sp->smk_access = 0;
+
+ mutex_unlock(rule_lock);
+
+free_out:
+ kfree(data);
+ kfree(cp);
+ return rc;
+}
+
+static const struct file_operations smk_revoke_subj_ops = {
+ .write = smk_write_revoke_subj,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
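
smk_write_revoke_subj() walks the subject's rule list and clears smk_access on every entry, so revocation is a one-label write. A sketch that takes the subject label on the command line; CAP_MAC_ADMIN is required and smackfs is assumed on /smack.

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Zero every rule whose subject is the label given as the argument. */
    int main(int argc, char *argv[])
    {
        int fd;

        if (argc != 2) {
            fprintf(stderr, "usage: %s <subject-label>\n", argv[0]);
            return 1;
        }
        fd = open("/smack/revoke-subject", O_WRONLY);
        if (fd < 0) {
            perror("open /smack/revoke-subject");
            return 1;
        }
        if (write(fd, argv[1], strlen(argv[1])) < 0)
            perror("write");
        close(fd);
        return 0;
    }
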
+
+static struct kset *smackfs_kset;
+/**
+ * smk_init_sysfs - initialize /sys/fs/smackfs
+ *
+ */
+static int smk_init_sysfs(void)
+{
+ smackfs_kset = kset_create_and_add("smackfs", NULL, fs_kobj);
+ if (!smackfs_kset)
+ return -ENOMEM;
+ return 0;
+}
+
+/**
+ * smk_write_change_rule - write() for /smack/change-rule
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_change_rule(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ /*
+ * Must have privilege.
+ */
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ return smk_write_rules_list(file, buf, count, ppos, NULL, NULL,
+ SMK_CHANGE_FMT);
+}
+
+static const struct file_operations smk_change_rule_ops = {
+ .write = smk_write_change_rule,
+ .read = simple_transaction_read,
+ .release = simple_transaction_release,
+ .llseek = generic_file_llseek,
+};
+
+/**
+ * smk_read_syslog - read() for smackfs/syslog
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @cn: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_syslog(struct file *filp, char __user *buf,
+ size_t cn, loff_t *ppos)
+{
+ struct smack_known *skp;
+ ssize_t rc = -EINVAL;
+ int asize;
+
+ if (*ppos != 0)
+ return 0;
+
+ if (smack_syslog_label == NULL)
+ skp = &smack_known_star;
+ else
+ skp = smack_syslog_label;
+
+ asize = strlen(skp->smk_known) + 1;
+
+ if (cn >= asize)
+ rc = simple_read_from_buffer(buf, cn, ppos, skp->smk_known,
+ asize);
+
+ return rc;
+}
+
+/**
+ * smk_write_syslog - write() for smackfs/syslog
+ * @file: file pointer, not actually used
+ * @buf: where to get the data from
+ * @count: bytes sent
+ * @ppos: where to start
+ *
+ * Returns number of bytes written or error code, as appropriate
+ */
+static ssize_t smk_write_syslog(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char *data;
+ struct smack_known *skp;
+ int rc = count;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ data = kzalloc(count, GFP_KERNEL);
+ if (data == NULL)
+ return -ENOMEM;
+
+ if (copy_from_user(data, buf, count) != 0)
+ rc = -EFAULT;
+ else {
+ skp = smk_import_entry(data, count);
+ if (skp == NULL)
+ rc = -EINVAL;
+ else
+ smack_syslog_label = smk_import_entry(data, count);
+ }
+
+ kfree(data);
+ return rc;
+}
+
+static const struct file_operations smk_syslog_ops = {
+ .read = smk_read_syslog,
+ .write = smk_write_syslog,
+ .llseek = default_llseek,
+};
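
Until a label is written, smk_read_syslog() reports smack_known_star, so reading the file back is an easy way to check whether the restriction is active. A small sketch, assuming the file sits at /smack/syslog on the usual smackfs mount:

    #include <stdio.h>

    /* Print the syslog label; '*' is reported when none has been set. */
    int main(void)
    {
        char label[256] = "";
        FILE *fp = fopen("/smack/syslog", "r");

        if (fp == NULL) {
            perror("fopen /smack/syslog");
            return 1;
        }
        if (fgets(label, sizeof(label), fp) != NULL)
            printf("syslog label: %s\n", label);
        fclose(fp);
        return 0;
    }
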
+
+
+/**
+ * smk_read_ptrace - read() for /smack/ptrace
+ * @filp: file pointer, not actually used
+ * @buf: where to put the result
+ * @count: maximum to send along
+ * @ppos: where to start
+ *
+ * Returns number of bytes read or error code, as appropriate
+ */
+static ssize_t smk_read_ptrace(struct file *filp, char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[32];
+ ssize_t rc;
+
+ if (*ppos != 0)
+ return 0;
+
+ sprintf(temp, "%d\n", smack_ptrace_rule);
+ rc = simple_read_from_buffer(buf, count, ppos, temp, strlen(temp));
+ return rc;
+}
+
+/**
+ * smk_write_ptrace - write() for /smack/ptrace
+ * @file: file pointer
+ * @buf: data from user space
+ * @count: bytes sent
+ * @ppos: where to start - must be 0
+ */
+static ssize_t smk_write_ptrace(struct file *file, const char __user *buf,
+ size_t count, loff_t *ppos)
+{
+ char temp[32];
+ int i;
+
+ if (!smack_privileged(CAP_MAC_ADMIN))
+ return -EPERM;
+
+ if (*ppos != 0 || count >= sizeof(temp) || count == 0)
+ return -EINVAL;
+
+ if (copy_from_user(temp, buf, count) != 0)
+ return -EFAULT;
+
+ temp[count] = '\0';
+
+ if (sscanf(temp, "%d", &i) != 1)
+ return -EINVAL;
+ if (i < SMACK_PTRACE_DEFAULT || i > SMACK_PTRACE_MAX)
+ return -EINVAL;
+ smack_ptrace_rule = i;
+
+ return count;
+}
+
+static const struct file_operations smk_ptrace_ops = {
+ .write = smk_write_ptrace,
+ .read = smk_read_ptrace,
+ .llseek = default_llseek,
+};
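
smk_read_ptrace() prints smack_ptrace_rule as a plain decimal, and smk_write_ptrace() only accepts values between SMACK_PTRACE_DEFAULT and SMACK_PTRACE_MAX, both defined in a header outside this excerpt. A sketch that merely reports the current setting, assuming smackfs on /smack:

    #include <stdio.h>

    /* Report the current /smack/ptrace rule as a decimal value. */
    int main(void)
    {
        int rule;
        FILE *fp = fopen("/smack/ptrace", "r");

        if (fp == NULL) {
            perror("fopen /smack/ptrace");
            return 1;
        }
        if (fscanf(fp, "%d", &rule) == 1)
            printf("ptrace rule: %d\n", rule);
        fclose(fp);
        return 0;
    }
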
+
+/**
+ * smk_fill_super - fill the smackfs superblock
* @sb: the empty superblock
* @data: unused
* @silent: unused
*
- * Fill in the well known entries for /smack
+ * Fill in the well known entries for the smack filesystem
*
* Returns 0 on success, an error code on failure
*/
@@ -1539,6 +2351,25 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
"load-self", &smk_load_self_ops, S_IRUGO|S_IWUGO},
[SMK_ACCESSES] = {
"access", &smk_access_ops, S_IRUGO|S_IWUGO},
+ [SMK_MAPPED] = {
+ "mapped", &smk_mapped_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD2] = {
+ "load2", &smk_load2_ops, S_IRUGO|S_IWUSR},
+ [SMK_LOAD_SELF2] = {
+ "load-self2", &smk_load_self2_ops, S_IRUGO|S_IWUGO},
+ [SMK_ACCESS2] = {
+ "access2", &smk_access2_ops, S_IRUGO|S_IWUGO},
+ [SMK_CIPSO2] = {
+ "cipso2", &smk_cipso2_ops, S_IRUGO|S_IWUSR},
+ [SMK_REVOKE_SUBJ] = {
+ "revoke-subject", &smk_revoke_subj_ops,
+ S_IRUGO|S_IWUSR},
+ [SMK_CHANGE_RULE] = {
+ "change-rule", &smk_change_rule_ops, S_IRUGO|S_IWUSR},
+ [SMK_SYSLOG] = {
+ "syslog", &smk_syslog_ops, S_IRUGO|S_IWUSR},
+ [SMK_PTRACE] = {
+ "ptrace", &smk_ptrace_ops, S_IRUGO|S_IWUSR},
/* last one */
{""}
};
@@ -1551,7 +2382,6 @@ static int smk_fill_super(struct super_block *sb, void *data, int silent)
}
root_inode = sb->s_root->d_inode;
- root_inode->i_security = new_inode_smack(smack_known_floor.smk_known);
return 0;
}
@@ -1581,6 +2411,15 @@ static struct file_system_type smk_fs_type = {
static struct vfsmount *smackfs_mount;
+static int __init smk_preset_netlabel(struct smack_known *skp)
+{
+ skp->smk_netlabel.domain = skp->smk_known;
+ skp->smk_netlabel.flags =
+ NETLBL_SECATTR_DOMAIN | NETLBL_SECATTR_MLS_LVL;
+ return smk_netlbl_mls(smack_cipso_direct, skp->smk_known,
+ &skp->smk_netlabel, strlen(skp->smk_known));
+}
+
/**
* init_smk_fs - get the smackfs superblock
*
@@ -1597,10 +2436,15 @@ static struct vfsmount *smackfs_mount;
static int __init init_smk_fs(void)
{
int err;
+ int rc;
if (!security_module_enable(&smack_ops))
return 0;
+ err = smk_init_sysfs();
+ if (err)
+ printk(KERN_ERR "smackfs: sysfs mountpoint problem.\n");
+
err = register_filesystem(&smk_fs_type);
if (!err) {
smackfs_mount = kern_mount(&smk_fs_type);
@@ -1614,19 +2458,24 @@ static int __init init_smk_fs(void)
smk_cipso_doi();
smk_unlbl_ambient(NULL);
- mutex_init(&smack_known_floor.smk_rules_lock);
- mutex_init(&smack_known_hat.smk_rules_lock);
- mutex_init(&smack_known_huh.smk_rules_lock);
- mutex_init(&smack_known_invalid.smk_rules_lock);
- mutex_init(&smack_known_star.smk_rules_lock);
- mutex_init(&smack_known_web.smk_rules_lock);
-
- INIT_LIST_HEAD(&smack_known_floor.smk_rules);
- INIT_LIST_HEAD(&smack_known_hat.smk_rules);
- INIT_LIST_HEAD(&smack_known_huh.smk_rules);
- INIT_LIST_HEAD(&smack_known_invalid.smk_rules);
- INIT_LIST_HEAD(&smack_known_star.smk_rules);
- INIT_LIST_HEAD(&smack_known_web.smk_rules);
+ rc = smk_preset_netlabel(&smack_known_floor);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_hat);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_huh);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_invalid);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_star);
+ if (err == 0 && rc < 0)
+ err = rc;
+ rc = smk_preset_netlabel(&smack_known_web);
+ if (err == 0 && rc < 0)
+ err = rc;
return err;
}
diff --git a/security/tomoyo/audit.c b/security/tomoyo/audit.c
index 5ca47ea3049..c1b00375c9a 100644
--- a/security/tomoyo/audit.c
+++ b/security/tomoyo/audit.c
@@ -168,9 +168,14 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
stamp.day, stamp.hour, stamp.min, stamp.sec, r->profile,
tomoyo_mode[r->mode], tomoyo_yesno(r->granted), gpid,
tomoyo_sys_getpid(), tomoyo_sys_getppid(),
- current_uid(), current_gid(), current_euid(),
- current_egid(), current_suid(), current_sgid(),
- current_fsuid(), current_fsgid());
+ from_kuid(&init_user_ns, current_uid()),
+ from_kgid(&init_user_ns, current_gid()),
+ from_kuid(&init_user_ns, current_euid()),
+ from_kgid(&init_user_ns, current_egid()),
+ from_kuid(&init_user_ns, current_suid()),
+ from_kgid(&init_user_ns, current_sgid()),
+ from_kuid(&init_user_ns, current_fsuid()),
+ from_kgid(&init_user_ns, current_fsgid()));
if (!obj)
goto no_obj_info;
if (!obj->validate_done) {
@@ -191,15 +196,19 @@ static char *tomoyo_print_header(struct tomoyo_request_info *r)
tomoyo_buffer_len - 1 - pos,
" path%u.parent={ uid=%u gid=%u "
"ino=%lu perm=0%o }", (i >> 1) + 1,
- stat->uid, stat->gid, (unsigned long)
- stat->ino, stat->mode & S_IALLUGO);
+ from_kuid(&init_user_ns, stat->uid),
+ from_kgid(&init_user_ns, stat->gid),
+ (unsigned long)stat->ino,
+ stat->mode & S_IALLUGO);
continue;
}
pos += snprintf(buffer + pos, tomoyo_buffer_len - 1 - pos,
" path%u={ uid=%u gid=%u ino=%lu major=%u"
" minor=%u perm=0%o type=%s", (i >> 1) + 1,
- stat->uid, stat->gid, (unsigned long)
- stat->ino, MAJOR(dev), MINOR(dev),
+ from_kuid(&init_user_ns, stat->uid),
+ from_kgid(&init_user_ns, stat->gid),
+ (unsigned long)stat->ino,
+ MAJOR(dev), MINOR(dev),
mode & S_IALLUGO, tomoyo_filetype(mode));
if (S_ISCHR(mode) || S_ISBLK(mode)) {
dev = stat->rdev;
@@ -446,11 +455,11 @@ void tomoyo_read_log(struct tomoyo_io_buffer *head)
* tomoyo_poll_log - Wait for an audit log.
*
* @file: Pointer to "struct file".
- * @wait: Pointer to "poll_table".
+ * @wait: Pointer to "poll_table". May be NULL.
*
* Returns POLLIN | POLLRDNORM when ready to read an audit log.
*/
-int tomoyo_poll_log(struct file *file, poll_table *wait)
+unsigned int tomoyo_poll_log(struct file *file, poll_table *wait)
{
if (tomoyo_log_count)
return POLLIN | POLLRDNORM;
diff --git a/security/tomoyo/common.c b/security/tomoyo/common.c
index c47d3ce6c73..283862aebdc 100644
--- a/security/tomoyo/common.c
+++ b/security/tomoyo/common.c
@@ -850,14 +850,9 @@ static int tomoyo_update_manager_entry(const char *manager,
policy_list[TOMOYO_ID_MANAGER],
};
int error = is_delete ? -ENOENT : -ENOMEM;
- if (tomoyo_domain_def(manager)) {
- if (!tomoyo_correct_domain(manager))
- return -EINVAL;
- e.is_domain = true;
- } else {
- if (!tomoyo_correct_path(manager))
- return -EINVAL;
- }
+ if (!tomoyo_correct_domain(manager) &&
+ !tomoyo_correct_word(manager))
+ return -EINVAL;
e.manager = tomoyo_get_name(manager);
if (e.manager) {
error = tomoyo_update_policy(&e.head, sizeof(e), &param,
@@ -930,25 +925,18 @@ static bool tomoyo_manager(void)
if (!tomoyo_policy_loaded)
return true;
- if (!tomoyo_manage_by_non_root && (task->cred->uid || task->cred->euid))
+ if (!tomoyo_manage_by_non_root &&
+ (!uid_eq(task->cred->uid, GLOBAL_ROOT_UID) ||
+ !uid_eq(task->cred->euid, GLOBAL_ROOT_UID)))
return false;
- list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.
- policy_list[TOMOYO_ID_MANAGER], head.list) {
- if (!ptr->head.is_deleted && ptr->is_domain
- && !tomoyo_pathcmp(domainname, ptr->manager)) {
- found = true;
- break;
- }
- }
- if (found)
- return true;
exe = tomoyo_get_exe();
if (!exe)
return false;
list_for_each_entry_rcu(ptr, &tomoyo_kernel_namespace.
policy_list[TOMOYO_ID_MANAGER], head.list) {
- if (!ptr->head.is_deleted && !ptr->is_domain
- && !strcmp(exe, ptr->manager->name)) {
+ if (!ptr->head.is_deleted &&
+ (!tomoyo_pathcmp(domainname, ptr->manager) ||
+ !strcmp(exe, ptr->manager->name))) {
found = true;
break;
}
@@ -1069,7 +1057,7 @@ static int tomoyo_write_task(struct tomoyo_acl_param *param)
*
* @domainname: The name of domain.
*
- * Returns 0.
+ * Returns 0 on success, negative value otherwise.
*
* Caller holds tomoyo_read_lock().
*/
@@ -1081,7 +1069,7 @@ static int tomoyo_delete_domain(char *domainname)
name.name = domainname;
tomoyo_fill_path_info(&name);
if (mutex_lock_interruptible(&tomoyo_policy_lock))
- return 0;
+ return -EINTR;
/* Is there an active domain? */
list_for_each_entry_rcu(domain, &tomoyo_domain_list, list) {
/* Never delete tomoyo_kernel_domain */
@@ -1164,15 +1152,16 @@ static int tomoyo_write_domain(struct tomoyo_io_buffer *head)
bool is_select = !is_delete && tomoyo_str_starts(&data, "select ");
unsigned int profile;
if (*data == '<') {
+ int ret = 0;
domain = NULL;
if (is_delete)
- tomoyo_delete_domain(data);
+ ret = tomoyo_delete_domain(data);
else if (is_select)
domain = tomoyo_find_domain(data);
else
domain = tomoyo_assign_domain(data, false);
head->w.domain = domain;
- return 0;
+ return ret;
}
if (!domain)
return -EINVAL;
@@ -2111,7 +2100,7 @@ static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
struct tomoyo_domain_info *domain = NULL;
spin_lock(&tomoyo_query_list_lock);
list_for_each_entry(ptr, &tomoyo_query_list, list) {
- if (ptr->serial != serial || ptr->answer)
+ if (ptr->serial != serial)
continue;
domain = ptr->domain;
break;
@@ -2130,28 +2119,13 @@ static struct tomoyo_domain_info *tomoyo_find_domain_by_qid
*
* Waits for access requests which violated policy in enforcing mode.
*/
-static int tomoyo_poll_query(struct file *file, poll_table *wait)
+static unsigned int tomoyo_poll_query(struct file *file, poll_table *wait)
{
- struct list_head *tmp;
- bool found = false;
- u8 i;
- for (i = 0; i < 2; i++) {
- spin_lock(&tomoyo_query_list_lock);
- list_for_each(tmp, &tomoyo_query_list) {
- struct tomoyo_query *ptr =
- list_entry(tmp, typeof(*ptr), list);
- if (ptr->answer)
- continue;
- found = true;
- break;
- }
- spin_unlock(&tomoyo_query_list_lock);
- if (found)
- return POLLIN | POLLRDNORM;
- if (i)
- break;
- poll_wait(file, &tomoyo_query_wait, wait);
- }
+ if (!list_empty(&tomoyo_query_list))
+ return POLLIN | POLLRDNORM;
+ poll_wait(file, &tomoyo_query_wait, wait);
+ if (!list_empty(&tomoyo_query_list))
+ return POLLIN | POLLRDNORM;
return 0;
}
@@ -2175,8 +2149,6 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head)
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
- if (ptr->answer)
- continue;
if (pos++ != head->r.query_index)
continue;
len = ptr->query_len;
@@ -2194,8 +2166,6 @@ static void tomoyo_read_query(struct tomoyo_io_buffer *head)
spin_lock(&tomoyo_query_list_lock);
list_for_each(tmp, &tomoyo_query_list) {
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
- if (ptr->answer)
- continue;
if (pos++ != head->r.query_index)
continue;
/*
@@ -2243,8 +2213,10 @@ static int tomoyo_write_answer(struct tomoyo_io_buffer *head)
struct tomoyo_query *ptr = list_entry(tmp, typeof(*ptr), list);
if (ptr->serial != serial)
continue;
- if (!ptr->answer)
- ptr->answer = answer;
+ ptr->answer = answer;
+ /* Remove from tomoyo_query_list. */
+ if (ptr->answer)
+ list_del_init(&ptr->list);
break;
}
spin_unlock(&tomoyo_query_list_lock);
@@ -2477,18 +2449,17 @@ int tomoyo_open_control(const u8 type, struct file *file)
* tomoyo_poll_control - poll() for /sys/kernel/security/tomoyo/ interface.
*
* @file: Pointer to "struct file".
- * @wait: Pointer to "poll_table".
+ * @wait: Pointer to "poll_table". May be NULL.
*
- * Waits for read readiness.
- * /sys/kernel/security/tomoyo/query is handled by /usr/sbin/tomoyo-queryd and
- * /sys/kernel/security/tomoyo/audit is handled by /usr/sbin/tomoyo-auditd.
+ * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
+ * POLLOUT | POLLWRNORM otherwise.
*/
-int tomoyo_poll_control(struct file *file, poll_table *wait)
+unsigned int tomoyo_poll_control(struct file *file, poll_table *wait)
{
struct tomoyo_io_buffer *head = file->private_data;
- if (!head->poll)
- return -ENOSYS;
- return head->poll(file, wait);
+ if (head->poll)
+ return head->poll(file, wait) | POLLOUT | POLLWRNORM;
+ return POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
}
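
With tomoyo_poll_query() simplified as above, a monitoring daemon in the style of the tomoyo-queryd mentioned in the removed comment only needs to poll for POLLIN, since writability is now always reported. A rough sketch, assuming the caller is permitted to open the query interface:

    #include <fcntl.h>
    #include <poll.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Block until an access-violation query is ready to be read. */
    int main(void)
    {
        struct pollfd pfd;

        pfd.fd = open("/sys/kernel/security/tomoyo/query", O_RDWR);
        if (pfd.fd < 0) {
            perror("open tomoyo/query");
            return 1;
        }
        pfd.events = POLLIN;
        if (poll(&pfd, 1, -1) < 0) {
            perror("poll");
            return 1;
        }
        if (pfd.revents & POLLIN)
            puts("a query is ready to be read");
        close(pfd.fd);
        return 0;
    }
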
/**
@@ -2710,10 +2681,8 @@ out:
* tomoyo_close_control - close() for /sys/kernel/security/tomoyo/ interface.
*
* @head: Pointer to "struct tomoyo_io_buffer".
- *
- * Returns 0.
*/
-int tomoyo_close_control(struct tomoyo_io_buffer *head)
+void tomoyo_close_control(struct tomoyo_io_buffer *head)
{
/*
* If the file is /sys/kernel/security/tomoyo/query , decrement the
@@ -2723,7 +2692,6 @@ int tomoyo_close_control(struct tomoyo_io_buffer *head)
atomic_dec_and_test(&tomoyo_query_observers))
wake_up_all(&tomoyo_answer_wait);
tomoyo_notify_gc(head, false);
- return 0;
}
/**
diff --git a/security/tomoyo/common.h b/security/tomoyo/common.h
index 9512222d558..b897d486201 100644
--- a/security/tomoyo/common.h
+++ b/security/tomoyo/common.h
@@ -561,8 +561,8 @@ struct tomoyo_address_group {
/* Subset of "struct stat". Used by conditional ACL and audit logs. */
struct tomoyo_mini_stat {
- uid_t uid;
- gid_t gid;
+ kuid_t uid;
+ kgid_t gid;
ino_t ino;
umode_t mode;
dev_t dev;
@@ -788,7 +788,7 @@ struct tomoyo_acl_param {
struct tomoyo_io_buffer {
void (*read) (struct tomoyo_io_buffer *);
int (*write) (struct tomoyo_io_buffer *);
- int (*poll) (struct file *file, poll_table *wait);
+ unsigned int (*poll) (struct file *file, poll_table *wait);
/* Exclusive lock for this structure. */
struct mutex io_sem;
char __user *read_user_buf;
@@ -860,7 +860,6 @@ struct tomoyo_aggregator {
/* Structure for policy manager. */
struct tomoyo_manager {
struct tomoyo_acl_head head;
- bool is_domain; /* True if manager is a domainname. */
/* A path to program or a domainname. */
const struct tomoyo_path_info *manager;
};
@@ -959,7 +958,7 @@ const struct tomoyo_path_info *tomoyo_path_matches_group
(const struct tomoyo_path_info *pathname, const struct tomoyo_group *group);
int tomoyo_check_open_permission(struct tomoyo_domain_info *domain,
struct path *path, const int flag);
-int tomoyo_close_control(struct tomoyo_io_buffer *head);
+void tomoyo_close_control(struct tomoyo_io_buffer *head);
int tomoyo_env_perm(struct tomoyo_request_info *r, const char *env);
int tomoyo_execute_permission(struct tomoyo_request_info *r,
const struct tomoyo_path_info *filename);
@@ -971,7 +970,7 @@ int tomoyo_init_request_info(struct tomoyo_request_info *r,
const u8 index);
int tomoyo_mkdev_perm(const u8 operation, struct path *path,
const unsigned int mode, unsigned int dev);
-int tomoyo_mount_permission(char *dev_name, struct path *path,
+int tomoyo_mount_permission(const char *dev_name, struct path *path,
const char *type, unsigned long flags,
void *data_page);
int tomoyo_open_control(const u8 type, struct file *file);
@@ -981,8 +980,8 @@ int tomoyo_path_number_perm(const u8 operation, struct path *path,
unsigned long number);
int tomoyo_path_perm(const u8 operation, struct path *path,
const char *target);
-int tomoyo_poll_control(struct file *file, poll_table *wait);
-int tomoyo_poll_log(struct file *file, poll_table *wait);
+unsigned int tomoyo_poll_control(struct file *file, poll_table *wait);
+unsigned int tomoyo_poll_log(struct file *file, poll_table *wait);
int tomoyo_socket_bind_permission(struct socket *sock, struct sockaddr *addr,
int addr_len);
int tomoyo_socket_connect_permission(struct socket *sock,
diff --git a/security/tomoyo/condition.c b/security/tomoyo/condition.c
index 986330b8c73..63681e8be62 100644
--- a/security/tomoyo/condition.c
+++ b/security/tomoyo/condition.c
@@ -813,28 +813,28 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
unsigned long value = 0;
switch (index) {
case TOMOYO_TASK_UID:
- value = current_uid();
+ value = from_kuid(&init_user_ns, current_uid());
break;
case TOMOYO_TASK_EUID:
- value = current_euid();
+ value = from_kuid(&init_user_ns, current_euid());
break;
case TOMOYO_TASK_SUID:
- value = current_suid();
+ value = from_kuid(&init_user_ns, current_suid());
break;
case TOMOYO_TASK_FSUID:
- value = current_fsuid();
+ value = from_kuid(&init_user_ns, current_fsuid());
break;
case TOMOYO_TASK_GID:
- value = current_gid();
+ value = from_kgid(&init_user_ns, current_gid());
break;
case TOMOYO_TASK_EGID:
- value = current_egid();
+ value = from_kgid(&init_user_ns, current_egid());
break;
case TOMOYO_TASK_SGID:
- value = current_sgid();
+ value = from_kgid(&init_user_ns, current_sgid());
break;
case TOMOYO_TASK_FSGID:
- value = current_fsgid();
+ value = from_kgid(&init_user_ns, current_fsgid());
break;
case TOMOYO_TASK_PID:
value = tomoyo_sys_getpid();
@@ -970,13 +970,13 @@ bool tomoyo_condition(struct tomoyo_request_info *r,
case TOMOYO_PATH2_UID:
case TOMOYO_PATH1_PARENT_UID:
case TOMOYO_PATH2_PARENT_UID:
- value = stat->uid;
+ value = from_kuid(&init_user_ns, stat->uid);
break;
case TOMOYO_PATH1_GID:
case TOMOYO_PATH2_GID:
case TOMOYO_PATH1_PARENT_GID:
case TOMOYO_PATH2_PARENT_GID:
- value = stat->gid;
+ value = from_kgid(&init_user_ns, stat->gid);
break;
case TOMOYO_PATH1_INO:
case TOMOYO_PATH2_INO:
diff --git a/security/tomoyo/domain.c b/security/tomoyo/domain.c
index 9027ac1534a..38651454ed0 100644
--- a/security/tomoyo/domain.c
+++ b/security/tomoyo/domain.c
@@ -886,12 +886,12 @@ bool tomoyo_dump_page(struct linux_binprm *bprm, unsigned long pos,
* But remove_arg_zero() uses kmap_atomic()/kunmap_atomic().
* So do I.
*/
- char *kaddr = kmap_atomic(page, KM_USER0);
+ char *kaddr = kmap_atomic(page);
dump->page = page;
memcpy(dump->data + offset, kaddr + offset,
PAGE_SIZE - offset);
- kunmap_atomic(kaddr, KM_USER0);
+ kunmap_atomic(kaddr);
}
/* Same with put_arg_page(page) in fs/exec.c */
#ifdef CONFIG_MMU
diff --git a/security/tomoyo/load_policy.c b/security/tomoyo/load_policy.c
index 67975405140..078fac0bb4c 100644
--- a/security/tomoyo/load_policy.c
+++ b/security/tomoyo/load_policy.c
@@ -102,7 +102,7 @@ void tomoyo_load_policy(const char *filename)
envp[0] = "HOME=/";
envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
envp[2] = NULL;
- call_usermodehelper(argv[0], argv, envp, 1);
+ call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
tomoyo_check_profile();
}
diff --git a/security/tomoyo/mount.c b/security/tomoyo/mount.c
index bee09d06205..390c646013c 100644
--- a/security/tomoyo/mount.c
+++ b/security/tomoyo/mount.c
@@ -71,7 +71,8 @@ static bool tomoyo_check_mount_acl(struct tomoyo_request_info *r,
*
* Caller holds tomoyo_read_lock().
*/
-static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
+static int tomoyo_mount_acl(struct tomoyo_request_info *r,
+ const char *dev_name,
struct path *dir, const char *type,
unsigned long flags)
{
@@ -183,7 +184,7 @@ static int tomoyo_mount_acl(struct tomoyo_request_info *r, char *dev_name,
*
* Returns 0 on success, negative value otherwise.
*/
-int tomoyo_mount_permission(char *dev_name, struct path *path,
+int tomoyo_mount_permission(const char *dev_name, struct path *path,
const char *type, unsigned long flags,
void *data_page)
{
@@ -199,30 +200,32 @@ int tomoyo_mount_permission(char *dev_name, struct path *path,
if (flags & MS_REMOUNT) {
type = tomoyo_mounts[TOMOYO_MOUNT_REMOUNT];
flags &= ~MS_REMOUNT;
- }
- if (flags & MS_MOVE) {
- type = tomoyo_mounts[TOMOYO_MOUNT_MOVE];
- flags &= ~MS_MOVE;
- }
- if (flags & MS_BIND) {
+ } else if (flags & MS_BIND) {
type = tomoyo_mounts[TOMOYO_MOUNT_BIND];
flags &= ~MS_BIND;
- }
- if (flags & MS_UNBINDABLE) {
- type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE];
- flags &= ~MS_UNBINDABLE;
- }
- if (flags & MS_PRIVATE) {
+ } else if (flags & MS_SHARED) {
+ if (flags & (MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED];
+ flags &= ~MS_SHARED;
+ } else if (flags & MS_PRIVATE) {
+ if (flags & (MS_SHARED | MS_SLAVE | MS_UNBINDABLE))
+ return -EINVAL;
type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_PRIVATE];
flags &= ~MS_PRIVATE;
- }
- if (flags & MS_SLAVE) {
+ } else if (flags & MS_SLAVE) {
+ if (flags & (MS_SHARED | MS_PRIVATE | MS_UNBINDABLE))
+ return -EINVAL;
type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SLAVE];
flags &= ~MS_SLAVE;
- }
- if (flags & MS_SHARED) {
- type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_SHARED];
- flags &= ~MS_SHARED;
+ } else if (flags & MS_UNBINDABLE) {
+ if (flags & (MS_SHARED | MS_PRIVATE | MS_SLAVE))
+ return -EINVAL;
+ type = tomoyo_mounts[TOMOYO_MOUNT_MAKE_UNBINDABLE];
+ flags &= ~MS_UNBINDABLE;
+ } else if (flags & MS_MOVE) {
+ type = tomoyo_mounts[TOMOYO_MOUNT_MOVE];
+ flags &= ~MS_MOVE;
}
if (!type)
type = "<NULL>";
diff --git a/security/tomoyo/realpath.c b/security/tomoyo/realpath.c
index 80a09c37cac..a3386d11942 100644
--- a/security/tomoyo/realpath.c
+++ b/security/tomoyo/realpath.c
@@ -173,7 +173,7 @@ static char *tomoyo_get_local_path(struct dentry *dentry, char * const buffer,
* Use filesystem name if filesystem does not support rename()
* operation.
*/
- if (inode->i_op && !inode->i_op->rename)
+ if (!inode->i_op->rename)
goto prepend_filesystem_name;
}
/* Prepend device name. */
@@ -282,7 +282,7 @@ char *tomoyo_realpath_from_path(struct path *path)
* Get local name for filesystems without rename() operation
* or dentry without vfsmount.
*/
- if (!path->mnt || (inode->i_op && !inode->i_op->rename))
+ if (!path->mnt || !inode->i_op->rename)
pos = tomoyo_get_local_path(path->dentry, buf,
buf_len - 1);
/* Get absolute name for the rest. */
diff --git a/security/tomoyo/securityfs_if.c b/security/tomoyo/securityfs_if.c
index 482b2a5f48f..179a955b319 100644
--- a/security/tomoyo/securityfs_if.c
+++ b/security/tomoyo/securityfs_if.c
@@ -135,7 +135,7 @@ static const struct file_operations tomoyo_self_operations = {
*/
static int tomoyo_open(struct inode *inode, struct file *file)
{
- const int key = ((u8 *) file->f_path.dentry->d_inode->i_private)
+ const int key = ((u8 *) file_inode(file)->i_private)
- ((u8 *) NULL);
return tomoyo_open_control(key, file);
}
@@ -143,23 +143,23 @@ static int tomoyo_open(struct inode *inode, struct file *file)
/**
* tomoyo_release - close() for /sys/kernel/security/tomoyo/ interface.
*
- * @inode: Pointer to "struct inode".
* @file: Pointer to "struct file".
*
- * Returns 0 on success, negative value otherwise.
*/
static int tomoyo_release(struct inode *inode, struct file *file)
{
- return tomoyo_close_control(file->private_data);
+ tomoyo_close_control(file->private_data);
+ return 0;
}
/**
* tomoyo_poll - poll() for /sys/kernel/security/tomoyo/ interface.
*
* @file: Pointer to "struct file".
- * @wait: Pointer to "poll_table".
+ * @wait: Pointer to "poll_table". May be NULL.
*
- * Returns 0 on success, negative value otherwise.
+ * Returns POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM if ready to read/write,
+ * POLLOUT | POLLWRNORM otherwise.
*/
static unsigned int tomoyo_poll(struct file *file, poll_table *wait)
{
diff --git a/security/tomoyo/tomoyo.c b/security/tomoyo/tomoyo.c
index 620d37c159a..f0b756e27fe 100644
--- a/security/tomoyo/tomoyo.c
+++ b/security/tomoyo/tomoyo.c
@@ -319,14 +319,14 @@ static int tomoyo_file_fcntl(struct file *file, unsigned int cmd,
}
/**
- * tomoyo_dentry_open - Target for security_dentry_open().
+ * tomoyo_file_open - Target for security_file_open().
*
* @f: Pointer to "struct file".
* @cred: Pointer to "struct cred".
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_dentry_open(struct file *f, const struct cred *cred)
+static int tomoyo_file_open(struct file *f, const struct cred *cred)
{
int flags = f->f_flags;
/* Don't check read permission here if called from do_execve(). */
@@ -373,13 +373,15 @@ static int tomoyo_path_chmod(struct path *path, umode_t mode)
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_path_chown(struct path *path, uid_t uid, gid_t gid)
+static int tomoyo_path_chown(struct path *path, kuid_t uid, kgid_t gid)
{
int error = 0;
- if (uid != (uid_t) -1)
- error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path, uid);
- if (!error && gid != (gid_t) -1)
- error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path, gid);
+ if (uid_valid(uid))
+ error = tomoyo_path_number_perm(TOMOYO_TYPE_CHOWN, path,
+ from_kuid(&init_user_ns, uid));
+ if (!error && gid_valid(gid))
+ error = tomoyo_path_number_perm(TOMOYO_TYPE_CHGRP, path,
+ from_kgid(&init_user_ns, gid));
return error;
}
@@ -406,8 +408,8 @@ static int tomoyo_path_chroot(struct path *path)
*
* Returns 0 on success, negative value otherwise.
*/
-static int tomoyo_sb_mount(char *dev_name, struct path *path,
- char *type, unsigned long flags, void *data)
+static int tomoyo_sb_mount(const char *dev_name, struct path *path,
+ const char *type, unsigned long flags, void *data)
{
return tomoyo_mount_permission(dev_name, path, type, flags, data);
}
@@ -510,7 +512,7 @@ static struct security_operations tomoyo_security_ops = {
.bprm_set_creds = tomoyo_bprm_set_creds,
.bprm_check_security = tomoyo_bprm_check_security,
.file_fcntl = tomoyo_file_fcntl,
- .dentry_open = tomoyo_dentry_open,
+ .file_open = tomoyo_file_open,
.path_truncate = tomoyo_path_truncate,
.path_unlink = tomoyo_path_unlink,
.path_mkdir = tomoyo_path_mkdir,
@@ -534,7 +536,7 @@ static struct security_operations tomoyo_security_ops = {
};
/* Lock for GC. */
-struct srcu_struct tomoyo_ss;
+DEFINE_SRCU(tomoyo_ss);
/**
* tomoyo_init - Register TOMOYO Linux as a LSM module.
@@ -548,8 +550,7 @@ static int __init tomoyo_init(void)
if (!security_module_enable(&tomoyo_security_ops))
return 0;
/* register ourselves with the security framework */
- if (register_security(&tomoyo_security_ops) ||
- init_srcu_struct(&tomoyo_ss))
+ if (register_security(&tomoyo_security_ops))
panic("Failure registering TOMOYO Linux");
printk(KERN_INFO "TOMOYO Linux initialized\n");
cred->security = &tomoyo_kernel_domain;
diff --git a/security/tomoyo/util.c b/security/tomoyo/util.c
index 867558c9833..2952ba576fb 100644
--- a/security/tomoyo/util.c
+++ b/security/tomoyo/util.c
@@ -949,18 +949,13 @@ bool tomoyo_path_matches_pattern(const struct tomoyo_path_info *filename,
const char *tomoyo_get_exe(void)
{
struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
const char *cp = NULL;
if (!mm)
return NULL;
down_read(&mm->mmap_sem);
- for (vma = mm->mmap; vma; vma = vma->vm_next) {
- if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) {
- cp = tomoyo_realpath_from_path(&vma->vm_file->f_path);
- break;
- }
- }
+ if (mm->exe_file)
+ cp = tomoyo_realpath_from_path(&mm->exe_file->f_path);
up_read(&mm->mmap_sem);
return cp;
}
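tomoyo_get_exe() no longer scans the VMA list for VM_EXECUTABLE (a flag that has since been removed); the executable is taken directly from mm->exe_file under mmap_sem. A rough sketch of the same access pattern, using d_path() instead of TOMOYO's realpath helper; the function name and buffer handling are illustrative.

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dcache.h>

/* Illustrative sketch: resolve the path of current's main executable
 * from mm->exe_file while holding mmap_sem for reading. */
static char *sketch_current_exe(char *buf, int buflen)
{
	struct mm_struct *mm = current->mm;
	char *path = NULL;

	if (!mm)
		return NULL;
	down_read(&mm->mmap_sem);
	if (mm->exe_file)
		path = d_path(&mm->exe_file->f_path, buf, buflen);
	up_read(&mm->mmap_sem);
	return path;
}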
diff --git a/security/yama/Kconfig b/security/yama/Kconfig
new file mode 100644
index 00000000000..20ef5143c0c
--- /dev/null
+++ b/security/yama/Kconfig
@@ -0,0 +1,21 @@
+config SECURITY_YAMA
+ bool "Yama support"
+ depends on SECURITY
+ select SECURITYFS
+ select SECURITY_PATH
+ default n
+ help
+ This selects Yama, which adds coarse-grained, system-wide security
+ restrictions on top of the regular Linux discretionary access
+ controls. Currently available is ptrace scope restriction.
+ Further information can be found in Documentation/security/Yama.txt.
+
+ If you are unsure how to answer this question, answer N.
+
+config SECURITY_YAMA_STACKED
+ bool "Yama stacked with other LSMs"
+ depends on SECURITY_YAMA
+ default n
+ help
+ When Yama is built into the kernel, force it to stack with the
+ selected primary LSM.
diff --git a/security/yama/Makefile b/security/yama/Makefile
new file mode 100644
index 00000000000..8b5e0658845
--- /dev/null
+++ b/security/yama/Makefile
@@ -0,0 +1,3 @@
+obj-$(CONFIG_SECURITY_YAMA) := yama.o
+
+yama-y := yama_lsm.o
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
new file mode 100644
index 00000000000..13c88fbcf03
--- /dev/null
+++ b/security/yama/yama_lsm.c
@@ -0,0 +1,443 @@
+/*
+ * Yama Linux Security Module
+ *
+ * Author: Kees Cook <keescook@chromium.org>
+ *
+ * Copyright (C) 2010 Canonical, Ltd.
+ * Copyright (C) 2011 The Chromium OS Authors.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2, as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/security.h>
+#include <linux/sysctl.h>
+#include <linux/ptrace.h>
+#include <linux/prctl.h>
+#include <linux/ratelimit.h>
+#include <linux/workqueue.h>
+
+#define YAMA_SCOPE_DISABLED 0
+#define YAMA_SCOPE_RELATIONAL 1
+#define YAMA_SCOPE_CAPABILITY 2
+#define YAMA_SCOPE_NO_ATTACH 3
+
+static int ptrace_scope = YAMA_SCOPE_RELATIONAL;
+
+/* describe a ptrace relationship for potential exception */
+struct ptrace_relation {
+ struct task_struct *tracer;
+ struct task_struct *tracee;
+ bool invalid;
+ struct list_head node;
+ struct rcu_head rcu;
+};
+
+static LIST_HEAD(ptracer_relations);
+static DEFINE_SPINLOCK(ptracer_relations_lock);
+
+static void yama_relation_cleanup(struct work_struct *work);
+static DECLARE_WORK(yama_relation_work, yama_relation_cleanup);
+
+/**
+ * yama_relation_cleanup - remove invalid entries from the relation list
+ * @work: work_struct the cleanup was scheduled from (unused)
+ */
+static void yama_relation_cleanup(struct work_struct *work)
+{
+ struct ptrace_relation *relation;
+
+ spin_lock(&ptracer_relations_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid) {
+ list_del_rcu(&relation->node);
+ kfree_rcu(relation, rcu);
+ }
+ }
+ rcu_read_unlock();
+ spin_unlock(&ptracer_relations_lock);
+}
+
+/**
+ * yama_ptracer_add - add/replace an exception for this tracer/tracee pair
+ * @tracer: the task_struct of the process doing the ptrace
+ * @tracee: the task_struct of the process to be ptraced
+ *
+ * Each tracee can have, at most, one tracer registered. Each time this
+ * is called, the prior registered tracer will be replaced for the tracee.
+ *
+ * Returns 0 if relationship was added, -ve on error.
+ */
+static int yama_ptracer_add(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ struct ptrace_relation *relation, *added;
+
+ added = kmalloc(sizeof(*added), GFP_KERNEL);
+ if (!added)
+ return -ENOMEM;
+
+ added->tracee = tracee;
+ added->tracer = tracer;
+ added->invalid = false;
+
+ spin_lock(&ptracer_relations_lock);
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee) {
+ list_replace_rcu(&relation->node, &added->node);
+ kfree_rcu(relation, rcu);
+ goto out;
+ }
+ }
+
+ list_add_rcu(&added->node, &ptracer_relations);
+
+out:
+ rcu_read_unlock();
+ spin_unlock(&ptracer_relations_lock);
+ return 0;
+}
+
+/**
+ * yama_ptracer_del - remove exceptions related to the given tasks
+ * @tracer: remove any relation where tracer task matches
+ * @tracee: remove any relation where tracee task matches
+ */
+static void yama_ptracer_del(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ struct ptrace_relation *relation;
+ bool marked = false;
+
+ rcu_read_lock();
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee ||
+ (tracer && relation->tracer == tracer)) {
+ relation->invalid = true;
+ marked = true;
+ }
+ }
+ rcu_read_unlock();
+
+ if (marked)
+ schedule_work(&yama_relation_work);
+}
+
+/**
+ * yama_task_free - remove any ptracer exceptions that reference the freed task
+ * @task: task being removed
+ */
+void yama_task_free(struct task_struct *task)
+{
+ yama_ptracer_del(task, task);
+}
+
+/**
+ * yama_task_prctl - check for Yama-specific prctl operations
+ * @option: operation
+ * @arg2: argument
+ * @arg3: argument
+ * @arg4: argument
+ * @arg5: argument
+ *
+ * Return 0 on success, -ve on error. -ENOSYS is returned when Yama
+ * does not handle the given option.
+ */
+int yama_task_prctl(int option, unsigned long arg2, unsigned long arg3,
+ unsigned long arg4, unsigned long arg5)
+{
+ int rc;
+ struct task_struct *myself = current;
+
+ rc = cap_task_prctl(option, arg2, arg3, arg4, arg5);
+ if (rc != -ENOSYS)
+ return rc;
+
+ switch (option) {
+ case PR_SET_PTRACER:
+ /* Since a thread can call prctl(), find the group leader
+ * before calling _add() or _del() on it, since we want
+ * process-level granularity of control. The tracer group
+ * leader checking is handled later when walking the ancestry
+ * at the time of PTRACE_ATTACH check.
+ */
+ rcu_read_lock();
+ if (!thread_group_leader(myself))
+ myself = rcu_dereference(myself->group_leader);
+ get_task_struct(myself);
+ rcu_read_unlock();
+
+ if (arg2 == 0) {
+ yama_ptracer_del(NULL, myself);
+ rc = 0;
+ } else if (arg2 == PR_SET_PTRACER_ANY || (int)arg2 == -1) {
+ rc = yama_ptracer_add(NULL, myself);
+ } else {
+ struct task_struct *tracer;
+
+ rcu_read_lock();
+ tracer = find_task_by_vpid(arg2);
+ if (tracer)
+ get_task_struct(tracer);
+ else
+ rc = -EINVAL;
+ rcu_read_unlock();
+
+ if (tracer) {
+ rc = yama_ptracer_add(tracer, myself);
+ put_task_struct(tracer);
+ }
+ }
+
+ put_task_struct(myself);
+ break;
+ }
+
+ return rc;
+}
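From userspace, the exception list above is driven through prctl(PR_SET_PTRACER). Below is a minimal sketch of how a process might allow a specific helper (for example a crash reporter) to attach to it under YAMA_SCOPE_RELATIONAL; the helper name is made up, and the fallback #define is only needed where older headers lack the constant.

#include <sys/prctl.h>
#include <sys/types.h>
#include <stdio.h>

#ifndef PR_SET_PTRACER
#define PR_SET_PTRACER 0x59616d61
#endif

/* Grant @helper permission to ptrace this process; pass 0 to clear the
 * exception, or PR_SET_PTRACER_ANY to allow any process. */
static int allow_tracer(pid_t helper)
{
	if (prctl(PR_SET_PTRACER, (unsigned long)helper, 0, 0, 0) != 0) {
		perror("prctl(PR_SET_PTRACER)");
		return -1;
	}
	return 0;
}

Note that, as the prctl handler above shows, the exception is recorded against the calling process's group leader, and it is dropped again when either side exits, via yama_task_free().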
+
+/**
+ * task_is_descendant - walk up a process family tree looking for a match
+ * @parent: the process to compare against while walking up from child
+ * @child: the process to start from while looking upwards for parent
+ *
+ * Returns 1 if child is a descendant of parent, 0 if not.
+ */
+static int task_is_descendant(struct task_struct *parent,
+ struct task_struct *child)
+{
+ int rc = 0;
+ struct task_struct *walker = child;
+
+ if (!parent || !child)
+ return 0;
+
+ rcu_read_lock();
+ if (!thread_group_leader(parent))
+ parent = rcu_dereference(parent->group_leader);
+ while (walker->pid > 0) {
+ if (!thread_group_leader(walker))
+ walker = rcu_dereference(walker->group_leader);
+ if (walker == parent) {
+ rc = 1;
+ break;
+ }
+ walker = rcu_dereference(walker->real_parent);
+ }
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/**
+ * ptracer_exception_found - tracer registered as exception for this tracee
+ * @tracer: the task_struct of the process attempting ptrace
+ * @tracee: the task_struct of the process to be ptraced
+ *
+ * Returns 1 if @tracee's registered exception names @tracer or an
+ * ancestor of @tracer (or allows any tracer), 0 otherwise.
+ */
+static int ptracer_exception_found(struct task_struct *tracer,
+ struct task_struct *tracee)
+{
+ int rc = 0;
+ struct ptrace_relation *relation;
+ struct task_struct *parent = NULL;
+ bool found = false;
+
+ rcu_read_lock();
+ if (!thread_group_leader(tracee))
+ tracee = rcu_dereference(tracee->group_leader);
+ list_for_each_entry_rcu(relation, &ptracer_relations, node) {
+ if (relation->invalid)
+ continue;
+ if (relation->tracee == tracee) {
+ parent = relation->tracer;
+ found = true;
+ break;
+ }
+ }
+
+ if (found && (parent == NULL || task_is_descendant(parent, tracer)))
+ rc = 1;
+ rcu_read_unlock();
+
+ return rc;
+}
+
+/**
+ * yama_ptrace_access_check - validate PTRACE_ATTACH calls
+ * @child: task that current task is attempting to ptrace
+ * @mode: ptrace attach mode
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+int yama_ptrace_access_check(struct task_struct *child,
+ unsigned int mode)
+{
+ int rc;
+
+ /* If standard caps disallows it, so does Yama. We should
+ * only tighten restrictions further.
+ */
+ rc = cap_ptrace_access_check(child, mode);
+ if (rc)
+ return rc;
+
+ /* require ptrace target be a child of ptracer on attach */
+ if (mode == PTRACE_MODE_ATTACH) {
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_DISABLED:
+ /* No additional restrictions. */
+ break;
+ case YAMA_SCOPE_RELATIONAL:
+ rcu_read_lock();
+ if (!task_is_descendant(current, child) &&
+ !ptracer_exception_found(current, child) &&
+ !ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ rc = -EPERM;
+ rcu_read_unlock();
+ break;
+ case YAMA_SCOPE_CAPABILITY:
+ rcu_read_lock();
+ if (!ns_capable(__task_cred(child)->user_ns, CAP_SYS_PTRACE))
+ rc = -EPERM;
+ rcu_read_unlock();
+ break;
+ case YAMA_SCOPE_NO_ATTACH:
+ default:
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ if (rc) {
+ printk_ratelimited(KERN_NOTICE
+ "ptrace of pid %d was attempted by: %s (pid %d)\n",
+ child->pid, current->comm, current->pid);
+ }
+
+ return rc;
+}
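As seen by a would-be tracer, YAMA_SCOPE_RELATIONAL means that attaching to a process that is neither a descendant nor a registered exception fails with EPERM even when the uids match. A hedged userspace sketch (function name illustrative, error handling trimmed):

#include <sys/ptrace.h>
#include <sys/types.h>
#include <errno.h>
#include <stdio.h>

/* Returns 0 on success; prints a note when Yama (or capabilities)
 * rejects the attach with EPERM. */
static int try_attach(pid_t target)
{
	if (ptrace(PTRACE_ATTACH, target, NULL, NULL) == -1) {
		if (errno == EPERM)
			fprintf(stderr, "attach to %d denied (ptrace_scope?)\n",
				(int)target);
		return -1;
	}
	return 0;
}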
+
+/**
+ * yama_ptrace_traceme - validate PTRACE_TRACEME calls
+ * @parent: task that will become the ptracer of the current task
+ *
+ * Returns 0 if following the ptrace is allowed, -ve on error.
+ */
+int yama_ptrace_traceme(struct task_struct *parent)
+{
+ int rc;
+
+ /* If standard caps disallows it, so does Yama. We should
+ * only tighten restrictions further.
+ */
+ rc = cap_ptrace_traceme(parent);
+ if (rc)
+ return rc;
+
+ /* Only disallow PTRACE_TRACEME on more aggressive settings. */
+ switch (ptrace_scope) {
+ case YAMA_SCOPE_CAPABILITY:
+ if (!has_ns_capability(parent, current_user_ns(), CAP_SYS_PTRACE))
+ rc = -EPERM;
+ break;
+ case YAMA_SCOPE_NO_ATTACH:
+ rc = -EPERM;
+ break;
+ }
+
+ if (rc) {
+ printk_ratelimited(KERN_NOTICE
+ "ptraceme of pid %d was attempted by: %s (pid %d)\n",
+ current->pid, parent->comm, parent->pid);
+ }
+
+ return rc;
+}
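PTRACE_TRACEME, by contrast, is only blocked at the two most aggressive settings. A small sketch of what a forked child would observe; the helper name is illustrative.

#include <sys/ptrace.h>
#include <errno.h>
#include <stdio.h>

/* In the child half of a fork(): request tracing by the parent.
 * Under YAMA_SCOPE_CAPABILITY this fails with EPERM unless the parent
 * has CAP_SYS_PTRACE; under YAMA_SCOPE_NO_ATTACH it always fails. */
static int request_traceme(void)
{
	if (ptrace(PTRACE_TRACEME, 0, NULL, NULL) == -1) {
		if (errno == EPERM)
			fprintf(stderr, "PTRACE_TRACEME denied by ptrace_scope\n");
		return -1;
	}
	return 0;
}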
+
+#ifndef CONFIG_SECURITY_YAMA_STACKED
+static struct security_operations yama_ops = {
+ .name = "yama",
+
+ .ptrace_access_check = yama_ptrace_access_check,
+ .ptrace_traceme = yama_ptrace_traceme,
+ .task_prctl = yama_task_prctl,
+ .task_free = yama_task_free,
+};
+#endif
+
+#ifdef CONFIG_SYSCTL
+static int yama_dointvec_minmax(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+ int rc;
+
+ if (write && !capable(CAP_SYS_PTRACE))
+ return -EPERM;
+
+ rc = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (rc)
+ return rc;
+
+ /* Once the maximum scope has been set, lock the value there so it cannot be lowered again. */
+ if (write && *(int *)table->data == *(int *)table->extra2)
+ table->extra1 = table->extra2;
+
+ return rc;
+}
+
+static int zero;
+static int max_scope = YAMA_SCOPE_NO_ATTACH;
+
+struct ctl_path yama_sysctl_path[] = {
+ { .procname = "kernel", },
+ { .procname = "yama", },
+ { }
+};
+
+static struct ctl_table yama_sysctl_table[] = {
+ {
+ .procname = "ptrace_scope",
+ .data = &ptrace_scope,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = yama_dointvec_minmax,
+ .extra1 = &zero,
+ .extra2 = &max_scope,
+ },
+ { }
+};
+#endif /* CONFIG_SYSCTL */
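For completeness, a sketch of driving the resulting sysctl from userspace. Writes require CAP_SYS_PTRACE, and once the value reaches YAMA_SCOPE_NO_ATTACH (3) the handler above pins the minimum to the maximum, so it cannot be lowered again without a reboot. The helper below is illustrative.

#include <stdio.h>

/* Write a new ptrace scope (0-3) to Yama's sysctl.
 * Returns 0 on success, -1 on failure. */
static int set_ptrace_scope(int scope)
{
	FILE *f = fopen("/proc/sys/kernel/yama/ptrace_scope", "w");

	if (!f)
		return -1;
	if (fprintf(f, "%d\n", scope) < 0) {
		fclose(f);
		return -1;
	}
	return fclose(f) ? -1 : 0;
}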
+
+static __init int yama_init(void)
+{
+#ifndef CONFIG_SECURITY_YAMA_STACKED
+ if (!security_module_enable(&yama_ops))
+ return 0;
+#endif
+
+ printk(KERN_INFO "Yama: becoming mindful.\n");
+
+#ifndef CONFIG_SECURITY_YAMA_STACKED
+ if (register_security(&yama_ops))
+ panic("Yama: kernel registration failed.\n");
+#endif
+
+#ifdef CONFIG_SYSCTL
+ if (!register_sysctl_paths(yama_sysctl_path, yama_sysctl_table))
+ panic("Yama: sysctl registration failed.\n");
+#endif
+
+ return 0;
+}
+
+security_initcall(yama_init);