Diffstat (limited to 'ipc')
-rw-r--r--   ipc/Makefile           |    5
-rw-r--r--   ipc/compat.c           |  275
-rw-r--r--   ipc/compat_mq.c        |   58
-rw-r--r--   ipc/ipc_sysctl.c       |  284
-rw-r--r--   ipc/ipcns_notifier.c   |   92
-rw-r--r--   ipc/mq_sysctl.c        |  124
-rw-r--r--   ipc/mqueue.c           | 1175
-rw-r--r--   ipc/msg.c              | 1126
-rw-r--r--   ipc/msgutil.c          |  151
-rw-r--r--   ipc/namespace.c        |  199
-rw-r--r--   ipc/sem.c              | 2212
-rw-r--r--   ipc/shm.c              | 1283
-rw-r--r--   ipc/syscall.c          |   99
-rw-r--r--   ipc/util.c             | 1057
-rw-r--r--   ipc/util.h             |  198
15 files changed, 5651 insertions(+), 2687 deletions(-)
diff --git a/ipc/Makefile b/ipc/Makefile
index 0a6d626cd79..9075e172e52 100644
--- a/ipc/Makefile
+++ b/ipc/Makefile
@@ -3,7 +3,10 @@
 #
 
 obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o
-obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o
+obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o ipcns_notifier.o syscall.o
+obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o
 obj_mq-$(CONFIG_COMPAT) += compat_mq.o
 obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y)
+obj-$(CONFIG_IPC_NS) += namespace.o
+obj-$(CONFIG_POSIX_MQUEUE_SYSCTL) += mq_sysctl.o
diff --git a/ipc/compat.c b/ipc/compat.c
index 1fe95f6659d..b5ef4f7946d 100644
--- a/ipc/compat.c
+++ b/ipc/compat.c
@@ -21,17 +21,16 @@
  *
  */
 #include <linux/compat.h>
-#include <linux/config.h>
 #include <linux/errno.h>
 #include <linux/highuid.h>
 #include <linux/init.h>
 #include <linux/msg.h>
 #include <linux/shm.h>
-#include <linux/slab.h>
 #include <linux/syscalls.h>
+#include <linux/ptrace.h>
 
-#include <asm/semaphore.h>
-#include <asm/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/uaccess.h>
 
 #include "util.h"
@@ -114,12 +113,9 @@ struct compat_shm_info {
     compat_ulong_t swap_attempts, swap_successes;
 };
 
-extern int sem_ctls[];
-#define sc_semopm (sem_ctls[2])
-#define MAXBUF (64*1024)
-
 static inline int compat_ipc_parse_version(int *cmd)
 {
+#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION
     int version = *cmd & IPC_64;
 
     /* this is tricky: architectures that have support for the old
@@ -131,6 +127,10 @@ static inline int compat_ipc_parse_version(int *cmd)
     *cmd &= ~IPC_64;
 #endif
     return version;
+#else
+    /* With the asm-generic APIs, we always use the 64-bit versions. */
+    return IPC_64;
+#endif
 }
 
 static inline int __get_compat_ipc64_perm(struct ipc64_perm *p64,
@@ -194,7 +194,7 @@ static inline int __put_compat_ipc_perm(struct ipc64_perm *p,
 static inline int get_compat_semid64_ds(struct semid64_ds *s64,
         struct compat_semid64_ds __user *up64)
 {
-    if (!access_ok (VERIFY_READ, up64, sizeof(*up64)))
+    if (!access_ok(VERIFY_READ, up64, sizeof(*up64)))
         return -EFAULT;
     return __get_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm);
 }
@@ -202,7 +202,7 @@ static inline int get_compat_semid_ds(struct semid64_ds *s,
         struct compat_semid_ds __user *up)
 {
-    if (!access_ok (VERIFY_READ, up, sizeof(*up)))
+    if (!access_ok(VERIFY_READ, up, sizeof(*up)))
         return -EFAULT;
     return __get_compat_ipc_perm(&s->sem_perm, &up->sem_perm);
 }
@@ -212,7 +212,7 @@ static inline int put_compat_semid64_ds(struct semid64_ds *s64,
 {
     int err;
 
-    if (!access_ok (VERIFY_WRITE, up64, sizeof(*up64)))
+    if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64)))
         return -EFAULT;
     err  = __put_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm);
     err |= __put_user(s64->sem_otime, &up64->sem_otime);
@@ -226,8 +226,8 @@ static inline int put_compat_semid_ds(struct semid64_ds *s,
 {
     int err;
 
-    if (!access_ok (VERIFY_WRITE, up, sizeof(*up)))
-        err = -EFAULT;
+    if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
+        return -EFAULT;
     err  = __put_compat_ipc_perm(&s->sem_perm, &up->sem_perm);
     err |= __put_user(s->sem_otime, &up->sem_otime);
     err |= __put_user(s->sem_ctime, &up->sem_ctime);
@@ -235,23 +235,24 @@ static inline int put_compat_semid_ds(struct semid64_ds *s,
     return err;
 }
 
-long compat_sys_semctl(int first, int second, int third, void __user *uptr)
+static long do_compat_semctl(int first, int second, int third, u32 pad)
 {
-    union semun fourth;
-    u32 pad;
+    unsigned long fourth;
     int err, err2;
     struct semid64_ds s64;
     struct semid64_ds __user *up64;
     int version = compat_ipc_parse_version(&third);
 
-    if (!uptr)
-        return -EINVAL;
-    if (get_user(pad, (u32 __user *) uptr))
-        return -EFAULT;
+    memset(&s64, 0, sizeof(s64));
+
     if ((third & (~IPC_64)) == SETVAL)
-        fourth.val = (int) pad;
+#ifdef __BIG_ENDIAN
+        fourth = (unsigned long)pad << 32;
+#else
+        fourth = pad;
+#endif
     else
-        fourth.__pad = compat_ptr(pad);
+        fourth = (unsigned long)compat_ptr(pad);
     switch (third & (~IPC_64)) {
     case IPC_INFO:
     case IPC_RMID:
@@ -269,7 +270,7 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
     case IPC_STAT:
     case SEM_STAT:
         up64 = compat_alloc_user_space(sizeof(s64));
-        fourth.__pad = up64;
+        fourth = (unsigned long)up64;
         err = sys_semctl(first, second, third, fourth);
         if (err < 0)
             break;
@@ -284,18 +285,18 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
         break;
 
     case IPC_SET:
-        if (version == IPC_64) {
+        if (version == IPC_64)
             err = get_compat_semid64_ds(&s64, compat_ptr(pad));
-        } else {
+        else
             err = get_compat_semid_ds(&s64, compat_ptr(pad));
-        }
+
         up64 = compat_alloc_user_space(sizeof(s64));
         if (copy_to_user(up64, &s64, sizeof(s64)))
             err = -EFAULT;
         if (err)
             break;
 
-        fourth.__pad = up64;
+        fourth = (unsigned long)up64;
         err = sys_semctl(first, second, third, fourth);
         break;
@@ -306,61 +307,130 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr)
     return err;
 }
 
-long compat_sys_msgsnd(int first, int second, int third, void __user *uptr)
+static long compat_do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
 {
-    struct msgbuf __user *p;
-    struct compat_msgbuf __user *up = uptr;
-    long type;
-
-    if (first < 0)
-        return -EINVAL;
-    if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf)))
-        return -EINVAL;
-
-    p = compat_alloc_user_space(second + sizeof(struct msgbuf));
-    if (get_user(type, &up->mtype) ||
-        put_user(type, &p->mtype) ||
-        copy_in_user(p->mtext, up->mtext, second))
+    struct compat_msgbuf __user *msgp = dest;
+    size_t msgsz;
+
+    if (put_user(msg->m_type, &msgp->mtype))
         return -EFAULT;
-    return sys_msgsnd(first, p, second, third);
+    msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
+    if (store_msg(msgp->mtext, msg, msgsz))
+        return -EFAULT;
+    return msgsz;
 }
 
-long compat_sys_msgrcv(int first, int second, int msgtyp, int third,
-               int version, void __user *uptr)
+#ifndef COMPAT_SHMLBA
+#define COMPAT_SHMLBA SHMLBA
+#endif
+
+#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC
+COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second,
+    u32, third, compat_uptr_t, ptr, u32, fifth)
 {
-    struct msgbuf __user *p;
-    struct compat_msgbuf __user *up;
-    long type;
-    int err;
+    int version;
+    u32 pad;
 
-    if (first < 0)
-        return -EINVAL;
-    if (second < 0 || (second >= MAXBUF - sizeof(struct msgbuf)))
-        return -EINVAL;
+    version = call >> 16; /* hack for backward compatibility */
+    call &= 0xffff;
+
+    switch (call) {
+    case SEMOP:
+        /* struct sembuf is the same on 32 and 64bit :)) */
+        return sys_semtimedop(first, compat_ptr(ptr), second, NULL);
+    case SEMTIMEDOP:
+        return compat_sys_semtimedop(first, compat_ptr(ptr), second,
+                        compat_ptr(fifth));
+    case SEMGET:
+        return sys_semget(first, second, third);
+    case SEMCTL:
+        if (!ptr)
+            return -EINVAL;
+        if (get_user(pad, (u32 __user *) compat_ptr(ptr)))
+            return -EFAULT;
+        return do_compat_semctl(first, second, third, pad);
 
-    if (!version) {
-        struct compat_ipc_kludge ipck;
-        err = -EINVAL;
-        if (!uptr)
-            goto out;
-        err = -EFAULT;
-        if (copy_from_user (&ipck, uptr, sizeof(ipck)))
-            goto out;
-        uptr = compat_ptr(ipck.msgp);
-        msgtyp = ipck.msgtyp;
+    case MSGSND: {
+        struct compat_msgbuf __user *up = compat_ptr(ptr);
+        compat_long_t type;
+
+        if (first < 0 || second < 0)
+            return -EINVAL;
+
+        if (get_user(type, &up->mtype))
+            return -EFAULT;
+
+        return do_msgsnd(first, type, up->mtext, second, third);
     }
-    p = compat_alloc_user_space(second + sizeof(struct msgbuf));
-    err = sys_msgrcv(first, p, second, msgtyp, third);
-    if (err < 0)
-        goto out;
-    up = uptr;
-    if (get_user(type, &p->mtype) ||
-        put_user(type, &up->mtype) ||
-        copy_in_user(up->mtext, p->mtext, err))
-        err = -EFAULT;
-out:
-    return err;
+    case MSGRCV: {
+        void __user *uptr = compat_ptr(ptr);
+
+        if (first < 0 || second < 0)
+            return -EINVAL;
+
+        if (!version) {
+            struct compat_ipc_kludge ipck;
+            if (!uptr)
+                return -EINVAL;
+            if (copy_from_user(&ipck, uptr, sizeof(ipck)))
+                return -EFAULT;
+            uptr = compat_ptr(ipck.msgp);
+            fifth = ipck.msgtyp;
+        }
+        return do_msgrcv(first, uptr, second, (s32)fifth, third,
+                 compat_do_msg_fill);
+    }
+    case MSGGET:
+        return sys_msgget(first, second);
+    case MSGCTL:
+        return compat_sys_msgctl(first, second, compat_ptr(ptr));
+
+    case SHMAT: {
+        int err;
+        unsigned long raddr;
+
+        if (version == 1)
+            return -EINVAL;
+        err = do_shmat(first, compat_ptr(ptr), second, &raddr,
+                   COMPAT_SHMLBA);
+        if (err < 0)
+            return err;
+        return put_user(raddr, (compat_ulong_t *)compat_ptr(third));
+    }
+    case SHMDT:
+        return sys_shmdt(compat_ptr(ptr));
+    case SHMGET:
+        return sys_shmget(first, (unsigned)second, third);
+    case SHMCTL:
+        return compat_sys_shmctl(first, second, compat_ptr(ptr));
+    }
+
+    return -ENOSYS;
+}
+#endif
+
+COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg)
+{
+    return do_compat_semctl(semid, semnum, cmd, arg);
+}
+
+COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp,
+               compat_ssize_t, msgsz, int, msgflg)
+{
+    struct compat_msgbuf __user *up = compat_ptr(msgp);
+    compat_long_t mtype;
+
+    if (get_user(mtype, &up->mtype))
+        return -EFAULT;
+    return do_msgsnd(msqid, mtype, up->mtext, (ssize_t)msgsz, msgflg);
+}
+
+COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp,
+               compat_ssize_t, msgsz, compat_long_t, msgtyp, int, msgflg)
+{
+    return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, (long)msgtyp,
+             msgflg, compat_do_msg_fill);
 }
 
 static inline int get_compat_msqid64(struct msqid64_ds *m64,
@@ -425,13 +495,15 @@ static inline int put_compat_msqid_ds(struct msqid64_ds *m,
     return err;
 }
 
-long compat_sys_msgctl(int first, int second, void __user *uptr)
+COMPAT_SYSCALL_DEFINE3(msgctl, int, first, int, second, void __user *, uptr)
 {
     int err, err2;
     struct msqid64_ds m64;
     int version = compat_ipc_parse_version(&second);
     void __user *p;
 
+    memset(&m64, 0, sizeof(m64));
+
     switch (second & (~IPC_64)) {
     case IPC_INFO:
     case IPC_RMID:
@@ -440,11 +512,11 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
         break;
 
     case IPC_SET:
-        if (version == IPC_64) {
+        if (version == IPC_64)
             err = get_compat_msqid64(&m64, uptr);
-        } else {
+        else
             err = get_compat_msqid(&m64, uptr);
-        }
+
         if (err)
             break;
         p = compat_alloc_user_space(sizeof(m64));
@@ -477,20 +549,16 @@ long compat_sys_msgctl(int first, int second, void __user *uptr)
     return err;
 }
 
-long compat_sys_shmat(int first, int second, compat_uptr_t third, int version,
-            void __user *uptr)
+COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
 {
-    int err;
-    unsigned long raddr;
-    compat_ulong_t __user *uaddr;
+    unsigned long ret;
+    long err;
 
-    if (version == 1)
-        return -EINVAL;
-    err = do_shmat(first, uptr, second, &raddr);
-    if (err < 0)
+    err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
+    if (err)
         return err;
-    uaddr = compat_ptr(third);
-    return put_user(raddr, uaddr);
+    force_successful_syscall_return();
+    return (long)ret;
 }
 
 static inline int get_compat_shmid64_ds(struct shmid64_ds *s64,
@@ -552,6 +620,8 @@ static inline int put_compat_shminfo64(struct shminfo64 *smi,
 
     if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64)))
         return -EFAULT;
+    if (smi->shmmax > INT_MAX)
+        smi->shmmax = INT_MAX;
     err  = __put_user(smi->shmmax, &up64->shmmax);
     err |= __put_user(smi->shmmin, &up64->shmmin);
     err |= __put_user(smi->shmmni, &up64->shmmni);
@@ -567,6 +637,8 @@ static inline int put_compat_shminfo(struct shminfo64 *smi,
 
     if (!access_ok(VERIFY_WRITE, up, sizeof(*up)))
         return -EFAULT;
+    if (smi->shmmax > INT_MAX)
+        smi->shmmax = INT_MAX;
     err  = __put_user(smi->shmmax, &up->shmmax);
     err |= __put_user(smi->shmmin, &up->shmmin);
     err |= __put_user(smi->shmmni, &up->shmmni);
@@ -593,7 +665,7 @@ static inline int put_compat_shm_info(struct shm_info __user *ip,
     return err;
 }
 
-long compat_sys_shmctl(int first, int second, void __user *uptr)
+COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr)
 {
     void __user *p;
     struct shmid64_ds s64;
@@ -601,6 +673,8 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
     int err, err2;
     int version = compat_ipc_parse_version(&second);
 
+    memset(&s64, 0, sizeof(s64));
+
     switch (second & (~IPC_64)) {
     case IPC_RMID:
     case SHM_LOCK:
@@ -625,11 +699,11 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
 
     case IPC_SET:
-        if (version == IPC_64) {
+        if (version == IPC_64)
             err = get_compat_shmid64_ds(&s64, uptr);
-        } else {
+        else
             err = get_compat_shmid_ds(&s64, uptr);
-        }
+
         if (err)
             break;
         p = compat_alloc_user_space(sizeof(s64));
@@ -672,17 +746,12 @@ long compat_sys_shmctl(int first, int second, void __user *uptr)
     return err;
 }
 
-long compat_sys_semtimedop(int semid, struct sembuf __user *tsems,
-        unsigned nsops, const struct compat_timespec __user *timeout)
+COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems,
+        unsigned, nsops,
+        const struct compat_timespec __user *, timeout)
 {
-    struct timespec __user *ts64 = NULL;
-    if (timeout) {
-        struct timespec ts;
-        ts64 = compat_alloc_user_space(sizeof(*ts64));
-        if (get_compat_timespec(&ts, timeout))
-            return -EFAULT;
-        if (copy_to_user(ts64, &ts, sizeof(ts)))
-            return -EFAULT;
-    }
+    struct timespec __user *ts64;
+    if (compat_convert_timespec(&ts64, timeout))
+        return -EFAULT;
     return sys_semtimedop(semid, tsems, nsops, ts64);
 }
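The SETVAL shift in do_compat_semctl() above is worth unpacking: the native sys_semctl() takes its fourth argument as an unsigned long and reinterprets it as union semun, whose 32-bit "int val" member overlaps the high-order half of that word on a 64-bit big-endian kernel. A minimal userspace sketch of the effect (illustrative only, not kernel code; it assumes a 64-bit build and a stand-in union):

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the kernel's union semun (illustrative). */
    union semun_like {
        int val;    /* payload for SETVAL */
        void *ptr;  /* buf/array pointer for the other commands */
    };

    int main(void)
    {
        unsigned int pad = 42;  /* 32-bit value from compat userspace */
        int big_endian = (*(unsigned char *)&(int){1} == 0);
        unsigned long fourth;
        union semun_like arg;

        /* What do_compat_semctl() does before calling sys_semctl(): */
        fourth = big_endian ? (unsigned long)pad << 32 : (unsigned long)pad;

        /* What the native side sees when it reinterprets the word: */
        memcpy(&arg, &fourth, sizeof(fourth));
        printf("arg.val = %d\n", arg.val);  /* 42 on either byte order */
        return 0;
    }

Without the shift, a big-endian kernel would read the SETVAL payload out of the wrong half of the register.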
diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c
index d8d1e9ff4e8..ef6f91cc449 100644
--- a/ipc/compat_mq.c
+++ b/ipc/compat_mq.c
@@ -12,7 +12,7 @@
 #include <linux/mqueue.h>
 #include <linux/syscalls.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 struct compat_mq_attr {
     compat_long_t mq_flags;      /* message queue flags */
@@ -46,13 +46,16 @@ static inline int put_compat_mq_attr(const struct mq_attr *attr,
             | __put_user(attr->mq_curmsgs, &uattr->mq_curmsgs);
 }
 
-asmlinkage long compat_sys_mq_open(const char __user *u_name,
-            int oflag, compat_mode_t mode,
-            struct compat_mq_attr __user *u_attr)
+COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
+               int, oflag, compat_mode_t, mode,
+               struct compat_mq_attr __user *, u_attr)
 {
     void __user *p = NULL;
     if (u_attr && oflag & O_CREAT) {
         struct mq_attr attr;
+
+        memset(&attr, 0, sizeof(attr));
+
         p = compat_alloc_user_space(sizeof(attr));
         if (get_compat_mq_attr(&attr, u_attr) ||
             copy_to_user(p, &attr, sizeof(attr)))
@@ -61,49 +64,36 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name,
     return sys_mq_open(u_name, oflag, mode, p);
 }
 
-static int compat_prepare_timeout(struct timespec __user * *p,
-                  const struct compat_timespec __user *u)
-{
-    struct timespec ts;
-    if (!u) {
-        *p = NULL;
-        return 0;
-    }
-    *p = compat_alloc_user_space(sizeof(ts));
-    if (get_compat_timespec(&ts, u) || copy_to_user(*p, &ts, sizeof(ts)))
-        return -EFAULT;
-    return 0;
-}
-
-asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes,
-            const char __user *u_msg_ptr,
-            size_t msg_len, unsigned int msg_prio,
-            const struct compat_timespec __user *u_abs_timeout)
+COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
+               const char __user *, u_msg_ptr,
+               compat_size_t, msg_len, unsigned int, msg_prio,
+               const struct compat_timespec __user *, u_abs_timeout)
 {
     struct timespec __user *u_ts;
 
-    if (compat_prepare_timeout(&u_ts, u_abs_timeout))
+    if (compat_convert_timespec(&u_ts, u_abs_timeout))
         return -EFAULT;
 
     return sys_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio,
                 u_ts);
 }
 
-asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes,
-            char __user *u_msg_ptr,
-            size_t msg_len, unsigned int __user *u_msg_prio,
-            const struct compat_timespec __user *u_abs_timeout)
+COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
+               char __user *, u_msg_ptr,
+               compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
+               const struct compat_timespec __user *, u_abs_timeout)
 {
     struct timespec __user *u_ts;
+
-    if (compat_prepare_timeout(&u_ts, u_abs_timeout))
+    if (compat_convert_timespec(&u_ts, u_abs_timeout))
         return -EFAULT;
 
     return sys_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio,
                    u_ts);
 }
 
-asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
-            const struct compat_sigevent __user *u_notification)
+COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
+               const struct compat_sigevent __user *, u_notification)
 {
     struct sigevent __user *p = NULL;
     if (u_notification) {
@@ -119,14 +109,16 @@ asmlinkage long compat_sys_mq_notify(mqd_t mqdes,
     return sys_mq_notify(mqdes, p);
 }
 
-asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes,
-            const struct compat_mq_attr __user *u_mqstat,
-            struct compat_mq_attr __user *u_omqstat)
+COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
+               const struct compat_mq_attr __user *, u_mqstat,
+               struct compat_mq_attr __user *, u_omqstat)
 {
     struct mq_attr mqstat;
     struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p));
     long ret;
 
+    memset(&mqstat, 0, sizeof(mqstat));
+
     if (u_mqstat) {
         if (get_compat_mq_attr(&mqstat, u_mqstat) ||
             copy_to_user(p, &mqstat, sizeof(mqstat)))
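The repeated pattern here is the conversion of a 32-bit timespec into a native one that the existing sys_* entry points can consume. The open-coded compat_prepare_timeout() is dropped in favour of the shared compat_convert_timespec() helper; a sketch of what that helper does, modelled directly on the code it replaces (so the shape is grounded in this diff, though the real helper lives in kernel/compat.c):

    /* Sketch, modelled on the removed compat_prepare_timeout(): widen a
     * 32-bit timespec from user space and park the native struct in
     * scratch space on the compat user stack. */
    static int compat_convert_timespec_sketch(struct timespec __user **kts,
                    const struct compat_timespec __user *cts)
    {
        struct timespec ts;

        if (!cts) {
            *kts = NULL;        /* a NULL timeout stays NULL */
            return 0;
        }
        *kts = compat_alloc_user_space(sizeof(ts));
        if (get_compat_timespec(&ts, cts) ||    /* 32-bit -> native */
            copy_to_user(*kts, &ts, sizeof(ts)))
            return -EFAULT;
        return 0;
    }

The payoff is that every compat syscall taking a timeout shrinks to one call plus the unchanged native path.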
diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
new file mode 100644
index 00000000000..c3f0326e98d
--- /dev/null
+++ b/ipc/ipc_sysctl.c
@@ -0,0 +1,284 @@
+/*
+ *  Copyright (C) 2007
+ *
+ *  Author: Eric Biederman <ebiederm@xmision.com>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License as
+ *  published by the Free Software Foundation, version 2 of the
+ *  License.
+ */
+
+#include <linux/module.h>
+#include <linux/ipc.h>
+#include <linux/nsproxy.h>
+#include <linux/sysctl.h>
+#include <linux/uaccess.h>
+#include <linux/ipc_namespace.h>
+#include <linux/msg.h>
+#include "util.h"
+
+static void *get_ipc(struct ctl_table *table)
+{
+    char *which = table->data;
+    struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
+    which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns;
+    return which;
+}
+
+#ifdef CONFIG_PROC_SYSCTL
+static int proc_ipc_dointvec(struct ctl_table *table, int write,
+    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    struct ctl_table ipc_table;
+
+    memcpy(&ipc_table, table, sizeof(ipc_table));
+    ipc_table.data = get_ipc(table);
+
+    return proc_dointvec(&ipc_table, write, buffer, lenp, ppos);
+}
+
+static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write,
+    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    struct ctl_table ipc_table;
+
+    memcpy(&ipc_table, table, sizeof(ipc_table));
+    ipc_table.data = get_ipc(table);
+
+    return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
+}
+
+static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write,
+    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    struct ipc_namespace *ns = current->nsproxy->ipc_ns;
+    int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos);
+
+    if (err < 0)
+        return err;
+    if (ns->shm_rmid_forced)
+        shm_destroy_orphaned(ns);
+    return err;
+}
+
+static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write,
+    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    struct ctl_table ipc_table;
+    size_t lenp_bef = *lenp;
+    int rc;
+
+    memcpy(&ipc_table, table, sizeof(ipc_table));
+    ipc_table.data = get_ipc(table);
+
+    rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
+
+    if (write && !rc && lenp_bef == *lenp)
+        /*
+         * Tunable has successfully been changed by hand. Disable its
+         * automatic adjustment. This simply requires unregistering
+         * the notifiers that trigger recalculation.
+         */
+        unregister_ipcns_notifier(current->nsproxy->ipc_ns);
+
+    return rc;
+}
+
+static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write,
+    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    struct ctl_table ipc_table;
+    memcpy(&ipc_table, table, sizeof(ipc_table));
+    ipc_table.data = get_ipc(table);
+
+    return proc_doulongvec_minmax(&ipc_table, write, buffer,
+                    lenp, ppos);
+}
+
+/*
+ * Routine that is called when the file "auto_msgmni" has successfully been
+ * written.
+ * Two values are allowed:
+ *    0: unregister msgmni's callback routine from the ipc namespace notifier
+ *       chain. This means that msgmni won't be recomputed anymore upon memory
+ *       add/remove or ipc namespace creation/removal.
+ *    1: register back the callback routine.
+ */
+static void ipc_auto_callback(int val)
+{
+    if (!val)
+        unregister_ipcns_notifier(current->nsproxy->ipc_ns);
+    else {
+        /*
+         * Re-enable automatic recomputing only if not already
+         * enabled.
+         */
+        recompute_msgmni(current->nsproxy->ipc_ns);
+        cond_register_ipcns_notifier(current->nsproxy->ipc_ns);
+    }
+}
+
+static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write,
+    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    struct ctl_table ipc_table;
+    size_t lenp_bef = *lenp;
+    int oldval;
+    int rc;
+
+    memcpy(&ipc_table, table, sizeof(ipc_table));
+    ipc_table.data = get_ipc(table);
+    oldval = *((int *)(ipc_table.data));
+
+    rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos);
+
+    if (write && !rc && lenp_bef == *lenp) {
+        int newval = *((int *)(ipc_table.data));
+        /*
+         * The file "auto_msgmni" has correctly been set.
+         * React by (un)registering the corresponding tunable, if the
+         * value has changed.
+         */
+        if (newval != oldval)
+            ipc_auto_callback(newval);
+    }
+
+    return rc;
+}
+
+#else
+#define proc_ipc_doulongvec_minmax NULL
+#define proc_ipc_dointvec NULL
+#define proc_ipc_dointvec_minmax NULL
+#define proc_ipc_dointvec_minmax_orphans NULL
+#define proc_ipc_callback_dointvec_minmax NULL
+#define proc_ipcauto_dointvec_minmax NULL
+#endif
+
+static int zero;
+static int one = 1;
+static int int_max = INT_MAX;
+
+static struct ctl_table ipc_kern_table[] = {
+    {
+        .procname = "shmmax",
+        .data = &init_ipc_ns.shm_ctlmax,
+        .maxlen = sizeof(init_ipc_ns.shm_ctlmax),
+        .mode = 0644,
+        .proc_handler = proc_ipc_doulongvec_minmax,
+    },
+    {
+        .procname = "shmall",
+        .data = &init_ipc_ns.shm_ctlall,
+        .maxlen = sizeof(init_ipc_ns.shm_ctlall),
+        .mode = 0644,
+        .proc_handler = proc_ipc_doulongvec_minmax,
+    },
+    {
+        .procname = "shmmni",
+        .data = &init_ipc_ns.shm_ctlmni,
+        .maxlen = sizeof(init_ipc_ns.shm_ctlmni),
+        .mode = 0644,
+        .proc_handler = proc_ipc_dointvec,
+    },
+    {
+        .procname = "shm_rmid_forced",
+        .data = &init_ipc_ns.shm_rmid_forced,
+        .maxlen = sizeof(init_ipc_ns.shm_rmid_forced),
+        .mode = 0644,
+        .proc_handler = proc_ipc_dointvec_minmax_orphans,
+        .extra1 = &zero,
+        .extra2 = &one,
+    },
+    {
+        .procname = "msgmax",
+        .data = &init_ipc_ns.msg_ctlmax,
+        .maxlen = sizeof(init_ipc_ns.msg_ctlmax),
+        .mode = 0644,
+        .proc_handler = proc_ipc_dointvec_minmax,
+        .extra1 = &zero,
+        .extra2 = &int_max,
+    },
+    {
+        .procname = "msgmni",
+        .data = &init_ipc_ns.msg_ctlmni,
+        .maxlen = sizeof(init_ipc_ns.msg_ctlmni),
+        .mode = 0644,
+        .proc_handler = proc_ipc_callback_dointvec_minmax,
+        .extra1 = &zero,
+        .extra2 = &int_max,
+    },
+    {
+        .procname = "msgmnb",
+        .data = &init_ipc_ns.msg_ctlmnb,
+        .maxlen = sizeof(init_ipc_ns.msg_ctlmnb),
+        .mode = 0644,
+        .proc_handler = proc_ipc_dointvec_minmax,
+        .extra1 = &zero,
+        .extra2 = &int_max,
+    },
+    {
+        .procname = "sem",
+        .data = &init_ipc_ns.sem_ctls,
+        .maxlen = 4*sizeof(int),
+        .mode = 0644,
+        .proc_handler = proc_ipc_dointvec,
+    },
+    {
+        .procname = "auto_msgmni",
+        .data = &init_ipc_ns.auto_msgmni,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = proc_ipcauto_dointvec_minmax,
+        .extra1 = &zero,
+        .extra2 = &one,
+    },
+#ifdef CONFIG_CHECKPOINT_RESTORE
+    {
+        .procname = "sem_next_id",
+        .data = &init_ipc_ns.ids[IPC_SEM_IDS].next_id,
+        .maxlen = sizeof(init_ipc_ns.ids[IPC_SEM_IDS].next_id),
+        .mode = 0644,
+        .proc_handler = proc_ipc_dointvec_minmax,
+        .extra1 = &zero,
+        .extra2 = &int_max,
+    },
+    {
+        .procname = "msg_next_id",
+        .data = &init_ipc_ns.ids[IPC_MSG_IDS].next_id,
+        .maxlen = sizeof(init_ipc_ns.ids[IPC_MSG_IDS].next_id),
+        .mode = 0644,
+        .proc_handler = proc_ipc_dointvec_minmax,
+        .extra1 = &zero,
+        .extra2 = &int_max,
+    },
+    {
+        .procname = "shm_next_id",
+        .data = &init_ipc_ns.ids[IPC_SHM_IDS].next_id,
+        .maxlen = sizeof(init_ipc_ns.ids[IPC_SHM_IDS].next_id),
+        .mode = 0644,
+        .proc_handler = proc_ipc_dointvec_minmax,
+        .extra1 = &zero,
+        .extra2 = &int_max,
+    },
+#endif
+    {}
+};
+
+static struct ctl_table ipc_root_table[] = {
+    {
+        .procname = "kernel",
+        .mode = 0555,
+        .child = ipc_kern_table,
+    },
+    {}
+};
+
+static int __init ipc_sysctl_init(void)
+{
+    register_sysctl_table(ipc_root_table);
+    return 0;
+}
+
+device_initcall(ipc_sysctl_init);
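The interesting trick in this file is get_ipc(): every ctl_table entry stores a pointer into the global init_ipc_ns, and the handler rebases that pointer onto the calling task's own ipc namespace by reusing the offset. A self-contained illustration with hypothetical types (the field names here are stand-ins, not the real ipc_namespace layout):

    #include <stdio.h>
    #include <stddef.h>

    struct ns { int shm_ctlmni; int msg_ctlmax; };

    static struct ns init_ns  = { .shm_ctlmni = 4096, .msg_ctlmax = 8192 };
    static struct ns other_ns = { .shm_ctlmni = 16,   .msg_ctlmax = 64 };

    /* Same arithmetic as get_ipc(): (data - &init_ns) is the field
     * offset, valid for any instance of the same struct. */
    static void *rebase(void *data, struct ns *cur)
    {
        ptrdiff_t off = (char *)data - (char *)&init_ns;
        return (char *)cur + off;
    }

    int main(void)
    {
        void *data = &init_ns.msg_ctlmax;  /* as stored in the table */
        printf("%d\n", *(int *)rebase(data, &other_ns));  /* prints 64 */
        return 0;
    }

This lets one statically-declared sysctl table serve every ipc namespace without per-namespace table copies.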
diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c
new file mode 100644
index 00000000000..b9b31a4f77e
--- /dev/null
+++ b/ipc/ipcns_notifier.c
@@ -0,0 +1,92 @@
+/*
+ * linux/ipc/ipcns_notifier.c
+ * Copyright (C) 2007 BULL SA. Nadia Derbey
+ *
+ * Notification mechanism for ipc namespaces:
+ * The callback routine registered in the memory chain invokes the ipcns
+ * notifier chain with the IPCNS_MEMCHANGED event.
+ * Each callback routine registered in the ipcns namespace recomputes msgmni
+ * for the owning namespace.
+ */
+
+#include <linux/msg.h>
+#include <linux/rcupdate.h>
+#include <linux/notifier.h>
+#include <linux/nsproxy.h>
+#include <linux/ipc_namespace.h>
+
+#include "util.h"
+
+
+
+static BLOCKING_NOTIFIER_HEAD(ipcns_chain);
+
+
+static int ipcns_callback(struct notifier_block *self,
+                unsigned long action, void *arg)
+{
+    struct ipc_namespace *ns;
+
+    switch (action) {
+    case IPCNS_MEMCHANGED:   /* amount of lowmem has changed */
+    case IPCNS_CREATED:
+    case IPCNS_REMOVED:
+        /*
+         * It's time to recompute msgmni
+         */
+        ns = container_of(self, struct ipc_namespace, ipcns_nb);
+        /*
+         * No need to get a reference on the ns: the 1st job of
+         * free_ipc_ns() is to unregister the callback routine.
+         * blocking_notifier_chain_unregister takes the wr lock to do
+         * it.
+         * When this callback routine is called the rd lock is held by
+         * blocking_notifier_call_chain.
+         * So the ipc ns cannot be freed while we are here.
+         */
+        recompute_msgmni(ns);
+        break;
+    default:
+        break;
+    }
+
+    return NOTIFY_OK;
+}
+
+int register_ipcns_notifier(struct ipc_namespace *ns)
+{
+    int rc;
+
+    memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
+    ns->ipcns_nb.notifier_call = ipcns_callback;
+    ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
+    rc = blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb);
+    if (!rc)
+        ns->auto_msgmni = 1;
+    return rc;
+}
+
+int cond_register_ipcns_notifier(struct ipc_namespace *ns)
+{
+    int rc;
+
+    memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb));
+    ns->ipcns_nb.notifier_call = ipcns_callback;
+    ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI;
+    rc = blocking_notifier_chain_cond_register(&ipcns_chain,
+                        &ns->ipcns_nb);
+    if (!rc)
+        ns->auto_msgmni = 1;
+    return rc;
+}
+
+void unregister_ipcns_notifier(struct ipc_namespace *ns)
+{
+    blocking_notifier_chain_unregister(&ipcns_chain, &ns->ipcns_nb);
+    ns->auto_msgmni = 0;
+}
+
+int ipcns_notify(unsigned long val)
+{
+    return blocking_notifier_call_chain(&ipcns_chain, val, NULL);
+}
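Note the difference between the two registration paths: cond_register_ipcns_notifier() uses blocking_notifier_chain_cond_register(), which skips the insert if this notifier_block is already on the chain, so the auto_msgmni handler can re-enable recomputation without double-registering. A hedged sketch of the event flow; the actual IPCNS_CREATED call site lives in ipc/namespace.c, which is not shown in this diff:

    /* Sketch of a namespace-creation path (illustrative, not the real
     * create_ipc_ns()): hook the new ns into the chain, then tell every
     * registered namespace to recompute msgmni. */
    static void on_ipc_ns_created_sketch(struct ipc_namespace *ns)
    {
        register_ipcns_notifier(ns);     /* ns now listens for events */
        ipcns_notify(IPCNS_CREATED);     /* all listeners recompute msgmni */
    }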
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
new file mode 100644
index 00000000000..68d4e953762
--- /dev/null
+++ b/ipc/mq_sysctl.c
@@ -0,0 +1,124 @@
+/*
+ *  Copyright (C) 2007 IBM Corporation
+ *
+ *  Author: Cedric Le Goater <clg@fr.ibm.com>
+ *
+ *  This program is free software; you can redistribute it and/or
+ *  modify it under the terms of the GNU General Public License as
+ *  published by the Free Software Foundation, version 2 of the
+ *  License.
+ */
+
+#include <linux/nsproxy.h>
+#include <linux/ipc_namespace.h>
+#include <linux/sysctl.h>
+
+#ifdef CONFIG_PROC_SYSCTL
+static void *get_mq(struct ctl_table *table)
+{
+    char *which = table->data;
+    struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
+    which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns;
+    return which;
+}
+
+static int proc_mq_dointvec(struct ctl_table *table, int write,
+    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    struct ctl_table mq_table;
+    memcpy(&mq_table, table, sizeof(mq_table));
+    mq_table.data = get_mq(table);
+
+    return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
+}
+
+static int proc_mq_dointvec_minmax(struct ctl_table *table, int write,
+    void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+    struct ctl_table mq_table;
+    memcpy(&mq_table, table, sizeof(mq_table));
+    mq_table.data = get_mq(table);
+
+    return proc_dointvec_minmax(&mq_table, write, buffer,
+                    lenp, ppos);
+}
+#else
+#define proc_mq_dointvec NULL
+#define proc_mq_dointvec_minmax NULL
+#endif
+
+static int msg_max_limit_min = MIN_MSGMAX;
+static int msg_max_limit_max = HARD_MSGMAX;
+
+static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
+static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
+
+static struct ctl_table mq_sysctls[] = {
+    {
+        .procname = "queues_max",
+        .data = &init_ipc_ns.mq_queues_max,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = proc_mq_dointvec,
+    },
+    {
+        .procname = "msg_max",
+        .data = &init_ipc_ns.mq_msg_max,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = proc_mq_dointvec_minmax,
+        .extra1 = &msg_max_limit_min,
+        .extra2 = &msg_max_limit_max,
+    },
+    {
+        .procname = "msgsize_max",
+        .data = &init_ipc_ns.mq_msgsize_max,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = proc_mq_dointvec_minmax,
+        .extra1 = &msg_maxsize_limit_min,
+        .extra2 = &msg_maxsize_limit_max,
+    },
+    {
+        .procname = "msg_default",
+        .data = &init_ipc_ns.mq_msg_default,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = proc_mq_dointvec_minmax,
+        .extra1 = &msg_max_limit_min,
+        .extra2 = &msg_max_limit_max,
+    },
+    {
+        .procname = "msgsize_default",
+        .data = &init_ipc_ns.mq_msgsize_default,
+        .maxlen = sizeof(int),
+        .mode = 0644,
+        .proc_handler = proc_mq_dointvec_minmax,
+        .extra1 = &msg_maxsize_limit_min,
+        .extra2 = &msg_maxsize_limit_max,
+    },
+    {}
+};
+
+static struct ctl_table mq_sysctl_dir[] = {
+    {
+        .procname = "mqueue",
+        .mode = 0555,
+        .child = mq_sysctls,
+    },
+    {}
+};
+
+static struct ctl_table mq_sysctl_root[] = {
+    {
+        .procname = "fs",
+        .mode = 0555,
+        .child = mq_sysctl_dir,
+    },
+    {}
+};
+
+struct ctl_table_header *mq_register_sysctl_table(void)
+{
+    return register_sysctl_table(mq_sysctl_root);
+}
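Because the table is rooted at "fs"/"mqueue", these tunables surface as /proc/sys/fs/mqueue/* in each ipc namespace. A trivial userspace check (plain C, outside the kernel):

    #include <stdio.h>

    int main(void)
    {
        char buf[32];
        FILE *f = fopen("/proc/sys/fs/mqueue/msg_max", "r");

        /* Value is per-ipc-namespace thanks to get_mq() above. */
        if (f && fgets(buf, sizeof(buf), f))
            printf("per-queue message cap: %s", buf);
        if (f)
            fclose(f);
        return 0;
    }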
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a0f18c9cc89..4fcf39af177 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -2,15 +2,18 @@
  * POSIX message queues filesystem for Linux.
  *
  * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
- *                          Michal Wronski          (wrona@mat.uni.torun.pl)
+ *                          Michal Wronski          (michal.wronski@gmail.com)
  *
  * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
  * Lockless receive & send, fd based notify:
- *                          Manfred Spraul          (manfred@colorfullife.com)
+ *                          Manfred Spraul          (manfred@colorfullife.com)
+ *
+ * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
  *
  * This file is released under the GPL.
  */
 
+#include <linux/capability.h>
 #include <linux/init.h>
 #include <linux/pagemap.h>
 #include <linux/file.h>
@@ -21,9 +24,18 @@
 #include <linux/mqueue.h>
 #include <linux/msg.h>
 #include <linux/skbuff.h>
+#include <linux/vmalloc.h>
 #include <linux/netlink.h>
 #include <linux/syscalls.h>
+#include <linux/audit.h>
 #include <linux/signal.h>
+#include <linux/mutex.h>
+#include <linux/nsproxy.h>
+#include <linux/pid.h>
+#include <linux/ipc_namespace.h>
+#include <linux/user_namespace.h>
+#include <linux/slab.h>
+
 #include <net/sock.h>
 #include "util.h"
@@ -38,19 +50,11 @@
 #define STATE_PENDING 1
 #define STATE_READY 2
 
-/* used by sysctl */
-#define FS_MQUEUE 1
-#define CTL_QUEUESMAX 2
-#define CTL_MSGMAX 3
-#define CTL_MSGSIZEMAX 4
-
-/* default values */
-#define DFLT_QUEUESMAX 256  /* max number of message queues */
-#define DFLT_MSGMAX 10      /* max number of messages in each queue */
-#define HARD_MSGMAX (131072/sizeof(void*))
-#define DFLT_MSGSIZEMAX 8192    /* max message size */
-
-#define NOTIFY_COOKIE_LEN 32
+struct posix_msg_tree_node {
+    struct rb_node      rb_node;
+    struct list_head    msg_list;
+    int                 priority;
+};
 
 struct ext_wait_queue {     /* queue of sleeping tasks */
     struct task_struct *task;
@@ -64,11 +68,13 @@ struct mqueue_inode_info {
     struct inode vfs_inode;
     wait_queue_head_t wait_q;
 
-    struct msg_msg **messages;
+    struct rb_root msg_tree;
+    struct posix_msg_tree_node *node_cache;
     struct mq_attr attr;
 
     struct sigevent notify;
-    pid_t notify_owner;
+    struct pid *notify_owner;
+    struct user_namespace *notify_user_ns;
     struct user_struct *user;   /* user who created, for accounting */
     struct sock *notify_sock;
     struct sk_buff *notify_cookie;
@@ -79,233 +85,394 @@ struct mqueue_inode_info {
     unsigned long qsize; /* size of queue in memory (sum of all msgs) */
 };
 
-static struct inode_operations mqueue_dir_inode_operations;
-static struct file_operations mqueue_file_operations;
-static struct super_operations mqueue_super_ops;
+static const struct inode_operations mqueue_dir_inode_operations;
+static const struct file_operations mqueue_file_operations;
+static const struct super_operations mqueue_super_ops;
 
 static void remove_notification(struct mqueue_inode_info *info);
 
-static spinlock_t mq_lock;
-static kmem_cache_t *mqueue_inode_cachep;
-static struct vfsmount *mqueue_mnt;
-
-static unsigned int queues_count;
-static unsigned int queues_max  = DFLT_QUEUESMAX;
-static unsigned int msg_max     = DFLT_MSGMAX;
-static unsigned int msgsize_max = DFLT_MSGSIZEMAX;
+static struct kmem_cache *mqueue_inode_cachep;
 
-static struct ctl_table_header * mq_sysctl_table;
+static struct ctl_table_header *mq_sysctl_table;
 
 static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
 {
     return container_of(inode, struct mqueue_inode_info, vfs_inode);
 }
 
+/*
+ * This routine should be called with the mq_lock held.
+ */
+static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
+{
+    return get_ipc_ns(inode->i_sb->s_fs_info);
+}
+
+static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
+{
+    struct ipc_namespace *ns;
+
+    spin_lock(&mq_lock);
+    ns = __get_ns_from_inode(inode);
+    spin_unlock(&mq_lock);
+    return ns;
+}
+
+/* Auxiliary functions to manipulate messages' list */
+static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
+{
+    struct rb_node **p, *parent = NULL;
+    struct posix_msg_tree_node *leaf;
+
+    p = &info->msg_tree.rb_node;
+    while (*p) {
+        parent = *p;
+        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+
+        if (likely(leaf->priority == msg->m_type))
+            goto insert_msg;
+        else if (msg->m_type < leaf->priority)
+            p = &(*p)->rb_left;
+        else
+            p = &(*p)->rb_right;
+    }
+    if (info->node_cache) {
+        leaf = info->node_cache;
+        info->node_cache = NULL;
+    } else {
+        leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
+        if (!leaf)
+            return -ENOMEM;
+        INIT_LIST_HEAD(&leaf->msg_list);
+        info->qsize += sizeof(*leaf);
+    }
+    leaf->priority = msg->m_type;
+    rb_link_node(&leaf->rb_node, parent, p);
+    rb_insert_color(&leaf->rb_node, &info->msg_tree);
+insert_msg:
+    info->attr.mq_curmsgs++;
+    info->qsize += msg->m_ts;
+    list_add_tail(&msg->m_list, &leaf->msg_list);
+    return 0;
+}
+
+static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
+{
+    struct rb_node **p, *parent = NULL;
+    struct posix_msg_tree_node *leaf;
+    struct msg_msg *msg;
+
+try_again:
+    p = &info->msg_tree.rb_node;
+    while (*p) {
+        parent = *p;
+        /*
+         * During insert, low priorities go to the left and high to the
+         * right.  On receive, we want the highest priorities first, so
+         * walk all the way to the right.
+         */
+        p = &(*p)->rb_right;
+    }
+    if (!parent) {
+        if (info->attr.mq_curmsgs) {
+            pr_warn_once("Inconsistency in POSIX message queue, "
+                     "no tree element, but supposedly messages "
+                     "should exist!\n");
+            info->attr.mq_curmsgs = 0;
+        }
+        return NULL;
+    }
+    leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
+    if (unlikely(list_empty(&leaf->msg_list))) {
+        pr_warn_once("Inconsistency in POSIX message queue, "
+                 "empty leaf node but we haven't implemented "
+                 "lazy leaf delete!\n");
+        rb_erase(&leaf->rb_node, &info->msg_tree);
+        if (info->node_cache) {
+            info->qsize -= sizeof(*leaf);
+            kfree(leaf);
+        } else {
+            info->node_cache = leaf;
+        }
+        goto try_again;
+    } else {
+        msg = list_first_entry(&leaf->msg_list,
+                       struct msg_msg, m_list);
+        list_del(&msg->m_list);
+        if (list_empty(&leaf->msg_list)) {
+            rb_erase(&leaf->rb_node, &info->msg_tree);
+            if (info->node_cache) {
+                info->qsize -= sizeof(*leaf);
+                kfree(leaf);
+            } else {
+                info->node_cache = leaf;
+            }
+        }
+    }
+    info->attr.mq_curmsgs--;
+    info->qsize -= msg->m_ts;
+    return msg;
+}
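The msg_tree pair above replaces the old priority-sorted array (removed further down): one rb-tree leaf per priority currently in use, a FIFO list inside each leaf, and msg_get() always walking to the rightmost (highest-priority) leaf. The observable ordering this yields, demonstrated with a toy model in plain C (not kernel code):

    #include <stdio.h>

    struct msg { int prio; const char *text; };

    int main(void)
    {
        struct msg sent[] = {
            { 0, "low-1" }, { 5, "high-1" }, { 0, "low-2" }, { 5, "high-2" },
        };
        int n = sizeof(sent) / sizeof(sent[0]);

        /* Emulate msg_get(): highest priority first, FIFO within one
         * priority. Prints: high-1, high-2, low-1, low-2. */
        for (int p = 5; p >= 0; p--)
            for (int i = 0; i < n; i++)
                if (sent[i].prio == p)
                    printf("recv %s\n", sent[i].text);
        return 0;
    }

The single-entry node_cache keeps the common case (one message in flight per priority) from hitting kmalloc/kfree on every send/receive cycle.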
+ */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); - if (S_ISREG(mode)) { - struct mqueue_inode_info *info; - struct task_struct *p = current; - struct user_struct *u = p->user; - unsigned long mq_bytes, mq_msg_tblsz; - - inode->i_fop = &mqueue_file_operations; - inode->i_size = FILENT_SIZE; - /* mqueue specific info */ - info = MQUEUE_I(inode); - spin_lock_init(&info->lock); - init_waitqueue_head(&info->wait_q); - INIT_LIST_HEAD(&info->e_wait_q[0].list); - INIT_LIST_HEAD(&info->e_wait_q[1].list); - info->messages = NULL; - info->notify_owner = 0; - info->qsize = 0; - info->user = NULL; /* set when all is ok */ - memset(&info->attr, 0, sizeof(info->attr)); - info->attr.mq_maxmsg = DFLT_MSGMAX; - info->attr.mq_msgsize = DFLT_MSGSIZEMAX; - if (attr) { - info->attr.mq_maxmsg = attr->mq_maxmsg; - info->attr.mq_msgsize = attr->mq_msgsize; - } - mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *); - mq_bytes = (mq_msg_tblsz + - (info->attr.mq_maxmsg * info->attr.mq_msgsize)); - - spin_lock(&mq_lock); - if (u->mq_bytes + mq_bytes < u->mq_bytes || - u->mq_bytes + mq_bytes > - p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) { - spin_unlock(&mq_lock); - goto out_inode; - } - u->mq_bytes += mq_bytes; + spin_lock(&mq_lock); + if (u->mq_bytes + mq_bytes < u->mq_bytes || + u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { spin_unlock(&mq_lock); - - info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL); - if (!info->messages) { - spin_lock(&mq_lock); - u->mq_bytes -= mq_bytes; - spin_unlock(&mq_lock); - goto out_inode; - } - /* all is ok */ - info->user = get_uid(u); - } else if (S_ISDIR(mode)) { - inode->i_nlink++; - /* Some things misbehave if size == 0 on a directory */ - inode->i_size = 2 * DIRENT_SIZE; - inode->i_op = &mqueue_dir_inode_operations; - inode->i_fop = &simple_dir_operations; + /* mqueue_evict_inode() releases info->messages */ + ret = -EMFILE; + goto out_inode; } + u->mq_bytes += mq_bytes; + spin_unlock(&mq_lock); + + /* all is ok */ + info->user = get_uid(u); + } else if (S_ISDIR(mode)) { + inc_nlink(inode); + /* Some things misbehave if size == 0 on a directory */ + inode->i_size = 2 * DIRENT_SIZE; + inode->i_op = &mqueue_dir_inode_operations; + inode->i_fop = &simple_dir_operations; } + return inode; out_inode: - make_bad_inode(inode); iput(inode); - return NULL; +err: + return ERR_PTR(ret); } static int mqueue_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; + struct ipc_namespace *ns = data; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; - inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); - if (!inode) - return -ENOMEM; + inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); + if (IS_ERR(inode)) + return PTR_ERR(inode); - sb->s_root = d_alloc_root(inode); - if (!sb->s_root) { - iput(inode); + sb->s_root = d_make_root(inode); + if (!sb->s_root) return -ENOMEM; - } - return 0; } -static struct super_block *mqueue_get_sb(struct file_system_type *fs_type, - int flags, const char *dev_name, - void *data) +static struct dentry *mqueue_mount(struct file_system_type *fs_type, + int flags, const char *dev_name, + void *data) { - return get_sb_single(fs_type, flags, data, mqueue_fill_super); + if (!(flags & MS_KERNMOUNT)) { + struct 
ipc_namespace *ns = current->nsproxy->ipc_ns; + /* Don't allow mounting unless the caller has CAP_SYS_ADMIN + * over the ipc namespace. + */ + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + data = ns; + } + return mount_ns(fs_type, flags, data, mqueue_fill_super); } -static void init_once(void *foo, kmem_cache_t * cachep, unsigned long flags) +static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; - if ((flags & (SLAB_CTOR_VERIFY | SLAB_CTOR_CONSTRUCTOR)) == - SLAB_CTOR_CONSTRUCTOR) - inode_init_once(&p->vfs_inode); + inode_init_once(&p->vfs_inode); } static struct inode *mqueue_alloc_inode(struct super_block *sb) { struct mqueue_inode_info *ei; - ei = kmem_cache_alloc(mqueue_inode_cachep, SLAB_KERNEL); + ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL); if (!ei) return NULL; return &ei->vfs_inode; } -static void mqueue_destroy_inode(struct inode *inode) +static void mqueue_i_callback(struct rcu_head *head) { + struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } -static void mqueue_delete_inode(struct inode *inode) +static void mqueue_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, mqueue_i_callback); +} + +static void mqueue_evict_inode(struct inode *inode) { struct mqueue_inode_info *info; struct user_struct *user; - unsigned long mq_bytes; - int i; + unsigned long mq_bytes, mq_treesize; + struct ipc_namespace *ipc_ns; + struct msg_msg *msg; - if (S_ISDIR(inode->i_mode)) { - clear_inode(inode); + clear_inode(inode); + + if (S_ISDIR(inode->i_mode)) return; - } + + ipc_ns = get_ns_from_inode(inode); info = MQUEUE_I(inode); spin_lock(&info->lock); - for (i = 0; i < info->attr.mq_curmsgs; i++) - free_msg(info->messages[i]); - kfree(info->messages); + while ((msg = msg_get(info)) != NULL) + free_msg(msg); + kfree(info->node_cache); spin_unlock(&info->lock); - clear_inode(inode); + /* Total amount of bytes accounted for the mqueue */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); - mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) + - (info->attr.mq_maxmsg * info->attr.mq_msgsize)); user = info->user; if (user) { spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; - queues_count--; + /* + * get_ns_from_inode() ensures that the + * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns + * to which we now hold a reference, or it is NULL. + * We can't put it here under mq_lock, though. 
+ */ + if (ipc_ns) + ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); free_uid(user); } + if (ipc_ns) + put_ipc_ns(ipc_ns); } static int mqueue_create(struct inode *dir, struct dentry *dentry, - int mode, struct nameidata *nd) + umode_t mode, bool excl) { struct inode *inode; struct mq_attr *attr = dentry->d_fsdata; int error; + struct ipc_namespace *ipc_ns; spin_lock(&mq_lock); - if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) { + ipc_ns = __get_ns_from_inode(dir); + if (!ipc_ns) { + error = -EACCES; + goto out_unlock; + } + + if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && + !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; - goto out_lock; + goto out_unlock; } - queues_count++; + ipc_ns->mq_queues_count++; spin_unlock(&mq_lock); - inode = mqueue_get_inode(dir->i_sb, mode, attr); - if (!inode) { - error = -ENOMEM; + inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); + if (IS_ERR(inode)) { + error = PTR_ERR(inode); spin_lock(&mq_lock); - queues_count--; - goto out_lock; + ipc_ns->mq_queues_count--; + goto out_unlock; } + put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; d_instantiate(dentry, inode); dget(dentry); return 0; -out_lock: +out_unlock: spin_unlock(&mq_lock); + if (ipc_ns) + put_ipc_ns(ipc_ns); return error; } static int mqueue_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = dentry->d_inode; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; dir->i_size -= DIRENT_SIZE; - inode->i_nlink--; - dput(dentry); - return 0; + drop_nlink(inode); + dput(dentry); + return 0; } /* @@ -316,15 +483,11 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry) * through std routines) */ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, - size_t count, loff_t * off) + size_t count, loff_t *off) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); char buffer[FILENT_SIZE]; - size_t slen; - loff_t o; - - if (!count) - return 0; + ssize_t ret; spin_lock(&info->lock); snprintf(buffer, sizeof(buffer), @@ -334,32 +497,25 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, (info->notify_owner && info->notify.sigev_notify == SIGEV_SIGNAL) ? 
info->notify.sigev_signo : 0, - info->notify_owner); + pid_vnr(info->notify_owner)); spin_unlock(&info->lock); buffer[sizeof(buffer)-1] = '\0'; - slen = strlen(buffer)+1; - - o = *off; - if (o > slen) - return 0; - if (o + count > slen) - count = slen - o; + ret = simple_read_from_buffer(u_data, count, off, buffer, + strlen(buffer)); + if (ret <= 0) + return ret; - if (copy_to_user(u_data, buffer + o, count)) - return -EFAULT; - - *off = o + count; - filp->f_dentry->d_inode->i_atime = filp->f_dentry->d_inode->i_ctime = CURRENT_TIME; - return count; + file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME; + return ret; } -static int mqueue_flush_file(struct file *filp) +static int mqueue_flush_file(struct file *filp, fl_owner_t id) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); spin_lock(&info->lock); - if (current->tgid == info->notify_owner) + if (task_tgid(current) == info->notify_owner) remove_notification(info); spin_unlock(&info->lock); @@ -368,7 +524,7 @@ static int mqueue_flush_file(struct file *filp) static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); int retval = 0; poll_wait(filp, &info->wait_q, poll_tab); @@ -407,7 +563,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr, * sr: SEND or RECV */ static int wq_sleep(struct mqueue_inode_info *info, int sr, - long timeout, struct ext_wait_queue *ewp) + ktime_t *timeout, struct ext_wait_queue *ewp) { int retval; signed long time; @@ -418,7 +574,8 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr, set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); - time = schedule_timeout(timeout); + time = schedule_hrtimeout_range_clock(timeout, 0, + HRTIMER_MODE_ABS, CLOCK_REALTIME); while (ewp->state == STATE_PENDING) cpu_relax(); @@ -462,30 +619,10 @@ static struct ext_wait_queue *wq_get_first_waiter( return list_entry(ptr, struct ext_wait_queue, list); } -/* Auxiliary functions to manipulate messages' list */ -static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info) -{ - int k; - - k = info->attr.mq_curmsgs - 1; - while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) { - info->messages[k + 1] = info->messages[k]; - k--; - } - info->attr.mq_curmsgs++; - info->qsize += ptr->m_ts; - info->messages[k + 1] = ptr; -} - -static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) -{ - info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts; - return info->messages[info->attr.mq_curmsgs]; -} static inline void set_cookie(struct sk_buff *skb, char code) { - ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; + ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* @@ -511,234 +648,255 @@ static void __do_notify(struct mqueue_inode_info *info) sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; - sig_i.si_pid = current->tgid; - sig_i.si_uid = current->uid; - - kill_proc_info(info->notify.sigev_signo, - &sig_i, info->notify_owner); + /* map current pid/uid into info->owner's namespaces */ + rcu_read_lock(); + sig_i.si_pid = task_tgid_nr_ns(current, + ns_of_pid(info->notify_owner)); + sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); + rcu_read_unlock(); + + kill_pid_info(info->notify.sigev_signo, + &sig_i, info->notify_owner); break; case SIGEV_THREAD: 
set_cookie(info->notify_cookie, NOTIFY_WOKENUP); - netlink_sendskb(info->notify_sock, - info->notify_cookie, 0); + netlink_sendskb(info->notify_sock, info->notify_cookie); break; } /* after notification unregisters process */ - info->notify_owner = 0; + put_pid(info->notify_owner); + put_user_ns(info->notify_user_ns); + info->notify_owner = NULL; + info->notify_user_ns = NULL; } wake_up(&info->wait_q); } -static long prepare_timeout(const struct timespec __user *u_arg) +static int prepare_timeout(const struct timespec __user *u_abs_timeout, + ktime_t *expires, struct timespec *ts) { - struct timespec ts, nowts; - long timeout; - - if (u_arg) { - if (unlikely(copy_from_user(&ts, u_arg, - sizeof(struct timespec)))) - return -EFAULT; - - if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0 - || ts.tv_nsec >= NSEC_PER_SEC)) - return -EINVAL; - nowts = CURRENT_TIME; - /* first subtract as jiffies can't be too big */ - ts.tv_sec -= nowts.tv_sec; - if (ts.tv_nsec < nowts.tv_nsec) { - ts.tv_nsec += NSEC_PER_SEC; - ts.tv_sec--; - } - ts.tv_nsec -= nowts.tv_nsec; - if (ts.tv_sec < 0) - return 0; - - timeout = timespec_to_jiffies(&ts) + 1; - } else - return MAX_SCHEDULE_TIMEOUT; + if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) + return -EFAULT; + if (!timespec_valid(ts)) + return -EINVAL; - return timeout; + *expires = timespec_to_ktime(*ts); + return 0; } static void remove_notification(struct mqueue_inode_info *info) { - if (info->notify_owner != 0 && + if (info->notify_owner != NULL && info->notify.sigev_notify == SIGEV_THREAD) { set_cookie(info->notify_cookie, NOTIFY_REMOVED); - netlink_sendskb(info->notify_sock, info->notify_cookie, 0); + netlink_sendskb(info->notify_sock, info->notify_cookie); } - info->notify_owner = 0; + put_pid(info->notify_owner); + put_user_ns(info->notify_user_ns); + info->notify_owner = NULL; + info->notify_user_ns = NULL; } -static int mq_attr_ok(struct mq_attr *attr) +static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) { + int mq_treesize; + unsigned long total_size; + if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) - return 0; + return -EINVAL; if (capable(CAP_SYS_RESOURCE)) { - if (attr->mq_maxmsg > HARD_MSGMAX) - return 0; + if (attr->mq_maxmsg > HARD_MSGMAX || + attr->mq_msgsize > HARD_MSGSIZEMAX) + return -EINVAL; } else { - if (attr->mq_maxmsg > msg_max || - attr->mq_msgsize > msgsize_max) - return 0; + if (attr->mq_maxmsg > ipc_ns->mq_msg_max || + attr->mq_msgsize > ipc_ns->mq_msgsize_max) + return -EINVAL; } /* check for overflow */ if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) - return 0; - if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) + - (attr->mq_maxmsg * sizeof (struct msg_msg *)) < - (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize)) - return 0; - return 1; + return -EOVERFLOW; + mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + total_size = attr->mq_maxmsg * attr->mq_msgsize; + if (total_size + mq_treesize < total_size) + return -EOVERFLOW; + return 0; } /* * Invoked when creating a new queue via sys_mq_open */ -static struct file *do_create(struct dentry *dir, struct dentry *dentry, - int oflag, mode_t mode, struct mq_attr __user *u_attr) +static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, + struct path *path, int oflag, umode_t mode, + struct mq_attr *attr) { - struct file *filp; - struct mq_attr attr; + const struct cred *cred = current_cred(); int ret; - if (u_attr != NULL) 
{ - if (copy_from_user(&attr, u_attr, sizeof(attr))) - return ERR_PTR(-EFAULT); - if (!mq_attr_ok(&attr)) - return ERR_PTR(-EINVAL); + if (attr) { + ret = mq_attr_ok(ipc_ns, attr); + if (ret) + return ERR_PTR(ret); /* store for use during create */ - dentry->d_fsdata = &attr; + path->dentry->d_fsdata = attr; + } else { + struct mq_attr def_attr; + + def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, + ipc_ns->mq_msg_default); + def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, + ipc_ns->mq_msgsize_default); + ret = mq_attr_ok(ipc_ns, &def_attr); + if (ret) + return ERR_PTR(ret); } - mode &= ~current->fs->umask; - ret = vfs_create(dir->d_inode, dentry, mode, NULL); - dentry->d_fsdata = NULL; + mode &= ~current_umask(); + ret = vfs_create(dir, path->dentry, mode, true); + path->dentry->d_fsdata = NULL; if (ret) return ERR_PTR(ret); - - filp = dentry_open(dentry, mqueue_mnt, oflag); - if (!IS_ERR(filp)) - dget(dentry); - - return filp; + return dentry_open(path, oflag, cred); } /* Opens existing queue */ -static struct file *do_open(struct dentry *dentry, int oflag) +static struct file *do_open(struct path *path, int oflag) { -static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, - MAY_READ | MAY_WRITE }; - struct file *filp; - + static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, + MAY_READ | MAY_WRITE }; + int acc; if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) return ERR_PTR(-EINVAL); - - if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) + acc = oflag2acc[oflag & O_ACCMODE]; + if (inode_permission(path->dentry->d_inode, acc)) return ERR_PTR(-EACCES); - - filp = dentry_open(dentry, mqueue_mnt, oflag); - - if (!IS_ERR(filp)) - dget(dentry); - - return filp; + return dentry_open(path, oflag, current_cred()); } -asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode, - struct mq_attr __user *u_attr) +SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, + struct mq_attr __user *, u_attr) { - struct dentry *dentry; + struct path path; struct file *filp; - char *name; + struct filename *name; + struct mq_attr attr; int fd, error; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + struct vfsmount *mnt = ipc_ns->mq_mnt; + struct dentry *root = mnt->mnt_root; + int ro; + + if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) + return -EFAULT; + + audit_mq_open(oflag, mode, u_attr ? &attr : NULL); if (IS_ERR(name = getname(u_name))) return PTR_ERR(name); - fd = get_unused_fd(); + fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) goto out_putname; - down(&mqueue_mnt->mnt_root->d_inode->i_sem); - dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_err; + ro = mnt_want_write(mnt); /* we'll drop it in any case */ + error = 0; + mutex_lock(&root->d_inode->i_mutex); + path.dentry = lookup_one_len(name->name, root, strlen(name->name)); + if (IS_ERR(path.dentry)) { + error = PTR_ERR(path.dentry); + goto out_putfd; } - mntget(mqueue_mnt); + path.mnt = mntget(mnt); if (oflag & O_CREAT) { - if (dentry->d_inode) { /* entry already exists */ - filp = (oflag & O_EXCL) ? 
ERR_PTR(-EEXIST) : - do_open(dentry, oflag); + if (path.dentry->d_inode) { /* entry already exists */ + audit_inode(name, path.dentry, 0); + if (oflag & O_EXCL) { + error = -EEXIST; + goto out; + } + filp = do_open(&path, oflag); } else { - filp = do_create(mqueue_mnt->mnt_root, dentry, - oflag, mode, u_attr); + if (ro) { + error = ro; + goto out; + } + audit_inode_parent_hidden(name, root); + filp = do_create(ipc_ns, root->d_inode, + &path, oflag, mode, + u_attr ? &attr : NULL); } - } else - filp = (dentry->d_inode) ? do_open(dentry, oflag) : - ERR_PTR(-ENOENT); - - dput(dentry); - - if (IS_ERR(filp)) { - error = PTR_ERR(filp); - goto out_putfd; + } else { + if (!path.dentry->d_inode) { + error = -ENOENT; + goto out; + } + audit_inode(name, path.dentry, 0); + filp = do_open(&path, oflag); } - set_close_on_exec(fd, 1); - fd_install(fd, filp); - goto out_upsem; - + if (!IS_ERR(filp)) + fd_install(fd, filp); + else + error = PTR_ERR(filp); +out: + path_put(&path); out_putfd: - mntput(mqueue_mnt); - put_unused_fd(fd); -out_err: - fd = error; -out_upsem: - up(&mqueue_mnt->mnt_root->d_inode->i_sem); + if (error) { + put_unused_fd(fd); + fd = error; + } + mutex_unlock(&root->d_inode->i_mutex); + if (!ro) + mnt_drop_write(mnt); out_putname: putname(name); return fd; } -asmlinkage long sys_mq_unlink(const char __user *u_name) +SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) { int err; - char *name; + struct filename *name; struct dentry *dentry; struct inode *inode = NULL; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + struct vfsmount *mnt = ipc_ns->mq_mnt; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); - down(&mqueue_mnt->mnt_root->d_inode->i_sem); - dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); + audit_inode_parent_hidden(name, mnt->mnt_root); + err = mnt_want_write(mnt); + if (err) + goto out_name; + mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); + dentry = lookup_one_len(name->name, mnt->mnt_root, + strlen(name->name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } - if (!dentry->d_inode) { + inode = dentry->d_inode; + if (!inode) { err = -ENOENT; - goto out_err; + } else { + ihold(inode); + err = vfs_unlink(dentry->d_parent->d_inode, dentry, NULL); } - - inode = dentry->d_inode; - if (inode) - atomic_inc(&inode->i_count); - - err = vfs_unlink(dentry->d_parent->d_inode, dentry); -out_err: dput(dentry); out_unlock: - up(&mqueue_mnt->mnt_root->d_inode->i_sem); - putname(name); + mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); if (inode) iput(inode); + mnt_drop_write(mnt); +out_name: + putname(name); return err; } @@ -753,7 +911,7 @@ out_unlock: * The receiver accepts the message and returns without grabbing the queue * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers * are necessary. The same algorithm is used for sysv semaphores, see - * ipc/sem.c fore more details. + * ipc/sem.c for more details. * * The same algorithm is used for senders. 
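For illustration (not part of this patch, and with invented names): the handoff described above can be sketched in user space with C11 atomics. The sender publishes the payload, steps the waiter through an intermediate PENDING state, wakes it, and only then marks it READY; the waiter must spin until it observes READY before touching the payload, which is what the STATE_PENDING state and the memory barriers here guarantee.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { STATE_NONE, STATE_PENDING, STATE_READY };

static atomic_int state;
static int payload;                 /* stands in for receiver->msg */

static void *waker(void *unused)
{
    (void)unused;
    payload = 42;                   /* publish the message first ... */
    atomic_store_explicit(&state, STATE_PENDING, memory_order_release);
    /* ... the kernel calls wake_up_process() at this point ... */
    atomic_store_explicit(&state, STATE_READY, memory_order_release);
    return NULL;
}

int main(void)
{
    pthread_t t;

    atomic_init(&state, STATE_NONE);
    pthread_create(&t, NULL, waker, NULL);
    while (atomic_load_explicit(&state, memory_order_acquire) != STATE_READY)
        ;                           /* the waiter must not touch payload yet */
    printf("received %d\n", payload);
    pthread_join(t, NULL);
    return 0;
}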
*/ @@ -784,7 +942,8 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) wake_up_interruptible(&info->wait_q); return; } - msg_insert(sender->msg, info); + if (msg_insert(sender->msg, info)) + return; list_del(&sender->list); sender->state = STATE_PENDING; wake_up_process(sender->task); @@ -792,36 +951,51 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) sender->state = STATE_READY; } -asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, - size_t msg_len, unsigned int msg_prio, - const struct timespec __user *u_abs_timeout) +SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, + size_t, msg_len, unsigned int, msg_prio, + const struct timespec __user *, u_abs_timeout) { - struct file *filp; + struct fd f; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; - long timeout; - int ret; + ktime_t expires, *timeout = NULL; + struct timespec ts; + struct posix_msg_tree_node *new_leaf = NULL; + int ret = 0; + + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &expires, &ts); + if (res) + return res; + timeout = &expires; + } if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; - timeout = prepare_timeout(u_abs_timeout); + audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); - ret = -EBADF; - filp = fget(mqdes); - if (unlikely(!filp)) + f = fdget(mqdes); + if (unlikely(!f.file)) { + ret = -EBADF; goto out; + } - inode = filp->f_dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); + audit_inode(NULL, f.file->f_path.dentry, 0); - if (unlikely(!(filp->f_mode & FMODE_WRITE))) + if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { + ret = -EBADF; goto out_fput; + } if (unlikely(msg_len > info->attr.mq_msgsize)) { ret = -EMSGSIZE; @@ -838,69 +1012,106 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; + /* + * msg_insert really wants us to have a valid, spare node struct so + * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will + * fall back to that if necessary. 
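The pattern this comment describes is worth spelling out: allocate with a sleeping allocator before taking the spinlock, then either install the result under the lock or throw it away if another path filled the cache first. A stripped-down user-space analogue (a sketch, not patch code; a pthread mutex stands in for info->lock):

#include <pthread.h>
#include <stdlib.h>

struct cache {
    pthread_mutex_t lock;
    void *node;                       /* analogue of info->node_cache */
};

static void prime_cache(struct cache *c, size_t sz)
{
    void *spare = NULL;

    if (!c->node)                     /* racy peek: only a hint */
        spare = malloc(sz);           /* may sleep, like GFP_KERNEL */

    pthread_mutex_lock(&c->lock);
    if (!c->node && spare) {
        c->node = spare;              /* save the speculative allocation */
        spare = NULL;
    }
    pthread_mutex_unlock(&c->lock);

    free(spare);                      /* lost the race: discard the extra */
}

int main(void)
{
    struct cache c = { PTHREAD_MUTEX_INITIALIZER, NULL };

    prime_cache(&c, 64);
    free(c.node);
    return 0;
}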
+ */ + if (!info->node_cache) + new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); + spin_lock(&info->lock); + if (!info->node_cache && new_leaf) { + /* Save our speculative allocation into the cache */ + INIT_LIST_HEAD(&new_leaf->msg_list); + info->node_cache = new_leaf; + info->qsize += sizeof(*new_leaf); + new_leaf = NULL; + } else { + kfree(new_leaf); + } + if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { - if (filp->f_flags & O_NONBLOCK) { - spin_unlock(&info->lock); + if (f.file->f_flags & O_NONBLOCK) { ret = -EAGAIN; - } else if (unlikely(timeout < 0)) { - spin_unlock(&info->lock); - ret = timeout; } else { wait.task = current; wait.msg = (void *) msg_ptr; wait.state = STATE_NONE; ret = wq_sleep(info, SEND, timeout, &wait); + /* + * wq_sleep must be called with info->lock held, and + * returns with the lock released + */ + goto out_free; } - if (ret < 0) - free_msg(msg_ptr); } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { pipelined_send(info, msg_ptr, receiver); } else { /* adds message to the queue */ - msg_insert(msg_ptr, info); + ret = msg_insert(msg_ptr, info); + if (ret) + goto out_unlock; __do_notify(info); } inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - spin_unlock(&info->lock); - ret = 0; } +out_unlock: + spin_unlock(&info->lock); +out_free: + if (ret) + free_msg(msg_ptr); out_fput: - fput(filp); + fdput(f); out: return ret; } -asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, - size_t msg_len, unsigned int __user *u_msg_prio, - const struct timespec __user *u_abs_timeout) +SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, + size_t, msg_len, unsigned int __user *, u_msg_prio, + const struct timespec __user *, u_abs_timeout) { - long timeout; ssize_t ret; struct msg_msg *msg_ptr; - struct file *filp; + struct fd f; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; + ktime_t expires, *timeout = NULL; + struct timespec ts; + struct posix_msg_tree_node *new_leaf = NULL; + + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &expires, &ts); + if (res) + return res; + timeout = &expires; + } - timeout = prepare_timeout(u_abs_timeout); + audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL); - ret = -EBADF; - filp = fget(mqdes); - if (unlikely(!filp)) + f = fdget(mqdes); + if (unlikely(!f.file)) { + ret = -EBADF; goto out; + } - inode = filp->f_dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); + audit_inode(NULL, f.file->f_path.dentry, 0); - if (unlikely(!(filp->f_mode & FMODE_READ))) + if (unlikely(!(f.file->f_mode & FMODE_READ))) { + ret = -EBADF; goto out_fput; + } /* checks if buffer is big enough */ if (unlikely(msg_len < info->attr.mq_msgsize)) { @@ -908,16 +1119,29 @@ asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, goto out_fput; } + /* + * msg_insert really wants us to have a valid, spare node struct so + * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will + * fall back to that if necessary. 
+ */ + if (!info->node_cache) + new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); + spin_lock(&info->lock); + + if (!info->node_cache && new_leaf) { + /* Save our speculative allocation into the cache */ + INIT_LIST_HEAD(&new_leaf->msg_list); + info->node_cache = new_leaf; + info->qsize += sizeof(*new_leaf); + } else { + kfree(new_leaf); + } + if (info->attr.mq_curmsgs == 0) { - if (filp->f_flags & O_NONBLOCK) { + if (f.file->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; - msg_ptr = NULL; - } else if (unlikely(timeout < 0)) { - spin_unlock(&info->lock); - ret = timeout; - msg_ptr = NULL; } else { wait.task = current; wait.state = STATE_NONE; @@ -945,7 +1169,7 @@ asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, free_msg(msg_ptr); } out_fput: - fput(filp); + fdput(f); out: return ret; } @@ -955,24 +1179,28 @@ out: * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. */ -asmlinkage long sys_mq_notify(mqd_t mqdes, - const struct sigevent __user *u_notification) +SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, + const struct sigevent __user *, u_notification) { int ret; - struct file *filp; + struct fd f; struct sock *sock; struct inode *inode; struct sigevent notification; struct mqueue_inode_info *info; struct sk_buff *nc; - nc = NULL; - sock = NULL; - if (u_notification != NULL) { + if (u_notification) { if (copy_from_user(¬ification, u_notification, sizeof(struct sigevent))) return -EFAULT; + } + + audit_mq_notify(mqdes, u_notification ? ¬ification : NULL); + nc = NULL; + sock = NULL; + if (u_notification != NULL) { if (unlikely(notification.sigev_notify != SIGEV_NONE && notification.sigev_notify != SIGEV_SIGNAL && notification.sigev_notify != SIGEV_THREAD)) @@ -982,15 +1210,18 @@ asmlinkage long sys_mq_notify(mqd_t mqdes, return -EINVAL; } if (notification.sigev_notify == SIGEV_THREAD) { + long timeo; + /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); - ret = -ENOMEM; - if (!nc) + if (!nc) { + ret = -ENOMEM; goto out; - ret = -EFAULT; + } if (copy_from_user(nc->data, notification.sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { + ret = -EFAULT; goto out; } @@ -998,21 +1229,23 @@ asmlinkage long sys_mq_notify(mqd_t mqdes, skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: - filp = fget(notification.sigev_signo); - ret = -EBADF; - if (!filp) + f = fdget(notification.sigev_signo); + if (!f.file) { + ret = -EBADF; goto out; - sock = netlink_getsockbyfilp(filp); - fput(filp); + } + sock = netlink_getsockbyfilp(f.file); + fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; goto out; } - ret = netlink_attachskb(sock, nc, 0, MAX_SCHEDULE_TIMEOUT); + timeo = MAX_SCHEDULE_TIMEOUT; + ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) - goto retry; + goto retry; if (ret) { sock = NULL; nc = NULL; @@ -1021,24 +1254,27 @@ retry: } } - ret = -EBADF; - filp = fget(mqdes); - if (!filp) + f = fdget(mqdes); + if (!f.file) { + ret = -EBADF; goto out; + } - inode = filp->f_dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); ret = 0; spin_lock(&info->lock); if (u_notification == NULL) { - if (info->notify_owner == current->tgid) { + if (info->notify_owner == task_tgid(current)) { remove_notification(info); inode->i_atime = inode->i_ctime = CURRENT_TIME; } - } else if 
(info->notify_owner != 0) { + } else if (info->notify_owner != NULL) { ret = -EBUSY; } else { switch (notification.sigev_notify) { @@ -1058,28 +1294,30 @@ retry: info->notify.sigev_notify = SIGEV_SIGNAL; break; } - info->notify_owner = current->tgid; + + info->notify_owner = get_pid(task_tgid(current)); + info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = CURRENT_TIME; } spin_unlock(&info->lock); out_fput: - fput(filp); + fdput(f); out: - if (sock) { + if (sock) netlink_detachskb(sock, nc); - } else if (nc) { + else if (nc) dev_kfree_skb(nc); - } + return ret; } -asmlinkage long sys_mq_getsetattr(mqd_t mqdes, - const struct mq_attr __user *u_mqstat, - struct mq_attr __user *u_omqstat) +SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, + const struct mq_attr __user *, u_mqstat, + struct mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; - struct file *filp; + struct fd f; struct inode *inode; struct mqueue_inode_info *info; @@ -1090,25 +1328,31 @@ asmlinkage long sys_mq_getsetattr(mqd_t mqdes, return -EINVAL; } - ret = -EBADF; - filp = fget(mqdes); - if (!filp) + f = fdget(mqdes); + if (!f.file) { + ret = -EBADF; goto out; + } - inode = filp->f_dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); spin_lock(&info->lock); omqstat = info->attr; - omqstat.mq_flags = filp->f_flags & O_NONBLOCK; + omqstat.mq_flags = f.file->f_flags & O_NONBLOCK; if (u_mqstat) { + audit_mq_getsetattr(mqdes, &mqstat); + spin_lock(&f.file->f_lock); if (mqstat.mq_flags & O_NONBLOCK) - filp->f_flags |= O_NONBLOCK; + f.file->f_flags |= O_NONBLOCK; else - filp->f_flags &= ~O_NONBLOCK; + f.file->f_flags &= ~O_NONBLOCK; + spin_unlock(&f.file->f_lock); inode->i_atime = inode->i_ctime = CURRENT_TIME; } @@ -1121,94 +1365,65 @@ asmlinkage long sys_mq_getsetattr(mqd_t mqdes, ret = -EFAULT; out_fput: - fput(filp); + fdput(f); out: return ret; } -static struct inode_operations mqueue_dir_inode_operations = { +static const struct inode_operations mqueue_dir_inode_operations = { .lookup = simple_lookup, .create = mqueue_create, .unlink = mqueue_unlink, }; -static struct file_operations mqueue_file_operations = { +static const struct file_operations mqueue_file_operations = { .flush = mqueue_flush_file, .poll = mqueue_poll_file, .read = mqueue_read_file, + .llseek = default_llseek, }; -static struct super_operations mqueue_super_ops = { +static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, .destroy_inode = mqueue_destroy_inode, + .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, - .delete_inode = mqueue_delete_inode, - .drop_inode = generic_delete_inode, }; static struct file_system_type mqueue_fs_type = { .name = "mqueue", - .get_sb = mqueue_get_sb, + .mount = mqueue_mount, .kill_sb = kill_litter_super, + .fs_flags = FS_USERNS_MOUNT, }; -static int msg_max_limit_min = DFLT_MSGMAX; -static int msg_max_limit_max = HARD_MSGMAX; - -static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX; -static int msg_maxsize_limit_max = INT_MAX; - -static ctl_table mq_sysctls[] = { - { - .ctl_name = CTL_QUEUESMAX, - .procname = "queues_max", - .data = &queues_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .ctl_name = CTL_MSGMAX, - .procname = "msg_max", - .data = &msg_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = 
&proc_dointvec_minmax, - .extra1 = &msg_max_limit_min, - .extra2 = &msg_max_limit_max, - }, - { - .ctl_name = CTL_MSGSIZEMAX, - .procname = "msgsize_max", - .data = &msgsize_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .extra1 = &msg_maxsize_limit_min, - .extra2 = &msg_maxsize_limit_max, - }, - { .ctl_name = 0 } -}; +int mq_init_ns(struct ipc_namespace *ns) +{ + ns->mq_queues_count = 0; + ns->mq_queues_max = DFLT_QUEUESMAX; + ns->mq_msg_max = DFLT_MSGMAX; + ns->mq_msgsize_max = DFLT_MSGSIZEMAX; + ns->mq_msg_default = DFLT_MSG; + ns->mq_msgsize_default = DFLT_MSGSIZE; + + ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); + if (IS_ERR(ns->mq_mnt)) { + int err = PTR_ERR(ns->mq_mnt); + ns->mq_mnt = NULL; + return err; + } + return 0; +} -static ctl_table mq_sysctl_dir[] = { - { - .ctl_name = FS_MQUEUE, - .procname = "mqueue", - .mode = 0555, - .child = mq_sysctls, - }, - { .ctl_name = 0 } -}; +void mq_clear_sbinfo(struct ipc_namespace *ns) +{ + ns->mq_mnt->mnt_sb->s_fs_info = NULL; +} -static ctl_table mq_sysctl_root[] = { - { - .ctl_name = CTL_FS, - .procname = "fs", - .mode = 0555, - .child = mq_sysctl_dir, - }, - { .ctl_name = 0 } -}; +void mq_put_mnt(struct ipc_namespace *ns) +{ + kern_unmount(ns->mq_mnt); +} static int __init init_mqueue_fs(void) { @@ -1216,26 +1431,23 @@ static int __init init_mqueue_fs(void) mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache", sizeof(struct mqueue_inode_info), 0, - SLAB_HWCACHE_ALIGN, init_once, NULL); + SLAB_HWCACHE_ALIGN, init_once); if (mqueue_inode_cachep == NULL) return -ENOMEM; - /* ignore failues - they are not fatal */ - mq_sysctl_table = register_sysctl_table(mq_sysctl_root, 0); + /* ignore failures - they are not fatal */ + mq_sysctl_table = mq_register_sysctl_table(); error = register_filesystem(&mqueue_fs_type); if (error) goto out_sysctl; - if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) { - error = PTR_ERR(mqueue_mnt); - goto out_filesystem; - } - - /* internal initialization - not common for vfs */ - queues_count = 0; spin_lock_init(&mq_lock); + error = mq_init_ns(&init_ipc_ns); + if (error) + goto out_filesystem; + return 0; out_filesystem: @@ -1243,11 +1455,8 @@ out_filesystem: out_sysctl: if (mq_sysctl_table) unregister_sysctl_table(mq_sysctl_table); - if (kmem_cache_destroy(mqueue_inode_cachep)) { - printk(KERN_INFO - "mqueue_inode_cache: not all structures were freed\n"); - } + kmem_cache_destroy(mqueue_inode_cachep); return error; } -__initcall(init_mqueue_fs); +device_initcall(init_mqueue_fs); diff --git a/ipc/msg.c b/ipc/msg.c index d035bd2aba9..c5d8e374998 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -1,6 +1,6 @@ /* * linux/ipc/msg.c - * Copyright (C) 1992 Krishna Balasubramanian + * Copyright (C) 1992 Krishna Balasubramanian * * Removed all the remaining kerneld mess * Catch the -EFAULT stuff properly @@ -12,14 +12,21 @@ * * mostly rewritten, threaded and wake-one semantics added * MSGMAX limit removed, sysctl's added - * (c) 1999 Manfred Spraul <manfreds@colorfullife.com> + * (c) 1999 Manfred Spraul <manfred@colorfullife.com> + * + * support for audit of ipc object properties and permission changes + * Dustin Kirkland <dustin.kirkland@us.ibm.com> + * + * namespaces support + * OpenVZ, SWsoft Inc. 
+ * Pavel Emelianov <xemul@openvz.org> */ -#include <linux/config.h> -#include <linux/slab.h> +#include <linux/capability.h> #include <linux/msg.h> #include <linux/spinlock.h> #include <linux/init.h> +#include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/security.h> @@ -27,224 +34,238 @@ #include <linux/syscalls.h> #include <linux/audit.h> #include <linux/seq_file.h> +#include <linux/rwsem.h> +#include <linux/nsproxy.h> +#include <linux/ipc_namespace.h> + #include <asm/current.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" -/* sysctl: */ -int msg_ctlmax = MSGMAX; -int msg_ctlmnb = MSGMNB; -int msg_ctlmni = MSGMNI; - /* one msg_receiver structure for each sleeping receiver */ struct msg_receiver { - struct list_head r_list; - struct task_struct* r_tsk; - - int r_mode; - long r_msgtype; - long r_maxsize; - - struct msg_msg* volatile r_msg; + struct list_head r_list; + struct task_struct *r_tsk; + + int r_mode; + long r_msgtype; + long r_maxsize; + + /* + * Mark r_msg volatile so that the compiler + * does not try to get smart and optimize + * it. We rely on this for the lockless + * receive algorithm. + */ + struct msg_msg *volatile r_msg; }; /* one msg_sender for each sleeping sender */ struct msg_sender { - struct list_head list; - struct task_struct* tsk; + struct list_head list; + struct task_struct *tsk; }; #define SEARCH_ANY 1 #define SEARCH_EQUAL 2 #define SEARCH_NOTEQUAL 3 #define SEARCH_LESSEQUAL 4 +#define SEARCH_NUMBER 5 -static atomic_t msg_bytes = ATOMIC_INIT(0); -static atomic_t msg_hdrs = ATOMIC_INIT(0); +#define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) -static struct ipc_ids msg_ids; +static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) +{ + struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id); -#define msg_lock(id) ((struct msg_queue*)ipc_lock(&msg_ids,id)) -#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) -#define msg_rmid(id) ((struct msg_queue*)ipc_rmid(&msg_ids,id)) -#define msg_checkid(msq, msgid) \ - ipc_checkid(&msg_ids,&msq->q_perm,msgid) -#define msg_buildid(id, seq) \ - ipc_buildid(&msg_ids, id, seq) + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); -static void freeque (struct msg_queue *msq, int id); -static int newque (key_t key, int msgflg); -#ifdef CONFIG_PROC_FS -static int sysvipc_msg_proc_show(struct seq_file *s, void *it); -#endif + return container_of(ipcp, struct msg_queue, q_perm); +} -void __init msg_init (void) +static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns, + int id) { - ipc_init_ids(&msg_ids,msg_ctlmni); - ipc_init_proc_interface("sysvipc/msg", - " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", - &msg_ids, - sysvipc_msg_proc_show); + struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id); + + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); + + return container_of(ipcp, struct msg_queue, q_perm); +} + +static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) +{ + ipc_rmid(&msg_ids(ns), &s->q_perm); +} + +static void msg_rcu_free(struct rcu_head *head) +{ + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + struct msg_queue *msq = ipc_rcu_to_struct(p); + + security_msg_queue_free(msq); + ipc_rcu_free(head); } -static int newque (key_t key, int msgflg) +/** + * newque - Create a new msg queue + * @ns: namespace + * @params: ptr to the structure that contains the key and msgflg + * + * Called with msg_ids.rwsem held (writer) + */ +static int 
newque(struct ipc_namespace *ns, struct ipc_params *params) { - int id; - int retval; struct msg_queue *msq; + int id, retval; + key_t key = params->key; + int msgflg = params->flg; - msq = ipc_rcu_alloc(sizeof(*msq)); - if (!msq) + msq = ipc_rcu_alloc(sizeof(*msq)); + if (!msq) return -ENOMEM; - msq->q_perm.mode = (msgflg & S_IRWXUGO); + msq->q_perm.mode = msgflg & S_IRWXUGO; msq->q_perm.key = key; msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { - ipc_rcu_putref(msq); + ipc_rcu_putref(msq, ipc_rcu_free); return retval; } - id = ipc_addid(&msg_ids, &msq->q_perm, msg_ctlmni); - if(id == -1) { - security_msg_queue_free(msq); - ipc_rcu_putref(msq); - return -ENOSPC; + /* ipc_addid() locks msq upon success. */ + id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); + if (id < 0) { + ipc_rcu_putref(msq, msg_rcu_free); + return id; } - msq->q_id = msg_buildid(id,msq->q_perm.seq); msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; - msq->q_qbytes = msg_ctlmnb; + msq->q_qbytes = ns->msg_ctlmnb; msq->q_lspid = msq->q_lrpid = 0; INIT_LIST_HEAD(&msq->q_messages); INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); - msg_unlock(msq); - return msq->q_id; + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); + + return msq->q_perm.id; } -static inline void ss_add(struct msg_queue* msq, struct msg_sender* mss) +static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss) { - mss->tsk=current; - current->state=TASK_INTERRUPTIBLE; - list_add_tail(&mss->list,&msq->q_senders); + mss->tsk = current; + __set_current_state(TASK_INTERRUPTIBLE); + list_add_tail(&mss->list, &msq->q_senders); } -static inline void ss_del(struct msg_sender* mss) +static inline void ss_del(struct msg_sender *mss) { - if(mss->list.next != NULL) + if (mss->list.next != NULL) list_del(&mss->list); } -static void ss_wakeup(struct list_head* h, int kill) +static void ss_wakeup(struct list_head *h, int kill) { - struct list_head *tmp; + struct msg_sender *mss, *t; - tmp = h->next; - while (tmp != h) { - struct msg_sender* mss; - - mss = list_entry(tmp,struct msg_sender,list); - tmp = tmp->next; - if(kill) - mss->list.next=NULL; + list_for_each_entry_safe(mss, t, h, list) { + if (kill) + mss->list.next = NULL; wake_up_process(mss->tsk); } } -static void expunge_all(struct msg_queue* msq, int res) +static void expunge_all(struct msg_queue *msq, int res) { - struct list_head *tmp; + struct msg_receiver *msr, *t; - tmp = msq->q_receivers.next; - while (tmp != &msq->q_receivers) { - struct msg_receiver* msr; - - msr = list_entry(tmp,struct msg_receiver,r_list); - tmp = tmp->next; - msr->r_msg = NULL; + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { + msr->r_msg = NULL; /* initialize expunge ordering */ wake_up_process(msr->r_tsk); + /* + * Ensure that the wakeup is visible before setting r_msg as + * the receiving end depends on it: either spinning on a nil, + * or dealing with -EAGAIN cases. See lockless receive part 1 + * and 2 in do_msgrcv(). + */ smp_mb(); msr->r_msg = ERR_PTR(res); } } -/* - * freeque() wakes up waiters on the sender and receiver waiting queue, - * removes the message queue from message queue ID - * array, and cleans up all the messages associated with this queue. + +/* + * freeque() wakes up waiters on the sender and receiver waiting queue, + * removes the message queue from message queue ID IDR, and cleans up all the + * messages associated with this queue. 
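For orientation, both functions sit directly under the syscall entry points: msgget() reaches newque() through the ipc_ops table registered below, and msgctl(IPC_RMID) reaches freeque() through msgctl_down(). A minimal user-space round trip over the standard SysV API (not patch code; error handling trimmed):

#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>

int main(void)
{
    int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);   /* -> ipcget() -> newque() */

    if (id < 0) {
        perror("msgget");
        return 1;
    }
    if (msgctl(id, IPC_RMID, NULL) < 0) {             /* -> msgctl_down() -> freeque() */
        perror("msgctl(IPC_RMID)");
        return 1;
    }
    puts("queue created and removed");
    return 0;
}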
* - * msg_ids.sem and the spinlock for this message queue is hold - * before freeque() is called. msg_ids.sem remains locked on exit. + * msg_ids.rwsem (writer) and the spinlock for this message queue are held + * before freeque() is called. msg_ids.rwsem remains locked on exit. */ -static void freeque (struct msg_queue *msq, int id) -{ - struct list_head *tmp; - - expunge_all(msq,-EIDRM); - ss_wakeup(&msq->q_senders,1); - msq = msg_rmid(id); - msg_unlock(msq); - - tmp = msq->q_messages.next; - while(tmp != &msq->q_messages) { - struct msg_msg* msg = list_entry(tmp,struct msg_msg,m_list); - tmp = tmp->next; - atomic_dec(&msg_hdrs); +static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) +{ + struct msg_msg *msg, *t; + struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); + + expunge_all(msq, -EIDRM); + ss_wakeup(&msq->q_senders, 1); + msg_rmid(ns, msq); + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); + + list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { + atomic_dec(&ns->msg_hdrs); free_msg(msg); } - atomic_sub(msq->q_cbytes, &msg_bytes); - security_msg_queue_free(msq); - ipc_rcu_putref(msq); + atomic_sub(msq->q_cbytes, &ns->msg_bytes); + ipc_rcu_putref(msq, msg_rcu_free); } -asmlinkage long sys_msgget (key_t key, int msgflg) +/* + * Called with msg_ids.rwsem and ipcp locked. + */ +static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) { - int id, ret = -EPERM; - struct msg_queue *msq; - - down(&msg_ids.sem); - if (key == IPC_PRIVATE) - ret = newque(key, msgflg); - else if ((id = ipc_findkey(&msg_ids, key)) == -1) { /* key not used */ - if (!(msgflg & IPC_CREAT)) - ret = -ENOENT; - else - ret = newque(key, msgflg); - } else if (msgflg & IPC_CREAT && msgflg & IPC_EXCL) { - ret = -EEXIST; - } else { - msq = msg_lock(id); - if(msq==NULL) - BUG(); - if (ipcperms(&msq->q_perm, msgflg)) - ret = -EACCES; - else { - int qid = msg_buildid(id, msq->q_perm.seq); - ret = security_msg_queue_associate(msq, msgflg); - if (!ret) - ret = qid; - } - msg_unlock(msq); - } - up(&msg_ids.sem); - return ret; + struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); + + return security_msg_queue_associate(msq, msgflg); +} + +SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) +{ + struct ipc_namespace *ns; + static const struct ipc_ops msg_ops = { + .getnew = newque, + .associate = msg_security, + }; + struct ipc_params msg_params; + + ns = current->nsproxy->ipc_ns; + + msg_params.key = key; + msg_params.flg = msgflg; + + return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params); } -static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) +static inline unsigned long +copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: - return copy_to_user (buf, in, sizeof(*in)); + return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: - { + { struct msqid_ds out; - memset(&out,0,sizeof(out)); + memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm); @@ -252,18 +273,18 @@ static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ out.msg_rtime = in->msg_rtime; out.msg_ctime = in->msg_ctime; - if(in->msg_cbytes > USHRT_MAX) + if (in->msg_cbytes > USHRT_MAX) out.msg_cbytes = USHRT_MAX; else out.msg_cbytes = in->msg_cbytes; out.msg_lcbytes = in->msg_cbytes; - if(in->msg_qnum > USHRT_MAX) + if (in->msg_qnum > USHRT_MAX) out.msg_qnum = USHRT_MAX; else out.msg_qnum = in->msg_qnum; - 
if(in->msg_qbytes > USHRT_MAX) + if (in->msg_qbytes > USHRT_MAX) out.msg_qbytes = USHRT_MAX; else out.msg_qbytes = in->msg_qbytes; @@ -272,137 +293,200 @@ static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ out.msg_lspid = in->msg_lspid; out.msg_lrpid = in->msg_lrpid; - return copy_to_user (buf, &out, sizeof(out)); - } + return copy_to_user(buf, &out, sizeof(out)); + } default: return -EINVAL; } } -struct msq_setbuf { - unsigned long qbytes; - uid_t uid; - gid_t gid; - mode_t mode; -}; - -static inline unsigned long copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) +static inline unsigned long +copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: - { - struct msqid64_ds tbuf; - - if (copy_from_user (&tbuf, buf, sizeof (tbuf))) + if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; - - out->qbytes = tbuf.msg_qbytes; - out->uid = tbuf.msg_perm.uid; - out->gid = tbuf.msg_perm.gid; - out->mode = tbuf.msg_perm.mode; - return 0; - } case IPC_OLD: - { + { struct msqid_ds tbuf_old; - if (copy_from_user (&tbuf_old, buf, sizeof (tbuf_old))) + if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->uid = tbuf_old.msg_perm.uid; - out->gid = tbuf_old.msg_perm.gid; - out->mode = tbuf_old.msg_perm.mode; + out->msg_perm.uid = tbuf_old.msg_perm.uid; + out->msg_perm.gid = tbuf_old.msg_perm.gid; + out->msg_perm.mode = tbuf_old.msg_perm.mode; - if(tbuf_old.msg_qbytes == 0) - out->qbytes = tbuf_old.msg_lqbytes; + if (tbuf_old.msg_qbytes == 0) + out->msg_qbytes = tbuf_old.msg_lqbytes; else - out->qbytes = tbuf_old.msg_qbytes; + out->msg_qbytes = tbuf_old.msg_qbytes; return 0; - } + } default: return -EINVAL; } } -asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) +/* + * This function handles some msgctl commands which require the rwsem + * to be held in write mode. + * NOTE: no locks must be held, the rwsem is taken inside this function. + */ +static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, + struct msqid_ds __user *buf, int version) { - int err, version; - struct msg_queue *msq; - struct msq_setbuf setbuf; struct kern_ipc_perm *ipcp; - - if (msqid < 0 || cmd < 0) - return -EINVAL; + struct msqid64_ds uninitialized_var(msqid64); + struct msg_queue *msq; + int err; - version = ipc_parse_version(&cmd); + if (cmd == IPC_SET) { + if (copy_msqid_from_user(&msqid64, buf, version)) + return -EFAULT; + } + + down_write(&msg_ids(ns).rwsem); + rcu_read_lock(); + + ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd, + &msqid64.msg_perm, msqid64.msg_qbytes); + if (IS_ERR(ipcp)) { + err = PTR_ERR(ipcp); + goto out_unlock1; + } + + msq = container_of(ipcp, struct msg_queue, q_perm); + + err = security_msg_queue_msgctl(msq, cmd); + if (err) + goto out_unlock1; + + switch (cmd) { + case IPC_RMID: + ipc_lock_object(&msq->q_perm); + /* freeque unlocks the ipc object and rcu */ + freeque(ns, ipcp); + goto out_up; + case IPC_SET: + if (msqid64.msg_qbytes > ns->msg_ctlmnb && + !capable(CAP_SYS_RESOURCE)) { + err = -EPERM; + goto out_unlock1; + } + + ipc_lock_object(&msq->q_perm); + err = ipc_update_perm(&msqid64.msg_perm, ipcp); + if (err) + goto out_unlock0; + + msq->q_qbytes = msqid64.msg_qbytes; + + msq->q_ctime = get_seconds(); + /* sleeping receivers might be excluded by + * stricter permissions. + */ + expunge_all(msq, -EAGAIN); + /* sleeping senders might be able to send + * due to a larger queue size. 
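These two wakeups are user-visible: expunge_all(-EAGAIN) forces sleeping receivers to re-evaluate against the tightened permissions, and the ss_wakeup() right below lets blocked senders retry against the larger byte limit. A hypothetical user-space trigger for this IPC_SET branch (illustrative only):

#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>

int main(void)
{
    struct msqid_ds ds;
    int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

    if (id < 0 || msgctl(id, IPC_STAT, &ds) < 0)
        return 1;
    ds.msg_qbytes *= 2;                /* above msg_ctlmnb needs CAP_SYS_RESOURCE */
    if (msgctl(id, IPC_SET, &ds) < 0)  /* lands in the IPC_SET branch above */
        perror("msgctl(IPC_SET)");
    msgctl(id, IPC_RMID, NULL);
    return 0;
}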
+ */ + ss_wakeup(&msq->q_senders, 0); + break; + default: + err = -EINVAL; + goto out_unlock1; + } + +out_unlock0: + ipc_unlock_object(&msq->q_perm); +out_unlock1: + rcu_read_unlock(); +out_up: + up_write(&msg_ids(ns).rwsem); + return err; +} + +static int msgctl_nolock(struct ipc_namespace *ns, int msqid, + int cmd, int version, void __user *buf) +{ + int err; + struct msg_queue *msq; switch (cmd) { - case IPC_INFO: - case MSG_INFO: - { + case IPC_INFO: + case MSG_INFO: + { struct msginfo msginfo; int max_id; + if (!buf) return -EFAULT; - /* We must not return kernel stack data. + + /* + * We must not return kernel stack data. * due to padding, it's not enough * to set all member fields. */ - err = security_msg_queue_msgctl(NULL, cmd); if (err) return err; - memset(&msginfo,0,sizeof(msginfo)); - msginfo.msgmni = msg_ctlmni; - msginfo.msgmax = msg_ctlmax; - msginfo.msgmnb = msg_ctlmnb; + memset(&msginfo, 0, sizeof(msginfo)); + msginfo.msgmni = ns->msg_ctlmni; + msginfo.msgmax = ns->msg_ctlmax; + msginfo.msgmnb = ns->msg_ctlmnb; msginfo.msgssz = MSGSSZ; msginfo.msgseg = MSGSEG; - down(&msg_ids.sem); + down_read(&msg_ids(ns).rwsem); if (cmd == MSG_INFO) { - msginfo.msgpool = msg_ids.in_use; - msginfo.msgmap = atomic_read(&msg_hdrs); - msginfo.msgtql = atomic_read(&msg_bytes); + msginfo.msgpool = msg_ids(ns).in_use; + msginfo.msgmap = atomic_read(&ns->msg_hdrs); + msginfo.msgtql = atomic_read(&ns->msg_bytes); } else { msginfo.msgmap = MSGMAP; msginfo.msgpool = MSGPOOL; msginfo.msgtql = MSGTQL; } - max_id = msg_ids.max_id; - up(&msg_ids.sem); - if (copy_to_user (buf, &msginfo, sizeof(struct msginfo))) + max_id = ipc_get_maxid(&msg_ids(ns)); + up_read(&msg_ids(ns).rwsem); + if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; - return (max_id < 0) ? 0: max_id; + return (max_id < 0) ? 
0 : max_id; } + case MSG_STAT: case IPC_STAT: { struct msqid64_ds tbuf; int success_return; + if (!buf) return -EFAULT; - if(cmd == MSG_STAT && msqid >= msg_ids.entries->size) - return -EINVAL; - memset(&tbuf,0,sizeof(tbuf)); + memset(&tbuf, 0, sizeof(tbuf)); - msq = msg_lock(msqid); - if (msq == NULL) - return -EINVAL; - - if(cmd == MSG_STAT) { - success_return = msg_buildid(msqid, msq->q_perm.seq); + rcu_read_lock(); + if (cmd == MSG_STAT) { + msq = msq_obtain_object(ns, msqid); + if (IS_ERR(msq)) { + err = PTR_ERR(msq); + goto out_unlock; + } + success_return = msq->q_perm.id; } else { - err = -EIDRM; - if (msg_checkid(msq,msqid)) + msq = msq_obtain_object_check(ns, msqid); + if (IS_ERR(msq)) { + err = PTR_ERR(msq); goto out_unlock; + } success_return = 0; } + err = -EACCES; - if (ipcperms (&msq->q_perm, S_IRUGO)) + if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock; err = security_msg_queue_msgctl(msq, cmd); @@ -418,341 +502,430 @@ asmlinkage long sys_msgctl (int msqid, int cmd, struct msqid_ds __user *buf) tbuf.msg_qbytes = msq->q_qbytes; tbuf.msg_lspid = msq->q_lspid; tbuf.msg_lrpid = msq->q_lrpid; - msg_unlock(msq); + rcu_read_unlock(); + if (copy_msqid_to_user(buf, &tbuf, version)) return -EFAULT; return success_return; } - case IPC_SET: - if (!buf) - return -EFAULT; - if (copy_msqid_from_user (&setbuf, buf, version)) - return -EFAULT; - if ((err = audit_ipc_perms(setbuf.qbytes, setbuf.uid, setbuf.gid, setbuf.mode))) - return err; - break; - case IPC_RMID: - break; + default: - return -EINVAL; + return -EINVAL; } - down(&msg_ids.sem); - msq = msg_lock(msqid); - err=-EINVAL; - if (msq == NULL) - goto out_up; + return err; +out_unlock: + rcu_read_unlock(); + return err; +} - err = -EIDRM; - if (msg_checkid(msq,msqid)) - goto out_unlock_up; - ipcp = &msq->q_perm; - err = -EPERM; - if (current->euid != ipcp->cuid && - current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) - /* We _could_ check for CAP_CHOWN above, but we don't */ - goto out_unlock_up; +SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) +{ + int version; + struct ipc_namespace *ns; - err = security_msg_queue_msgctl(msq, cmd); - if (err) - goto out_unlock_up; + if (msqid < 0 || cmd < 0) + return -EINVAL; + + version = ipc_parse_version(&cmd); + ns = current->nsproxy->ipc_ns; switch (cmd) { + case IPC_INFO: + case MSG_INFO: + case MSG_STAT: /* msqid is an index rather than a msg queue id */ + case IPC_STAT: + return msgctl_nolock(ns, msqid, cmd, version, buf); case IPC_SET: - { - err = -EPERM; - if (setbuf.qbytes > msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) - goto out_unlock_up; - - msq->q_qbytes = setbuf.qbytes; - - ipcp->uid = setbuf.uid; - ipcp->gid = setbuf.gid; - ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | - (S_IRWXUGO & setbuf.mode); - msq->q_ctime = get_seconds(); - /* sleeping receivers might be excluded by - * stricter permissions. - */ - expunge_all(msq,-EAGAIN); - /* sleeping senders might be able to send - * due to a larger queue size. 
- */ - ss_wakeup(&msq->q_senders,0); - msg_unlock(msq); - break; - } case IPC_RMID: - freeque (msq, msqid); - break; + return msgctl_down(ns, msqid, cmd, buf, version); + default: + return -EINVAL; } - err = 0; -out_up: - up(&msg_ids.sem); - return err; -out_unlock_up: - msg_unlock(msq); - goto out_up; -out_unlock: - msg_unlock(msq); - return err; } -static int testmsg(struct msg_msg* msg,long type,int mode) +static int testmsg(struct msg_msg *msg, long type, int mode) { - switch(mode) - { - case SEARCH_ANY: + switch (mode) { + case SEARCH_ANY: + case SEARCH_NUMBER: + return 1; + case SEARCH_LESSEQUAL: + if (msg->m_type <= type) return 1; - case SEARCH_LESSEQUAL: - if(msg->m_type <=type) - return 1; - break; - case SEARCH_EQUAL: - if(msg->m_type == type) - return 1; - break; - case SEARCH_NOTEQUAL: - if(msg->m_type != type) - return 1; - break; + break; + case SEARCH_EQUAL: + if (msg->m_type == type) + return 1; + break; + case SEARCH_NOTEQUAL: + if (msg->m_type != type) + return 1; + break; } return 0; } -static inline int pipelined_send(struct msg_queue* msq, struct msg_msg* msg) +static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) { - struct list_head* tmp; + struct msg_receiver *msr, *t; + + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { + if (testmsg(msg, msr->r_msgtype, msr->r_mode) && + !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, + msr->r_msgtype, msr->r_mode)) { - tmp = msq->q_receivers.next; - while (tmp != &msq->q_receivers) { - struct msg_receiver* msr; - msr = list_entry(tmp,struct msg_receiver,r_list); - tmp = tmp->next; - if(testmsg(msg,msr->r_msgtype,msr->r_mode) && - !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { list_del(&msr->r_list); - if(msr->r_maxsize < msg->m_ts) { + if (msr->r_maxsize < msg->m_ts) { + /* initialize pipelined send ordering */ msr->r_msg = NULL; wake_up_process(msr->r_tsk); - smp_mb(); + smp_mb(); /* see barrier comment below */ msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; - msq->q_lrpid = msr->r_tsk->pid; + msq->q_lrpid = task_pid_vnr(msr->r_tsk); msq->q_rtime = get_seconds(); wake_up_process(msr->r_tsk); + /* + * Ensure that the wakeup is visible before + * setting r_msg, as the receiving end depends + * on it. See lockless receive part 1 and 2 in + * do_msgrcv(). 
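testmsg() above and convert_mode() further below translate msgrcv()'s msgtyp/msgflg pair into these SEARCH_* modes. From user space the mapping looks like this (an illustration, not patch code; IPC_NOWAIT keeps the calls from blocking on the empty queue, and MSG_EXCEPT is Linux-specific, hence _GNU_SOURCE):

#define _GNU_SOURCE                    /* for MSG_EXCEPT */
#include <sys/ipc.h>
#include <sys/msg.h>

struct mbuf { long mtype; char mtext[64]; };

int main(void)
{
    int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
    struct mbuf b;

    /* each call returns -1/ENOMSG here, since the queue is empty */
    msgrcv(id, &b, sizeof(b.mtext), 0, IPC_NOWAIT);              /* SEARCH_ANY       */
    msgrcv(id, &b, sizeof(b.mtext), 7, IPC_NOWAIT);              /* SEARCH_EQUAL     */
    msgrcv(id, &b, sizeof(b.mtext), 7, MSG_EXCEPT | IPC_NOWAIT); /* SEARCH_NOTEQUAL  */
    msgrcv(id, &b, sizeof(b.mtext), -7, IPC_NOWAIT);             /* SEARCH_LESSEQUAL */
    msgctl(id, IPC_RMID, NULL);
    return 0;
}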
+ */ smp_mb(); msr->r_msg = msg; + return 1; } } } + return 0; } -asmlinkage long sys_msgsnd (int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) +long do_msgsnd(int msqid, long mtype, void __user *mtext, + size_t msgsz, int msgflg) { struct msg_queue *msq; struct msg_msg *msg; - long mtype; int err; - - if (msgsz > msg_ctlmax || (long) msgsz < 0 || msqid < 0) + struct ipc_namespace *ns; + + ns = current->nsproxy->ipc_ns; + + if (msgsz > ns->msg_ctlmax || (long) msgsz < 0 || msqid < 0) return -EINVAL; - if (get_user(mtype, &msgp->mtype)) - return -EFAULT; if (mtype < 1) return -EINVAL; - msg = load_msg(msgp->mtext, msgsz); - if(IS_ERR(msg)) + msg = load_msg(mtext, msgsz); + if (IS_ERR(msg)) return PTR_ERR(msg); msg->m_type = mtype; msg->m_ts = msgsz; - msq = msg_lock(msqid); - err=-EINVAL; - if(msq==NULL) - goto out_free; + rcu_read_lock(); + msq = msq_obtain_object_check(ns, msqid); + if (IS_ERR(msq)) { + err = PTR_ERR(msq); + goto out_unlock1; + } - err= -EIDRM; - if (msg_checkid(msq,msqid)) - goto out_unlock_free; + ipc_lock_object(&msq->q_perm); for (;;) { struct msg_sender s; - err=-EACCES; - if (ipcperms(&msq->q_perm, S_IWUGO)) - goto out_unlock_free; + err = -EACCES; + if (ipcperms(ns, &msq->q_perm, S_IWUGO)) + goto out_unlock0; + + /* raced with RMID? */ + if (!ipc_valid_object(&msq->q_perm)) { + err = -EIDRM; + goto out_unlock0; + } err = security_msg_queue_msgsnd(msq, msg, msgflg); if (err) - goto out_unlock_free; + goto out_unlock0; - if(msgsz + msq->q_cbytes <= msq->q_qbytes && + if (msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes) { break; } /* queue full, wait: */ - if(msgflg&IPC_NOWAIT) { - err=-EAGAIN; - goto out_unlock_free; + if (msgflg & IPC_NOWAIT) { + err = -EAGAIN; + goto out_unlock0; } + + /* enqueue the sender and prepare to block */ ss_add(msq, &s); - ipc_rcu_getref(msq); - msg_unlock(msq); + + if (!ipc_rcu_getref(msq)) { + err = -EIDRM; + goto out_unlock0; + } + + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); schedule(); - ipc_lock_by_ptr(&msq->q_perm); - ipc_rcu_putref(msq); - if (msq->q_perm.deleted) { + rcu_read_lock(); + ipc_lock_object(&msq->q_perm); + + ipc_rcu_putref(msq, ipc_rcu_free); + /* raced with RMID? 
*/ + if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; - goto out_unlock_free; + goto out_unlock0; } + ss_del(&s); - + if (signal_pending(current)) { - err=-ERESTARTNOHAND; - goto out_unlock_free; + err = -ERESTARTNOHAND; + goto out_unlock0; } - } - msq->q_lspid = current->tgid; + } + msq->q_lspid = task_tgid_vnr(current); msq->q_stime = get_seconds(); - if(!pipelined_send(msq,msg)) { - /* noone is waiting for this message, enqueue it */ - list_add_tail(&msg->m_list,&msq->q_messages); + if (!pipelined_send(msq, msg)) { + /* no one is waiting for this message, enqueue it */ + list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; - atomic_add(msgsz,&msg_bytes); - atomic_inc(&msg_hdrs); + atomic_add(msgsz, &ns->msg_bytes); + atomic_inc(&ns->msg_hdrs); } - + err = 0; msg = NULL; -out_unlock_free: - msg_unlock(msq); -out_free: - if(msg!=NULL) +out_unlock0: + ipc_unlock_object(&msq->q_perm); +out_unlock1: + rcu_read_unlock(); + if (msg != NULL) free_msg(msg); return err; } -static inline int convert_mode(long* msgtyp, int msgflg) +SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, + int, msgflg) { - /* + long mtype; + + if (get_user(mtype, &msgp->mtype)) + return -EFAULT; + return do_msgsnd(msqid, mtype, msgp->mtext, msgsz, msgflg); +} + +static inline int convert_mode(long *msgtyp, int msgflg) +{ + if (msgflg & MSG_COPY) + return SEARCH_NUMBER; + /* * find message of correct type. * msgtyp = 0 => get first. * msgtyp > 0 => get first message of matching type. - * msgtyp < 0 => get message with least type must be < abs(msgtype). + * msgtyp < 0 => get message with least type must be < abs(msgtype). */ - if(*msgtyp==0) + if (*msgtyp == 0) return SEARCH_ANY; - if(*msgtyp<0) { - *msgtyp=-(*msgtyp); + if (*msgtyp < 0) { + *msgtyp = -*msgtyp; return SEARCH_LESSEQUAL; } - if(msgflg & MSG_EXCEPT) + if (msgflg & MSG_EXCEPT) return SEARCH_NOTEQUAL; return SEARCH_EQUAL; } -asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, - long msgtyp, int msgflg) +static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) +{ + struct msgbuf __user *msgp = dest; + size_t msgsz; + + if (put_user(msg->m_type, &msgp->mtype)) + return -EFAULT; + + msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; + if (store_msg(msgp->mtext, msg, msgsz)) + return -EFAULT; + return msgsz; +} + +#ifdef CONFIG_CHECKPOINT_RESTORE +/* + * This function creates new kernel message structure, large enough to store + * bufsz message bytes. + */ +static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) +{ + struct msg_msg *copy; + + /* + * Create dummy message to copy real message to. 
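prepare_copy() backs the MSG_COPY flag used by checkpoint/restore: the n-th message is copied out without being dequeued, and do_msgrcv() below rejects MSG_COPY unless it is paired with IPC_NOWAIT. A hypothetical usage sketch (MSG_COPY lives in linux/msg.h and may be missing from libc headers, hence the fallback define; the call fails with ENOSYS when CONFIG_CHECKPOINT_RESTORE is off):

#define _GNU_SOURCE
#include <sys/ipc.h>
#include <sys/msg.h>
#include <stdio.h>

#ifndef MSG_COPY
#define MSG_COPY 040000                /* from include/uapi/linux/msg.h */
#endif

struct mbuf { long mtype; char mtext[128]; };

int main(void)
{
    struct mbuf b = { .mtype = 1, .mtext = "hello" };
    int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);

    msgsnd(id, &b, 6, 0);
    /* msgtyp is a position (0 = oldest), not a type, in SEARCH_NUMBER mode */
    printf("peeked %zd bytes\n",
           msgrcv(id, &b, sizeof(b.mtext), 0, MSG_COPY | IPC_NOWAIT));
    msgctl(id, IPC_RMID, NULL);
    return 0;
}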
+ */ + copy = load_msg(buf, bufsz); + if (!IS_ERR(copy)) + copy->m_ts = bufsz; + return copy; +} + +static inline void free_copy(struct msg_msg *copy) +{ + if (copy) + free_msg(copy); +} +#else +static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void free_copy(struct msg_msg *copy) +{ +} +#endif + +static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) +{ + struct msg_msg *msg, *found = NULL; + long count = 0; + + list_for_each_entry(msg, &msq->q_messages, m_list) { + if (testmsg(msg, *msgtyp, mode) && + !security_msg_queue_msgrcv(msq, msg, current, + *msgtyp, mode)) { + if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { + *msgtyp = msg->m_type - 1; + found = msg; + } else if (mode == SEARCH_NUMBER) { + if (*msgtyp == count) + return msg; + } else + return msg; + count++; + } + } + + return found ?: ERR_PTR(-EAGAIN); +} + +long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, + long (*msg_handler)(void __user *, struct msg_msg *, size_t)) { - struct msg_queue *msq; - struct msg_msg *msg; int mode; + struct msg_queue *msq; + struct ipc_namespace *ns; + struct msg_msg *msg, *copy = NULL; - if (msqid < 0 || (long) msgsz < 0) - return -EINVAL; - mode = convert_mode(&msgtyp,msgflg); + ns = current->nsproxy->ipc_ns; - msq = msg_lock(msqid); - if(msq==NULL) + if (msqid < 0 || (long) bufsz < 0) return -EINVAL; - msg = ERR_PTR(-EIDRM); - if (msg_checkid(msq,msqid)) - goto out_unlock; + if (msgflg & MSG_COPY) { + if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) + return -EINVAL; + copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); + if (IS_ERR(copy)) + return PTR_ERR(copy); + } + mode = convert_mode(&msgtyp, msgflg); + + rcu_read_lock(); + msq = msq_obtain_object_check(ns, msqid); + if (IS_ERR(msq)) { + rcu_read_unlock(); + free_copy(copy); + return PTR_ERR(msq); + } for (;;) { struct msg_receiver msr_d; - struct list_head* tmp; msg = ERR_PTR(-EACCES); - if (ipcperms (&msq->q_perm, S_IRUGO)) - goto out_unlock; + if (ipcperms(ns, &msq->q_perm, S_IRUGO)) + goto out_unlock1; - msg = ERR_PTR(-EAGAIN); - tmp = msq->q_messages.next; - while (tmp != &msq->q_messages) { - struct msg_msg *walk_msg; - walk_msg = list_entry(tmp,struct msg_msg,m_list); - if(testmsg(walk_msg,msgtyp,mode) && - !security_msg_queue_msgrcv(msq, walk_msg, current, msgtyp, mode)) { - msg = walk_msg; - if(mode == SEARCH_LESSEQUAL && walk_msg->m_type != 1) { - msg=walk_msg; - msgtyp=walk_msg->m_type-1; - } else { - msg=walk_msg; - break; - } - } - tmp = tmp->next; + ipc_lock_object(&msq->q_perm); + + /* raced with RMID? */ + if (!ipc_valid_object(&msq->q_perm)) { + msg = ERR_PTR(-EIDRM); + goto out_unlock0; } - if(!IS_ERR(msg)) { - /* Found a suitable message. Unlink it from the queue. */ - if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { + + msg = find_msg(msq, &msgtyp, mode); + if (!IS_ERR(msg)) { + /* + * Found a suitable message. + * Unlink it from the queue. + */ + if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { msg = ERR_PTR(-E2BIG); - goto out_unlock; + goto out_unlock0; + } + /* + * If we are copying, then do not unlink message and do + * not update queue parameters. 
+ */ + if (msgflg & MSG_COPY) { + msg = copy_msg(msg, copy); + goto out_unlock0; } + list_del(&msg->m_list); msq->q_qnum--; msq->q_rtime = get_seconds(); - msq->q_lrpid = current->tgid; + msq->q_lrpid = task_tgid_vnr(current); msq->q_cbytes -= msg->m_ts; - atomic_sub(msg->m_ts,&msg_bytes); - atomic_dec(&msg_hdrs); - ss_wakeup(&msq->q_senders,0); - msg_unlock(msq); - break; + atomic_sub(msg->m_ts, &ns->msg_bytes); + atomic_dec(&ns->msg_hdrs); + ss_wakeup(&msq->q_senders, 0); + + goto out_unlock0; } + /* No message waiting. Wait for a message */ if (msgflg & IPC_NOWAIT) { msg = ERR_PTR(-ENOMSG); - goto out_unlock; + goto out_unlock0; } - list_add_tail(&msr_d.r_list,&msq->q_receivers); + + list_add_tail(&msr_d.r_list, &msq->q_receivers); msr_d.r_tsk = current; msr_d.r_msgtype = msgtyp; msr_d.r_mode = mode; - if(msgflg & MSG_NOERROR) + if (msgflg & MSG_NOERROR) msr_d.r_maxsize = INT_MAX; - else - msr_d.r_maxsize = msgsz; + else + msr_d.r_maxsize = bufsz; msr_d.r_msg = ERR_PTR(-EAGAIN); - current->state = TASK_INTERRUPTIBLE; - msg_unlock(msq); + __set_current_state(TASK_INTERRUPTIBLE); + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); schedule(); /* Lockless receive, part 1: * Disable preemption. We don't hold a reference to the queue * and getting a reference would defeat the idea of a lockless * operation, thus the code relies on rcu to guarantee the - * existance of msq: + * existence of msq: * Prior to destruction, expunge_all(-EIRDM) changes r_msg. * Thus if r_msg is -EAGAIN, then the queue not yet destroyed. * rcu_read_lock() prevents preemption between reading r_msg - * and the spin_lock() inside ipc_lock_by_ptr(). + * and acquiring the q_perm.lock in ipc_lock_object(). */ rcu_read_lock(); @@ -761,74 +934,145 @@ asmlinkage long sys_msgrcv (int msqid, struct msgbuf __user *msgp, size_t msgsz, * wake_up_process(). There is a race with exit(), see * ipc/mqueue.c for the details. */ - msg = (struct msg_msg*) msr_d.r_msg; + msg = (struct msg_msg *)msr_d.r_msg; while (msg == NULL) { cpu_relax(); - msg = (struct msg_msg*) msr_d.r_msg; + msg = (struct msg_msg *)msr_d.r_msg; } /* Lockless receive, part 3: * If there is a message or an error then accept it without * locking. */ - if(msg != ERR_PTR(-EAGAIN)) { - rcu_read_unlock(); - break; - } + if (msg != ERR_PTR(-EAGAIN)) + goto out_unlock1; /* Lockless receive, part 3: * Acquire the queue spinlock. */ - ipc_lock_by_ptr(&msq->q_perm); - rcu_read_unlock(); + ipc_lock_object(&msq->q_perm); /* Lockless receive, part 4: * Repeat test after acquiring the spinlock. */ - msg = (struct msg_msg*)msr_d.r_msg; - if(msg != ERR_PTR(-EAGAIN)) - goto out_unlock; + msg = (struct msg_msg *)msr_d.r_msg; + if (msg != ERR_PTR(-EAGAIN)) + goto out_unlock0; list_del(&msr_d.r_list); if (signal_pending(current)) { msg = ERR_PTR(-ERESTARTNOHAND); -out_unlock: - msg_unlock(msq); - break; + goto out_unlock0; } + + ipc_unlock_object(&msq->q_perm); } - if (IS_ERR(msg)) - return PTR_ERR(msg); - msgsz = (msgsz > msg->m_ts) ? 
msg->m_ts : msgsz; - if (put_user (msg->m_type, &msgp->mtype) || - store_msg(msgp->mtext, msg, msgsz)) { - msgsz = -EFAULT; +out_unlock0: + ipc_unlock_object(&msq->q_perm); +out_unlock1: + rcu_read_unlock(); + if (IS_ERR(msg)) { + free_copy(copy); + return PTR_ERR(msg); } + + bufsz = msg_handler(buf, msg, bufsz); free_msg(msg); - return msgsz; + + return bufsz; } +SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, + long, msgtyp, int, msgflg) +{ + return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); +} + +/* + * Scale msgmni with the available lowmem size: the memory dedicated to msg + * queues should occupy at most 1/MSG_MEM_SCALE of lowmem. + * Also take into account the number of nsproxies created so far. + * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range. + */ +void recompute_msgmni(struct ipc_namespace *ns) +{ + struct sysinfo i; + unsigned long allowed; + int nb_ns; + + si_meminfo(&i); + allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit) + / MSGMNB; + nb_ns = atomic_read(&nr_ipc_ns); + allowed /= nb_ns; + + if (allowed < MSGMNI) { + ns->msg_ctlmni = MSGMNI; + return; + } + + if (allowed > IPCMNI / nb_ns) { + ns->msg_ctlmni = IPCMNI / nb_ns; + return; + } + + ns->msg_ctlmni = allowed; +} + +void msg_init_ns(struct ipc_namespace *ns) +{ + ns->msg_ctlmax = MSGMAX; + ns->msg_ctlmnb = MSGMNB; + + recompute_msgmni(ns); + + atomic_set(&ns->msg_bytes, 0); + atomic_set(&ns->msg_hdrs, 0); + ipc_init_ids(&ns->ids[IPC_MSG_IDS]); +} + +#ifdef CONFIG_IPC_NS +void msg_exit_ns(struct ipc_namespace *ns) +{ + free_ipcs(ns, &msg_ids(ns), freeque); + idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); +} +#endif + #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it) { + struct user_namespace *user_ns = seq_user_ns(s); struct msg_queue *msq = it; return seq_printf(s, - "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", - msq->q_perm.key, - msq->q_id, - msq->q_perm.mode, - msq->q_cbytes, - msq->q_qnum, - msq->q_lspid, - msq->q_lrpid, - msq->q_perm.uid, - msq->q_perm.gid, - msq->q_perm.cuid, - msq->q_perm.cgid, - msq->q_stime, - msq->q_rtime, - msq->q_ctime); + "%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n", + msq->q_perm.key, + msq->q_perm.id, + msq->q_perm.mode, + msq->q_cbytes, + msq->q_qnum, + msq->q_lspid, + msq->q_lrpid, + from_kuid_munged(user_ns, msq->q_perm.uid), + from_kgid_munged(user_ns, msq->q_perm.gid), + from_kuid_munged(user_ns, msq->q_perm.cuid), + from_kgid_munged(user_ns, msq->q_perm.cgid), + msq->q_stime, + msq->q_rtime, + msq->q_ctime); } #endif + +void __init msg_init(void) +{ + msg_init_ns(&init_ipc_ns); + + printk(KERN_INFO "msgmni has been set to %d\n", + init_ipc_ns.msg_ctlmni); + + ipc_init_proc_interface("sysvipc/msg", + " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", + IPC_MSG_IDS, sysvipc_msg_proc_show); +} diff --git a/ipc/msgutil.c b/ipc/msgutil.c index 66cfb87646e..7e7095974d5 100644 --- a/ipc/msgutil.c +++ b/ipc/msgutil.c @@ -1,5 +1,5 @@ /* - * linux/ipc/util.c + * linux/ipc/msgutil.c * Copyright (C) 1999, 2004 Manfred Spraul * * This file is released under GNU General Public Licence version 2 or @@ -13,64 +13,94 @@ #include <linux/security.h> #include <linux/slab.h> #include <linux/ipc.h> -#include <asm/uaccess.h> +#include <linux/msg.h> +#include <linux/ipc_namespace.h> +#include <linux/utsname.h> +#include <linux/proc_ns.h> +#include <linux/uaccess.h> #include "util.h" 
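A note on the layout defined just below: a message is stored as one header chunk carrying up to DATALEN_MSG bytes of payload, followed by a chain of msg_msgseg segments of up to DATALEN_SEG bytes each, so no single allocation ever exceeds a page. A sketch of the resulting segment count (the 4 KiB page and the header sizes are assumptions for illustration; the real values come from sizeof(struct msg_msg) and sizeof(struct msg_msgseg)):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SZ 4096UL                 /* assumption: 4 KiB pages */
#define DATALEN_MSG_X (PAGE_SZ - 48)   /* illustrative msg_msg header size */
#define DATALEN_SEG_X (PAGE_SZ - 8)    /* illustrative msg_msgseg header size */

static size_t nr_segments(size_t len)
{
    if (len <= DATALEN_MSG_X)
        return 0;                      /* fits in the head chunk */
    len -= DATALEN_MSG_X;
    return (len + DATALEN_SEG_X - 1) / DATALEN_SEG_X;   /* round up */
}

int main(void)
{
    printf("8 KiB message -> %zu extra segment(s)\n", nr_segments(8192));
    return 0;
}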
+DEFINE_SPINLOCK(mq_lock); + +/* + * The next 2 defines are here bc this is the only file + * compiled when either CONFIG_SYSVIPC and CONFIG_POSIX_MQUEUE + * and not CONFIG_IPC_NS. + */ +struct ipc_namespace init_ipc_ns = { + .count = ATOMIC_INIT(1), + .user_ns = &init_user_ns, + .proc_inum = PROC_IPC_INIT_INO, +}; + +atomic_t nr_ipc_ns = ATOMIC_INIT(1); + struct msg_msgseg { - struct msg_msgseg* next; + struct msg_msgseg *next; /* the next part of the message follows immediately */ }; -#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg)) -#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg)) +#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg)) +#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg)) -struct msg_msg *load_msg(const void __user *src, int len) + +static struct msg_msg *alloc_msg(size_t len) { struct msg_msg *msg; struct msg_msgseg **pseg; - int err; - int alen; - - alen = len; - if (alen > DATALEN_MSG) - alen = DATALEN_MSG; + size_t alen; - msg = (struct msg_msg *)kmalloc(sizeof(*msg) + alen, GFP_KERNEL); + alen = min(len, DATALEN_MSG); + msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL); if (msg == NULL) - return ERR_PTR(-ENOMEM); + return NULL; msg->next = NULL; msg->security = NULL; - if (copy_from_user(msg + 1, src, alen)) { - err = -EFAULT; - goto out_err; - } - len -= alen; - src = ((char __user *)src) + alen; pseg = &msg->next; while (len > 0) { struct msg_msgseg *seg; - alen = len; - if (alen > DATALEN_SEG) - alen = DATALEN_SEG; - seg = (struct msg_msgseg *)kmalloc(sizeof(*seg) + alen, - GFP_KERNEL); - if (seg == NULL) { - err = -ENOMEM; + alen = min(len, DATALEN_SEG); + seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL); + if (seg == NULL) goto out_err; - } *pseg = seg; seg->next = NULL; - if (copy_from_user(seg + 1, src, alen)) { - err = -EFAULT; - goto out_err; - } pseg = &seg->next; len -= alen; - src = ((char __user *)src) + alen; + } + + return msg; + +out_err: + free_msg(msg); + return NULL; +} + +struct msg_msg *load_msg(const void __user *src, size_t len) +{ + struct msg_msg *msg; + struct msg_msgseg *seg; + int err = -EFAULT; + size_t alen; + + msg = alloc_msg(len); + if (msg == NULL) + return ERR_PTR(-ENOMEM); + + alen = min(len, DATALEN_MSG); + if (copy_from_user(msg + 1, src, alen)) + goto out_err; + + for (seg = msg->next; seg != NULL; seg = seg->next) { + len -= alen; + src = (char __user *)src + alen; + alen = min(len, DATALEN_SEG); + if (copy_from_user(seg + 1, src, alen)) + goto out_err; } err = security_msg_msg_alloc(msg); @@ -83,30 +113,55 @@ out_err: free_msg(msg); return ERR_PTR(err); } +#ifdef CONFIG_CHECKPOINT_RESTORE +struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst) +{ + struct msg_msgseg *dst_pseg, *src_pseg; + size_t len = src->m_ts; + size_t alen; + + BUG_ON(dst == NULL); + if (src->m_ts > dst->m_ts) + return ERR_PTR(-EINVAL); + + alen = min(len, DATALEN_MSG); + memcpy(dst + 1, src + 1, alen); + + for (dst_pseg = dst->next, src_pseg = src->next; + src_pseg != NULL; + dst_pseg = dst_pseg->next, src_pseg = src_pseg->next) { -int store_msg(void __user *dest, struct msg_msg *msg, int len) + len -= alen; + alen = min(len, DATALEN_SEG); + memcpy(dst_pseg + 1, src_pseg + 1, alen); + } + + dst->m_type = src->m_type; + dst->m_ts = src->m_ts; + + return dst; +} +#else +struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst) +{ + return ERR_PTR(-ENOSYS); +} +#endif +int store_msg(void __user *dest, struct msg_msg *msg, size_t len) { - int alen; + size_t alen; struct msg_msgseg *seg; - alen = len; - if 
(alen > DATALEN_MSG) - alen = DATALEN_MSG; + alen = min(len, DATALEN_MSG); if (copy_to_user(dest, msg + 1, alen)) return -1; - len -= alen; - dest = ((char __user *)dest) + alen; - seg = msg->next; - while (len > 0) { - alen = len; - if (alen > DATALEN_SEG) - alen = DATALEN_SEG; + for (seg = msg->next; seg != NULL; seg = seg->next) { + len -= alen; + dest = (char __user *)dest + alen; + alen = min(len, DATALEN_SEG); if (copy_to_user(dest, seg + 1, alen)) return -1; - len -= alen; - dest = ((char __user *)dest) + alen; - seg = seg->next; } return 0; } diff --git a/ipc/namespace.c b/ipc/namespace.c new file mode 100644 index 00000000000..59451c1e214 --- /dev/null +++ b/ipc/namespace.c @@ -0,0 +1,199 @@ +/* + * linux/ipc/namespace.c + * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc. + */ + +#include <linux/ipc.h> +#include <linux/msg.h> +#include <linux/ipc_namespace.h> +#include <linux/rcupdate.h> +#include <linux/nsproxy.h> +#include <linux/slab.h> +#include <linux/fs.h> +#include <linux/mount.h> +#include <linux/user_namespace.h> +#include <linux/proc_ns.h> + +#include "util.h" + +static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns, + struct ipc_namespace *old_ns) +{ + struct ipc_namespace *ns; + int err; + + ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL); + if (ns == NULL) + return ERR_PTR(-ENOMEM); + + err = proc_alloc_inum(&ns->proc_inum); + if (err) { + kfree(ns); + return ERR_PTR(err); + } + + atomic_set(&ns->count, 1); + err = mq_init_ns(ns); + if (err) { + proc_free_inum(ns->proc_inum); + kfree(ns); + return ERR_PTR(err); + } + atomic_inc(&nr_ipc_ns); + + sem_init_ns(ns); + msg_init_ns(ns); + shm_init_ns(ns); + + /* + * msgmni has already been computed for the new ipc ns. + * Thus, do the ipcns creation notification before registering that + * new ipcns in the chain. + */ + ipcns_notify(IPCNS_CREATED); + register_ipcns_notifier(ns); + + ns->user_ns = get_user_ns(user_ns); + + return ns; +} + +struct ipc_namespace *copy_ipcs(unsigned long flags, + struct user_namespace *user_ns, struct ipc_namespace *ns) +{ + if (!(flags & CLONE_NEWIPC)) + return get_ipc_ns(ns); + return create_ipc_ns(user_ns, ns); +} + +/* + * free_ipcs - free all ipcs of one type + * @ns: the namespace to remove the ipcs from + * @ids: the table of ipcs to free + * @free: the function called to free each individual ipc + * + * Called for each kind of ipc when an ipc_namespace exits. + */ +void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, + void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)) +{ + struct kern_ipc_perm *perm; + int next_id; + int total, in_use; + + down_write(&ids->rwsem); + + in_use = ids->in_use; + + for (total = 0, next_id = 0; total < in_use; next_id++) { + perm = idr_find(&ids->ipcs_idr, next_id); + if (perm == NULL) + continue; + rcu_read_lock(); + ipc_lock_object(perm); + free(ns, perm); + total++; + } + up_write(&ids->rwsem); +} + +static void free_ipc_ns(struct ipc_namespace *ns) +{ + /* + * Unregistering the hotplug notifier at the beginning guarantees + * that the ipc namespace won't be freed while we are inside the + * callback routine. Since the blocking_notifier_chain_XXX routines + * hold a rw lock on the notifier list, unregister_ipcns_notifier() + * won't take the rw lock before blocking_notifier_call_chain() has + * released the rd lock. 
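
/*
 * Illustration (not part of the patch): create_ipc_ns()/copy_ipcs() above
 * are reached from user space via clone(2) or unshare(2) with CLONE_NEWIPC.
 * A minimal sketch; it assumes CAP_SYS_ADMIN and a kernel built with
 * CONFIG_IPC_NS.
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int id;

	if (unshare(CLONE_NEWIPC) == -1) {	/* kernel: copy_ipcs() -> create_ipc_ns() */
		perror("unshare(CLONE_NEWIPC)");
		exit(EXIT_FAILURE);
	}
	/* The new namespace starts out empty, so this key cannot collide
	 * with objects in the parent's ipc namespace. */
	id = semget(0x1234, 1, IPC_CREAT | IPC_EXCL | 0600);
	if (id == -1) {
		perror("semget");
		exit(EXIT_FAILURE);
	}
	printf("semaphore set %d is visible only in the new namespace\n", id);
	return 0;
}
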
+ */ + unregister_ipcns_notifier(ns); + sem_exit_ns(ns); + msg_exit_ns(ns); + shm_exit_ns(ns); + atomic_dec(&nr_ipc_ns); + + /* + * Do the ipcns removal notification after decrementing nr_ipc_ns in + * order to have a correct value when recomputing msgmni. + */ + ipcns_notify(IPCNS_REMOVED); + put_user_ns(ns->user_ns); + proc_free_inum(ns->proc_inum); + kfree(ns); +} + +/* + * put_ipc_ns - drop a reference to an ipc namespace. + * @ns: the namespace to put + * + * If this is the last task in the namespace exiting, and + * it is dropping the refcount to 0, then it can race with + * a task in another ipc namespace but in a mounts namespace + * which has this ipcns's mqueuefs mounted, doing some action + * with one of the mqueuefs files. That can raise the refcount. + * So dropping the refcount, and raising the refcount when + * accessing it through the VFS, are protected with mq_lock. + * + * (Clearly, a task raising the refcount on its own ipc_ns + * needn't take mq_lock since it can't race with the last task + * in the ipcns exiting). + */ +void put_ipc_ns(struct ipc_namespace *ns) +{ + if (atomic_dec_and_lock(&ns->count, &mq_lock)) { + mq_clear_sbinfo(ns); + spin_unlock(&mq_lock); + mq_put_mnt(ns); + free_ipc_ns(ns); + } +} + +static void *ipcns_get(struct task_struct *task) +{ + struct ipc_namespace *ns = NULL; + struct nsproxy *nsproxy; + + rcu_read_lock(); + nsproxy = task_nsproxy(task); + if (nsproxy) + ns = get_ipc_ns(nsproxy->ipc_ns); + rcu_read_unlock(); + + return ns; +} + +static void ipcns_put(void *ns) +{ + return put_ipc_ns(ns); +} + +static int ipcns_install(struct nsproxy *nsproxy, void *new) +{ + struct ipc_namespace *ns = new; + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) || + !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) + return -EPERM; + + /* Ditch state from the old ipc namespace */ + exit_sem(current); + put_ipc_ns(nsproxy->ipc_ns); + nsproxy->ipc_ns = get_ipc_ns(ns); + return 0; +} + +static unsigned int ipcns_inum(void *vp) +{ + struct ipc_namespace *ns = vp; + + return ns->proc_inum; +} + +const struct proc_ns_operations ipcns_operations = { + .name = "ipc", + .type = CLONE_NEWIPC, + .get = ipcns_get, + .put = ipcns_put, + .install = ipcns_install, + .inum = ipcns_inum, +}; diff --git a/ipc/sem.c b/ipc/sem.c index 19af028a3e3..454f6c6020a 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -3,92 +3,149 @@ * Copyright (C) 1992 Krishna Balasubramanian * Copyright (C) 1995 Eric Schenk, Bruno Haible * - * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995): - * This code underwent a massive rewrite in order to solve some problems - * with the original code. In particular the original code failed to - * wake up processes that were waiting for semval to go to 0 if the - * value went to 0 and was then incremented rapidly enough. In solving - * this problem I have also modified the implementation so that it - * processes pending operations in a FIFO manner, thus give a guarantee - * that processes waiting for a lock on the semaphore won't starve - * unless another locking process fails to unlock. - * In addition the following two changes in behavior have been introduced: - * - The original implementation of semop returned the value - * last semaphore element examined on success. This does not - * match the manual page specifications, and effectively - * allows the user to read the semaphore even if they do not - * have read permissions. The implementation now returns 0 - * on success as stated in the manual page. 
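
/*
 * Illustration (not part of the patch): ipcns_install() above is the kernel
 * half of setns(2) for CLONE_NEWIPC. A hedged user-space sketch; <pid> stands
 * for any process in the target namespace, and both user namespaces involved
 * must grant CAP_SYS_ADMIN.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
	char path[64];
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		exit(EXIT_FAILURE);
	}
	snprintf(path, sizeof(path), "/proc/%s/ns/ipc", argv[1]);
	fd = open(path, O_RDONLY);	/* proc file backed by ipcns_operations */
	if (fd == -1 || setns(fd, CLONE_NEWIPC) == -1) {
		perror("setns");	/* ipcns_install() also drops our undo list */
		exit(EXIT_FAILURE);
	}
	printf("now using the ipc namespace of pid %s\n", argv[1]);
	return 0;
}
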
- * - There is some confusion over whether the set of undo adjustments
- *   to be performed at exit should be done in an atomic manner.
- *   That is, if we are attempting to decrement the semval should we queue
- *   up and wait until we can do so legally?
- *   The original implementation attempted to do this.
- *   The current implementation does not do so. This is because I don't
- *   think it is the right thing (TM) to do, and because I couldn't
- *   see a clean way to get the old behavior with the new design.
- *   The POSIX standard and SVID should be consulted to determine
- *   what behavior is mandated.
- *
- * Further notes on refinement (Christoph Rohland, December 1998):
- * - The POSIX standard says, that the undo adjustments simply should
- *   redo. So the current implementation is o.K.
- * - The previous code had two flaws:
- *   1) It actively gave the semaphore to the next waiting process
- *      sleeping on the semaphore. Since this process did not have the
- *      cpu this led to many unnecessary context switches and bad
- *      performance. Now we only check which process should be able to
- *      get the semaphore and if this process wants to reduce some
- *      semaphore value we simply wake it up without doing the
- *      operation. So it has to try to get it later. Thus e.g. the
- *      running process may reacquire the semaphore during the current
- *      time slice. If it only waits for zero or increases the semaphore,
- *      we do the operation in advance and wake it up.
- *   2) It did not wake up all zero waiting processes. We try to do
- *      better but only get the semops right which only wait for zero or
- *      increase. If there are decrement operations in the operations
- *      array we do the same as before.
- *
- * With the incarnation of O(1) scheduler, it becomes unnecessary to perform
- * check/retry algorithm for waking up blocked processes as the new scheduler
- * is better at handling thread switch than the old one.
- *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
- * (c) 1999 Manfred Spraul <manfreds@colorfullife.com>
+ * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
- * (c) 2001 Red Hat Inc <alan@redhat.com>
+ * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
+ * Further wakeup optimizations, documentation
+ * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
+ *
+ * support for audit of ipc object properties and permission changes
+ * Dustin Kirkland <dustin.kirkland@us.ibm.com>
+ *
+ * namespaces support
+ * OpenVZ, SWsoft Inc.
+ * Pavel Emelianov <xemul@openvz.org>
+ *
+ * Implementation notes: (May 2010)
+ * This file implements System V semaphores.
+ *
+ * User space visible behavior:
+ * - FIFO ordering for semop() operations (just FIFO, not starvation
+ *   protection)
+ * - multiple semaphore operations that alter the same semaphore in
+ *   one semop() are handled.
+ * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
+ *   SETALL calls.
+ * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
+ * - undo adjustments at process exit are limited to 0..SEMVMX.
+ * - namespaces are supported.
+ * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
+ *   to /proc/sys/kernel/sem.
+ * - statistics about the usage are reported in /proc/sysvipc/sem.
+ *
+ * Internals:
+ * - scalability:
+ *   - all global variables are read-mostly.
+ *   - semop() calls and semctl(RMID) are synchronized by RCU.
+ * - most operations do write operations (actually: spin_lock calls) to + * the per-semaphore array structure. + * Thus: Perfect SMP scaling between independent semaphore arrays. + * If multiple semaphores in one array are used, then cache line + * trashing on the semaphore array spinlock will limit the scaling. + * - semncnt and semzcnt are calculated on demand in count_semcnt() + * - the task that performs a successful semop() scans the list of all + * sleeping tasks and completes any pending operations that can be fulfilled. + * Semaphores are actively given to waiting tasks (necessary for FIFO). + * (see update_queue()) + * - To improve the scalability, the actual wake-up calls are performed after + * dropping all locks. (see wake_up_sem_queue_prepare(), + * wake_up_sem_queue_do()) + * - All work is done by the waker, the woken up task does not have to do + * anything - not even acquiring a lock or dropping a refcount. + * - A woken up task may not even touch the semaphore array anymore, it may + * have been destroyed already by a semctl(RMID). + * - The synchronizations between wake-ups due to a timeout/signal and a + * wake-up due to a completed semaphore operation is achieved by using an + * intermediate state (IN_WAKEUP). + * - UNDO values are stored in an array (one per process and per + * semaphore array, lazily allocated). For backwards compatibility, multiple + * modes for the UNDO variables are supported (per process, per thread) + * (see copy_semundo, CLONE_SYSVSEM) + * - There are two lists of the pending operations: a per-array list + * and per-semaphore list (stored in the array). This allows to achieve FIFO + * ordering without always scanning all pending operations. + * The worst-case behavior is nevertheless O(N^2) for N wakeups. */ -#include <linux/config.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/init.h> #include <linux/proc_fs.h> #include <linux/time.h> -#include <linux/smp_lock.h> #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> +#include <linux/capability.h> #include <linux/seq_file.h> -#include <asm/uaccess.h> +#include <linux/rwsem.h> +#include <linux/nsproxy.h> +#include <linux/ipc_namespace.h> + +#include <linux/uaccess.h> #include "util.h" +/* One semaphore structure for each semaphore in the system. */ +struct sem { + int semval; /* current value */ + int sempid; /* pid of last operation */ + spinlock_t lock; /* spinlock for fine-grained semtimedop */ + struct list_head pending_alter; /* pending single-sop operations */ + /* that alter the semaphore */ + struct list_head pending_const; /* pending single-sop operations */ + /* that do not alter the semaphore*/ + time_t sem_otime; /* candidate for sem_otime */ +} ____cacheline_aligned_in_smp; + +/* One queue for each sleeping process in the system. */ +struct sem_queue { + struct list_head list; /* queue of pending operations */ + struct task_struct *sleeper; /* this process */ + struct sem_undo *undo; /* undo structure */ + int pid; /* process id of requesting process */ + int status; /* completion status of operation */ + struct sembuf *sops; /* array of pending operations */ + struct sembuf *blocking; /* the operation that blocked */ + int nsops; /* number of operations */ + int alter; /* does *sops alter the array? */ +}; + +/* Each task has a list of undo requests. They are executed automatically + * when the process exits. 
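
/*
 * Illustration (not part of the patch): the undo bookkeeping described above
 * is driven by the SEM_UNDO flag. Minimal sketch: the kernel records
 * semadj = +1 for the decrement and reapplies it at exit, so a crashing
 * "lock holder" does not block everyone else forever.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun { int val; };	/* caller-defined, see semctl(2) */

int main(void)
{
	struct sembuf take = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };
	union semun arg = { .val = 1 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id == -1 || semctl(id, 0, SETVAL, arg) == -1)
		return 1;
	if (semop(id, &take, 1) == -1)	/* undo entry: semadj[0] = +1 */
		return 1;
	printf("exiting while holding the semaphore; exit_sem() restores it\n");
	return 0;	/* no releasing semop, on purpose */
}
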
+ */ +struct sem_undo { + struct list_head list_proc; /* per-process list: * + * all undos from one process + * rcu protected */ + struct rcu_head rcu; /* rcu struct for sem_undo */ + struct sem_undo_list *ulp; /* back ptr to sem_undo_list */ + struct list_head list_id; /* per semaphore array list: + * all undos for one array */ + int semid; /* semaphore set identifier */ + short *semadj; /* array of adjustments */ + /* one per semaphore */ +}; + +/* sem_undo_list controls shared access to the list of sem_undo structures + * that may be shared among all a CLONE_SYSVSEM task group. + */ +struct sem_undo_list { + atomic_t refcnt; + spinlock_t lock; + struct list_head list_proc; +}; + + +#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS]) -#define sem_lock(id) ((struct sem_array*)ipc_lock(&sem_ids,id)) -#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm) -#define sem_rmid(id) ((struct sem_array*)ipc_rmid(&sem_ids,id)) -#define sem_checkid(sma, semid) \ - ipc_checkid(&sem_ids,&sma->sem_perm,semid) -#define sem_buildid(id, seq) \ - ipc_buildid(&sem_ids, id, seq) -static struct ipc_ids sem_ids; +#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid) -static int newary (key_t, int, int); -static void freeary (struct sem_array *sma, int id); +static int newary(struct ipc_namespace *, struct ipc_params *); +static void freeary(struct ipc_namespace *, struct kern_ipc_perm *); #ifdef CONFIG_PROC_FS static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #endif @@ -97,30 +154,283 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it); #define SEMOPM_FAST 64 /* ~ 372 bytes on stack */ /* - * linked list protection: + * Locking: * sem_undo.id_next, - * sem_array.sem_pending{,last}, - * sem_array.sem_undo: sem_lock() for read/write + * sem_array.complex_count, + * sem_array.pending{_alter,_cont}, + * sem_array.sem_undo: global sem_lock() for read/write * sem_undo.proc_next: only "current" is allowed to read/write that field. - * + * + * sem_array.sem_base[i].pending_{const,alter}: + * global or semaphore sem_lock() for read/write */ -int sem_ctls[4] = {SEMMSL, SEMMNS, SEMOPM, SEMMNI}; -#define sc_semmsl (sem_ctls[0]) -#define sc_semmns (sem_ctls[1]) -#define sc_semopm (sem_ctls[2]) -#define sc_semmni (sem_ctls[3]) +#define sc_semmsl sem_ctls[0] +#define sc_semmns sem_ctls[1] +#define sc_semopm sem_ctls[2] +#define sc_semmni sem_ctls[3] -static int used_sems; +void sem_init_ns(struct ipc_namespace *ns) +{ + ns->sc_semmsl = SEMMSL; + ns->sc_semmns = SEMMNS; + ns->sc_semopm = SEMOPM; + ns->sc_semmni = SEMMNI; + ns->used_sems = 0; + ipc_init_ids(&ns->ids[IPC_SEM_IDS]); +} -void __init sem_init (void) +#ifdef CONFIG_IPC_NS +void sem_exit_ns(struct ipc_namespace *ns) { - used_sems = 0; - ipc_init_ids(&sem_ids,sc_semmni); + free_ipcs(ns, &sem_ids(ns), freeary); + idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr); +} +#endif + +void __init sem_init(void) +{ + sem_init_ns(&init_ipc_ns); ipc_init_proc_interface("sysvipc/sem", " key semid perms nsems uid gid cuid cgid otime ctime\n", - &sem_ids, - sysvipc_sem_proc_show); + IPC_SEM_IDS, sysvipc_sem_proc_show); +} + +/** + * unmerge_queues - unmerge queues, if possible. + * @sma: semaphore array + * + * The function unmerges the wait queues if complex_count is 0. + * It must be called prior to dropping the global semaphore array lock. + */ +static void unmerge_queues(struct sem_array *sma) +{ + struct sem_queue *q, *tq; + + /* complex operations still around? */ + if (sma->complex_count) + return; + /* + * We will switch back to simple mode. 
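
/*
 * Illustration (not part of the patch): the four per-namespace limits
 * initialized in sem_init_ns() are exported as a single sysctl line.
 * Minimal reader sketch.
 */
#include <stdio.h>

int main(void)
{
	int semmsl, semmns, semopm, semmni;
	FILE *f = fopen("/proc/sys/kernel/sem", "r");

	if (f == NULL)
		return 1;
	if (fscanf(f, "%d %d %d %d", &semmsl, &semmns, &semopm, &semmni) == 4)
		printf("SEMMSL=%d SEMMNS=%d SEMOPM=%d SEMMNI=%d\n",
		       semmsl, semmns, semopm, semmni);
	fclose(f);
	return 0;
}
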
+ * Move all pending operations back into the per-semaphore
+ * queues.
+ */
+	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
+		struct sem *curr;
+		curr = &sma->sem_base[q->sops[0].sem_num];
+
+		list_add_tail(&q->list, &curr->pending_alter);
+	}
+	INIT_LIST_HEAD(&sma->pending_alter);
+}
+
+/**
+ * merge_queues - merge single semop queues into global queue
+ * @sma: semaphore array
+ *
+ * This function merges all per-semaphore queues into the global queue.
+ * It is necessary to achieve FIFO ordering for the pending single-sop
+ * operations when a multi-semop operation must sleep.
+ * Only the alter operations must be moved, the const operations can stay.
+ */
+static void merge_queues(struct sem_array *sma)
+{
+	int i;
+	for (i = 0; i < sma->sem_nsems; i++) {
+		struct sem *sem = sma->sem_base + i;
+
+		list_splice_init(&sem->pending_alter, &sma->pending_alter);
+	}
+}
+
+static void sem_rcu_free(struct rcu_head *head)
+{
+	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+	struct sem_array *sma = ipc_rcu_to_struct(p);
+
+	security_sem_free(sma);
+	ipc_rcu_free(head);
+}
+
+/*
+ * Wait until all currently ongoing simple ops have completed.
+ * Caller must own sem_perm.lock.
+ * New simple ops cannot start, because simple ops first check
+ * that a) sem_perm.lock is free and b) complex_count is 0.
+ */
+static void sem_wait_array(struct sem_array *sma)
+{
+	int i;
+	struct sem *sem;
+
+	if (sma->complex_count) {
+		/* The thread that increased sma->complex_count waited on
+		 * all sem->lock locks. Thus we don't need to wait again.
+		 */
+		return;
+	}
+
+	for (i = 0; i < sma->sem_nsems; i++) {
+		sem = sma->sem_base + i;
+		spin_unlock_wait(&sem->lock);
+	}
+}
+
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+			      int nsops)
+{
+	struct sem *sem;
+
+	if (nsops != 1) {
+		/* Complex operation - acquire a full lock */
+		ipc_lock_object(&sma->sem_perm);
+
+		/* And wait until all simple ops that are processed
+		 * right now have dropped their locks.
+		 */
+		sem_wait_array(sma);
+		return -1;
+	}
+
+	/*
+	 * Only one semaphore affected - try to optimize locking.
+	 * The rules are:
+	 * - optimized locking is possible if no complex operation
+	 *   is either enqueued or processed right now.
+	 * - The test for enqueued complex ops is simple:
+	 *      sma->complex_count != 0
+	 * - Testing for complex ops that are processed right now is
+	 *   a bit more difficult. Complex ops acquire the full lock
+	 *   and first wait until the running simple ops have completed.
+	 *   (see above)
+	 *   Thus: If we own a simple lock and the global lock is free
+	 *   and complex_count is now 0, then it will stay 0 and
+	 *   thus just locking sem->lock is sufficient.
+	 */
+	sem = sma->sem_base + sops->sem_num;
+
+	if (sma->complex_count == 0) {
+		/*
+		 * It appears that no complex operation is around.
+		 * Acquire the per-semaphore lock.
+		 */
+		spin_lock(&sem->lock);
+
+		/* Then check that the global lock is free */
+		if (!spin_is_locked(&sma->sem_perm.lock)) {
+			/* spin_is_locked() is not a memory barrier */
+			smp_mb();
+
+			/* Now repeat the test of complex_count:
+			 * It can't change anymore until we drop sem->lock.
+ * Thus: if is now 0, then it will stay 0. + */ + if (sma->complex_count == 0) { + /* fast path successful! */ + return sops->sem_num; + } + } + spin_unlock(&sem->lock); + } + + /* slow path: acquire the full lock */ + ipc_lock_object(&sma->sem_perm); + + if (sma->complex_count == 0) { + /* False alarm: + * There is no complex operation, thus we can switch + * back to the fast path. + */ + spin_lock(&sem->lock); + ipc_unlock_object(&sma->sem_perm); + return sops->sem_num; + } else { + /* Not a false alarm, thus complete the sequence for a + * full lock. + */ + sem_wait_array(sma); + return -1; + } +} + +static inline void sem_unlock(struct sem_array *sma, int locknum) +{ + if (locknum == -1) { + unmerge_queues(sma); + ipc_unlock_object(&sma->sem_perm); + } else { + struct sem *sem = sma->sem_base + locknum; + spin_unlock(&sem->lock); + } +} + +/* + * sem_lock_(check_) routines are called in the paths where the rwsem + * is not held. + * + * The caller holds the RCU read lock. + */ +static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, + int id, struct sembuf *sops, int nsops, int *locknum) +{ + struct kern_ipc_perm *ipcp; + struct sem_array *sma; + + ipcp = ipc_obtain_object(&sem_ids(ns), id); + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); + + sma = container_of(ipcp, struct sem_array, sem_perm); + *locknum = sem_lock(sma, sops, nsops); + + /* ipc_rmid() may have already freed the ID while sem_lock + * was spinning: verify that the structure is still valid + */ + if (ipc_valid_object(ipcp)) + return container_of(ipcp, struct sem_array, sem_perm); + + sem_unlock(sma, *locknum); + return ERR_PTR(-EINVAL); +} + +static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) +{ + struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id); + + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); + + return container_of(ipcp, struct sem_array, sem_perm); +} + +static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns, + int id) +{ + struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id); + + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); + + return container_of(ipcp, struct sem_array, sem_perm); +} + +static inline void sem_lock_and_putref(struct sem_array *sma) +{ + sem_lock(sma, NULL, -1); + ipc_rcu_putref(sma, ipc_rcu_free); +} + +static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) +{ + ipc_rmid(&sem_ids(ns), &s->sem_perm); } /* @@ -128,18 +438,18 @@ void __init sem_init (void) * Without the check/retry algorithm a lockless wakeup is possible: * - queue.status is initialized to -EINTR before blocking. * - wakeup is performed by - * * unlinking the queue entry from sma->sem_pending + * * unlinking the queue entry from the pending list * * setting queue.status to IN_WAKEUP * This is the notification for the blocked thread that a * result value is imminent. * * call wake_up_process * * set queue.status to the final value. * - the previously blocked thread checks queue.status: - * * if it's IN_WAKEUP, then it must wait until the value changes - * * if it's not -EINTR, then the operation was completed by - * update_queue. semtimedop can return queue.status without - * performing any operation on the semaphore array. - * * otherwise it must acquire the spinlock and check what's up. + * * if it's IN_WAKEUP, then it must wait until the value changes + * * if it's not -EINTR, then the operation was completed by + * update_queue. 
semtimedop can return queue.status without + * performing any operation on the sem array. + * * otherwise it must acquire the spinlock and check what's up. * * The two-stage algorithm is necessary to protect against the following * races: @@ -157,24 +467,35 @@ void __init sem_init (void) */ #define IN_WAKEUP 1 -static int newary (key_t key, int nsems, int semflg) +/** + * newary - Create a new semaphore set + * @ns: namespace + * @params: ptr to the structure that contains key, semflg and nsems + * + * Called with sem_ids.rwsem held (as a writer) + */ +static int newary(struct ipc_namespace *ns, struct ipc_params *params) { int id; int retval; struct sem_array *sma; int size; + key_t key = params->key; + int nsems = params->u.nsems; + int semflg = params->flg; + int i; if (!nsems) return -EINVAL; - if (used_sems + nsems > sc_semmns) + if (ns->used_sems + nsems > ns->sc_semmns) return -ENOSPC; - size = sizeof (*sma) + nsems * sizeof (struct sem); + size = sizeof(*sma) + nsems * sizeof(struct sem); sma = ipc_rcu_alloc(size); - if (!sma) { + if (!sma) return -ENOMEM; - } - memset (sma, 0, size); + + memset(sma, 0, size); sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.key = key; @@ -182,118 +503,112 @@ static int newary (key_t key, int nsems, int semflg) sma->sem_perm.security = NULL; retval = security_sem_alloc(sma); if (retval) { - ipc_rcu_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); return retval; } - id = ipc_addid(&sem_ids, &sma->sem_perm, sc_semmni); - if(id == -1) { - security_sem_free(sma); - ipc_rcu_putref(sma); - return -ENOSPC; + id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); + if (id < 0) { + ipc_rcu_putref(sma, sem_rcu_free); + return id; } - used_sems += nsems; + ns->used_sems += nsems; - sma->sem_id = sem_buildid(id, sma->sem_perm.seq); sma->sem_base = (struct sem *) &sma[1]; - /* sma->sem_pending = NULL; */ - sma->sem_pending_last = &sma->sem_pending; - /* sma->undo = NULL; */ + + for (i = 0; i < nsems; i++) { + INIT_LIST_HEAD(&sma->sem_base[i].pending_alter); + INIT_LIST_HEAD(&sma->sem_base[i].pending_const); + spin_lock_init(&sma->sem_base[i].lock); + } + + sma->complex_count = 0; + INIT_LIST_HEAD(&sma->pending_alter); + INIT_LIST_HEAD(&sma->pending_const); + INIT_LIST_HEAD(&sma->list_id); sma->sem_nsems = nsems; sma->sem_ctime = get_seconds(); - sem_unlock(sma); + sem_unlock(sma, -1); + rcu_read_unlock(); - return sma->sem_id; + return sma->sem_perm.id; } -asmlinkage long sys_semget (key_t key, int nsems, int semflg) + +/* + * Called with sem_ids.rwsem and ipcp locked. 
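
/*
 * Illustration (not part of the patch): a semget() call that ends up in
 * newary() above. nsems is checked against sc_semmsl before the lookup and
 * against the namespace-wide sc_semmns inside newary().
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	/* four semaphores, all initialized to 0, mode 0600 */
	int id = semget(IPC_PRIVATE, 4, IPC_CREAT | 0600);

	if (id == -1) {
		perror("semget");	/* e.g. ENOSPC once sc_semmns is exhausted */
		return 1;
	}
	printf("created semaphore set %d\n", id);
	semctl(id, 0, IPC_RMID);	/* kernel side: freeary() */
	return 0;
}
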
+ */ +static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg) { - int id, err = -EINVAL; struct sem_array *sma; - if (nsems < 0 || nsems > sc_semmsl) - return -EINVAL; - down(&sem_ids.sem); - - if (key == IPC_PRIVATE) { - err = newary(key, nsems, semflg); - } else if ((id = ipc_findkey(&sem_ids, key)) == -1) { /* key not used */ - if (!(semflg & IPC_CREAT)) - err = -ENOENT; - else - err = newary(key, nsems, semflg); - } else if (semflg & IPC_CREAT && semflg & IPC_EXCL) { - err = -EEXIST; - } else { - sma = sem_lock(id); - if(sma==NULL) - BUG(); - if (nsems > sma->sem_nsems) - err = -EINVAL; - else if (ipcperms(&sma->sem_perm, semflg)) - err = -EACCES; - else { - int semid = sem_buildid(id, sma->sem_perm.seq); - err = security_sem_associate(sma, semflg); - if (!err) - err = semid; - } - sem_unlock(sma); - } - - up(&sem_ids.sem); - return err; + sma = container_of(ipcp, struct sem_array, sem_perm); + return security_sem_associate(sma, semflg); } -/* Manage the doubly linked list sma->sem_pending as a FIFO: - * insert new queue elements at the tail sma->sem_pending_last. +/* + * Called with sem_ids.rwsem and ipcp locked. */ -static inline void append_to_queue (struct sem_array * sma, - struct sem_queue * q) +static inline int sem_more_checks(struct kern_ipc_perm *ipcp, + struct ipc_params *params) { - *(q->prev = sma->sem_pending_last) = q; - *(sma->sem_pending_last = &q->next) = NULL; -} + struct sem_array *sma; -static inline void prepend_to_queue (struct sem_array * sma, - struct sem_queue * q) -{ - q->next = sma->sem_pending; - *(q->prev = &sma->sem_pending) = q; - if (q->next) - q->next->prev = &q->next; - else /* sma->sem_pending_last == &sma->sem_pending */ - sma->sem_pending_last = &q->next; + sma = container_of(ipcp, struct sem_array, sem_perm); + if (params->u.nsems > sma->sem_nsems) + return -EINVAL; + + return 0; } -static inline void remove_from_queue (struct sem_array * sma, - struct sem_queue * q) +SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) { - *(q->prev) = q->next; - if (q->next) - q->next->prev = q->prev; - else /* sma->sem_pending_last == &q->next */ - sma->sem_pending_last = q->prev; - q->prev = NULL; /* mark as removed */ + struct ipc_namespace *ns; + static const struct ipc_ops sem_ops = { + .getnew = newary, + .associate = sem_security, + .more_checks = sem_more_checks, + }; + struct ipc_params sem_params; + + ns = current->nsproxy->ipc_ns; + + if (nsems < 0 || nsems > ns->sc_semmsl) + return -EINVAL; + + sem_params.key = key; + sem_params.flg = semflg; + sem_params.u.nsems = nsems; + + return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); } -/* - * Determine whether a sequence of semaphore operations would succeed - * all at once. Return 0 if yes, 1 if need to sleep, else return error code. +/** + * perform_atomic_semop - Perform (if possible) a semaphore operation + * @sma: semaphore array + * @q: struct sem_queue that describes the operation + * + * Returns 0 if the operation was possible. + * Returns 1 if the operation is impossible, the caller must sleep. + * Negative values are error codes. 
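
/*
 * Illustration (not part of the patch): the "would block" outcome of
 * perform_atomic_semop() as seen from user space. With IPC_NOWAIT the
 * kernel returns -EAGAIN instead of queueing a sem_queue.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	struct sembuf op = { .sem_num = 0, .sem_op = -1, .sem_flg = IPC_NOWAIT };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id == -1)
		return 1;
	/* semval is 0, so the decrement cannot proceed atomically */
	if (semop(id, &op, 1) == -1 && errno == EAGAIN)
		printf("EAGAIN: the operation would have blocked\n");
	semctl(id, 0, IPC_RMID);
	return 0;
}
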
*/ - -static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops, - int nsops, struct sem_undo *un, int pid) +static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) { - int result, sem_op; + int result, sem_op, nsops, pid; struct sembuf *sop; - struct sem * curr; + struct sem *curr; + struct sembuf *sops; + struct sem_undo *un; + + sops = q->sops; + nsops = q->nsops; + un = q->undo; for (sop = sops; sop < sops + nsops; sop++) { curr = sma->sem_base + sop->sem_num; sem_op = sop->sem_op; result = curr->semval; - + if (!sem_op && result) goto would_block; @@ -302,26 +617,25 @@ static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops, goto would_block; if (result > SEMVMX) goto out_of_range; + if (sop->sem_flg & SEM_UNDO) { int undo = un->semadj[sop->sem_num] - sem_op; - /* - * Exceeding the undo range is an error. - */ + /* Exceeding the undo range is an error. */ if (undo < (-SEMAEM - 1) || undo > SEMAEM) goto out_of_range; + un->semadj[sop->sem_num] = undo; } + curr->semval = result; } sop--; + pid = q->pid; while (sop >= sops) { sma->sem_base[sop->sem_num].sempid = pid; - if (sop->sem_flg & SEM_UNDO) - un->semadj[sop->sem_num] -= sop->sem_op; sop--; } - - sma->sem_otime = get_seconds(); + return 0; out_of_range: @@ -329,6 +643,8 @@ out_of_range: goto undo; would_block: + q->blocking = sop; + if (sop->sem_flg & IPC_NOWAIT) result = -EAGAIN; else @@ -337,153 +653,482 @@ would_block: undo: sop--; while (sop >= sops) { - sma->sem_base[sop->sem_num].semval -= sop->sem_op; + sem_op = sop->sem_op; + sma->sem_base[sop->sem_num].semval -= sem_op; + if (sop->sem_flg & SEM_UNDO) + un->semadj[sop->sem_num] += sem_op; sop--; } return result; } -/* Go through the pending queue for the indicated semaphore - * looking for tasks that can be completed. +/** wake_up_sem_queue_prepare(q, error): Prepare wake-up + * @q: queue entry that must be signaled + * @error: Error value for the signal + * + * Prepare the wake-up of the queue entry q. */ -static void update_queue (struct sem_array * sma) +static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) { - int error; - struct sem_queue * q; + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the + * wakee busy-wait until we're scheduled back on. + */ + preempt_disable(); + } + q->status = IN_WAKEUP; + q->pid = error; - q = sma->sem_pending; - while(q) { - error = try_atomic_semop(sma, q->sops, q->nsops, - q->undo, q->pid); + list_add_tail(&q->list, pt); +} + +/** + * wake_up_sem_queue_do - do the actual wake-up + * @pt: list of tasks to be woken up + * + * Do the actual wake-up. + * The function is called without any locks held, thus the semaphore array + * could be destroyed already and the tasks can disappear as soon as the + * status is set to the actual return code. + */ +static void wake_up_sem_queue_do(struct list_head *pt) +{ + struct sem_queue *q, *t; + int did_something; + + did_something = !list_empty(pt); + list_for_each_entry_safe(q, t, pt, list) { + wake_up_process(q->sleeper); + /* q can disappear immediately after writing q->status. 
*/
+		smp_wmb();
+		q->status = q->pid;
+	}
+	if (did_something)
+		preempt_enable();
+}
+
+static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
+{
+	list_del(&q->list);
+	if (q->nsops > 1)
+		sma->complex_count--;
+}
+
+/** check_restart(sma, q)
+ * @sma: semaphore array
+ * @q: the operation that just completed
+ *
+ * update_queue is O(N^2) when it restarts scanning the whole queue of
+ * waiting operations. Therefore this function checks if the restart is
+ * really necessary. It is called after a previously waiting operation
+ * modified the array.
+ * Note that wait-for-zero operations are handled without restart.
+ */
+static int check_restart(struct sem_array *sma, struct sem_queue *q)
+{
+	/* pending complex alter operations are too difficult to analyse */
+	if (!list_empty(&sma->pending_alter))
+		return 1;
+
+	/* we were a sleeping complex operation. Too difficult */
+	if (q->nsops > 1)
+		return 1;
+
+	/* It is impossible that someone waits for the new value:
+	 * - complex operations always restart.
+	 * - wait-for-zero are handled separately.
+	 * - q is a previously sleeping simple operation that
+	 *   altered the array. It must be a decrement, because
+	 *   simple increments never sleep.
+	 * - If there are older (higher priority) decrements
+	 *   in the queue, then they have observed the original
+	 *   semval value and couldn't proceed. The operation
+	 *   decremented the semaphore to that value - thus they
+	 *   won't proceed either.
+	 */
+	return 0;
+}
+
+/**
+ * wake_const_ops - wake up non-alter tasks
+ * @sma: semaphore array.
+ * @semnum: semaphore that was modified.
+ * @pt: list head for the tasks that must be woken up.
+ *
+ * wake_const_ops must be called after a semaphore in a semaphore array
+ * was set to 0. If complex const operations are pending, wake_const_ops must
+ * be called with semnum = -1, as well as with the number of each modified
+ * semaphore.
+ * The tasks that must be woken up are added to @pt. The return code
+ * is stored in q->pid.
+ * The function returns 1 if at least one operation was completed successfully.
+ */
+static int wake_const_ops(struct sem_array *sma, int semnum,
+				struct list_head *pt)
+{
+	struct sem_queue *q;
+	struct list_head *walk;
+	struct list_head *pending_list;
+	int semop_completed = 0;
+
+	if (semnum == -1)
+		pending_list = &sma->pending_const;
+	else
+		pending_list = &sma->sem_base[semnum].pending_const;
+
+	walk = pending_list->next;
+	while (walk != pending_list) {
+		int error;
+
+		q = container_of(walk, struct sem_queue, list);
+		walk = walk->next;
+
+		error = perform_atomic_semop(sma, q);
 
-		/* Does q->sleeper still need to sleep? */
 		if (error <= 0) {
-			struct sem_queue *n;
-			remove_from_queue(sma,q);
-			q->status = IN_WAKEUP;
+			/* operation completed, remove from queue & wakeup */
+
+			unlink_queue(sma, q);
+
+			wake_up_sem_queue_prepare(pt, q, error);
+			if (error == 0)
+				semop_completed = 1;
+		}
+	}
+	return semop_completed;
+}
+
+/**
+ * do_smart_wakeup_zero - wakeup all wait for zero tasks
+ * @sma: semaphore array
+ * @sops: operations that were performed
+ * @nsops: number of operations
+ * @pt: list head of the tasks that must be woken up.
+ *
+ * Checks all required queues for wait-for-zero operations, based
+ * on the actual changes that were performed on the semaphore array.
+ * The function returns 1 if at least one operation was completed successfully.
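
/*
 * Illustration (not part of the patch): a wait-for-zero operation
 * (sem_op == 0) sits on the pending_const lists that wake_const_ops()
 * above services.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

union semun { int val; };

int main(void)
{
	struct sembuf wait_zero = { .sem_num = 0, .sem_op = 0 };
	struct sembuf dec = { .sem_num = 0, .sem_op = -1 };
	union semun arg = { .val = 1 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id == -1 || semctl(id, 0, SETVAL, arg) == -1)
		return 1;
	if (fork() == 0) {
		semop(id, &wait_zero, 1);	/* sleeps until semval == 0 */
		printf("child: semaphore reached zero\n");
		_exit(0);
	}
	sleep(1);
	semop(id, &dec, 1);	/* 1 -> 0 wakes the wait-for-zero sleeper */
	wait(NULL);
	semctl(id, 0, IPC_RMID);
	return 0;
}
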
+ */ +static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, + int nsops, struct list_head *pt) +{ + int i; + int semop_completed = 0; + int got_zero = 0; + + /* first: the per-semaphore queues, if known */ + if (sops) { + for (i = 0; i < nsops; i++) { + int num = sops[i].sem_num; + + if (sma->sem_base[num].semval == 0) { + got_zero = 1; + semop_completed |= wake_const_ops(sma, num, pt); + } + } + } else { + /* + * No sops means modified semaphores not known. + * Assume all were changed. + */ + for (i = 0; i < sma->sem_nsems; i++) { + if (sma->sem_base[i].semval == 0) { + got_zero = 1; + semop_completed |= wake_const_ops(sma, i, pt); + } + } + } + /* + * If one of the modified semaphores got 0, + * then check the global queue, too. + */ + if (got_zero) + semop_completed |= wake_const_ops(sma, -1, pt); + + return semop_completed; +} + + +/** + * update_queue - look for tasks that can be completed. + * @sma: semaphore array. + * @semnum: semaphore that was modified. + * @pt: list head for the tasks that must be woken up. + * + * update_queue must be called after a semaphore in a semaphore array + * was modified. If multiple semaphores were modified, update_queue must + * be called with semnum = -1, as well as with the number of each modified + * semaphore. + * The tasks that must be woken up are added to @pt. The return code + * is stored in q->pid. + * The function internally checks if const operations can now succeed. + * + * The function return 1 if at least one semop was completed successfully. + */ +static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) +{ + struct sem_queue *q; + struct list_head *walk; + struct list_head *pending_list; + int semop_completed = 0; + + if (semnum == -1) + pending_list = &sma->pending_alter; + else + pending_list = &sma->sem_base[semnum].pending_alter; + +again: + walk = pending_list->next; + while (walk != pending_list) { + int error, restart; + + q = container_of(walk, struct sem_queue, list); + walk = walk->next; + + /* If we are scanning the single sop, per-semaphore list of + * one semaphore and that semaphore is 0, then it is not + * necessary to scan further: simple increments + * that affect only one entry succeed immediately and cannot + * be in the per semaphore pending queue, and decrements + * cannot be successful if the value is already 0. + */ + if (semnum != -1 && sma->sem_base[semnum].semval == 0) + break; + + error = perform_atomic_semop(sma, q); + + /* Does q->sleeper still need to sleep? */ + if (error > 0) + continue; + + unlink_queue(sma, q); + + if (error) { + restart = 0; + } else { + semop_completed = 1; + do_smart_wakeup_zero(sma, q->sops, q->nsops, pt); + restart = check_restart(sma, q); + } + + wake_up_sem_queue_prepare(pt, q, error); + if (restart) + goto again; + } + return semop_completed; +} + +/** + * set_semotime - set sem_otime + * @sma: semaphore array + * @sops: operations that modified the array, may be NULL + * + * sem_otime is replicated to avoid cache line trashing. + * This function sets one instance to the current time. 
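
/*
 * Illustration (not part of the patch): the per-semaphore sem_otime
 * replicas written by set_semotime() are folded back into a single value
 * (see get_semotime()) when user space asks for IPC_STAT.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun { int val; struct semid_ds *buf; };

int main(void)
{
	struct sembuf inc = { .sem_num = 0, .sem_op = 1 };
	struct semid_ds ds;
	union semun arg = { .buf = &ds };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id == -1 || semop(id, &inc, 1) == -1)	/* sets sem_otime */
		return 1;
	if (semctl(id, 0, IPC_STAT, arg) == 0)
		printf("nsems=%lu otime=%ld ctime=%ld\n",
		       (unsigned long)ds.sem_nsems,
		       (long)ds.sem_otime, (long)ds.sem_ctime);
	semctl(id, 0, IPC_RMID);
	return 0;
}
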
+ */ +static void set_semotime(struct sem_array *sma, struct sembuf *sops) +{ + if (sops == NULL) { + sma->sem_base[0].sem_otime = get_seconds(); + } else { + sma->sem_base[sops[0].sem_num].sem_otime = + get_seconds(); + } +} + +/** + * do_smart_update - optimized update_queue + * @sma: semaphore array + * @sops: operations that were performed + * @nsops: number of operations + * @otime: force setting otime + * @pt: list head of the tasks that must be woken up. + * + * do_smart_update() does the required calls to update_queue and wakeup_zero, + * based on the actual changes that were performed on the semaphore array. + * Note that the function does not do the actual wake-up: the caller is + * responsible for calling wake_up_sem_queue_do(@pt). + * It is safe to perform this call after dropping all locks. + */ +static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, + int otime, struct list_head *pt) +{ + int i; + + otime |= do_smart_wakeup_zero(sma, sops, nsops, pt); + + if (!list_empty(&sma->pending_alter)) { + /* semaphore array uses the global queue - just process it. */ + otime |= update_queue(sma, -1, pt); + } else { + if (!sops) { /* - * Continue scanning. The next operation - * that must be checked depends on the type of the - * completed operation: - * - if the operation modified the array, then - * restart from the head of the queue and - * check for threads that might be waiting - * for semaphore values to become 0. - * - if the operation didn't modify the array, - * then just continue. - */ - if (q->alter) - n = sma->sem_pending; - else - n = q->next; - wake_up_process(q->sleeper); - /* hands-off: q will disappear immediately after - * writing q->status. + * No sops, thus the modified semaphores are not + * known. Check all. */ - q->status = error; - q = n; + for (i = 0; i < sma->sem_nsems; i++) + otime |= update_queue(sma, i, pt); } else { - q = q->next; + /* + * Check the semaphores that were increased: + * - No complex ops, thus all sleeping ops are + * decrease. + * - if we decreased the value, then any sleeping + * semaphore ops wont be able to run: If the + * previous value was too small, then the new + * value will be too small, too. + */ + for (i = 0; i < nsops; i++) { + if (sops[i].sem_op > 0) { + otime |= update_queue(sma, + sops[i].sem_num, pt); + } + } } } + if (otime) + set_semotime(sma, sops); +} + +/* + * check_qop: Test if a queued operation sleeps on the semaphore semnum + */ +static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, + bool count_zero) +{ + struct sembuf *sop = q->blocking; + + /* + * Linux always (since 0.99.10) reported a task as sleeping on all + * semaphores. This violates SUS, therefore it was changed to the + * standard compliant behavior. + * Give the administrators a chance to notice that an application + * might misbehave because it relies on the Linux behavior. + */ + pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n" + "The task %s (%d) triggered the difference, watch for misbehavior.\n", + current->comm, task_pid_nr(current)); + + if (sop->sem_num != semnum) + return 0; + + if (count_zero && sop->sem_op == 0) + return 1; + if (!count_zero && sop->sem_op < 0) + return 1; + + return 0; } /* The following counts are associated to each semaphore: * semncnt number of tasks waiting on semval being nonzero * semzcnt number of tasks waiting on semval being zero - * This model assumes that a task waits on exactly one semaphore. 
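
/*
 * Illustration (not part of the patch): the counters computed by
 * count_semcnt(). As the pr_info_once() above advertises, a sleeping task
 * is counted only against the first sembuf that blocked it.
 */
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct sembuf dec = { .sem_num = 0, .sem_op = -1 };
	struct sembuf inc = { .sem_num = 0, .sem_op = 1 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id == -1)
		return 1;
	if (fork() == 0) {
		semop(id, &dec, 1);	/* blocks: semval is 0 */
		_exit(0);
	}
	sleep(1);
	/* one task sleeps in a decrement, none waits for zero */
	printf("GETNCNT=%d GETZCNT=%d\n",
	       semctl(id, 0, GETNCNT), semctl(id, 0, GETZCNT));
	semop(id, &inc, 1);	/* release the child */
	wait(NULL);
	semctl(id, 0, IPC_RMID);
	return 0;
}
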
- * Since semaphore operations are to be performed atomically, tasks actually
- * wait on a whole sequence of semaphores simultaneously.
- * The counts we return here are a rough approximation, but still
- * warrant that semncnt+semzcnt>0 if the task is on the pending queue.
+ *
+ * Per definition, a task waits only on the semaphore of the first semop
+ * that cannot proceed, even if additional operations would block, too.
 */
-static int count_semncnt (struct sem_array * sma, ushort semnum)
+static int count_semcnt(struct sem_array *sma, ushort semnum,
+			bool count_zero)
 {
-	int semncnt;
-	struct sem_queue * q;
+	struct list_head *l;
+	struct sem_queue *q;
+	int semcnt;
 
-	semncnt = 0;
-	for (q = sma->sem_pending; q; q = q->next) {
-		struct sembuf * sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op < 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semncnt++;
+	semcnt = 0;
+	/* First: check the simple operations. They are easy to evaluate */
+	if (count_zero)
+		l = &sma->sem_base[semnum].pending_const;
+	else
+		l = &sma->sem_base[semnum].pending_alter;
+
+	list_for_each_entry(q, l, list) {
+		/* all tasks on a per-semaphore list sleep on exactly
+		 * that semaphore
+		 */
+		semcnt++;
 	}
-	return semncnt;
-}
 
-static int count_semzcnt (struct sem_array * sma, ushort semnum)
-{
-	int semzcnt;
-	struct sem_queue * q;
-
-	semzcnt = 0;
-	for (q = sma->sem_pending; q; q = q->next) {
-		struct sembuf * sops = q->sops;
-		int nsops = q->nsops;
-		int i;
-		for (i = 0; i < nsops; i++)
-			if (sops[i].sem_num == semnum
-			    && (sops[i].sem_op == 0)
-			    && !(sops[i].sem_flg & IPC_NOWAIT))
-				semzcnt++;
+	/* Then: check the complex operations. */
+	list_for_each_entry(q, &sma->pending_alter, list) {
+		semcnt += check_qop(sma, semnum, q, count_zero);
 	}
-	return semzcnt;
+	if (count_zero) {
+		list_for_each_entry(q, &sma->pending_const, list) {
+			semcnt += check_qop(sma, semnum, q, count_zero);
+		}
+	}
+	return semcnt;
 }
 
-/* Free a semaphore set. freeary() is called with sem_ids.sem down and
- * the spinlock for this semaphore set hold. sem_ids.sem remains locked
- * on exit.
+/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked
+ * as a writer and the spinlock for this semaphore set held. sem_ids.rwsem
+ * remains locked on exit.
 */
-static void freeary (struct sem_array *sma, int id)
+static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
 {
-	struct sem_undo *un;
-	struct sem_queue *q;
-	int size;
-
-	/* Invalidate the existing undo structures for this semaphore set.
-	 * (They will be freed without any further action in exit_sem()
-	 * or during the next semop.)
-	 */
-	for (un = sma->undo; un; un = un->id_next)
+	struct sem_undo *un, *tu;
+	struct sem_queue *q, *tq;
+	struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm);
+	struct list_head tasks;
+	int i;
+
+	/* Free the existing undo structures for this semaphore set. */
+	ipc_assert_locked_object(&sma->sem_perm);
+	list_for_each_entry_safe(un, tu, &sma->list_id, list_id) {
+		list_del(&un->list_id);
+		spin_lock(&un->ulp->lock);
 		un->semid = -1;
+		list_del_rcu(&un->list_proc);
+		spin_unlock(&un->ulp->lock);
+		kfree_rcu(un, rcu);
+	}
 
 	/* Wake up all pending processes and let them fail with EIDRM.
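
/*
 * Illustration (not part of the patch): freeary() above fails every sleeper
 * with -EIDRM, so a semop() blocked in another process returns as soon as
 * the set is removed.
 */
#include <errno.h>
#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
	struct sembuf dec = { .sem_num = 0, .sem_op = -1 };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id == -1)
		return 1;
	if (fork() == 0) {
		if (semop(id, &dec, 1) == -1 && errno == EIDRM)
			printf("child: woken with EIDRM\n");
		_exit(0);
	}
	sleep(1);
	semctl(id, 0, IPC_RMID);	/* kernel side: freeary() wakes the sleeper */
	wait(NULL);
	return 0;
}
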
*/ - q = sma->sem_pending; - while(q) { - struct sem_queue *n; - /* lazy remove_from_queue: we are killing the whole queue */ - q->prev = NULL; - n = q->next; - q->status = IN_WAKEUP; - wake_up_process(q->sleeper); /* doesn't sleep */ - q->status = -EIDRM; /* hands-off q */ - q = n; - } - - /* Remove the semaphore set from the ID array*/ - sma = sem_rmid(id); - sem_unlock(sma); - - used_sems -= sma->sem_nsems; - size = sizeof (*sma) + sma->sem_nsems * sizeof (struct sem); - security_sem_free(sma); - ipc_rcu_putref(sma); + INIT_LIST_HEAD(&tasks); + list_for_each_entry_safe(q, tq, &sma->pending_const, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + } + + list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + } + for (i = 0; i < sma->sem_nsems; i++) { + struct sem *sem = sma->sem_base + i; + list_for_each_entry_safe(q, tq, &sem->pending_const, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + } + list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + } + } + + /* Remove the semaphore set from the IDR */ + sem_rmid(ns, sma); + sem_unlock(sma, -1); + rcu_read_unlock(); + + wake_up_sem_queue_do(&tasks); + ns->used_sems -= sma->sem_nsems; + ipc_rcu_putref(sma, sem_rcu_free); } static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct semid_ds out; + memset(&out, 0, sizeof(out)); + ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); out.sem_otime = in->sem_otime; @@ -497,12 +1142,28 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, } } -static int semctl_nolock(int semid, int semnum, int cmd, int version, union semun arg) +static time_t get_semotime(struct sem_array *sma) { - int err = -EINVAL; + int i; + time_t res; + + res = sma->sem_base[0].sem_otime; + for (i = 1; i < sma->sem_nsems; i++) { + time_t to = sma->sem_base[i].sem_otime; + + if (to > res) + res = to; + } + return res; +} + +static int semctl_nolock(struct ipc_namespace *ns, int semid, + int cmd, int version, void __user *p) +{ + int err; struct sem_array *sma; - switch(cmd) { + switch (cmd) { case IPC_INFO: case SEM_INFO: { @@ -512,132 +1173,217 @@ static int semctl_nolock(int semid, int semnum, int cmd, int version, union semu err = security_sem_semctl(NULL, cmd); if (err) return err; - - memset(&seminfo,0,sizeof(seminfo)); - seminfo.semmni = sc_semmni; - seminfo.semmns = sc_semmns; - seminfo.semmsl = sc_semmsl; - seminfo.semopm = sc_semopm; + + memset(&seminfo, 0, sizeof(seminfo)); + seminfo.semmni = ns->sc_semmni; + seminfo.semmns = ns->sc_semmns; + seminfo.semmsl = ns->sc_semmsl; + seminfo.semopm = ns->sc_semopm; seminfo.semvmx = SEMVMX; seminfo.semmnu = SEMMNU; seminfo.semmap = SEMMAP; seminfo.semume = SEMUME; - down(&sem_ids.sem); + down_read(&sem_ids(ns).rwsem); if (cmd == SEM_INFO) { - seminfo.semusz = sem_ids.in_use; - seminfo.semaem = used_sems; + seminfo.semusz = sem_ids(ns).in_use; + seminfo.semaem = ns->used_sems; } else { seminfo.semusz = SEMUSZ; seminfo.semaem = SEMAEM; } - max_id = sem_ids.max_id; - up(&sem_ids.sem); - if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) + max_id = ipc_get_maxid(&sem_ids(ns)); + up_read(&sem_ids(ns).rwsem); + if (copy_to_user(p, &seminfo, sizeof(struct 
seminfo))) return -EFAULT; - return (max_id < 0) ? 0: max_id; + return (max_id < 0) ? 0 : max_id; } + case IPC_STAT: case SEM_STAT: { struct semid64_ds tbuf; - int id; - - if(semid >= sem_ids.entries->size) - return -EINVAL; + int id = 0; - memset(&tbuf,0,sizeof(tbuf)); + memset(&tbuf, 0, sizeof(tbuf)); - sma = sem_lock(semid); - if(sma == NULL) - return -EINVAL; + rcu_read_lock(); + if (cmd == SEM_STAT) { + sma = sem_obtain_object(ns, semid); + if (IS_ERR(sma)) { + err = PTR_ERR(sma); + goto out_unlock; + } + id = sma->sem_perm.id; + } else { + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + err = PTR_ERR(sma); + goto out_unlock; + } + } err = -EACCES; - if (ipcperms (&sma->sem_perm, S_IRUGO)) + if (ipcperms(ns, &sma->sem_perm, S_IRUGO)) goto out_unlock; err = security_sem_semctl(sma, cmd); if (err) goto out_unlock; - id = sem_buildid(semid, sma->sem_perm.seq); - kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); - tbuf.sem_otime = sma->sem_otime; - tbuf.sem_ctime = sma->sem_ctime; - tbuf.sem_nsems = sma->sem_nsems; - sem_unlock(sma); - if (copy_semid_to_user (arg.buf, &tbuf, version)) + tbuf.sem_otime = get_semotime(sma); + tbuf.sem_ctime = sma->sem_ctime; + tbuf.sem_nsems = sma->sem_nsems; + rcu_read_unlock(); + if (copy_semid_to_user(p, &tbuf, version)) return -EFAULT; return id; } default: return -EINVAL; } - return err; out_unlock: - sem_unlock(sma); + rcu_read_unlock(); return err; } -static int semctl_main(int semid, int semnum, int cmd, int version, union semun arg) +static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, + unsigned long arg) { + struct sem_undo *un; struct sem_array *sma; - struct sem* curr; + struct sem *curr; int err; - ushort fast_sem_io[SEMMSL_FAST]; - ushort* sem_io = fast_sem_io; - int nsems; + struct list_head tasks; + int val; +#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN) + /* big-endian 64bit */ + val = arg >> 32; +#else + /* 32bit or little-endian 64bit */ + val = arg; +#endif + + if (val > SEMVMX || val < 0) + return -ERANGE; - sma = sem_lock(semid); - if(sma==NULL) + INIT_LIST_HEAD(&tasks); + + rcu_read_lock(); + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + rcu_read_unlock(); + return PTR_ERR(sma); + } + + if (semnum < 0 || semnum >= sma->sem_nsems) { + rcu_read_unlock(); return -EINVAL; + } - nsems = sma->sem_nsems; - err=-EIDRM; - if (sem_checkid(sma,semid)) - goto out_unlock; + if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) { + rcu_read_unlock(); + return -EACCES; + } + + err = security_sem_semctl(sma, SETVAL); + if (err) { + rcu_read_unlock(); + return -EACCES; + } + + sem_lock(sma, NULL, -1); + + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); + return -EIDRM; + } + + curr = &sma->sem_base[semnum]; + + ipc_assert_locked_object(&sma->sem_perm); + list_for_each_entry(un, &sma->list_id, list_id) + un->semadj[semnum] = 0; + + curr->semval = val; + curr->sempid = task_tgid_vnr(current); + sma->sem_ctime = get_seconds(); + /* maybe some queued-up processes were waiting for this */ + do_smart_update(sma, NULL, 0, 0, &tasks); + sem_unlock(sma, -1); + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); + return 0; +} + +static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, + int cmd, void __user *p) +{ + struct sem_array *sma; + struct sem *curr; + int err, nsems; + ushort fast_sem_io[SEMMSL_FAST]; + ushort *sem_io = fast_sem_io; + struct list_head tasks; + + INIT_LIST_HEAD(&tasks); + + rcu_read_lock(); + sma = sem_obtain_object_check(ns, 
semid); + if (IS_ERR(sma)) { + rcu_read_unlock(); + return PTR_ERR(sma); + } + + nsems = sma->sem_nsems; err = -EACCES; - if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO)) - goto out_unlock; + if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO)) + goto out_rcu_wakeup; err = security_sem_semctl(sma, cmd); if (err) - goto out_unlock; + goto out_rcu_wakeup; err = -EACCES; switch (cmd) { case GETALL: { - ushort __user *array = arg.array; + ushort __user *array = p; int i; - if(nsems > SEMMSL_FAST) { - ipc_rcu_getref(sma); - sem_unlock(sma); - + sem_lock(sma, NULL, -1); + if (!ipc_valid_object(&sma->sem_perm)) { + err = -EIDRM; + goto out_unlock; + } + if (nsems > SEMMSL_FAST) { + if (!ipc_rcu_getref(sma)) { + err = -EIDRM; + goto out_unlock; + } + sem_unlock(sma, -1); + rcu_read_unlock(); sem_io = ipc_alloc(sizeof(ushort)*nsems); - if(sem_io == NULL) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + if (sem_io == NULL) { + ipc_rcu_putref(sma, ipc_rcu_free); return -ENOMEM; } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - if (sma->sem_perm.deleted) { - sem_unlock(sma); + rcu_read_lock(); + sem_lock_and_putref(sma); + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; - goto out_free; + goto out_unlock; } } - for (i = 0; i < sma->sem_nsems; i++) sem_io[i] = sma->sem_base[i].semval; - sem_unlock(sma); + sem_unlock(sma, -1); + rcu_read_unlock(); err = 0; - if(copy_to_user(array, sem_io, nsems*sizeof(ushort))) + if (copy_to_user(array, sem_io, nsems*sizeof(ushort))) err = -EFAULT; goto out_free; } @@ -646,74 +1392,65 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun int i; struct sem_undo *un; - ipc_rcu_getref(sma); - sem_unlock(sma); + if (!ipc_rcu_getref(sma)) { + err = -EIDRM; + goto out_rcu_wakeup; + } + rcu_read_unlock(); - if(nsems > SEMMSL_FAST) { + if (nsems > SEMMSL_FAST) { sem_io = ipc_alloc(sizeof(ushort)*nsems); - if(sem_io == NULL) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + if (sem_io == NULL) { + ipc_rcu_putref(sma, ipc_rcu_free); return -ENOMEM; } } - if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { + ipc_rcu_putref(sma, ipc_rcu_free); err = -EFAULT; goto out_free; } for (i = 0; i < nsems; i++) { if (sem_io[i] > SEMVMX) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + ipc_rcu_putref(sma, ipc_rcu_free); err = -ERANGE; goto out_free; } } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - if (sma->sem_perm.deleted) { - sem_unlock(sma); + rcu_read_lock(); + sem_lock_and_putref(sma); + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; - goto out_free; + goto out_unlock; } for (i = 0; i < nsems; i++) sma->sem_base[i].semval = sem_io[i]; - for (un = sma->undo; un; un = un->id_next) + + ipc_assert_locked_object(&sma->sem_perm); + list_for_each_entry(un, &sma->list_id, list_id) { for (i = 0; i < nsems; i++) un->semadj[i] = 0; + } sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - update_queue(sma); + do_smart_update(sma, NULL, 0, 0, &tasks); err = 0; goto out_unlock; } - case IPC_STAT: - { - struct semid64_ds tbuf; - memset(&tbuf,0,sizeof(tbuf)); - kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); - tbuf.sem_otime = sma->sem_otime; - tbuf.sem_ctime = sma->sem_ctime; - tbuf.sem_nsems = 
sma->sem_nsems; - sem_unlock(sma); - if (copy_semid_to_user (arg.buf, &tbuf, version)) - return -EFAULT; - return 0; - } - /* GETVAL, GETPID, GETNCNT, GETZCNT, SETVAL: fall-through */ + /* GETVAL, GETPID, GETNCNT, GETZCNT: fall-through */ } err = -EINVAL; - if(semnum < 0 || semnum >= nsems) - goto out_unlock; + if (semnum < 0 || semnum >= nsems) + goto out_rcu_wakeup; + sem_lock(sma, NULL, -1); + if (!ipc_valid_object(&sma->sem_perm)) { + err = -EIDRM; + goto out_unlock; + } curr = &sma->sem_base[semnum]; switch (cmd) { @@ -724,70 +1461,42 @@ static int semctl_main(int semid, int semnum, int cmd, int version, union semun err = curr->sempid; goto out_unlock; case GETNCNT: - err = count_semncnt(sma,semnum); + err = count_semcnt(sma, semnum, 0); goto out_unlock; case GETZCNT: - err = count_semzcnt(sma,semnum); - goto out_unlock; - case SETVAL: - { - int val = arg.val; - struct sem_undo *un; - err = -ERANGE; - if (val > SEMVMX || val < 0) - goto out_unlock; - - for (un = sma->undo; un; un = un->id_next) - un->semadj[semnum] = 0; - curr->semval = val; - curr->sempid = current->tgid; - sma->sem_ctime = get_seconds(); - /* maybe some queued-up processes were waiting for this */ - update_queue(sma); - err = 0; + err = count_semcnt(sma, semnum, 1); goto out_unlock; } - } + out_unlock: - sem_unlock(sma); + sem_unlock(sma, -1); +out_rcu_wakeup: + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); out_free: - if(sem_io != fast_sem_io) + if (sem_io != fast_sem_io) ipc_free(sem_io, sizeof(ushort)*nsems); return err; } -struct sem_setbuf { - uid_t uid; - gid_t gid; - mode_t mode; -}; - -static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version) +static inline unsigned long +copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: - { - struct semid64_ds tbuf; - - if(copy_from_user(&tbuf, buf, sizeof(tbuf))) + if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; - - out->uid = tbuf.sem_perm.uid; - out->gid = tbuf.sem_perm.gid; - out->mode = tbuf.sem_perm.mode; - return 0; - } case IPC_OLD: { struct semid_ds tbuf_old; - if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) + if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->uid = tbuf_old.sem_perm.uid; - out->gid = tbuf_old.sem_perm.gid; - out->mode = tbuf_old.sem_perm.mode; + out->sem_perm.uid = tbuf_old.sem_perm.uid; + out->sem_perm.gid = tbuf_old.sem_perm.gid; + out->sem_perm.mode = tbuf_old.sem_perm.mode; return 0; } @@ -796,132 +1505,102 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __ } } -static int semctl_down(int semid, int semnum, int cmd, int version, union semun arg) +/* + * This function handles some semctl commands which require the rwsem + * to be held in write mode. + * NOTE: no locks must be held, the rwsem is taken inside this function.
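+ *
+ * IPC_SET and IPC_RMID are the callers' path into this function. A
+ * user-space sketch (error handling omitted; on Linux the caller has to
+ * declare union semun itself, and id comes from an earlier semget()):
+ *
+ *	struct semid_ds ds;
+ *	union semun arg = { .buf = &ds };
+ *	semctl(id, 0, IPC_STAT, arg);	(read-only, served by semctl_nolock)
+ *	ds.sem_perm.mode = 0600;
+ *	semctl(id, 0, IPC_SET, arg);	(handled below)
+ *	semctl(id, 0, IPC_RMID, arg);	(handled below)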
+ */ +static int semctl_down(struct ipc_namespace *ns, int semid, + int cmd, int version, void __user *p) { struct sem_array *sma; int err; - struct sem_setbuf setbuf; + struct semid64_ds semid64; struct kern_ipc_perm *ipcp; - if(cmd == IPC_SET) { - if(copy_semid_from_user (&setbuf, arg.buf, version)) + if (cmd == IPC_SET) { + if (copy_semid_from_user(&semid64, p, version)) return -EFAULT; - if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode))) - return err; } - sma = sem_lock(semid); - if(sma==NULL) - return -EINVAL; - if (sem_checkid(sma,semid)) { - err=-EIDRM; - goto out_unlock; - } - ipcp = &sma->sem_perm; - - if (current->euid != ipcp->cuid && - current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) { - err=-EPERM; - goto out_unlock; + down_write(&sem_ids(ns).rwsem); + rcu_read_lock(); + + ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd, + &semid64.sem_perm, 0); + if (IS_ERR(ipcp)) { + err = PTR_ERR(ipcp); + goto out_unlock1; } + sma = container_of(ipcp, struct sem_array, sem_perm); + err = security_sem_semctl(sma, cmd); if (err) - goto out_unlock; + goto out_unlock1; - switch(cmd){ + switch (cmd) { case IPC_RMID: - freeary(sma, semid); - err = 0; - break; + sem_lock(sma, NULL, -1); + /* freeary unlocks the ipc object and rcu */ + freeary(ns, ipcp); + goto out_up; case IPC_SET: - ipcp->uid = setbuf.uid; - ipcp->gid = setbuf.gid; - ipcp->mode = (ipcp->mode & ~S_IRWXUGO) - | (setbuf.mode & S_IRWXUGO); + sem_lock(sma, NULL, -1); + err = ipc_update_perm(&semid64.sem_perm, ipcp); + if (err) + goto out_unlock0; sma->sem_ctime = get_seconds(); - sem_unlock(sma); - err = 0; break; default: - sem_unlock(sma); err = -EINVAL; - break; + goto out_unlock1; } - return err; -out_unlock: - sem_unlock(sma); +out_unlock0: + sem_unlock(sma, -1); +out_unlock1: + rcu_read_unlock(); +out_up: + up_write(&sem_ids(ns).rwsem); return err; } -asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) +SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) { - int err = -EINVAL; int version; + struct ipc_namespace *ns; + void __user *p = (void __user *)arg; if (semid < 0) return -EINVAL; version = ipc_parse_version(&cmd); + ns = current->nsproxy->ipc_ns; - switch(cmd) { + switch (cmd) { case IPC_INFO: case SEM_INFO: + case IPC_STAT: case SEM_STAT: - err = semctl_nolock(semid,semnum,cmd,version,arg); - return err; + return semctl_nolock(ns, semid, cmd, version, p); case GETALL: case GETVAL: case GETPID: case GETNCNT: case GETZCNT: - case IPC_STAT: - case SETVAL: case SETALL: - err = semctl_main(semid,semnum,cmd,version,arg); - return err; + return semctl_main(ns, semid, semnum, cmd, p); + case SETVAL: + return semctl_setval(ns, semid, semnum, arg); case IPC_RMID: case IPC_SET: - down(&sem_ids.sem); - err = semctl_down(semid,semnum,cmd,version,arg); - up(&sem_ids.sem); - return err; + return semctl_down(ns, semid, cmd, version, p); default: return -EINVAL; } } -static inline void lock_semundo(void) -{ - struct sem_undo_list *undo_list; - - undo_list = current->sysvsem.undo_list; - if (undo_list) - spin_lock(&undo_list->lock); -} - -/* This code has an interaction with copy_semundo(). - * Consider; two tasks are sharing the undo_list. task1 - * acquires the undo_list lock in lock_semundo(). If task2 now - * exits before task1 releases the lock (by calling - * unlock_semundo()), then task1 will never call spin_unlock(). - * This leave the sem_undo_list in a locked state. 
If task1 now creats task3 - * and once again shares the sem_undo_list, the sem_undo_list will still be - * locked, and future SEM_UNDO operations will deadlock. This case is - * dealt with in copy_semundo() by having it reinitialize the spin lock when - * the refcnt goes from 1 to 2. - */ -static inline void unlock_semundo(void) -{ - struct sem_undo_list *undo_list; - - undo_list = current->sysvsem.undo_list; - if (undo_list) - spin_unlock(&undo_list->lock); -} - - /* If the task doesn't already have a undo_list, then allocate one * here. We guarantee there is only one thread using this undo list, * and current is THE ONE @@ -936,139 +1615,189 @@ static inline void unlock_semundo(void) static inline int get_undo_list(struct sem_undo_list **undo_listp) { struct sem_undo_list *undo_list; - int size; undo_list = current->sysvsem.undo_list; if (!undo_list) { - size = sizeof(struct sem_undo_list); - undo_list = (struct sem_undo_list *) kmalloc(size, GFP_KERNEL); + undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL); if (undo_list == NULL) return -ENOMEM; - memset(undo_list, 0, size); spin_lock_init(&undo_list->lock); atomic_set(&undo_list->refcnt, 1); + INIT_LIST_HEAD(&undo_list->list_proc); + current->sysvsem.undo_list = undo_list; } *undo_listp = undo_list; return 0; } +static struct sem_undo *__lookup_undo(struct sem_undo_list *ulp, int semid) +{ + struct sem_undo *un; + + list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) { + if (un->semid == semid) + return un; + } + return NULL; +} + static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) { - struct sem_undo **last, *un; + struct sem_undo *un; - last = &ulp->proc_list; - un = *last; - while(un != NULL) { - if(un->semid==semid) - break; - if(un->semid==-1) { - *last=un->proc_next; - kfree(un); - } else { - last=&un->proc_next; - } - un=*last; + assert_spin_locked(&ulp->lock); + + un = __lookup_undo(ulp, semid); + if (un) { + list_del_rcu(&un->list_proc); + list_add_rcu(&un->list_proc, &ulp->list_proc); } return un; } -static struct sem_undo *find_undo(int semid) +/** + * find_alloc_undo - lookup (and if not present create) undo array + * @ns: namespace + * @semid: semaphore array id + * + * The function looks up (and if not present creates) the undo structure. + * The size of the undo structure depends on the size of the semaphore + * array, thus the alloc path is not that straightforward. + * Lifetime-rules: sem_undo is rcu-protected, on success, the function + * performs a rcu_read_lock(). + */ +static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) { struct sem_array *sma; struct sem_undo_list *ulp; struct sem_undo *un, *new; - int nsems; - int error; + int nsems, error; error = get_undo_list(&ulp); if (error) return ERR_PTR(error); - lock_semundo(); + rcu_read_lock(); + spin_lock(&ulp->lock); un = lookup_undo(ulp, semid); - unlock_semundo(); - if (likely(un!=NULL)) + spin_unlock(&ulp->lock); + if (likely(un != NULL)) goto out; /* no undo structure around - allocate one. 
*/ - sma = sem_lock(semid); - un = ERR_PTR(-EINVAL); - if(sma==NULL) - goto out; - un = ERR_PTR(-EIDRM); - if (sem_checkid(sma,semid)) { - sem_unlock(sma); - goto out; + /* step 1: figure out the size of the semaphore array */ + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + rcu_read_unlock(); + return ERR_CAST(sma); } + nsems = sma->sem_nsems; - ipc_rcu_getref(sma); - sem_unlock(sma); + if (!ipc_rcu_getref(sma)) { + rcu_read_unlock(); + un = ERR_PTR(-EIDRM); + goto out; + } + rcu_read_unlock(); - new = (struct sem_undo *) kmalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); + /* step 2: allocate new undo structure */ + new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); if (!new) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + ipc_rcu_putref(sma, ipc_rcu_free); return ERR_PTR(-ENOMEM); } - memset(new, 0, sizeof(struct sem_undo) + sizeof(short)*nsems); - new->semadj = (short *) &new[1]; - new->semid = semid; - lock_semundo(); - un = lookup_undo(ulp, semid); - if (un) { - unlock_semundo(); + /* step 3: Acquire the lock on semaphore array */ + rcu_read_lock(); + sem_lock_and_putref(sma); + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); kfree(new); - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + un = ERR_PTR(-EIDRM); goto out; } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - if (sma->sem_perm.deleted) { - sem_unlock(sma); - unlock_semundo(); + spin_lock(&ulp->lock); + + /* + * step 4: check for races: did someone else allocate the undo struct? + */ + un = lookup_undo(ulp, semid); + if (un) { kfree(new); - un = ERR_PTR(-EIDRM); - goto out; + goto success; } - new->proc_next = ulp->proc_list; - ulp->proc_list = new; - new->id_next = sma->undo; - sma->undo = new; - sem_unlock(sma); + /* step 5: initialize & link new undo structure */ + new->semadj = (short *) &new[1]; + new->ulp = ulp; + new->semid = semid; + assert_spin_locked(&ulp->lock); + list_add_rcu(&new->list_proc, &ulp->list_proc); + ipc_assert_locked_object(&sma->sem_perm); + list_add(&new->list_id, &sma->list_id); un = new; - unlock_semundo(); + +success: + spin_unlock(&ulp->lock); + sem_unlock(sma, -1); out: return un; } -asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, - unsigned nsops, const struct timespec __user *timeout) + +/** + * get_queue_result - retrieve the result code from sem_queue + * @q: Pointer to queue structure + * + * Retrieve the return code from the pending queue. If IN_WAKEUP is found in + * q->status, then we must loop until the value is replaced with the final + * value: This may happen if a task is woken up by an unrelated event (e.g. + * signal) and in parallel the task is woken up by another task because it got + * the requested semaphores. + * + * The function can be called with or without holding the semaphore spinlock. 
+ */ +static int get_queue_result(struct sem_queue *q) +{ + int error; + + error = q->status; + while (unlikely(error == IN_WAKEUP)) { + cpu_relax(); + error = q->status; + } + + return error; +} + +SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, + unsigned, nsops, const struct timespec __user *, timeout) { int error = -EINVAL; struct sem_array *sma; struct sembuf fast_sops[SEMOPM_FAST]; - struct sembuf* sops = fast_sops, *sop; + struct sembuf *sops = fast_sops, *sop; struct sem_undo *un; - int undos = 0, alter = 0, max; + int undos = 0, alter = 0, max, locknum; struct sem_queue queue; unsigned long jiffies_left = 0; + struct ipc_namespace *ns; + struct list_head tasks; + + ns = current->nsproxy->ipc_ns; if (nsops < 1 || semid < 0) return -EINVAL; - if (nsops > sc_semopm) + if (nsops > ns->sc_semopm) return -E2BIG; - if(nsops > SEMOPM_FAST) { - sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL); - if(sops==NULL) + if (nsops > SEMOPM_FAST) { + sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL); + if (sops == NULL) return -ENOMEM; } - if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) { - error=-EFAULT; + if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { + error = -EFAULT; goto out_free; } if (timeout) { @@ -1094,132 +1823,197 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, alter = 1; } -retry_undos: + INIT_LIST_HEAD(&tasks); + if (undos) { - un = find_undo(semid); + /* On success, find_alloc_undo takes the rcu_read_lock */ + un = find_alloc_undo(ns, semid); if (IS_ERR(un)) { error = PTR_ERR(un); goto out_free; } - } else + } else { un = NULL; + rcu_read_lock(); + } - sma = sem_lock(semid); - error=-EINVAL; - if(sma==NULL) + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + rcu_read_unlock(); + error = PTR_ERR(sma); goto out_free; - error = -EIDRM; - if (sem_checkid(sma,semid)) - goto out_unlock_free; - /* - * semid identifiers are not unique - find_undo may have - * allocated an undo structure, it was invalidated by an RMID - * and now a new array has received the same id. Check and retry. - */ - if (un && un->semid == -1) { - sem_unlock(sma); - goto retry_undos; } + error = -EFBIG; if (max >= sma->sem_nsems) - goto out_unlock_free; + goto out_rcu_wakeup; error = -EACCES; - if (ipcperms(&sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) - goto out_unlock_free; + if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) + goto out_rcu_wakeup; error = security_sem_semop(sma, sops, nsops, alter); if (error) - goto out_unlock_free; + goto out_rcu_wakeup; - error = try_atomic_semop (sma, sops, nsops, un, current->tgid); - if (error <= 0) { - if (alter && error == 0) - update_queue (sma); + error = -EIDRM; + locknum = sem_lock(sma, sops, nsops); + /* + * We eventually might perform the following check in a lockless + * fashion, considering ipc_valid_object() locking constraints. + * If nsops == 1 and there is no contention for sem_perm.lock, then + * only a per-semaphore lock is held and it's OK to proceed with the + * check below. More details on the fine grained locking scheme + * entangled here and why it's RMID race safe in the comments at sem_lock() + */ + if (!ipc_valid_object(&sma->sem_perm)) goto out_unlock_free; - } - - /* We need to sleep on this operation, so we put the current - * task into the pending queue and go to sleep. + /* + * semid identifiers are not unique - find_alloc_undo may have + * allocated an undo structure, it was invalidated by an RMID + * and now a new array has received the same id. Check and fail.
+ * This case can be detected checking un->semid. The existence of + * "un" itself is guaranteed by rcu. */ - - queue.sma = sma; + if (un && un->semid == -1) + goto out_unlock_free; + queue.sops = sops; queue.nsops = nsops; queue.undo = un; - queue.pid = current->tgid; - queue.id = semid; + queue.pid = task_tgid_vnr(current); queue.alter = alter; - if (alter) - append_to_queue(sma ,&queue); - else - prepend_to_queue(sma ,&queue); + + error = perform_atomic_semop(sma, &queue); + if (error == 0) { + /* If the operation was successful, then do + * the required updates. + */ + if (alter) + do_smart_update(sma, sops, nsops, 1, &tasks); + else + set_semotime(sma, sops); + } + if (error <= 0) + goto out_unlock_free; + + /* We need to sleep on this operation, so we put the current + * task into the pending queue and go to sleep. + */ + + if (nsops == 1) { + struct sem *curr; + curr = &sma->sem_base[sops->sem_num]; + + if (alter) { + if (sma->complex_count) { + list_add_tail(&queue.list, + &sma->pending_alter); + } else { + + list_add_tail(&queue.list, + &curr->pending_alter); + } + } else { + list_add_tail(&queue.list, &curr->pending_const); + } + } else { + if (!sma->complex_count) + merge_queues(sma); + + if (alter) + list_add_tail(&queue.list, &sma->pending_alter); + else + list_add_tail(&queue.list, &sma->pending_const); + + sma->complex_count++; + } queue.status = -EINTR; queue.sleeper = current; + +sleep_again: current->state = TASK_INTERRUPTIBLE; - sem_unlock(sma); + sem_unlock(sma, locknum); + rcu_read_unlock(); if (timeout) jiffies_left = schedule_timeout(jiffies_left); else schedule(); - error = queue.status; - while(unlikely(error == IN_WAKEUP)) { - cpu_relax(); - error = queue.status; - } + error = get_queue_result(&queue); if (error != -EINTR) { /* fast path: update_queue already obtained all requested - * resources */ + * resources. + * Perform a smp_mb(): User space could assume that semop() + * is a memory barrier: Without the mb(), the cpu could + * speculatively read in user space stale data that was + * overwritten by the previous owner of the semaphore. + */ + smp_mb(); + goto out_free; } - sma = sem_lock(semid); - if(sma==NULL) { - if(queue.prev != NULL) - BUG(); - error = -EIDRM; + rcu_read_lock(); + sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum); + + /* + * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing. + */ + error = get_queue_result(&queue); + + /* + * Array removed? If yes, leave without sem_unlock(). + */ + if (IS_ERR(sma)) { + rcu_read_unlock(); goto out_free; } + /* - * If queue.status != -EINTR we are woken up by another process + * If queue.status != -EINTR we are woken up by another process. + * Leave without unlink_queue(), but with sem_unlock(). 
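+ * (The task that woke us already performed the unlink_queue() on our
+ * behalf before publishing the result, so only the locks need to be
+ * dropped here.)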
*/ - error = queue.status; - if (error != -EINTR) { + if (error != -EINTR) goto out_unlock_free; - } /* * If an interrupt occurred we have to clean up the queue */ if (timeout && jiffies_left == 0) error = -EAGAIN; - remove_from_queue(sma,&queue); - goto out_unlock_free; + + /* + * If the wakeup was spurious, just retry + */ + if (error == -EINTR && !signal_pending(current)) + goto sleep_again; + + unlink_queue(sma, &queue); out_unlock_free: - sem_unlock(sma); + sem_unlock(sma, locknum); +out_rcu_wakeup: + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); out_free: - if(sops != fast_sops) + if (sops != fast_sops) kfree(sops); return error; } -asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops) +SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops, + unsigned, nsops) { return sys_semtimedop(semid, tsops, nsops, NULL); } /* If CLONE_SYSVSEM is set, establish sharing of SEM_UNDO state between * parent and child tasks. - * - * See the notes above unlock_semundo() regarding the spin_lock_init() - * in this code. Initialize the undo_list->lock here instead of get_undo_list() - * because of the reasoning in the comment above unlock_semundo. */ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) @@ -1233,7 +2027,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) return error; atomic_inc(&undo_list->refcnt); tsk->sysvsem.undo_list = undo_list; - } else + } else tsk->sysvsem.undo_list = NULL; return 0; @@ -1253,53 +2047,72 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) */ void exit_sem(struct task_struct *tsk) { - struct sem_undo_list *undo_list; - struct sem_undo *u, **up; + struct sem_undo_list *ulp; - undo_list = tsk->sysvsem.undo_list; - if (!undo_list) + ulp = tsk->sysvsem.undo_list; + if (!ulp) return; + tsk->sysvsem.undo_list = NULL; - if (!atomic_dec_and_test(&undo_list->refcnt)) + if (!atomic_dec_and_test(&ulp->refcnt)) return; - /* There's no need to hold the semundo list lock, as current - * is the last task exiting for this undo list. - */ - for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) { + for (;;) { struct sem_array *sma; - int nsems, i; - struct sem_undo *un, **unp; - int semid; - - semid = u->semid; + struct sem_undo *un; + struct list_head tasks; + int semid, i; + + rcu_read_lock(); + un = list_entry_rcu(ulp->list_proc.next, + struct sem_undo, list_proc); + if (&un->list_proc == &ulp->list_proc) + semid = -1; + else + semid = un->semid; + + if (semid == -1) { + rcu_read_unlock(); + break; + } - if(semid == -1) + sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); + /* exit_sem raced with IPC_RMID, nothing to do */ + if (IS_ERR(sma)) { + rcu_read_unlock(); continue; - sma = sem_lock(semid); - if (sma == NULL) + } + + sem_lock(sma, NULL, -1); + /* exit_sem raced with IPC_RMID, nothing to do */ + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); + continue; + } + un = __lookup_undo(ulp, semid); + if (un == NULL) { + /* exit_sem raced with IPC_RMID+semget() that created + * exactly the same semid. Nothing to do. 
+ */ + sem_unlock(sma, -1); + rcu_read_unlock(); continue; + } - if (u->semid == -1) - goto next_entry; + /* remove un from the linked lists */ + ipc_assert_locked_object(&sma->sem_perm); + list_del(&un->list_id); - BUG_ON(sem_checkid(sma,u->semid)); + spin_lock(&ulp->lock); + list_del_rcu(&un->list_proc); + spin_unlock(&ulp->lock); - /* remove u from the sma->undo list */ - for (unp = &sma->undo; (un = *unp); unp = &un->id_next) { - if (u == un) - goto found; - } - printk ("exit_sem undo list error id=%d\n", u->semid); - goto next_entry; -found: - *unp = un->id_next; - /* perform adjustments registered in u */ - nsems = sma->sem_nsems; - for (i = 0; i < nsems; i++) { - struct sem * sem = &sma->sem_base[i]; - if (u->semadj[i]) { - sem->semval += u->semadj[i]; + /* perform adjustments registered in un */ + for (i = 0; i < sma->sem_nsems; i++) { + struct sem *semaphore = &sma->sem_base[i]; + if (un->semadj[i]) { + semaphore->semval += un->semadj[i]; /* * Range checks of the new semaphore value, * not defined by sus: @@ -1311,40 +2124,55 @@ found: * Linux caps the semaphore value, both at 0 * and at SEMVMX. * - * Manfred <manfred@colorfullife.com> + * Manfred <manfred@colorfullife.com> */ - if (sem->semval < 0) - sem->semval = 0; - if (sem->semval > SEMVMX) - sem->semval = SEMVMX; - sem->sempid = current->tgid; + if (semaphore->semval < 0) + semaphore->semval = 0; + if (semaphore->semval > SEMVMX) + semaphore->semval = SEMVMX; + semaphore->sempid = task_tgid_vnr(current); } } - sma->sem_otime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - update_queue(sma); -next_entry: - sem_unlock(sma); + INIT_LIST_HEAD(&tasks); + do_smart_update(sma, NULL, 0, 1, &tasks); + sem_unlock(sma, -1); + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); + + kfree_rcu(un, rcu); } - kfree(undo_list); + kfree(ulp); } #ifdef CONFIG_PROC_FS static int sysvipc_sem_proc_show(struct seq_file *s, void *it) { + struct user_namespace *user_ns = seq_user_ns(s); struct sem_array *sma = it; + time_t sem_otime; + + /* + * The proc interface isn't aware of sem_lock(), it calls + * ipc_lock_object() directly (in sysvipc_find_ipc). + * In order to stay compatible with sem_lock(), we must wait until + * all simple semop() calls have left their critical regions. + */ + sem_wait_array(sma); + + sem_otime = get_semotime(sma); return seq_printf(s, - "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n", + "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n", sma->sem_perm.key, - sma->sem_id, + sma->sem_perm.id, sma->sem_perm.mode, sma->sem_nsems, - sma->sem_perm.uid, - sma->sem_perm.gid, - sma->sem_perm.cuid, - sma->sem_perm.cgid, - sma->sem_otime, + from_kuid_munged(user_ns, sma->sem_perm.uid), + from_kgid_munged(user_ns, sma->sem_perm.gid), + from_kuid_munged(user_ns, sma->sem_perm.cuid), + from_kgid_munged(user_ns, sma->sem_perm.cgid), + sem_otime, sma->sem_ctime); } #endif diff --git a/ipc/shm.c b/ipc/shm.c index 587d836d80d..89fc354156c 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -13,9 +13,17 @@ * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com> * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com> * + * support for audit of ipc object properties and permission changes + * Dustin Kirkland <dustin.kirkland@us.ibm.com> + * + * namespaces support + * OpenVZ, SWsoft Inc. + * Pavel Emelianov <xemul@openvz.org> + * + * Better ipc lock (kern_ipc_perm.lock) handling + * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. 
*/ -#include <linux/config.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/hugetlb.h> @@ -27,169 +35,469 @@ #include <linux/security.h> #include <linux/syscalls.h> #include <linux/audit.h> +#include <linux/capability.h> #include <linux/ptrace.h> #include <linux/seq_file.h> +#include <linux/rwsem.h> +#include <linux/nsproxy.h> +#include <linux/mount.h> +#include <linux/ipc_namespace.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" -#define shm_flags shm_perm.mode +struct shm_file_data { + int id; + struct ipc_namespace *ns; + struct file *file; + const struct vm_operations_struct *vm_ops; +}; + +#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) -static struct file_operations shm_file_operations; -static struct vm_operations_struct shm_vm_ops; +static const struct file_operations shm_file_operations; +static const struct vm_operations_struct shm_vm_ops; -static struct ipc_ids shm_ids; +#define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) -#define shm_lock(id) ((struct shmid_kernel*)ipc_lock(&shm_ids,id)) -#define shm_unlock(shp) ipc_unlock(&(shp)->shm_perm) -#define shm_get(id) ((struct shmid_kernel*)ipc_get(&shm_ids,id)) -#define shm_buildid(id, seq) \ - ipc_buildid(&shm_ids, id, seq) +#define shm_unlock(shp) \ + ipc_unlock(&(shp)->shm_perm) -static int newseg (key_t key, int shmflg, size_t size); -static void shm_open (struct vm_area_struct *shmd); -static void shm_close (struct vm_area_struct *shmd); +static int newseg(struct ipc_namespace *, struct ipc_params *); +static void shm_open(struct vm_area_struct *vma); +static void shm_close(struct vm_area_struct *vma); +static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif -size_t shm_ctlmax = SHMMAX; -size_t shm_ctlall = SHMALL; -int shm_ctlmni = SHMMNI; +void shm_init_ns(struct ipc_namespace *ns) +{ + ns->shm_ctlmax = SHMMAX; + ns->shm_ctlall = SHMALL; + ns->shm_ctlmni = SHMMNI; + ns->shm_rmid_forced = 0; + ns->shm_tot = 0; + ipc_init_ids(&shm_ids(ns)); +} -static int shm_tot; /* total number of shared memory pages */ +/* + * Called with shm_ids.rwsem (writer) and the shp structure locked. + * Only shm_ids.rwsem remains locked on exit. 
+ */ +static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) +{ + struct shmid_kernel *shp; + shp = container_of(ipcp, struct shmid_kernel, shm_perm); -void __init shm_init (void) + if (shp->shm_nattch) { + shp->shm_perm.mode |= SHM_DEST; + /* Do not find it any more */ + shp->shm_perm.key = IPC_PRIVATE; + shm_unlock(shp); + } else + shm_destroy(ns, shp); +} + +#ifdef CONFIG_IPC_NS +void shm_exit_ns(struct ipc_namespace *ns) { - ipc_init_ids(&shm_ids, 1); - ipc_init_proc_interface("sysvipc/shm", - " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n", - &shm_ids, - sysvipc_shm_proc_show); + free_ipcs(ns, &shm_ids(ns), do_shm_rmid); + idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); } +#endif -static inline int shm_checkid(struct shmid_kernel *s, int id) +static int __init ipc_ns_init(void) { - if (ipc_checkid(&shm_ids,&s->shm_perm,id)) - return -EIDRM; + shm_init_ns(&init_ipc_ns); return 0; } -static inline struct shmid_kernel *shm_rmid(int id) +pure_initcall(ipc_ns_init); + +void __init shm_init(void) { - return (struct shmid_kernel *)ipc_rmid(&shm_ids,id); + ipc_init_proc_interface("sysvipc/shm", +#if BITS_PER_LONG <= 32 + " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", +#else + " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", +#endif + IPC_SHM_IDS, sysvipc_shm_proc_show); } -static inline int shm_addid(struct shmid_kernel *shp) +static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { - return ipc_addid(&shm_ids, &shp->shm_perm, shm_ctlmni); + struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); + + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); + + return container_of(ipcp, struct shmid_kernel, shm_perm); } +static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) +{ + struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); -static inline void shm_inc (int id) { - struct shmid_kernel *shp; + return container_of(ipcp, struct shmid_kernel, shm_perm); +} - if(!(shp = shm_lock(id))) - BUG(); - shp->shm_atim = get_seconds(); - shp->shm_lprid = current->tgid; - shp->shm_nattch++; - shm_unlock(shp); +/* + * shm_lock_(check_) routines are called in the paths where the rwsem + * is not necessarily held. + */ +static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) +{ + struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id); + + if (IS_ERR(ipcp)) + return (struct shmid_kernel *)ipcp; + + return container_of(ipcp, struct shmid_kernel, shm_perm); +} + +static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) +{ + rcu_read_lock(); + ipc_lock_object(&ipcp->shm_perm); +} + +static void shm_rcu_free(struct rcu_head *head) +{ + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + struct shmid_kernel *shp = ipc_rcu_to_struct(p); + + security_shm_free(shp); + ipc_rcu_free(head); } +static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) +{ + ipc_rmid(&shm_ids(ns), &s->shm_perm); +} + + /* This is called by fork, once for every shm attach. 
*/ -static void shm_open (struct vm_area_struct *shmd) +static void shm_open(struct vm_area_struct *vma) { - shm_inc (shmd->vm_file->f_dentry->d_inode->i_ino); + struct file *file = vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); + struct shmid_kernel *shp; + + shp = shm_lock(sfd->ns, sfd->id); + BUG_ON(IS_ERR(shp)); + shp->shm_atim = get_seconds(); + shp->shm_lprid = task_tgid_vnr(current); + shp->shm_nattch++; + shm_unlock(shp); } /* * shm_destroy - free the struct shmid_kernel * + * @ns: namespace * @shp: struct to free * - * It has to be called with shp and shm_ids.sem locked, + * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. */ -static void shm_destroy (struct shmid_kernel *shp) +static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { - shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; - shm_rmid (shp->id); + struct file *shm_file; + + shm_file = shp->shm_file; + shp->shm_file = NULL; + ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; + shm_rmid(ns, shp); shm_unlock(shp); - if (!is_file_hugepages(shp->shm_file)) - shmem_lock(shp->shm_file, 0, shp->mlock_user); - else - user_shm_unlock(shp->shm_file->f_dentry->d_inode->i_size, - shp->mlock_user); - fput (shp->shm_file); - security_shm_free(shp); - ipc_rcu_putref(shp); + if (!is_file_hugepages(shm_file)) + shmem_lock(shm_file, 0, shp->mlock_user); + else if (shp->mlock_user) + user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user); + fput(shm_file); + ipc_rcu_putref(shp, shm_rcu_free); } /* - * remove the attach descriptor shmd. + * shm_may_destroy - identifies whether shm segment should be destroyed now + * + * Returns true if and only if there are no active users of the segment and + * one of the following is true: + * + * 1) shmctl(id, IPC_RMID, NULL) was called for this shp + * + * 2) sysctl kernel.shm_rmid_forced is set to 1. + */ +static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) +{ + return (shp->shm_nattch == 0) && + (ns->shm_rmid_forced || + (shp->shm_perm.mode & SHM_DEST)); +} + +/* + * remove the attach descriptor vma. * free memory for segment if it is marked destroyed. * The descriptor has already been removed from the current->mm->mmap list * and will later be kfree()d. 
*/ -static void shm_close (struct vm_area_struct *shmd) +static void shm_close(struct vm_area_struct *vma) { - struct file * file = shmd->vm_file; - int id = file->f_dentry->d_inode->i_ino; + struct file *file = vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; + struct ipc_namespace *ns = sfd->ns; - down (&shm_ids.sem); + down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ - if(!(shp = shm_lock(id))) - BUG(); - shp->shm_lprid = current->tgid; + shp = shm_lock(ns, sfd->id); + BUG_ON(IS_ERR(shp)); + shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; - if(shp->shm_nattch == 0 && - shp->shm_flags & SHM_DEST) - shm_destroy (shp); + if (shm_may_destroy(ns, shp)) + shm_destroy(ns, shp); else shm_unlock(shp); - up (&shm_ids.sem); + up_write(&shm_ids(ns).rwsem); } -static int shm_mmap(struct file * file, struct vm_area_struct * vma) +/* Called with ns->shm_ids(ns).rwsem locked */ +static int shm_try_destroy_current(int id, void *p, void *data) { - file_accessed(file); + struct ipc_namespace *ns = data; + struct kern_ipc_perm *ipcp = p; + struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); + + if (shp->shm_creator != current) + return 0; + + /* + * Mark it as orphaned to destroy the segment when + * kernel.shm_rmid_forced is changed. + * It is noop if the following shm_may_destroy() returns true. + */ + shp->shm_creator = NULL; + + /* + * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID + * is not set, it shouldn't be deleted here. + */ + if (!ns->shm_rmid_forced) + return 0; + + if (shm_may_destroy(ns, shp)) { + shm_lock_by_ptr(shp); + shm_destroy(ns, shp); + } + return 0; +} + +/* Called with ns->shm_ids(ns).rwsem locked */ +static int shm_try_destroy_orphaned(int id, void *p, void *data) +{ + struct ipc_namespace *ns = data; + struct kern_ipc_perm *ipcp = p; + struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); + + /* + * We want to destroy segments without users and with already + * exit'ed originating process. + * + * As shp->* are changed under rwsem, it's safe to skip shp locking. 
+ */ + if (shp->shm_creator != NULL) + return 0; + + if (shm_may_destroy(ns, shp)) { + shm_lock_by_ptr(shp); + shm_destroy(ns, shp); + } + return 0; +} + +void shm_destroy_orphaned(struct ipc_namespace *ns) +{ + down_write(&shm_ids(ns).rwsem); + if (shm_ids(ns).in_use) + idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); + up_write(&shm_ids(ns).rwsem); +} + + +void exit_shm(struct task_struct *task) +{ + struct ipc_namespace *ns = task->nsproxy->ipc_ns; + + if (shm_ids(ns).in_use == 0) + return; + + /* Destroy all already created segments, but not mapped yet */ + down_write(&shm_ids(ns).rwsem); + if (shm_ids(ns).in_use) + idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns); + up_write(&shm_ids(ns).rwsem); +} + +static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) +{ + struct file *file = vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); + + return sfd->vm_ops->fault(vma, vmf); +} + +#ifdef CONFIG_NUMA +static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new) +{ + struct file *file = vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); + int err = 0; + if (sfd->vm_ops->set_policy) + err = sfd->vm_ops->set_policy(vma, new); + return err; +} + +static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, + unsigned long addr) +{ + struct file *file = vma->vm_file; + struct shm_file_data *sfd = shm_file_data(file); + struct mempolicy *pol = NULL; + + if (sfd->vm_ops->get_policy) + pol = sfd->vm_ops->get_policy(vma, addr); + else if (vma->vm_policy) + pol = vma->vm_policy; + + return pol; +} +#endif + +static int shm_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct shm_file_data *sfd = shm_file_data(file); + int ret; + + ret = sfd->file->f_op->mmap(sfd->file, vma); + if (ret != 0) + return ret; + sfd->vm_ops = vma->vm_ops; +#ifdef CONFIG_MMU + BUG_ON(!sfd->vm_ops->fault); +#endif vma->vm_ops = &shm_vm_ops; - shm_inc(file->f_dentry->d_inode->i_ino); + shm_open(vma); + + return ret; +} + +static int shm_release(struct inode *ino, struct file *file) +{ + struct shm_file_data *sfd = shm_file_data(file); + + put_ipc_ns(sfd->ns); + shm_file_data(file) = NULL; + kfree(sfd); return 0; } -static struct file_operations shm_file_operations = { - .mmap = shm_mmap +static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) +{ + struct shm_file_data *sfd = shm_file_data(file); + + if (!sfd->file->f_op->fsync) + return -EINVAL; + return sfd->file->f_op->fsync(sfd->file, start, end, datasync); +} + +static long shm_fallocate(struct file *file, int mode, loff_t offset, + loff_t len) +{ + struct shm_file_data *sfd = shm_file_data(file); + + if (!sfd->file->f_op->fallocate) + return -EOPNOTSUPP; + return sfd->file->f_op->fallocate(file, mode, offset, len); +} + +static unsigned long shm_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags) +{ + struct shm_file_data *sfd = shm_file_data(file); + return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, + pgoff, flags); +} + +static const struct file_operations shm_file_operations = { + .mmap = shm_mmap, + .fsync = shm_fsync, + .release = shm_release, +#ifndef CONFIG_MMU + .get_unmapped_area = shm_get_unmapped_area, +#endif + .llseek = noop_llseek, + .fallocate = shm_fallocate, +}; + +static const struct file_operations shm_file_operations_huge = { + .mmap = shm_mmap, + .fsync = shm_fsync, + .release = shm_release, + .get_unmapped_area = 
shm_get_unmapped_area, + .llseek = noop_llseek, + .fallocate = shm_fallocate, }; -static struct vm_operations_struct shm_vm_ops = { +int is_file_shm_hugepages(struct file *file) +{ + return file->f_op == &shm_file_operations_huge; +} + +static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ - .nopage = shmem_nopage, -#if defined(CONFIG_NUMA) && defined(CONFIG_SHMEM) - .set_policy = shmem_set_policy, - .get_policy = shmem_get_policy, + .fault = shm_fault, +#if defined(CONFIG_NUMA) + .set_policy = shm_set_policy, + .get_policy = shm_get_policy, #endif }; -static int newseg (key_t key, int shmflg, size_t size) +/** + * newseg - Create a new shared memory segment + * @ns: namespace + * @params: ptr to the structure that contains key, size and shmflg + * + * Called with shm_ids.rwsem held as a writer. + */ +static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { + key_t key = params->key; + int shmflg = params->flg; + size_t size = params->u.size; int error; struct shmid_kernel *shp; - int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT; - struct file * file; + size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + struct file *file; char name[13]; int id; + vm_flags_t acctflag = 0; - if (size < SHMMIN || size > shm_ctlmax) + if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; - if (shm_tot + numpages >= shm_ctlall) + if (numpages << PAGE_SHIFT < size) + return -ENOSPC; + + if (ns->shm_tot + numpages < ns->shm_tot || + ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; shp = ipc_rcu_alloc(sizeof(*shp)); @@ -197,112 +505,140 @@ static int newseg (key_t key, int shmflg, size_t size) return -ENOMEM; shp->shm_perm.key = key; - shp->shm_flags = (shmflg & S_IRWXUGO); + shp->shm_perm.mode = (shmflg & S_IRWXUGO); shp->mlock_user = NULL; shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { - ipc_rcu_putref(shp); + ipc_rcu_putref(shp, ipc_rcu_free); return error; } + sprintf(name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { - /* hugetlb_zero_setup takes care of mlock user accounting */ - file = hugetlb_zero_setup(size); - shp->mlock_user = current->user; + struct hstate *hs; + size_t hugesize; + + hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); + if (!hs) { + error = -EINVAL; + goto no_file; + } + hugesize = ALIGN(size, huge_page_size(hs)); + + /* hugetlb_file_setup applies strict accounting */ + if (shmflg & SHM_NORESERVE) + acctflag = VM_NORESERVE; + file = hugetlb_file_setup(name, hugesize, acctflag, + &shp->mlock_user, HUGETLB_SHMFS_INODE, + (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { - int acctflag = VM_ACCOUNT; /* * Do not allow no accounting for OVERCOMMIT_NEVER, even - * if it's asked for. + * if it's asked for. 
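+ * E.g. shmget(key, size, IPC_CREAT | SHM_NORESERVE | 0600) reaches this
+ * point and maps to acctflag = VM_NORESERVE below, unless
+ * vm.overcommit_memory is set to OVERCOMMIT_NEVER (2).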
*/ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) - acctflag = 0; - sprintf (name, "SYSV%08x", key); + acctflag = VM_NORESERVE; file = shmem_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; - error = -ENOSPC; - id = shm_addid(shp); - if(id == -1) + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); + if (id < 0) { + error = id; goto no_id; + } - shp->shm_cprid = current->tgid; + shp->shm_cprid = task_tgid_vnr(current); shp->shm_lprid = 0; shp->shm_atim = shp->shm_dtim = 0; shp->shm_ctim = get_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; - shp->id = shm_buildid(id,shp->shm_perm.seq); shp->shm_file = file; - file->f_dentry->d_inode->i_ino = shp->id; + shp->shm_creator = current; - /* Hugetlb ops would have already been assigned. */ - if (!(shmflg & SHM_HUGETLB)) - file->f_op = &shm_file_operations; + /* + * shmid gets reported as "inode#" in /proc/pid/maps. + * proc-ps tools use this. Changing this will break them. + */ + file_inode(file)->i_ino = shp->shm_perm.id; - shm_tot += numpages; - shm_unlock(shp); - return shp->id; + ns->shm_tot += numpages; + error = shp->shm_perm.id; + + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); + return error; no_id: + if (is_file_hugepages(file) && shp->mlock_user) + user_shm_unlock(size, shp->mlock_user); fput(file); no_file: - security_shm_free(shp); - ipc_rcu_putref(shp); + ipc_rcu_putref(shp, shm_rcu_free); return error; } -asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) +/* + * Called with shm_ids.rwsem and ipcp locked. + */ +static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) { struct shmid_kernel *shp; - int err, id = 0; - - down(&shm_ids.sem); - if (key == IPC_PRIVATE) { - err = newseg(key, shmflg, size); - } else if ((id = ipc_findkey(&shm_ids, key)) == -1) { - if (!(shmflg & IPC_CREAT)) - err = -ENOENT; - else - err = newseg(key, shmflg, size); - } else if ((shmflg & IPC_CREAT) && (shmflg & IPC_EXCL)) { - err = -EEXIST; - } else { - shp = shm_lock(id); - if(shp==NULL) - BUG(); - if (shp->shm_segsz < size) - err = -EINVAL; - else if (ipcperms(&shp->shm_perm, shmflg)) - err = -EACCES; - else { - int shmid = shm_buildid(id, shp->shm_perm.seq); - err = security_shm_associate(shp, shmflg); - if (!err) - err = shmid; - } - shm_unlock(shp); - } - up(&shm_ids.sem); - return err; + shp = container_of(ipcp, struct shmid_kernel, shm_perm); + return security_shm_associate(shp, shmflg); +} + +/* + * Called with shm_ids.rwsem and ipcp locked. 
+ */ +static inline int shm_more_checks(struct kern_ipc_perm *ipcp, + struct ipc_params *params) +{ + struct shmid_kernel *shp; + + shp = container_of(ipcp, struct shmid_kernel, shm_perm); + if (shp->shm_segsz < params->u.size) + return -EINVAL; + + return 0; +} + +SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) +{ + struct ipc_namespace *ns; + static const struct ipc_ops shm_ops = { + .getnew = newseg, + .associate = shm_security, + .more_checks = shm_more_checks, + }; + struct ipc_params shm_params; + + ns = current->nsproxy->ipc_ns; + + shm_params.key = key; + shm_params.flg = shmflg; + shm_params.u.size = size; + + return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params); } static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; + memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; @@ -319,28 +655,14 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ } } -struct shm_setbuf { - uid_t uid; - gid_t gid; - mode_t mode; -}; - -static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version) +static inline unsigned long +copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: - { - struct shmid64_ds tbuf; - - if (copy_from_user(&tbuf, buf, sizeof(tbuf))) + if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; - - out->uid = tbuf.shm_perm.uid; - out->gid = tbuf.shm_perm.gid; - out->mode = tbuf.shm_flags; - return 0; - } case IPC_OLD: { struct shmid_ds tbuf_old; @@ -348,9 +670,9 @@ static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __ if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->uid = tbuf_old.shm_perm.uid; - out->gid = tbuf_old.shm_perm.gid; - out->mode = tbuf_old.shm_flags; + out->shm_perm.uid = tbuf_old.shm_perm.uid; + out->shm_perm.gid = tbuf_old.shm_perm.gid; + out->shm_perm.mode = tbuf_old.shm_perm.mode; return 0; } @@ -361,14 +683,14 @@ static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shminfo out; - if(in->shmmax > INT_MAX) + if (in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; @@ -376,7 +698,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; - out.shmall = in->shmall; + out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } @@ -385,69 +707,156 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf } } -static void shm_get_stat(unsigned long *rss, unsigned long *swp) +/* + * Calculate and add used RSS and swap pages of a shm. 
+ * Called with shm_ids.rwsem held as a reader + */ +static void shm_add_rss_swap(struct shmid_kernel *shp, + unsigned long *rss_add, unsigned long *swp_add) +{ + struct inode *inode; + + inode = file_inode(shp->shm_file); + + if (is_file_hugepages(shp->shm_file)) { + struct address_space *mapping = inode->i_mapping; + struct hstate *h = hstate_file(shp->shm_file); + *rss_add += pages_per_huge_page(h) * mapping->nrpages; + } else { +#ifdef CONFIG_SHMEM + struct shmem_inode_info *info = SHMEM_I(inode); + spin_lock(&info->lock); + *rss_add += inode->i_mapping->nrpages; + *swp_add += info->swapped; + spin_unlock(&info->lock); +#else + *rss_add += inode->i_mapping->nrpages; +#endif + } +} + +/* + * Called with shm_ids.rwsem held as a reader + */ +static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, + unsigned long *swp) { - int i; + int next_id; + int total, in_use; *rss = 0; *swp = 0; - for (i = 0; i <= shm_ids.max_id; i++) { + in_use = shm_ids(ns).in_use; + + for (total = 0, next_id = 0; total < in_use; next_id++) { + struct kern_ipc_perm *ipc; struct shmid_kernel *shp; - struct inode *inode; - shp = shm_get(i); - if(!shp) + ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); + if (ipc == NULL) continue; + shp = container_of(ipc, struct shmid_kernel, shm_perm); - inode = shp->shm_file->f_dentry->d_inode; + shm_add_rss_swap(shp, rss, swp); - if (is_file_hugepages(shp->shm_file)) { - struct address_space *mapping = inode->i_mapping; - *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages; - } else { - struct shmem_inode_info *info = SHMEM_I(inode); - spin_lock(&info->lock); - *rss += inode->i_mapping->nrpages; - *swp += info->swapped; - spin_unlock(&info->lock); - } + total++; } } -asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) +/* + * This function handles some shmctl commands which require the rwsem + * to be held in write mode. + * NOTE: no locks must be held, the rwsem is taken inside this function. + */ +static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, + struct shmid_ds __user *buf, int version) { - struct shm_setbuf setbuf; + struct kern_ipc_perm *ipcp; + struct shmid64_ds shmid64; struct shmid_kernel *shp; - int err, version; + int err; + + if (cmd == IPC_SET) { + if (copy_shmid_from_user(&shmid64, buf, version)) + return -EFAULT; + } - if (cmd < 0 || shmid < 0) { + down_write(&shm_ids(ns).rwsem); + rcu_read_lock(); + + ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd, + &shmid64.shm_perm, 0); + if (IS_ERR(ipcp)) { + err = PTR_ERR(ipcp); + goto out_unlock1; + } + + shp = container_of(ipcp, struct shmid_kernel, shm_perm); + + err = security_shm_shmctl(shp, cmd); + if (err) + goto out_unlock1; + + switch (cmd) { + case IPC_RMID: + ipc_lock_object(&shp->shm_perm); + /* do_shm_rmid unlocks the ipc object and rcu */ + do_shm_rmid(ns, ipcp); + goto out_up; + case IPC_SET: + ipc_lock_object(&shp->shm_perm); + err = ipc_update_perm(&shmid64.shm_perm, ipcp); + if (err) + goto out_unlock0; + shp->shm_ctim = get_seconds(); + break; + default: err = -EINVAL; - goto out; + goto out_unlock1; } - version = ipc_parse_version(&cmd); +out_unlock0: + ipc_unlock_object(&shp->shm_perm); +out_unlock1: + rcu_read_unlock(); +out_up: + up_write(&shm_ids(ns).rwsem); + return err; +} - switch (cmd) { /* replace with proc interface ? 
*/ - case IPC_INFO: - { - struct shminfo64 shminfo; +static int shmctl_nolock(struct ipc_namespace *ns, int shmid, + int cmd, int version, void __user *buf) +{ + int err; + struct shmid_kernel *shp; + /* preliminary security checks for *_INFO */ + if (cmd == IPC_INFO || cmd == SHM_INFO) { err = security_shm_shmctl(NULL, cmd); if (err) return err; + } + + switch (cmd) { + case IPC_INFO: + { + struct shminfo64 shminfo; - memset(&shminfo,0,sizeof(shminfo)); - shminfo.shmmni = shminfo.shmseg = shm_ctlmni; - shminfo.shmmax = shm_ctlmax; - shminfo.shmall = shm_ctlall; + memset(&shminfo, 0, sizeof(shminfo)); + shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; + shminfo.shmmax = ns->shm_ctlmax; + shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; - if(copy_shminfo_to_user (buf, &shminfo, version)) + if (copy_shminfo_to_user(buf, &shminfo, version)) return -EFAULT; - /* reading a integer is always atomic */ - err= shm_ids.max_id; - if(err<0) + + down_read(&shm_ids(ns).rwsem); + err = ipc_get_maxid(&shm_ids(ns)); + up_read(&shm_ids(ns).rwsem); + + if (err < 0) err = 0; goto out; } @@ -455,20 +864,16 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) { struct shm_info shm_info; - err = security_shm_shmctl(NULL, cmd); - if (err) - return err; - - memset(&shm_info,0,sizeof(shm_info)); - down(&shm_ids.sem); - shm_info.used_ids = shm_ids.in_use; - shm_get_stat (&shm_info.shm_rss, &shm_info.shm_swp); - shm_info.shm_tot = shm_tot; + memset(&shm_info, 0, sizeof(shm_info)); + down_read(&shm_ids(ns).rwsem); + shm_info.used_ids = shm_ids(ns).in_use; + shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp); + shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; - err = shm_ids.max_id; - up(&shm_ids.sem); - if(copy_to_user (buf, &shm_info, sizeof(shm_info))) { + err = ipc_get_maxid(&shm_ids(ns)); + up_read(&shm_ids(ns).rwsem); + if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; } @@ -481,28 +886,33 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) { struct shmid64_ds tbuf; int result; - memset(&tbuf, 0, sizeof(tbuf)); - shp = shm_lock(shmid); - if(shp==NULL) { - err = -EINVAL; - goto out; - } else if(cmd==SHM_STAT) { - err = -EINVAL; - if (shmid > shm_ids.max_id) + + rcu_read_lock(); + if (cmd == SHM_STAT) { + shp = shm_obtain_object(ns, shmid); + if (IS_ERR(shp)) { + err = PTR_ERR(shp); goto out_unlock; - result = shm_buildid(shmid, shp->shm_perm.seq); + } + result = shp->shm_perm.id; } else { - err = shm_checkid(shp,shmid); - if(err) + shp = shm_obtain_object_check(ns, shmid); + if (IS_ERR(shp)) { + err = PTR_ERR(shp); goto out_unlock; + } result = 0; } - err=-EACCES; - if (ipcperms (&shp->shm_perm, S_IRUGO)) + + err = -EACCES; + if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) goto out_unlock; + err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock; + + memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); tbuf.shm_segsz = shp->shm_segsz; tbuf.shm_atime = shp->shm_atim; @@ -510,152 +920,120 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) tbuf.shm_ctime = shp->shm_ctim; tbuf.shm_cpid = shp->shm_cprid; tbuf.shm_lpid = shp->shm_lprid; - if (!is_file_hugepages(shp->shm_file)) - tbuf.shm_nattch = shp->shm_nattch; - else - tbuf.shm_nattch = file_count(shp->shm_file) - 1; - shm_unlock(shp); - if(copy_shmid_to_user (buf, &tbuf, version)) + tbuf.shm_nattch = shp->shm_nattch; + rcu_read_unlock(); + + if 
(copy_shmid_to_user(buf, &tbuf, version)) err = -EFAULT; else err = result; goto out; } + default: + return -EINVAL; + } + +out_unlock: + rcu_read_unlock(); +out: + return err; +} + +SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) +{ + struct shmid_kernel *shp; + int err, version; + struct ipc_namespace *ns; + + if (cmd < 0 || shmid < 0) + return -EINVAL; + + version = ipc_parse_version(&cmd); + ns = current->nsproxy->ipc_ns; + + switch (cmd) { + case IPC_INFO: + case SHM_INFO: + case SHM_STAT: + case IPC_STAT: + return shmctl_nolock(ns, shmid, cmd, version, buf); + case IPC_RMID: + case IPC_SET: + return shmctl_down(ns, shmid, cmd, buf, version); case SHM_LOCK: case SHM_UNLOCK: { - shp = shm_lock(shmid); - if(shp==NULL) { - err = -EINVAL; - goto out; - } - err = shm_checkid(shp,shmid); - if(err) - goto out_unlock; + struct file *shm_file; - if (!capable(CAP_IPC_LOCK)) { - err = -EPERM; - if (current->euid != shp->shm_perm.uid && - current->euid != shp->shm_perm.cuid) - goto out_unlock; - if (cmd == SHM_LOCK && - !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) - goto out_unlock; + rcu_read_lock(); + shp = shm_obtain_object_check(ns, shmid); + if (IS_ERR(shp)) { + err = PTR_ERR(shp); + goto out_unlock1; } + audit_ipc_obj(&(shp->shm_perm)); err = security_shm_shmctl(shp, cmd); if (err) - goto out_unlock; - - if(cmd==SHM_LOCK) { - struct user_struct * user = current->user; - if (!is_file_hugepages(shp->shm_file)) { - err = shmem_lock(shp->shm_file, 1, user); - if (!err) { - shp->shm_flags |= SHM_LOCKED; - shp->mlock_user = user; - } - } - } else if (!is_file_hugepages(shp->shm_file)) { - shmem_lock(shp->shm_file, 0, shp->mlock_user); - shp->shm_flags &= ~SHM_LOCKED; - shp->mlock_user = NULL; + goto out_unlock1; + + ipc_lock_object(&shp->shm_perm); + + /* check if shm_destroy() is tearing down shp */ + if (!ipc_valid_object(&shp->shm_perm)) { + err = -EIDRM; + goto out_unlock0; } - shm_unlock(shp); - goto out; - } - case IPC_RMID: - { - /* - * We cannot simply remove the file. The SVID states - * that the block remains until the last person - * detaches from it, then is deleted. A shmat() on - * an RMID segment is legal in older Linux and if - * we change it apps break... - * - * Instead we set a destroyed flag, and then blow - * the name away when the usage hits zero. 
- */ - down(&shm_ids.sem); - shp = shm_lock(shmid); - err = -EINVAL; - if (shp == NULL) - goto out_up; - err = shm_checkid(shp, shmid); - if(err) - goto out_unlock_up; - - if (current->euid != shp->shm_perm.uid && - current->euid != shp->shm_perm.cuid && - !capable(CAP_SYS_ADMIN)) { - err=-EPERM; - goto out_unlock_up; + + if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { + kuid_t euid = current_euid(); + if (!uid_eq(euid, shp->shm_perm.uid) && + !uid_eq(euid, shp->shm_perm.cuid)) { + err = -EPERM; + goto out_unlock0; + } + if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) { + err = -EPERM; + goto out_unlock0; + } } - err = security_shm_shmctl(shp, cmd); - if (err) - goto out_unlock_up; - - if (shp->shm_nattch){ - shp->shm_flags |= SHM_DEST; - /* Do not find it any more */ - shp->shm_perm.key = IPC_PRIVATE; - shm_unlock(shp); - } else - shm_destroy (shp); - up(&shm_ids.sem); - goto out; - } + shm_file = shp->shm_file; + if (is_file_hugepages(shm_file)) + goto out_unlock0; - case IPC_SET: - { - if (copy_shmid_from_user (&setbuf, buf, version)) { - err = -EFAULT; - goto out; - } - if ((err = audit_ipc_perms(0, setbuf.uid, setbuf.gid, setbuf.mode))) - return err; - down(&shm_ids.sem); - shp = shm_lock(shmid); - err=-EINVAL; - if(shp==NULL) - goto out_up; - err = shm_checkid(shp,shmid); - if(err) - goto out_unlock_up; - err=-EPERM; - if (current->euid != shp->shm_perm.uid && - current->euid != shp->shm_perm.cuid && - !capable(CAP_SYS_ADMIN)) { - goto out_unlock_up; + if (cmd == SHM_LOCK) { + struct user_struct *user = current_user(); + err = shmem_lock(shm_file, 1, user); + if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { + shp->shm_perm.mode |= SHM_LOCKED; + shp->mlock_user = user; + } + goto out_unlock0; } - err = security_shm_shmctl(shp, cmd); - if (err) - goto out_unlock_up; - - shp->shm_perm.uid = setbuf.uid; - shp->shm_perm.gid = setbuf.gid; - shp->shm_flags = (shp->shm_flags & ~S_IRWXUGO) - | (setbuf.mode & S_IRWXUGO); - shp->shm_ctim = get_seconds(); - break; + /* SHM_UNLOCK */ + if (!(shp->shm_perm.mode & SHM_LOCKED)) + goto out_unlock0; + shmem_lock(shm_file, 0, shp->mlock_user); + shp->shm_perm.mode &= ~SHM_LOCKED; + shp->mlock_user = NULL; + get_file(shm_file); + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); + shmem_unlock_mapping(shm_file->f_mapping); + + fput(shm_file); + return err; } - default: - err = -EINVAL; - goto out; + return -EINVAL; } - err = 0; -out_unlock_up: - shm_unlock(shp); -out_up: - up(&shm_ids.sem); - goto out; -out_unlock: - shm_unlock(shp); -out: +out_unlock0: + ipc_unlock_object(&shp->shm_perm); +out_unlock1: + rcu_read_unlock(); return err; } @@ -666,48 +1044,52 @@ out: * "raddr" thing points to kernel space, and there has to be a wrapper around * this. 
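*
* For example, with SHM_RND set an unaligned address is rounded down to
* the shmlba boundary below: shmat(id, (void *)0x40001234, SHM_RND)
* attaches at 0x40000000 on an architecture whose SHMLBA is 64 KiB
* (a sketch; SHMLBA is per-architecture).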
*/ -long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) +long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, + unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; unsigned long size; - struct file * file; + struct file *file; int err; unsigned long flags; unsigned long prot; - unsigned long o_flags; int acc_mode; - void *user_addr; - - if (shmid < 0) { - err = -EINVAL; + struct ipc_namespace *ns; + struct shm_file_data *sfd; + struct path path; + fmode_t f_mode; + unsigned long populate = 0; + + err = -EINVAL; + if (shmid < 0) goto out; - } else if ((addr = (ulong)shmaddr)) { - if (addr & (SHMLBA-1)) { + else if ((addr = (ulong)shmaddr)) { + if (addr & (shmlba - 1)) { if (shmflg & SHM_RND) - addr &= ~(SHMLBA-1); /* round down */ + addr &= ~(shmlba - 1); /* round down */ else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) #endif - return -EINVAL; + goto out; } flags = MAP_SHARED | MAP_FIXED; } else { if ((shmflg & SHM_REMAP)) - return -EINVAL; + goto out; flags = MAP_SHARED; } if (shmflg & SHM_RDONLY) { prot = PROT_READ; - o_flags = O_RDONLY; acc_mode = S_IRUGO; + f_mode = FMODE_READ; } else { prot = PROT_READ | PROT_WRITE; - o_flags = O_RDWR; acc_mode = S_IRUGO | S_IWUGO; + f_mode = FMODE_READ | FMODE_WRITE; } if (shmflg & SHM_EXEC) { prot |= PROT_EXEC; @@ -718,36 +1100,73 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) * We cannot rely on the fs check since SYSV IPC does have an * additional creator id... */ - shp = shm_lock(shmid); - if(shp == NULL) { - err = -EINVAL; - goto out; - } - err = shm_checkid(shp,shmid); - if (err) { - shm_unlock(shp); - goto out; - } - if (ipcperms(&shp->shm_perm, acc_mode)) { - shm_unlock(shp); - err = -EACCES; - goto out; + ns = current->nsproxy->ipc_ns; + rcu_read_lock(); + shp = shm_obtain_object_check(ns, shmid); + if (IS_ERR(shp)) { + err = PTR_ERR(shp); + goto out_unlock; } + err = -EACCES; + if (ipcperms(ns, &shp->shm_perm, acc_mode)) + goto out_unlock; + err = security_shm_shmat(shp, shmaddr, shmflg); - if (err) { - shm_unlock(shp); - return err; + if (err) + goto out_unlock; + + ipc_lock_object(&shp->shm_perm); + + /* check if shm_destroy() is tearing down shp */ + if (!ipc_valid_object(&shp->shm_perm)) { + ipc_unlock_object(&shp->shm_perm); + err = -EIDRM; + goto out_unlock; } - - file = shp->shm_file; - size = i_size_read(file->f_dentry->d_inode); + + path = shp->shm_file->f_path; + path_get(&path); shp->shm_nattch++; - shm_unlock(shp); + size = i_size_read(path.dentry->d_inode); + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); + + err = -ENOMEM; + sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); + if (!sfd) { + path_put(&path); + goto out_nattch; + } + + file = alloc_file(&path, f_mode, + is_file_hugepages(shp->shm_file) ? 
+ &shm_file_operations_huge : + &shm_file_operations); + err = PTR_ERR(file); + if (IS_ERR(file)) { + kfree(sfd); + path_put(&path); + goto out_nattch; + } + + file->private_data = sfd; + file->f_mapping = shp->shm_file->f_mapping; + sfd->id = shp->shm_perm.id; + sfd->ns = get_ipc_ns(ns); + sfd->file = shp->shm_file; + sfd->vm_ops = NULL; + + err = security_mmap_file(file, prot, flags); + if (err) + goto out_fput; down_write(¤t->mm->mmap_sem); if (addr && !(shmflg & SHM_REMAP)) { - user_addr = ERR_PTR(-EINVAL); + err = -EINVAL; + if (addr + size < addr) + goto invalid; + if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; /* @@ -758,37 +1177,44 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) addr > current->mm->start_stack - size - PAGE_SIZE * 5) goto invalid; } - - user_addr = (void*) do_mmap (file, addr, size, prot, flags, 0); + addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); + *raddr = addr; + err = 0; + if (IS_ERR_VALUE(addr)) + err = (long)addr; invalid: up_write(¤t->mm->mmap_sem); + if (populate) + mm_populate(addr, populate); - down (&shm_ids.sem); - if(!(shp = shm_lock(shmid))) - BUG(); +out_fput: + fput(file); + +out_nattch: + down_write(&shm_ids(ns).rwsem); + shp = shm_lock(ns, shmid); + BUG_ON(IS_ERR(shp)); shp->shm_nattch--; - if(shp->shm_nattch == 0 && - shp->shm_flags & SHM_DEST) - shm_destroy (shp); + if (shm_may_destroy(ns, shp)) + shm_destroy(ns, shp); else shm_unlock(shp); - up (&shm_ids.sem); + up_write(&shm_ids(ns).rwsem); + return err; - *raddr = (unsigned long) user_addr; - err = 0; - if (IS_ERR(user_addr)) - err = PTR_ERR(user_addr); +out_unlock: + rcu_read_unlock(); out: return err; } -asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg) +SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) { unsigned long ret; long err; - err = do_shmat(shmid, shmaddr, shmflg, &ret); + err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; force_successful_syscall_return(); @@ -799,13 +1225,19 @@ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg) * detach and kill segment if marked destroyed. * The work is done in shm_close. */ -asmlinkage long sys_shmdt(char __user *shmaddr) +SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *next; + struct vm_area_struct *vma; unsigned long addr = (unsigned long)shmaddr; - loff_t size = 0; int retval = -EINVAL; +#ifdef CONFIG_MMU + loff_t size = 0; + struct vm_area_struct *next; +#endif + + if (addr & ~PAGE_MASK) + return retval; down_write(&mm->mmap_sem); @@ -831,6 +1263,7 @@ asmlinkage long sys_shmdt(char __user *shmaddr) */ vma = find_vma(mm, addr); +#ifdef CONFIG_MMU while (vma) { next = vma->vm_next; @@ -839,11 +1272,11 @@ asmlinkage long sys_shmdt(char __user *shmaddr) * a fragment created by mprotect() and/or munmap(), or it * otherwise it starts at this address with no hassles. */ - if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) && + if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { - size = vma->vm_file->f_dentry->d_inode->i_size; + size = file_inode(vma->vm_file)->i_size; do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); /* * We discovered the size of the shm segment, so @@ -861,19 +1294,30 @@ asmlinkage long sys_shmdt(char __user *shmaddr) /* * We need look no further than the maximum address a fragment * could possibly have landed at. 
Also cast things to loff_t to - * prevent overflows and make comparisions vs. equal-width types. + * prevent overflows and make comparisons vs. equal-width types. */ + size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { next = vma->vm_next; /* finding a matching vma now does not alter retval */ - if ((vma->vm_ops == &shm_vm_ops || is_vm_hugetlb_page(vma)) && + if ((vma->vm_ops == &shm_vm_ops) && (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); vma = next; } +#else /* CONFIG_MMU */ + /* under NOMMU conditions, the exact address to be destroyed must be + * given */ + if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { + do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); + retval = 0; + } + +#endif + up_write(&mm->mmap_sem); return retval; } @@ -881,30 +1325,37 @@ asmlinkage long sys_shmdt(char __user *shmaddr) #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { + struct user_namespace *user_ns = seq_user_ns(s); struct shmid_kernel *shp = it; - char *format; + unsigned long rss = 0, swp = 0; -#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n" -#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n" + shm_add_rss_swap(shp, &rss, &swp); - if (sizeof(size_t) <= sizeof(int)) - format = SMALL_STRING; - else - format = BIG_STRING; - return seq_printf(s, format, +#if BITS_PER_LONG <= 32 +#define SIZE_SPEC "%10lu" +#else +#define SIZE_SPEC "%21lu" +#endif + + return seq_printf(s, + "%10d %10d %4o " SIZE_SPEC " %5u %5u " + "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " + SIZE_SPEC " " SIZE_SPEC "\n", shp->shm_perm.key, - shp->id, - shp->shm_flags, + shp->shm_perm.id, + shp->shm_perm.mode, shp->shm_segsz, shp->shm_cprid, shp->shm_lprid, - is_file_hugepages(shp->shm_file) ? (file_count(shp->shm_file) - 1) : shp->shm_nattch, - shp->shm_perm.uid, - shp->shm_perm.gid, - shp->shm_perm.cuid, - shp->shm_perm.cgid, + shp->shm_nattch, + from_kuid_munged(user_ns, shp->shm_perm.uid), + from_kgid_munged(user_ns, shp->shm_perm.gid), + from_kuid_munged(user_ns, shp->shm_perm.cuid), + from_kgid_munged(user_ns, shp->shm_perm.cgid), shp->shm_atim, shp->shm_dtim, - shp->shm_ctim); + shp->shm_ctim, + rss * PAGE_SIZE, + swp * PAGE_SIZE); } #endif diff --git a/ipc/syscall.c b/ipc/syscall.c new file mode 100644 index 00000000000..52429489cde --- /dev/null +++ b/ipc/syscall.c @@ -0,0 +1,99 @@ +/* + * sys_ipc() is the old de-multiplexer for the SysV IPC calls. + * + * This is really horribly ugly, and new architectures should just wire up + * the individual syscalls instead. 
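/*
 * Hedged sketch of how userspace reaches the sys_ipc() demultiplexer
 * defined just below, on architectures that provide SYS_ipc (e.g.
 * i386): a shmat() wrapper passes SHMAT (21 in <linux/ipc.h>) as the
 * call number and receives the attach address through "third".
 */
#include <sys/syscall.h>
#include <unistd.h>

void *shmat_via_ipc(int shmid, const void *shmaddr, int shmflg)
{
	unsigned long raddr;

	if (syscall(SYS_ipc, 21 /* SHMAT */, shmid, shmflg,
		    (long)&raddr, shmaddr) < 0)
		return (void *)-1;
	return (void *)raddr;
}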
+ */ +#include <linux/unistd.h> + +#ifdef __ARCH_WANT_SYS_IPC +#include <linux/errno.h> +#include <linux/ipc.h> +#include <linux/shm.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> + +SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, + unsigned long, third, void __user *, ptr, long, fifth) +{ + int version, ret; + + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + switch (call) { + case SEMOP: + return sys_semtimedop(first, (struct sembuf __user *)ptr, + second, NULL); + case SEMTIMEDOP: + return sys_semtimedop(first, (struct sembuf __user *)ptr, + second, + (const struct timespec __user *)fifth); + + case SEMGET: + return sys_semget(first, second, third); + case SEMCTL: { + unsigned long arg; + if (!ptr) + return -EINVAL; + if (get_user(arg, (unsigned long __user *) ptr)) + return -EFAULT; + return sys_semctl(first, second, third, arg); + } + + case MSGSND: + return sys_msgsnd(first, (struct msgbuf __user *) ptr, + second, third); + case MSGRCV: + switch (version) { + case 0: { + struct ipc_kludge tmp; + if (!ptr) + return -EINVAL; + + if (copy_from_user(&tmp, + (struct ipc_kludge __user *) ptr, + sizeof(tmp))) + return -EFAULT; + return sys_msgrcv(first, tmp.msgp, second, + tmp.msgtyp, third); + } + default: + return sys_msgrcv(first, + (struct msgbuf __user *) ptr, + second, fifth, third); + } + case MSGGET: + return sys_msgget((key_t) first, second); + case MSGCTL: + return sys_msgctl(first, second, (struct msqid_ds __user *)ptr); + + case SHMAT: + switch (version) { + default: { + unsigned long raddr; + ret = do_shmat(first, (char __user *)ptr, + second, &raddr, SHMLBA); + if (ret) + return ret; + return put_user(raddr, (unsigned long __user *) third); + } + case 1: + /* + * This was the entry point for kernel-originating calls + * from iBCS2 in 2.2 days. + */ + return -EINVAL; + } + case SHMDT: + return sys_shmdt((char __user *)ptr); + case SHMGET: + return sys_shmget(first, second, third); + case SHMCTL: + return sys_shmctl(first, second, + (struct shmid_ds __user *) ptr); + default: + return -ENOSYS; + } +} +#endif diff --git a/ipc/util.c b/ipc/util.c index 10e836d0d89..27d74e69fd5 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -7,25 +7,58 @@ * Occurs in several places in the IPC code. * Chris Evans, <chris@ferret.lmh.ox.ac.uk> * Nov 1999 - ipc helper functions, unified SMP locking - * Manfred Spraul <manfreds@colorfullife.com> + * Manfred Spraul <manfred@colorfullife.com> * Oct 2002 - One lock per IPC id. RCU ipc_free for lock-free grow_ary(). * Mingming Cao <cmm@us.ibm.com> + * Mar 2006 - support for audit of ipc object properties + * Dustin Kirkland <dustin.kirkland@us.ibm.com> + * Jun 2006 - namespaces ssupport + * OpenVZ, SWsoft Inc. + * Pavel Emelianov <xemul@openvz.org> + * + * General sysv ipc locking scheme: + * rcu_read_lock() + * obtain the ipc object (kern_ipc_perm) by looking up the id in an idr + * tree. + * - perform initial checks (capabilities, auditing and permission, + * etc). + * - perform read-only operations, such as STAT, INFO commands. + * acquire the ipc lock (kern_ipc_perm.lock) through + * ipc_lock_object() + * - perform data updates, such as SET, RMID commands and + * mechanism-specific operations (semop/semtimedop, + * msgsnd/msgrcv, shmat/shmdt). + * drop the ipc lock, through ipc_unlock_object(). + * rcu_read_unlock() + * + * The ids->rwsem must be taken when: + * - creating, removing and iterating the existing entries in ipc + * identifier sets. 
+ * - iterating through files under /proc/sysvipc/ + * + * Note that sems have a special fast path that avoids kern_ipc_perm.lock - + * see sem_lock(). */ -#include <linux/config.h> #include <linux/mm.h> #include <linux/shm.h> #include <linux/init.h> #include <linux/msg.h> -#include <linux/smp_lock.h> #include <linux/vmalloc.h> #include <linux/slab.h> +#include <linux/notifier.h> +#include <linux/capability.h> #include <linux/highuid.h> #include <linux/security.h> #include <linux/rcupdate.h> #include <linux/workqueue.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> +#include <linux/audit.h> +#include <linux/nsproxy.h> +#include <linux/rwsem.h> +#include <linux/memory.h> +#include <linux/ipc_namespace.h> #include <asm/unistd.h> @@ -34,80 +67,96 @@ struct ipc_proc_iface { const char *path; const char *header; - struct ipc_ids *ids; + int ids; int (*show)(struct seq_file *, void *); }; +static void ipc_memory_notifier(struct work_struct *work) +{ + ipcns_notify(IPCNS_MEMCHANGED); +} + +static int ipc_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier); + + switch (action) { + case MEM_ONLINE: /* memory successfully brought online */ + case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */ + /* + * This is done by invoking the ipcns notifier chain with the + * IPC_MEMCHANGED event. + * In order not to keep the lock on the hotplug memory chain + * for too long, queue a work item that will, when waken up, + * activate the ipcns notification chain. + */ + schedule_work(&ipc_memory_wq); + break; + case MEM_GOING_ONLINE: + case MEM_GOING_OFFLINE: + case MEM_CANCEL_ONLINE: + case MEM_CANCEL_OFFLINE: + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block ipc_memory_nb = { + .notifier_call = ipc_memory_callback, + .priority = IPC_CALLBACK_PRI, +}; + /** - * ipc_init - initialise IPC subsystem + * ipc_init - initialise ipc subsystem + * + * The various sysv ipc resources (semaphores, messages and shared + * memory) are initialised. * - * The various system5 IPC resources (semaphores, messages and shared - * memory are initialised + * A callback routine is registered into the memory hotplug notifier + * chain: since msgmni scales to lowmem this callback routine will be + * called upon successful memory add / remove to recompute msmgni. */ - static int __init ipc_init(void) { sem_init(); msg_init(); shm_init(); + register_hotmemory_notifier(&ipc_memory_nb); + register_ipcns_notifier(&init_ipc_ns); return 0; } -__initcall(ipc_init); +device_initcall(ipc_init); /** - * ipc_init_ids - initialise IPC identifiers - * @ids: Identifier set - * @size: Number of identifiers + * ipc_init_ids - initialise ipc identifiers + * @ids: ipc identifier set * - * Given a size for the ipc identifier range (limited below IPCMNI) - * set up the sequence range to use then allocate and initialise the - * array itself. + * Set up the sequence range to use for the ipc identifier range (limited + * below IPCMNI) then initialise the ids idr. 
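/*
 * Minimal sketch of the locking scheme summarised above, using the
 * helpers this patch introduces (hypothetical caller; error paths
 * trimmed to the shape that matters):
 */
static int example_op(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *ipcp;
	int err = 0;

	rcu_read_lock();
	ipcp = ipc_obtain_object_check(ids, id); /* idr lookup + seq check */
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_rcu;
	}
	/* read-only work (STAT/INFO-style) may run here, unlocked */

	ipc_lock_object(ipcp);			/* kern_ipc_perm.lock  */
	if (!ipc_valid_object(ipcp)) {		/* raced with IPC_RMID */
		err = -EIDRM;
		goto out_unlock;
	}
	/* updating work (SET-style commands, semop, msgsnd, ...) */
out_unlock:
	ipc_unlock_object(ipcp);
out_rcu:
	rcu_read_unlock();
	return err;
}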
*/ - -void __init ipc_init_ids(struct ipc_ids* ids, int size) +void ipc_init_ids(struct ipc_ids *ids) { - int i; - sema_init(&ids->sem,1); - - if(size > IPCMNI) - size = IPCMNI; ids->in_use = 0; - ids->max_id = -1; ids->seq = 0; - { - int seq_limit = INT_MAX/SEQ_MULTIPLIER; - if(seq_limit > USHRT_MAX) - ids->seq_max = USHRT_MAX; - else - ids->seq_max = seq_limit; - } - - ids->entries = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*size + - sizeof(struct ipc_id_ary)); - - if(ids->entries == NULL) { - printk(KERN_ERR "ipc_init_ids() failed, ipc service disabled.\n"); - size = 0; - ids->entries = &ids->nullentry; - } - ids->entries->size = size; - for(i=0;i<size;i++) - ids->entries->p[i] = NULL; + ids->next_id = -1; + init_rwsem(&ids->rwsem); + idr_init(&ids->ipcs_idr); } #ifdef CONFIG_PROC_FS -static struct file_operations sysvipc_proc_fops; +static const struct file_operations sysvipc_proc_fops; /** - * ipc_init_proc_interface - Create a proc interface for sysipc types - * using a seq_file interface. - * @path: Path in procfs - * @header: Banner to be printed at the beginning of the file. - * @ids: ipc id table to iterate. - * @show: show routine. + * ipc_init_proc_interface - create a proc interface for sysipc types using a seq_file interface. + * @path: Path in procfs + * @header: Banner to be printed at the beginning of the file. + * @ids: ipc id table to iterate. + * @show: show routine. */ void __init ipc_init_proc_interface(const char *path, const char *header, - struct ipc_ids *ids, - int (*show)(struct seq_file *, void *)) + int ids, int (*show)(struct seq_file *, void *)) { struct proc_dir_entry *pde; struct ipc_proc_iface *iface; @@ -120,361 +169,377 @@ void __init ipc_init_proc_interface(const char *path, const char *header, iface->ids = ids; iface->show = show; - pde = create_proc_entry(path, - S_IRUGO, /* world readable */ - NULL /* parent dir */); - if (pde) { - pde->data = iface; - pde->proc_fops = &sysvipc_proc_fops; - } else { + pde = proc_create_data(path, + S_IRUGO, /* world readable */ + NULL, /* parent dir */ + &sysvipc_proc_fops, + iface); + if (!pde) kfree(iface); - } } #endif /** - * ipc_findkey - find a key in an ipc identifier set - * @ids: Identifier set - * @key: The key to find - * - * Requires ipc_ids.sem locked. - * Returns the identifier if found or -1 if not. + * ipc_findkey - find a key in an ipc identifier set + * @ids: ipc identifier set + * @key: key to find + * + * Returns the locked pointer to the ipc structure if found or NULL + * otherwise. If key is found ipc points to the owning ipc structure + * + * Called with ipc_ids.rwsem held. */ - -int ipc_findkey(struct ipc_ids* ids, key_t key) +static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) { - int id; - struct kern_ipc_perm* p; - int max_id = ids->max_id; + struct kern_ipc_perm *ipc; + int next_id; + int total; - /* - * rcu_dereference() is not needed here - * since ipc_ids.sem is held - */ - for (id = 0; id <= max_id; id++) { - p = ids->entries->p[id]; - if(p==NULL) + for (total = 0, next_id = 0; total < ids->in_use; next_id++) { + ipc = idr_find(&ids->ipcs_idr, next_id); + + if (ipc == NULL) + continue; + + if (ipc->key != key) { + total++; continue; - if (key == p->key) - return id; + } + + rcu_read_lock(); + ipc_lock_object(ipc); + return ipc; } - return -1; + + return NULL; } -/* - * Requires ipc_ids.sem locked +/** + * ipc_get_maxid - get the last assigned id + * @ids: ipc identifier set + * + * Called with ipc_ids.rwsem held. 
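/*
 * Minimal sketch of the idr pattern that replaces the old grown array
 * (same calls as ipc_init_ids()/ipc_findkey() above and ipc_addid()
 * below; "obj" and the idr are the caller's):
 */
static int example_register(struct idr *idr, void *obj)
{
	int id;

	idr_preload(GFP_KERNEL);	/* preallocate while we may sleep */
	id = idr_alloc(idr, obj, 0, 0, GFP_NOWAIT); /* lowest free slot */
	idr_preload_end();
	return id;			/* >= 0 slot, or negative errno  */
}
/* lookup and teardown are idr_find(idr, id) and idr_remove(idr, id) */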
*/ -static int grow_ary(struct ipc_ids* ids, int newsize) -{ - struct ipc_id_ary* new; - struct ipc_id_ary* old; - int i; - int size = ids->entries->size; - - if(newsize > IPCMNI) - newsize = IPCMNI; - if(newsize <= size) - return newsize; - - new = ipc_rcu_alloc(sizeof(struct kern_ipc_perm *)*newsize + - sizeof(struct ipc_id_ary)); - if(new == NULL) - return size; - new->size = newsize; - memcpy(new->p, ids->entries->p, sizeof(struct kern_ipc_perm *)*size + - sizeof(struct ipc_id_ary)); - for(i=size;i<newsize;i++) { - new->p[i] = NULL; - } - old = ids->entries; +int ipc_get_maxid(struct ipc_ids *ids) +{ + struct kern_ipc_perm *ipc; + int max_id = -1; + int total, id; - /* - * Use rcu_assign_pointer() to make sure the memcpyed contents - * of the new array are visible before the new array becomes visible. - */ - rcu_assign_pointer(ids->entries, new); + if (ids->in_use == 0) + return -1; - ipc_rcu_putref(old); - return newsize; + if (ids->in_use == IPCMNI) + return IPCMNI - 1; + + /* Look for the last assigned id */ + total = 0; + for (id = 0; id < IPCMNI && total < ids->in_use; id++) { + ipc = idr_find(&ids->ipcs_idr, id); + if (ipc != NULL) { + max_id = id; + total++; + } + } + return max_id; } /** - * ipc_addid - add an IPC identifier - * @ids: IPC identifier set - * @new: new IPC permission set - * @size: new size limit for the id array + * ipc_addid - add an ipc identifier + * @ids: ipc identifier set + * @new: new ipc permission set + * @size: limit for the number of used ids * - * Add an entry 'new' to the IPC arrays. The permissions object is - * initialised and the first free entry is set up and the id assigned - * is returned. The list is returned in a locked state on success. - * On failure the list is not locked and -1 is returned. + * Add an entry 'new' to the ipc ids idr. The permissions object is + * initialised and the first free entry is set up and the id assigned + * is returned. The 'new' entry is returned in a locked state on success. + * On failure the entry is not locked and a negative err-code is returned. * - * Called with ipc_ids.sem held. + * Called with writer ipc_ids.rwsem held. */ - -int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size) +int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size) { + kuid_t euid; + kgid_t egid; int id; + int next_id = ids->next_id; - size = grow_ary(ids,size); - - /* - * rcu_dereference()() is not needed here since - * ipc_ids.sem is held - */ - for (id = 0; id < size; id++) { - if(ids->entries->p[id] == NULL) - goto found; - } - return -1; -found: - ids->in_use++; - if (id > ids->max_id) - ids->max_id = id; + if (size > IPCMNI) + size = IPCMNI; - new->cuid = new->uid = current->euid; - new->gid = new->cgid = current->egid; + if (ids->in_use >= size) + return -ENOSPC; - new->seq = ids->seq++; - if(ids->seq > ids->seq_max) - ids->seq = 0; + idr_preload(GFP_KERNEL); spin_lock_init(&new->lock); - new->deleted = 0; + new->deleted = false; rcu_read_lock(); spin_lock(&new->lock); - ids->entries->p[id] = new; - return id; -} -/** - * ipc_rmid - remove an IPC identifier - * @ids: identifier set - * @id: Identifier to remove - * - * The identifier must be valid, and in use. The kernel will panic if - * fed an invalid identifier. The entry is removed and internal - * variables recomputed. The object associated with the identifier - * is returned. - * ipc_ids.sem and the spinlock for this ID is hold before this function - * is called, and remain locked on the exit. 
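/*
 * Worked example of the id encoding ipc_addid() produces above, with
 * SEQ_MULTIPLIER == IPCMNI == 32768: for idr slot 1 and sequence 3,
 *
 *   id = ipc_buildid(1, 3) = 3 * 32768 + 1 = 98305
 *   ipcid_to_idx(98305)  = 98305 % 32768 = 1
 *   ipcid_to_seqx(98305) = 98305 / 32768 = 3
 *
 * The sequence number makes a stale id fail ipc_checkid() with -EIDRM
 * instead of silently resolving to whatever now occupies the slot.
 */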
- */ - -struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id) -{ - struct kern_ipc_perm* p; - int lid = id % SEQ_MULTIPLIER; - if(lid >= ids->entries->size) - BUG(); - - /* - * do not need a rcu_dereference()() here to force ordering - * on Alpha, since the ipc_ids.sem is held. - */ - p = ids->entries->p[lid]; - ids->entries->p[lid] = NULL; - if(p==NULL) - BUG(); - ids->in_use--; + id = idr_alloc(&ids->ipcs_idr, new, + (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0, + GFP_NOWAIT); + idr_preload_end(); + if (id < 0) { + spin_unlock(&new->lock); + rcu_read_unlock(); + return id; + } - if (lid == ids->max_id) { - do { - lid--; - if(lid == -1) - break; - } while (ids->entries->p[lid] == NULL); - ids->max_id = lid; + ids->in_use++; + + current_euid_egid(&euid, &egid); + new->cuid = new->uid = euid; + new->gid = new->cgid = egid; + + if (next_id < 0) { + new->seq = ids->seq++; + if (ids->seq > IPCID_SEQ_MAX) + ids->seq = 0; + } else { + new->seq = ipcid_to_seqx(next_id); + ids->next_id = -1; } - p->deleted = 1; - return p; + + new->id = ipc_buildid(id, new->seq); + return id; } /** - * ipc_alloc - allocate ipc space - * @size: size desired + * ipcget_new - create a new ipc object + * @ns: ipc namespace + * @ids: ipc identifer set + * @ops: the actual creation routine to call + * @params: its parameters * - * Allocate memory from the appropriate pools and return a pointer to it. - * NULL is returned if the allocation fails + * This routine is called by sys_msgget, sys_semget() and sys_shmget() + * when the key is IPC_PRIVATE. */ - -void* ipc_alloc(int size) +static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids, + const struct ipc_ops *ops, struct ipc_params *params) { - void* out; - if(size > PAGE_SIZE) - out = vmalloc(size); - else - out = kmalloc(size, GFP_KERNEL); - return out; + int err; + + down_write(&ids->rwsem); + err = ops->getnew(ns, params); + up_write(&ids->rwsem); + return err; } /** - * ipc_free - free ipc space - * @ptr: pointer returned by ipc_alloc - * @size: size of block + * ipc_check_perms - check security and permissions for an ipc object + * @ns: ipc namespace + * @ipcp: ipc permission set + * @ops: the actual security routine to call + * @params: its parameters * - * Free a block created with ipc_alloc. The caller must know the size - * used in the allocation call. - */ - -void ipc_free(void* ptr, int size) -{ - if(size > PAGE_SIZE) - vfree(ptr); - else - kfree(ptr); -} - -/* - * rcu allocations: - * There are three headers that are prepended to the actual allocation: - * - during use: ipc_rcu_hdr. - * - during the rcu grace period: ipc_rcu_grace. - * - [only if vmalloc]: ipc_rcu_sched. - * Their lifetime doesn't overlap, thus the headers share the same memory. - * Unlike a normal union, they are right-aligned, thus some container_of - * forward/backward casting is necessary: + * This routine is called by sys_msgget(), sys_semget() and sys_shmget() + * when the key is not IPC_PRIVATE and that key already exists in the + * ds IDR. + * + * On success, the ipc id is returned. + * + * It is called with ipc_ids.rwsem and ipcp->lock held. */ -struct ipc_rcu_hdr +static int ipc_check_perms(struct ipc_namespace *ns, + struct kern_ipc_perm *ipcp, + const struct ipc_ops *ops, + struct ipc_params *params) { - int refcount; - int is_vmalloc; - void *data[0]; -}; - - -struct ipc_rcu_grace -{ - struct rcu_head rcu; - /* "void *" makes sure alignment of following data is sane. 
*/ - void *data[0]; -}; - -struct ipc_rcu_sched -{ - struct work_struct work; - /* "void *" makes sure alignment of following data is sane. */ - void *data[0]; -}; - -#define HDRLEN_KMALLOC (sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \ - sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr)) -#define HDRLEN_VMALLOC (sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \ - sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC) + int err; + + if (ipcperms(ns, ipcp, params->flg)) + err = -EACCES; + else { + err = ops->associate(ipcp, params->flg); + if (!err) + err = ipcp->id; + } -static inline int rcu_use_vmalloc(int size) -{ - /* Too big for a single page? */ - if (HDRLEN_KMALLOC + size > PAGE_SIZE) - return 1; - return 0; + return err; } /** - * ipc_rcu_alloc - allocate ipc and rcu space - * @size: size desired + * ipcget_public - get an ipc object or create a new one + * @ns: ipc namespace + * @ids: ipc identifer set + * @ops: the actual creation routine to call + * @params: its parameters * - * Allocate memory for the rcu header structure + the object. - * Returns the pointer to the object. - * NULL is returned if the allocation fails. + * This routine is called by sys_msgget, sys_semget() and sys_shmget() + * when the key is not IPC_PRIVATE. + * It adds a new entry if the key is not found and does some permission + * / security checkings if the key is found. + * + * On success, the ipc id is returned. */ - -void* ipc_rcu_alloc(int size) +static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids, + const struct ipc_ops *ops, struct ipc_params *params) { - void* out; - /* - * We prepend the allocation with the rcu struct, and - * workqueue if necessary (for vmalloc). + struct kern_ipc_perm *ipcp; + int flg = params->flg; + int err; + + /* + * Take the lock as a writer since we are potentially going to add + * a new entry + read locks are not "upgradable" */ - if (rcu_use_vmalloc(size)) { - out = vmalloc(HDRLEN_VMALLOC + size); - if (out) { - out += HDRLEN_VMALLOC; - container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1; - container_of(out, struct ipc_rcu_hdr, data)->refcount = 1; - } + down_write(&ids->rwsem); + ipcp = ipc_findkey(ids, params->key); + if (ipcp == NULL) { + /* key not used */ + if (!(flg & IPC_CREAT)) + err = -ENOENT; + else + err = ops->getnew(ns, params); } else { - out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL); - if (out) { - out += HDRLEN_KMALLOC; - container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0; - container_of(out, struct ipc_rcu_hdr, data)->refcount = 1; + /* ipc object has been locked by ipc_findkey() */ + + if (flg & IPC_CREAT && flg & IPC_EXCL) + err = -EEXIST; + else { + err = 0; + if (ops->more_checks) + err = ops->more_checks(ipcp, params); + if (!err) + /* + * ipc_check_perms returns the IPC id on + * success + */ + err = ipc_check_perms(ns, ipcp, ops, params); } + ipc_unlock(ipcp); } + up_write(&ids->rwsem); - return out; + return err; } -void ipc_rcu_getref(void *ptr) + +/** + * ipc_rmid - remove an ipc identifier + * @ids: ipc identifier set + * @ipcp: ipc perm structure containing the identifier to remove + * + * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held + * before this function is called, and remain locked on the exit. 
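/*
 * Hedged userspace illustration of the ipcget_public() rules above:
 */
#include <errno.h>
#include <sys/ipc.h>
#include <sys/msg.h>

int get_queue(key_t key)
{
	int id = msgget(key, IPC_CREAT | IPC_EXCL | 0600);

	if (id < 0 && errno == EEXIST)	/* key already bound elsewhere */
		id = msgget(key, 0600);	/* attach, permission-checked  */
	return id;			/* ENOENT without IPC_CREAT    */
}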
+ */ +void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp) { - container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; + int lid = ipcid_to_idx(ipcp->id); + + idr_remove(&ids->ipcs_idr, lid); + ids->in_use--; + ipcp->deleted = true; } /** - * ipc_schedule_free - free ipc + rcu space - * - * Since RCU callback function is called in bh, - * we need to defer the vfree to schedule_work + * ipc_alloc - allocate ipc space + * @size: size desired + * + * Allocate memory from the appropriate pools and return a pointer to it. + * NULL is returned if the allocation fails */ -static void ipc_schedule_free(struct rcu_head *head) +void *ipc_alloc(int size) { - struct ipc_rcu_grace *grace = - container_of(head, struct ipc_rcu_grace, rcu); - struct ipc_rcu_sched *sched = - container_of(&(grace->data[0]), struct ipc_rcu_sched, data[0]); - - INIT_WORK(&sched->work, vfree, sched); - schedule_work(&sched->work); + void *out; + if (size > PAGE_SIZE) + out = vmalloc(size); + else + out = kmalloc(size, GFP_KERNEL); + return out; } /** - * ipc_immediate_free - free ipc + rcu space + * ipc_free - free ipc space + * @ptr: pointer returned by ipc_alloc + * @size: size of block * - * Free from the RCU callback context + * Free a block created with ipc_alloc(). The caller must know the size + * used in the allocation call. + */ +void ipc_free(void *ptr, int size) +{ + if (size > PAGE_SIZE) + vfree(ptr); + else + kfree(ptr); +} + +/** + * ipc_rcu_alloc - allocate ipc and rcu space + * @size: size desired * + * Allocate memory for the rcu header structure + the object. + * Returns the pointer to the object or NULL upon failure. */ -static void ipc_immediate_free(struct rcu_head *head) +void *ipc_rcu_alloc(int size) { - struct ipc_rcu_grace *free = - container_of(head, struct ipc_rcu_grace, rcu); - kfree(free); + /* + * We prepend the allocation with the rcu struct + */ + struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size); + if (unlikely(!out)) + return NULL; + atomic_set(&out->refcount, 1); + return out + 1; } -void ipc_rcu_putref(void *ptr) +int ipc_rcu_getref(void *ptr) { - if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0) + struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; + + return atomic_inc_not_zero(&p->refcount); +} + +void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)) +{ + struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; + + if (!atomic_dec_and_test(&p->refcount)) return; - if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { - call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu, - ipc_schedule_free); - } else { - call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu, - ipc_immediate_free); - } + call_rcu(&p->rcu, func); +} + +void ipc_rcu_free(struct rcu_head *head) +{ + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + + if (is_vmalloc_addr(p)) + vfree(p); + else + kfree(p); } /** - * ipcperms - check IPC permissions - * @ipcp: IPC permission set - * @flag: desired permission set. + * ipcperms - check ipc permissions + * @ns: ipc namespace + * @ipcp: ipc permission set + * @flag: desired permission set + * + * Check user, group, other permissions for access + * to ipc resources. return 0 if allowed * - * Check user, group, other permissions for access - * to ipc resources. 
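/*
 * Layout produced by ipc_rcu_alloc() above (struct ipc_rcu is added to
 * ipc/util.h later in this diff):
 *
 *   +------------------------+------------------------------+
 *   | struct ipc_rcu         | caller's object              |
 *   | { rcu_head, refcount } | (e.g. a sem/msg/shm struct)  |
 *   +------------------------+------------------------------+
 *   ^ p                       ^ returned pointer == p + 1
 *
 * ipc_rcu_getref()/ipc_rcu_putref() step the refcount; the final put
 * defers the free past an RCU grace period via the caller-supplied
 * callback, typically ipc_rcu_free().
 */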
return 0 if allowed + * @flag will most probably be 0 or S_...UGO from <linux/stat.h> */ - -int ipcperms (struct kern_ipc_perm *ipcp, short flag) -{ /* flag will most probably be 0 or S_...UGO from <linux/stat.h> */ +int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) +{ + kuid_t euid = current_euid(); int requested_mode, granted_mode; + audit_ipc_obj(ipcp); requested_mode = (flag >> 6) | (flag >> 3) | flag; granted_mode = ipcp->mode; - if (current->euid == ipcp->cuid || current->euid == ipcp->uid) + if (uid_eq(euid, ipcp->cuid) || + uid_eq(euid, ipcp->uid)) granted_mode >>= 6; else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid)) granted_mode >>= 3; /* is there some bit set in requested_mode but not in granted_mode? */ - if ((requested_mode & ~granted_mode & 0007) && - !capable(CAP_IPC_OWNER)) + if ((requested_mode & ~granted_mode & 0007) && + !ns_capable(ns->user_ns, CAP_IPC_OWNER)) return -1; return security_ipc_permission(ipcp, flag); @@ -486,36 +551,33 @@ int ipcperms (struct kern_ipc_perm *ipcp, short flag) */ /** - * kernel_to_ipc64_perm - convert kernel ipc permissions to user - * @in: kernel permissions - * @out: new style IPC permissions + * kernel_to_ipc64_perm - convert kernel ipc permissions to user + * @in: kernel permissions + * @out: new style ipc permissions * - * Turn the kernel object 'in' into a set of permissions descriptions - * for returning to userspace (out). + * Turn the kernel object @in into a set of permissions descriptions + * for returning to userspace (@out). */ - - -void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) +void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out) { out->key = in->key; - out->uid = in->uid; - out->gid = in->gid; - out->cuid = in->cuid; - out->cgid = in->cgid; + out->uid = from_kuid_munged(current_user_ns(), in->uid); + out->gid = from_kgid_munged(current_user_ns(), in->gid); + out->cuid = from_kuid_munged(current_user_ns(), in->cuid); + out->cgid = from_kgid_munged(current_user_ns(), in->cgid); out->mode = in->mode; out->seq = in->seq; } /** - * ipc64_perm_to_ipc_perm - convert old ipc permissions to new - * @in: new style IPC permissions - * @out: old style IPC permissions + * ipc64_perm_to_ipc_perm - convert new ipc permissions to old + * @in: new style ipc permissions + * @out: old style ipc permissions * - * Turn the new style permissions object in into a compatibility - * object and store it into the 'out' pointer. + * Turn the new style permissions object @in into a compatibility + * object and store it into the @out pointer. */ - -void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) +void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out) { out->key = in->key; SET_UID(out->uid, in->uid); @@ -526,94 +588,181 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) out->seq = in->seq; } -/* - * So far only shm_get_stat() calls ipc_get() via shm_get(), so ipc_get() - * is called with shm_ids.sem locked. Since grow_ary() is also called with - * shm_ids.sem down(for Shared Memory), there is no need to add read - * barriers here to gurantee the writes in grow_ary() are seen in order - * here (for Alpha). - * - * However ipc_get() itself does not necessary require ipc_ids.sem down. So - * if in the future ipc_get() is used by other places without ipc_ids.sem - * down, then ipc_get() needs read memery barriers as ipc_lock() does. 
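/*
 * Worked example of the mode arithmetic above, for a segment with mode
 * 0640 and a caller requesting read+write (flag = 0666, as the shmat
 * path passes):
 *
 *   requested_mode = (0666 >> 6) | (0666 >> 3) | 0666   (low bits 06)
 *   owner:  granted_mode = 0640 >> 6 = 06
 *           06 & ~06 & 0007 == 0                        -> allowed
 *   group:  granted_mode = 0640 >> 3, low bits 04
 *           06 & ~04 & 0007 == 02                       -> -EACCES,
 *           unless CAP_IPC_OWNER is held in ns->user_ns.
 */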
+/** + * ipc_obtain_object + * @ids: ipc identifier set + * @id: ipc id to look for + * + * Look for an id in the ipc ids idr and return associated ipc object. + * + * Call inside the RCU critical section. + * The ipc object is *not* locked on exit. */ -struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id) +struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id) { - struct kern_ipc_perm* out; - int lid = id % SEQ_MULTIPLIER; - if(lid >= ids->entries->size) - return NULL; - out = ids->entries->p[lid]; + struct kern_ipc_perm *out; + int lid = ipcid_to_idx(id); + + out = idr_find(&ids->ipcs_idr, lid); + if (!out) + return ERR_PTR(-EINVAL); + return out; } -struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id) +/** + * ipc_lock - lock an ipc structure without rwsem held + * @ids: ipc identifier set + * @id: ipc id to look for + * + * Look for an id in the ipc ids idr and lock the associated ipc object. + * + * The ipc object is locked on successful exit. + */ +struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id) { - struct kern_ipc_perm* out; - int lid = id % SEQ_MULTIPLIER; - struct ipc_id_ary* entries; + struct kern_ipc_perm *out; rcu_read_lock(); - entries = rcu_dereference(ids->entries); - if(lid >= entries->size) { - rcu_read_unlock(); - return NULL; - } - out = entries->p[lid]; - if(out == NULL) { - rcu_read_unlock(); - return NULL; - } + out = ipc_obtain_object(ids, id); + if (IS_ERR(out)) + goto err1; + spin_lock(&out->lock); - + /* ipc_rmid() may have already freed the ID while ipc_lock * was spinning: here verify that the structure is still valid */ - if (out->deleted) { - spin_unlock(&out->lock); - rcu_read_unlock(); - return NULL; - } + if (ipc_valid_object(out)) + return out; + + spin_unlock(&out->lock); + out = ERR_PTR(-EINVAL); +err1: + rcu_read_unlock(); return out; } -void ipc_lock_by_ptr(struct kern_ipc_perm *perm) +/** + * ipc_obtain_object_check + * @ids: ipc identifier set + * @id: ipc id to look for + * + * Similar to ipc_obtain_object() but also checks + * the ipc object reference counter. + * + * Call inside the RCU critical section. + * The ipc object is *not* locked on exit. + */ +struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id) { - rcu_read_lock(); - spin_lock(&perm->lock); + struct kern_ipc_perm *out = ipc_obtain_object(ids, id); + + if (IS_ERR(out)) + goto out; + + if (ipc_checkid(out, id)) + return ERR_PTR(-EIDRM); +out: + return out; } -void ipc_unlock(struct kern_ipc_perm* perm) +/** + * ipcget - Common sys_*get() code + * @ns: namsepace + * @ids: ipc identifier set + * @ops: operations to be called on ipc object creation, permission checks + * and further checks + * @params: the parameters needed by the previous operations. + * + * Common routine called by sys_msgget(), sys_semget() and sys_shmget(). + */ +int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, + const struct ipc_ops *ops, struct ipc_params *params) { - spin_unlock(&perm->lock); - rcu_read_unlock(); + if (params->key == IPC_PRIVATE) + return ipcget_new(ns, ids, ops, params); + else + return ipcget_public(ns, ids, ops, params); } -int ipc_buildid(struct ipc_ids* ids, int id, int seq) +/** + * ipc_update_perm - update the permissions of an ipc object + * @in: the permission given as input. + * @out: the permission of the ipc to set. 
+ */ +int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out) { - return SEQ_MULTIPLIER*seq + id; + kuid_t uid = make_kuid(current_user_ns(), in->uid); + kgid_t gid = make_kgid(current_user_ns(), in->gid); + if (!uid_valid(uid) || !gid_valid(gid)) + return -EINVAL; + + out->uid = uid; + out->gid = gid; + out->mode = (out->mode & ~S_IRWXUGO) + | (in->mode & S_IRWXUGO); + + return 0; } -int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid) +/** + * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd + * @ns: ipc namespace + * @ids: the table of ids where to look for the ipc + * @id: the id of the ipc to retrieve + * @cmd: the cmd to check + * @perm: the permission to set + * @extra_perm: one extra permission parameter used by msq + * + * This function does some common audit and permissions check for some IPC_XXX + * cmd and is called from semctl_down, shmctl_down and msgctl_down. + * It must be called without any lock held and + * - retrieves the ipc with the given id in the given table. + * - performs some audit and permission check, depending on the given cmd + * - returns a pointer to the ipc object or otherwise, the corresponding error. + * + * Call holding the both the rwsem and the rcu read lock. + */ +struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, + struct ipc_ids *ids, int id, int cmd, + struct ipc64_perm *perm, int extra_perm) { - if(uid/SEQ_MULTIPLIER != ipcp->seq) - return 1; - return 0; + kuid_t euid; + int err = -EPERM; + struct kern_ipc_perm *ipcp; + + ipcp = ipc_obtain_object_check(ids, id); + if (IS_ERR(ipcp)) { + err = PTR_ERR(ipcp); + goto err; + } + + audit_ipc_obj(ipcp); + if (cmd == IPC_SET) + audit_ipc_set_perm(extra_perm, perm->uid, + perm->gid, perm->mode); + + euid = current_euid(); + if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) || + ns_capable(ns->user_ns, CAP_SYS_ADMIN)) + return ipcp; /* successful lookup */ +err: + return ERR_PTR(err); } -#ifdef __ARCH_WANT_IPC_PARSE_VERSION +#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION /** - * ipc_parse_version - IPC call version - * @cmd: pointer to command + * ipc_parse_version - ipc call version + * @cmd: pointer to command * - * Return IPC_64 for new style IPC and IPC_OLD for old style IPC. - * The cmd value is turned from an encoding command and version into - * just the command code. + * Return IPC_64 for new style IPC and IPC_OLD for old style IPC. + * The @cmd value is turned from an encoding command and version into + * just the command code. */ - -int ipc_parse_version (int *cmd) +int ipc_parse_version(int *cmd) { if (*cmd & IPC_64) { *cmd ^= IPC_64; @@ -623,26 +772,39 @@ int ipc_parse_version (int *cmd) } } -#endif /* __ARCH_WANT_IPC_PARSE_VERSION */ +#endif /* CONFIG_ARCH_WANT_IPC_PARSE_VERSION */ #ifdef CONFIG_PROC_FS -static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) +struct ipc_proc_iter { + struct ipc_namespace *ns; + struct ipc_proc_iface *iface; +}; + +/* + * This routine locks the ipc structure found at least at position pos. 
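/*
 * Hedged sketch of how the *ctl_down() helpers use the function above
 * (shape only; the real callers live in sem.c, msg.c and shm.c):
 */
static int example_ctl_down(struct ipc_namespace *ns, struct ipc_ids *ids,
			    int id, int cmd, struct ipc64_perm *perm64)
{
	struct kern_ipc_perm *ipcp;
	int err = 0;

	down_write(&ids->rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm64, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock;
	}

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(ipcp);
		/* mechanism-specific teardown (freeary(), shm_destroy(),
		 * ...) drops the lock itself */
		break;
	case IPC_SET:
		ipc_lock_object(ipcp);
		err = ipc_update_perm(perm64, ipcp);
		ipc_unlock_object(ipcp);
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	rcu_read_unlock();
	up_write(&ids->rwsem);
	return err;
}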
+ */ +static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, + loff_t *new_pos) { - struct ipc_proc_iface *iface = s->private; - struct kern_ipc_perm *ipc = it; - loff_t p; + struct kern_ipc_perm *ipc; + int total, id; - /* If we had an ipc id locked before, unlock it */ - if (ipc && ipc != SEQ_START_TOKEN) - ipc_unlock(ipc); + total = 0; + for (id = 0; id < pos && total < ids->in_use; id++) { + ipc = idr_find(&ids->ipcs_idr, id); + if (ipc != NULL) + total++; + } - /* - * p = *pos - 1 (because id 0 starts at position 1) - * + 1 (because we increment the position by one) - */ - for (p = *pos; p <= iface->ids->max_id; p++) { - if ((ipc = ipc_lock(iface->ids, p)) != NULL) { - *pos = p + 1; + if (total >= ids->in_use) + return NULL; + + for (; pos < IPCMNI; pos++) { + ipc = idr_find(&ids->ipcs_idr, pos); + if (ipc != NULL) { + *new_pos = pos + 1; + rcu_read_lock(); + ipc_lock_object(ipc); return ipc; } } @@ -651,21 +813,36 @@ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) return NULL; } +static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos) +{ + struct ipc_proc_iter *iter = s->private; + struct ipc_proc_iface *iface = iter->iface; + struct kern_ipc_perm *ipc = it; + + /* If we had an ipc id locked before, unlock it */ + if (ipc && ipc != SEQ_START_TOKEN) + ipc_unlock(ipc); + + return sysvipc_find_ipc(&iter->ns->ids[iface->ids], *pos, pos); +} + /* - * File positions: pos 0 -> header, pos n -> ipc id + 1. - * SeqFile iterator: iterator value locked shp or SEQ_TOKEN_START. + * File positions: pos 0 -> header, pos n -> ipc id = n - 1. + * SeqFile iterator: iterator value locked ipc pointer or SEQ_TOKEN_START. */ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) { - struct ipc_proc_iface *iface = s->private; - struct kern_ipc_perm *ipc; - loff_t p; + struct ipc_proc_iter *iter = s->private; + struct ipc_proc_iface *iface = iter->iface; + struct ipc_ids *ids; + + ids = &iter->ns->ids[iface->ids]; /* * Take the lock - this will be released by the corresponding * call to stop(). 
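/*
 * For orientation, seq_file drives the iterator above as:
 *
 *   start(pos == 0) -> SEQ_START_TOKEN   -> show() prints the header
 *   start()/next()  -> locked ipc object -> show() prints one row
 *   stop()          -> drops the ids->rwsem taken in start()
 *
 * Each object comes back locked from sysvipc_find_ipc() and is only
 * unlocked by the following next()/stop() call, so show() always runs
 * under the object lock.
 */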
*/ - down(&iface->ids->sem); + down_read(&ids->rwsem); /* pos < 0 is invalid */ if (*pos < 0) @@ -676,31 +853,29 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) return SEQ_START_TOKEN; /* Find the (pos-1)th ipc */ - for (p = *pos - 1; p <= iface->ids->max_id; p++) { - if ((ipc = ipc_lock(iface->ids, p)) != NULL) { - *pos = p + 1; - return ipc; - } - } - return NULL; + return sysvipc_find_ipc(ids, *pos - 1, pos); } static void sysvipc_proc_stop(struct seq_file *s, void *it) { struct kern_ipc_perm *ipc = it; - struct ipc_proc_iface *iface = s->private; + struct ipc_proc_iter *iter = s->private; + struct ipc_proc_iface *iface = iter->iface; + struct ipc_ids *ids; - /* If we had a locked segment, release it */ + /* If we had a locked structure, release it */ if (ipc && ipc != SEQ_START_TOKEN) ipc_unlock(ipc); + ids = &iter->ns->ids[iface->ids]; /* Release the lock we took in start() */ - up(&iface->ids->sem); + up_read(&ids->rwsem); } static int sysvipc_proc_show(struct seq_file *s, void *it) { - struct ipc_proc_iface *iface = s->private; + struct ipc_proc_iter *iter = s->private; + struct ipc_proc_iface *iface = iter->iface; if (it == SEQ_START_TOKEN) return seq_puts(s, iface->header); @@ -708,29 +883,51 @@ static int sysvipc_proc_show(struct seq_file *s, void *it) return iface->show(s, it); } -static struct seq_operations sysvipc_proc_seqops = { +static const struct seq_operations sysvipc_proc_seqops = { .start = sysvipc_proc_start, .stop = sysvipc_proc_stop, .next = sysvipc_proc_next, .show = sysvipc_proc_show, }; -static int sysvipc_proc_open(struct inode *inode, struct file *file) { +static int sysvipc_proc_open(struct inode *inode, struct file *file) +{ int ret; struct seq_file *seq; + struct ipc_proc_iter *iter; + + ret = -ENOMEM; + iter = kmalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + goto out; ret = seq_open(file, &sysvipc_proc_seqops); - if (!ret) { - seq = file->private_data; - seq->private = PDE(inode)->data; + if (ret) { + kfree(iter); + goto out; } + + seq = file->private_data; + seq->private = iter; + + iter->iface = PDE_DATA(inode); + iter->ns = get_ipc_ns(current->nsproxy->ipc_ns); +out: return ret; } -static struct file_operations sysvipc_proc_fops = { +static int sysvipc_proc_release(struct inode *inode, struct file *file) +{ + struct seq_file *seq = file->private_data; + struct ipc_proc_iter *iter = seq->private; + put_ipc_ns(iter->ns); + return seq_release_private(inode, file); +} + +static const struct file_operations sysvipc_proc_fops = { .open = sysvipc_proc_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = sysvipc_proc_release, }; #endif /* CONFIG_PROC_FS */ diff --git a/ipc/util.h b/ipc/util.h index fc9a28be079..1a5a0fcd099 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -2,58 +2,123 @@ * linux/ipc/util.h * Copyright (C) 1999 Christoph Rohland * - * ipc helper functions (c) 1999 Manfred Spraul <manfreds@colorfullife.com> + * ipc helper functions (c) 1999 Manfred Spraul <manfred@colorfullife.com> + * namespaces support. 2006 OpenVZ, SWsoft Inc. 
+ * Pavel Emelianov <xemul@openvz.org> */ #ifndef _IPC_UTIL_H #define _IPC_UTIL_H -#define USHRT_MAX 0xffff +#include <linux/unistd.h> +#include <linux/err.h> + #define SEQ_MULTIPLIER (IPCMNI) -void sem_init (void); -void msg_init (void); -void shm_init (void); +void sem_init(void); +void msg_init(void); +void shm_init(void); + +struct ipc_namespace; + +#ifdef CONFIG_POSIX_MQUEUE +extern void mq_clear_sbinfo(struct ipc_namespace *ns); +extern void mq_put_mnt(struct ipc_namespace *ns); +#else +static inline void mq_clear_sbinfo(struct ipc_namespace *ns) { } +static inline void mq_put_mnt(struct ipc_namespace *ns) { } +#endif + +#ifdef CONFIG_SYSVIPC +void sem_init_ns(struct ipc_namespace *ns); +void msg_init_ns(struct ipc_namespace *ns); +void shm_init_ns(struct ipc_namespace *ns); + +void sem_exit_ns(struct ipc_namespace *ns); +void msg_exit_ns(struct ipc_namespace *ns); +void shm_exit_ns(struct ipc_namespace *ns); +#else +static inline void sem_init_ns(struct ipc_namespace *ns) { } +static inline void msg_init_ns(struct ipc_namespace *ns) { } +static inline void shm_init_ns(struct ipc_namespace *ns) { } + +static inline void sem_exit_ns(struct ipc_namespace *ns) { } +static inline void msg_exit_ns(struct ipc_namespace *ns) { } +static inline void shm_exit_ns(struct ipc_namespace *ns) { } +#endif + +struct ipc_rcu { + struct rcu_head rcu; + atomic_t refcount; +} ____cacheline_aligned_in_smp; -struct ipc_id_ary { - int size; - struct kern_ipc_perm *p[0]; +#define ipc_rcu_to_struct(p) ((void *)(p+1)) + +/* + * Structure that holds the parameters needed by the ipc operations + * (see after) + */ +struct ipc_params { + key_t key; + int flg; + union { + size_t size; /* for shared memories */ + int nsems; /* for semaphores */ + } u; /* holds the getnew() specific param */ }; -struct ipc_ids { - int in_use; - int max_id; - unsigned short seq; - unsigned short seq_max; - struct semaphore sem; - struct ipc_id_ary nullentry; - struct ipc_id_ary* entries; +/* + * Structure that holds some ipc operations. This structure is used to unify + * the calls to sys_msgget(), sys_semget(), sys_shmget() + * . routine to call to create a new ipc object. Can be one of newque, + * newary, newseg + * . routine to call to check permissions for a new ipc object. + * Can be one of security_msg_associate, security_sem_associate, + * security_shm_associate + * . 
routine to call for an extra check if needed + */ +struct ipc_ops { + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); }; struct seq_file; -void __init ipc_init_ids(struct ipc_ids* ids, int size); +struct ipc_ids; + +void ipc_init_ids(struct ipc_ids *); #ifdef CONFIG_PROC_FS void __init ipc_init_proc_interface(const char *path, const char *header, - struct ipc_ids *ids, - int (*show)(struct seq_file *, void *)); + int ids, int (*show)(struct seq_file *, void *)); #else #define ipc_init_proc_interface(path, header, ids, show) do {} while (0) #endif -/* must be called with ids->sem acquired.*/ -int ipc_findkey(struct ipc_ids* ids, key_t key); -int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size); +#define IPC_SEM_IDS 0 +#define IPC_MSG_IDS 1 +#define IPC_SHM_IDS 2 + +#define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER) +#define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER) +#define IPCID_SEQ_MAX min_t(int, INT_MAX/SEQ_MULTIPLIER, USHRT_MAX) + +/* must be called with ids->rwsem acquired for writing */ +int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); + +/* must be called with ids->rwsem acquired for reading */ +int ipc_get_maxid(struct ipc_ids *); /* must be called with both locks acquired. */ -struct kern_ipc_perm* ipc_rmid(struct ipc_ids* ids, int id); +void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *); -int ipcperms (struct kern_ipc_perm *ipcp, short flg); +/* must be called with ipcp locked */ +int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); /* for rare, potentially huge allocations. * both function can sleep */ -void* ipc_alloc(int size); -void ipc_free(void* ptr, int size); +void *ipc_alloc(int size); +void ipc_free(void *ptr, int size); /* * For allocation that need to be freed by RCU. @@ -61,29 +126,82 @@ void ipc_free(void* ptr, int size); * getref increases the refcount, the putref call that reduces the recount * to 0 schedules the rcu destruction. Caller must guarantee locking. */ -void* ipc_rcu_alloc(int size); -void ipc_rcu_getref(void *ptr); -void ipc_rcu_putref(void *ptr); +void *ipc_rcu_alloc(int size); +int ipc_rcu_getref(void *ptr); +void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)); +void ipc_rcu_free(struct rcu_head *head); -struct kern_ipc_perm* ipc_get(struct ipc_ids* ids, int id); -struct kern_ipc_perm* ipc_lock(struct ipc_ids* ids, int id); -void ipc_lock_by_ptr(struct kern_ipc_perm *ipcp); -void ipc_unlock(struct kern_ipc_perm* perm); -int ipc_buildid(struct ipc_ids* ids, int id, int seq); -int ipc_checkid(struct ipc_ids* ids, struct kern_ipc_perm* ipcp, int uid); +struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); +struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); +int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); +struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, + struct ipc_ids *ids, int id, int cmd, + struct ipc64_perm *perm, int extra_perm); -#if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__) || defined(__XTENSA__) - /* On IA-64, we always use the "64-bit version" of the IPC structures. */ +#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION +/* On IA-64, we always use the "64-bit version" of the IPC structures. 
*/ # define ipc_parse_version(cmd) IPC_64 #else -int ipc_parse_version (int *cmd); +int ipc_parse_version(int *cmd); #endif extern void free_msg(struct msg_msg *msg); -extern struct msg_msg *load_msg(const void __user *src, int len); -extern int store_msg(void __user *dest, struct msg_msg *msg, int len); +extern struct msg_msg *load_msg(const void __user *src, size_t len); +extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst); +extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len); + +extern void recompute_msgmni(struct ipc_namespace *); + +static inline int ipc_buildid(int id, int seq) +{ + return SEQ_MULTIPLIER * seq + id; +} + +static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int uid) +{ + return uid / SEQ_MULTIPLIER != ipcp->seq; +} + +static inline void ipc_lock_object(struct kern_ipc_perm *perm) +{ + spin_lock(&perm->lock); +} + +static inline void ipc_unlock_object(struct kern_ipc_perm *perm) +{ + spin_unlock(&perm->lock); +} + +static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm) +{ + assert_spin_locked(&perm->lock); +} + +static inline void ipc_unlock(struct kern_ipc_perm *perm) +{ + ipc_unlock_object(perm); + rcu_read_unlock(); +} +/* + * ipc_valid_object() - helper to sort out IPC_RMID races for codepaths + * where the respective ipc_ids.rwsem is not being held down. + * Checks whether the ipc object is still around or if it's gone already, as + * ipc_rmid() may have already freed the ID while the ipc lock was spinning. + * Needs to be called with kern_ipc_perm.lock held -- exception made for one + * checkpoint case at sys_semtimedop() as noted in code commentary. + */ +static inline bool ipc_valid_object(struct kern_ipc_perm *perm) +{ + return !perm->deleted; +} + +struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id); +int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, + const struct ipc_ops *ops, struct ipc_params *params); +void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, + void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)); #endif |
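/*
 * Illustrative wiring of the struct ipc_ops declared above. The
 * callback names follow ipc/shm.c of this era but are shown here only
 * as an assumed shape, not as content of this diff:
 */
static const struct ipc_ops shm_ops = {
	.getnew      = newseg,		/* create a fresh segment       */
	.associate   = shm_security,	/* LSM hook for an existing key */
	.more_checks = shm_more_checks,	/* e.g. requested size sanity   */
};

/* ...after which sys_shmget() essentially reduces to: */
err = ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);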
