Diffstat (limited to 'ipc')
-rw-r--r--  ipc/Makefile          |    3
-rw-r--r--  ipc/compat.c          |  255
-rw-r--r--  ipc/compat_mq.c       |   58
-rw-r--r--  ipc/ipc_sysctl.c      |  248
-rw-r--r--  ipc/ipcns_notifier.c  |   92
-rw-r--r--  ipc/mq_sysctl.c       |  124
-rw-r--r--  ipc/mqueue.c          | 1116
-rw-r--r--  ipc/msg.c             |  812
-rw-r--r--  ipc/msgutil.c         |  147
-rw-r--r--  ipc/namespace.c       |  155
-rw-r--r--  ipc/sem.c             | 1934
-rw-r--r--  ipc/shm.c             |  919
-rw-r--r--  ipc/syscall.c         |   99
-rw-r--r--  ipc/util.c            |  793
-rw-r--r--  ipc/util.h            |  124
15 files changed, 4433 insertions(+), 2446 deletions(-)
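Several of the sysctl changes in this series (ipc_sysctl.c and the new mq_sysctl.c below) rely on one idiom: each ctl_table's .data field points at a member of init_ipc_ns, and the proc handler rebases that pointer onto the ipc namespace of the calling task before reading or writing the value. The following is a minimal userspace sketch of that pointer arithmetic only; the struct and helper names (ns_demo, rebase_onto_ns) are illustrative, not from the patch.

#include <stdio.h>

/* Illustrative stand-in for struct ipc_namespace; the fields mirror
 * two of the tunables handled below. */
struct ns_demo {
	int msg_ctlmax;
	int msg_ctlmni;
};

static struct ns_demo init_ns_demo = { 8192, 16 };

/* Rebase a pointer to a field of the initial namespace onto another
 * namespace instance -- the arithmetic done by get_ipc()/get_mq():
 * which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns; */
static void *rebase_onto_ns(void *init_field, void *init_base, void *ns_base)
{
	return (char *)ns_base + ((char *)init_field - (char *)init_base);
}

int main(void)
{
	struct ns_demo my_ns = { 65536, 32 };
	int *p = rebase_onto_ns(&init_ns_demo.msg_ctlmax,
				&init_ns_demo, &my_ns);

	printf("%d\n", *p);	/* prints 65536: my_ns.msg_ctlmax */
	return 0;
}

The payoff of the idiom is that a single static table registered against init_ipc_ns serves every namespace without per-namespace table copies.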
diff --git a/ipc/Makefile b/ipc/Makefile index 5fc5e33ea04..9075e172e52 100644 --- a/ipc/Makefile +++ b/ipc/Makefile @@ -3,9 +3,10 @@ # obj-$(CONFIG_SYSVIPC_COMPAT) += compat.o -obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o +obj-$(CONFIG_SYSVIPC) += util.o msgutil.o msg.o sem.o shm.o ipcns_notifier.o syscall.o obj-$(CONFIG_SYSVIPC_SYSCTL) += ipc_sysctl.o obj_mq-$(CONFIG_COMPAT) += compat_mq.o obj-$(CONFIG_POSIX_MQUEUE) += mqueue.o msgutil.o $(obj_mq-y) obj-$(CONFIG_IPC_NS) += namespace.o +obj-$(CONFIG_POSIX_MQUEUE_SYSCTL) += mq_sysctl.o diff --git a/ipc/compat.c b/ipc/compat.c index ab76fb0ef84..b5ef4f7946d 100644 --- a/ipc/compat.c +++ b/ipc/compat.c @@ -26,11 +26,11 @@ #include <linux/init.h> #include <linux/msg.h> #include <linux/shm.h> -#include <linux/slab.h> #include <linux/syscalls.h> +#include <linux/ptrace.h> #include <linux/mutex.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" @@ -113,11 +113,9 @@ struct compat_shm_info { compat_ulong_t swap_attempts, swap_successes; }; -extern int sem_ctls[]; -#define sc_semopm (sem_ctls[2]) - static inline int compat_ipc_parse_version(int *cmd) { +#ifdef CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION int version = *cmd & IPC_64; /* this is tricky: architectures that have support for the old @@ -129,6 +127,10 @@ static inline int compat_ipc_parse_version(int *cmd) *cmd &= ~IPC_64; #endif return version; +#else + /* With the asm-generic APIs, we always use the 64-bit versions. */ + return IPC_64; +#endif } static inline int __get_compat_ipc64_perm(struct ipc64_perm *p64, @@ -192,7 +194,7 @@ static inline int __put_compat_ipc_perm(struct ipc64_perm *p, static inline int get_compat_semid64_ds(struct semid64_ds *s64, struct compat_semid64_ds __user *up64) { - if (!access_ok (VERIFY_READ, up64, sizeof(*up64))) + if (!access_ok(VERIFY_READ, up64, sizeof(*up64))) return -EFAULT; return __get_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm); } @@ -200,7 +202,7 @@ static inline int get_compat_semid64_ds(struct semid64_ds *s64, static inline int get_compat_semid_ds(struct semid64_ds *s, struct compat_semid_ds __user *up) { - if (!access_ok (VERIFY_READ, up, sizeof(*up))) + if (!access_ok(VERIFY_READ, up, sizeof(*up))) return -EFAULT; return __get_compat_ipc_perm(&s->sem_perm, &up->sem_perm); } @@ -210,7 +212,7 @@ static inline int put_compat_semid64_ds(struct semid64_ds *s64, { int err; - if (!access_ok (VERIFY_WRITE, up64, sizeof(*up64))) + if (!access_ok(VERIFY_WRITE, up64, sizeof(*up64))) return -EFAULT; err = __put_compat_ipc64_perm(&s64->sem_perm, &up64->sem_perm); err |= __put_user(s64->sem_otime, &up64->sem_otime); @@ -224,7 +226,7 @@ static inline int put_compat_semid_ds(struct semid64_ds *s, { int err; - if (!access_ok (VERIFY_WRITE, up, sizeof(*up))) + if (!access_ok(VERIFY_WRITE, up, sizeof(*up))) return -EFAULT; err = __put_compat_ipc_perm(&s->sem_perm, &up->sem_perm); err |= __put_user(s->sem_otime, &up->sem_otime); @@ -233,23 +235,24 @@ static inline int put_compat_semid_ds(struct semid64_ds *s, return err; } -long compat_sys_semctl(int first, int second, int third, void __user *uptr) +static long do_compat_semctl(int first, int second, int third, u32 pad) { - union semun fourth; - u32 pad; + unsigned long fourth; int err, err2; struct semid64_ds s64; struct semid64_ds __user *up64; int version = compat_ipc_parse_version(&third); - if (!uptr) - return -EINVAL; - if (get_user(pad, (u32 __user *) uptr)) - return -EFAULT; + memset(&s64, 0, sizeof(s64)); + if ((third & (~IPC_64)) == SETVAL) - fourth.val = 
(int) pad; +#ifdef __BIG_ENDIAN + fourth = (unsigned long)pad << 32; +#else + fourth = pad; +#endif else - fourth.__pad = compat_ptr(pad); + fourth = (unsigned long)compat_ptr(pad); switch (third & (~IPC_64)) { case IPC_INFO: case IPC_RMID: @@ -267,7 +270,7 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr) case IPC_STAT: case SEM_STAT: up64 = compat_alloc_user_space(sizeof(s64)); - fourth.__pad = up64; + fourth = (unsigned long)up64; err = sys_semctl(first, second, third, fourth); if (err < 0) break; @@ -282,18 +285,18 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr) break; case IPC_SET: - if (version == IPC_64) { + if (version == IPC_64) err = get_compat_semid64_ds(&s64, compat_ptr(pad)); - } else { + else err = get_compat_semid_ds(&s64, compat_ptr(pad)); - } + up64 = compat_alloc_user_space(sizeof(s64)); if (copy_to_user(up64, &s64, sizeof(s64))) err = -EFAULT; if (err) break; - fourth.__pad = up64; + fourth = (unsigned long)up64; err = sys_semctl(first, second, third, fourth); break; @@ -304,53 +307,130 @@ long compat_sys_semctl(int first, int second, int third, void __user *uptr) return err; } -long compat_sys_msgsnd(int first, int second, int third, void __user *uptr) +static long compat_do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) { - struct compat_msgbuf __user *up = uptr; - long type; + struct compat_msgbuf __user *msgp = dest; + size_t msgsz; - if (first < 0) - return -EINVAL; - if (second < 0) - return -EINVAL; - - if (get_user(type, &up->mtype)) + if (put_user(msg->m_type, &msgp->mtype)) return -EFAULT; - return do_msgsnd(first, type, up->mtext, second, third); + msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; + if (store_msg(msgp->mtext, msg, msgsz)) + return -EFAULT; + return msgsz; } -long compat_sys_msgrcv(int first, int second, int msgtyp, int third, - int version, void __user *uptr) +#ifndef COMPAT_SHMLBA +#define COMPAT_SHMLBA SHMLBA +#endif + +#ifdef CONFIG_ARCH_WANT_OLD_COMPAT_IPC +COMPAT_SYSCALL_DEFINE6(ipc, u32, call, int, first, int, second, + u32, third, compat_uptr_t, ptr, u32, fifth) { - struct compat_msgbuf __user *up; - long type; - int err; + int version; + u32 pad; - if (first < 0) - return -EINVAL; - if (second < 0) - return -EINVAL; + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + switch (call) { + case SEMOP: + /* struct sembuf is the same on 32 and 64bit :)) */ + return sys_semtimedop(first, compat_ptr(ptr), second, NULL); + case SEMTIMEDOP: + return compat_sys_semtimedop(first, compat_ptr(ptr), second, + compat_ptr(fifth)); + case SEMGET: + return sys_semget(first, second, third); + case SEMCTL: + if (!ptr) + return -EINVAL; + if (get_user(pad, (u32 __user *) compat_ptr(ptr))) + return -EFAULT; + return do_compat_semctl(first, second, third, pad); - if (!version) { - struct compat_ipc_kludge ipck; - err = -EINVAL; - if (!uptr) - goto out; - err = -EFAULT; - if (copy_from_user (&ipck, uptr, sizeof(ipck))) - goto out; - uptr = compat_ptr(ipck.msgp); - msgtyp = ipck.msgtyp; + case MSGSND: { + struct compat_msgbuf __user *up = compat_ptr(ptr); + compat_long_t type; + + if (first < 0 || second < 0) + return -EINVAL; + + if (get_user(type, &up->mtype)) + return -EFAULT; + + return do_msgsnd(first, type, up->mtext, second, third); } - up = uptr; - err = do_msgrcv(first, &type, up->mtext, second, msgtyp, third); - if (err < 0) - goto out; - if (put_user(type, &up->mtype)) - err = -EFAULT; -out: - return err; + case MSGRCV: { + void __user *uptr = 
compat_ptr(ptr); + + if (first < 0 || second < 0) + return -EINVAL; + + if (!version) { + struct compat_ipc_kludge ipck; + if (!uptr) + return -EINVAL; + if (copy_from_user(&ipck, uptr, sizeof(ipck))) + return -EFAULT; + uptr = compat_ptr(ipck.msgp); + fifth = ipck.msgtyp; + } + return do_msgrcv(first, uptr, second, (s32)fifth, third, + compat_do_msg_fill); + } + case MSGGET: + return sys_msgget(first, second); + case MSGCTL: + return compat_sys_msgctl(first, second, compat_ptr(ptr)); + + case SHMAT: { + int err; + unsigned long raddr; + + if (version == 1) + return -EINVAL; + err = do_shmat(first, compat_ptr(ptr), second, &raddr, + COMPAT_SHMLBA); + if (err < 0) + return err; + return put_user(raddr, (compat_ulong_t *)compat_ptr(third)); + } + case SHMDT: + return sys_shmdt(compat_ptr(ptr)); + case SHMGET: + return sys_shmget(first, (unsigned)second, third); + case SHMCTL: + return compat_sys_shmctl(first, second, compat_ptr(ptr)); + } + + return -ENOSYS; +} +#endif + +COMPAT_SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, int, arg) +{ + return do_compat_semctl(semid, semnum, cmd, arg); +} + +COMPAT_SYSCALL_DEFINE4(msgsnd, int, msqid, compat_uptr_t, msgp, + compat_ssize_t, msgsz, int, msgflg) +{ + struct compat_msgbuf __user *up = compat_ptr(msgp); + compat_long_t mtype; + + if (get_user(mtype, &up->mtype)) + return -EFAULT; + return do_msgsnd(msqid, mtype, up->mtext, (ssize_t)msgsz, msgflg); +} + +COMPAT_SYSCALL_DEFINE5(msgrcv, int, msqid, compat_uptr_t, msgp, + compat_ssize_t, msgsz, compat_long_t, msgtyp, int, msgflg) +{ + return do_msgrcv(msqid, compat_ptr(msgp), (ssize_t)msgsz, (long)msgtyp, + msgflg, compat_do_msg_fill); } static inline int get_compat_msqid64(struct msqid64_ds *m64, @@ -415,13 +495,15 @@ static inline int put_compat_msqid_ds(struct msqid64_ds *m, return err; } -long compat_sys_msgctl(int first, int second, void __user *uptr) +COMPAT_SYSCALL_DEFINE3(msgctl, int, first, int, second, void __user *, uptr) { int err, err2; struct msqid64_ds m64; int version = compat_ipc_parse_version(&second); void __user *p; + memset(&m64, 0, sizeof(m64)); + switch (second & (~IPC_64)) { case IPC_INFO: case IPC_RMID: @@ -430,11 +512,11 @@ long compat_sys_msgctl(int first, int second, void __user *uptr) break; case IPC_SET: - if (version == IPC_64) { + if (version == IPC_64) err = get_compat_msqid64(&m64, uptr); - } else { + else err = get_compat_msqid(&m64, uptr); - } + if (err) break; p = compat_alloc_user_space(sizeof(m64)); @@ -467,20 +549,16 @@ long compat_sys_msgctl(int first, int second, void __user *uptr) return err; } -long compat_sys_shmat(int first, int second, compat_uptr_t third, int version, - void __user *uptr) +COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg) { - int err; - unsigned long raddr; - compat_ulong_t __user *uaddr; + unsigned long ret; + long err; - if (version == 1) - return -EINVAL; - err = do_shmat(first, uptr, second, &raddr); - if (err < 0) + err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA); + if (err) return err; - uaddr = compat_ptr(third); - return put_user(raddr, uaddr); + force_successful_syscall_return(); + return (long)ret; } static inline int get_compat_shmid64_ds(struct shmid64_ds *s64, @@ -587,7 +665,7 @@ static inline int put_compat_shm_info(struct shm_info __user *ip, return err; } -long compat_sys_shmctl(int first, int second, void __user *uptr) +COMPAT_SYSCALL_DEFINE3(shmctl, int, first, int, second, void __user *, uptr) { void __user *p; struct shmid64_ds s64; @@ -595,6 +673,8 
@@ long compat_sys_shmctl(int first, int second, void __user *uptr) int err, err2; int version = compat_ipc_parse_version(&second); + memset(&s64, 0, sizeof(s64)); + switch (second & (~IPC_64)) { case IPC_RMID: case SHM_LOCK: @@ -619,11 +699,11 @@ long compat_sys_shmctl(int first, int second, void __user *uptr) case IPC_SET: - if (version == IPC_64) { + if (version == IPC_64) err = get_compat_shmid64_ds(&s64, uptr); - } else { + else err = get_compat_shmid_ds(&s64, uptr); - } + if (err) break; p = compat_alloc_user_space(sizeof(s64)); @@ -666,17 +746,12 @@ long compat_sys_shmctl(int first, int second, void __user *uptr) return err; } -long compat_sys_semtimedop(int semid, struct sembuf __user *tsems, - unsigned nsops, const struct compat_timespec __user *timeout) +COMPAT_SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsems, + unsigned, nsops, + const struct compat_timespec __user *, timeout) { - struct timespec __user *ts64 = NULL; - if (timeout) { - struct timespec ts; - ts64 = compat_alloc_user_space(sizeof(*ts64)); - if (get_compat_timespec(&ts, timeout)) - return -EFAULT; - if (copy_to_user(ts64, &ts, sizeof(ts))) - return -EFAULT; - } + struct timespec __user *ts64; + if (compat_convert_timespec(&ts64, timeout)) + return -EFAULT; return sys_semtimedop(semid, tsems, nsops, ts64); } diff --git a/ipc/compat_mq.c b/ipc/compat_mq.c index d8d1e9ff4e8..ef6f91cc449 100644 --- a/ipc/compat_mq.c +++ b/ipc/compat_mq.c @@ -12,7 +12,7 @@ #include <linux/mqueue.h> #include <linux/syscalls.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> struct compat_mq_attr { compat_long_t mq_flags; /* message queue flags */ @@ -46,13 +46,16 @@ static inline int put_compat_mq_attr(const struct mq_attr *attr, | __put_user(attr->mq_curmsgs, &uattr->mq_curmsgs); } -asmlinkage long compat_sys_mq_open(const char __user *u_name, - int oflag, compat_mode_t mode, - struct compat_mq_attr __user *u_attr) +COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name, + int, oflag, compat_mode_t, mode, + struct compat_mq_attr __user *, u_attr) { void __user *p = NULL; if (u_attr && oflag & O_CREAT) { struct mq_attr attr; + + memset(&attr, 0, sizeof(attr)); + p = compat_alloc_user_space(sizeof(attr)); if (get_compat_mq_attr(&attr, u_attr) || copy_to_user(p, &attr, sizeof(attr))) @@ -61,49 +64,36 @@ asmlinkage long compat_sys_mq_open(const char __user *u_name, return sys_mq_open(u_name, oflag, mode, p); } -static int compat_prepare_timeout(struct timespec __user * *p, - const struct compat_timespec __user *u) -{ - struct timespec ts; - if (!u) { - *p = NULL; - return 0; - } - *p = compat_alloc_user_space(sizeof(ts)); - if (get_compat_timespec(&ts, u) || copy_to_user(*p, &ts, sizeof(ts))) - return -EFAULT; - return 0; -} - -asmlinkage long compat_sys_mq_timedsend(mqd_t mqdes, - const char __user *u_msg_ptr, - size_t msg_len, unsigned int msg_prio, - const struct compat_timespec __user *u_abs_timeout) +COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, + const char __user *, u_msg_ptr, + compat_size_t, msg_len, unsigned int, msg_prio, + const struct compat_timespec __user *, u_abs_timeout) { struct timespec __user *u_ts; - if (compat_prepare_timeout(&u_ts, u_abs_timeout)) + if (compat_convert_timespec(&u_ts, u_abs_timeout)) return -EFAULT; return sys_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, u_ts); } -asmlinkage ssize_t compat_sys_mq_timedreceive(mqd_t mqdes, - char __user *u_msg_ptr, - size_t msg_len, unsigned int __user *u_msg_prio, - const struct compat_timespec __user *u_abs_timeout) 
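The timeout conversions above drop the open-coded compat_prepare_timeout() in favor of the shared compat_convert_timespec() helper, which copies a 32-bit timespec from compat userspace, widens it to the native layout, and stores the result in compat_alloc_user_space() memory so the unmodified native syscall can consume it. Below is a standalone sketch of just the widening step; the *_demo types are illustrative stand-ins, since the real kernel layouts vary by architecture.

#include <stdint.h>
#include <stdio.h>

/* Illustrative layouts: a 32-bit compat timespec and a 64-bit native one. */
struct compat_timespec_demo { int32_t tv_sec; int32_t tv_nsec; };
struct timespec_demo        { int64_t tv_sec; int64_t tv_nsec; };

/* Widen a compat timespec into the native layout; in the kernel,
 * compat_convert_timespec() additionally places the widened copy in
 * compat_alloc_user_space() memory for the native syscall to read. */
static struct timespec_demo widen_timespec(struct compat_timespec_demo ts32)
{
	struct timespec_demo ts = { ts32.tv_sec, ts32.tv_nsec };
	return ts;
}

int main(void)
{
	struct compat_timespec_demo abs32 = { 1234567890, 500000000 };
	struct timespec_demo abs64 = widen_timespec(abs32);

	printf("%lld.%09lld\n", (long long)abs64.tv_sec,
	       (long long)abs64.tv_nsec);
	return 0;
}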
+COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, + char __user *, u_msg_ptr, + compat_size_t, msg_len, unsigned int __user *, u_msg_prio, + const struct compat_timespec __user *, u_abs_timeout) { struct timespec __user *u_ts; - if (compat_prepare_timeout(&u_ts, u_abs_timeout)) + + if (compat_convert_timespec(&u_ts, u_abs_timeout)) return -EFAULT; return sys_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, u_ts); } -asmlinkage long compat_sys_mq_notify(mqd_t mqdes, - const struct compat_sigevent __user *u_notification) +COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, + const struct compat_sigevent __user *, u_notification) { struct sigevent __user *p = NULL; if (u_notification) { @@ -119,14 +109,16 @@ asmlinkage long compat_sys_mq_notify(mqd_t mqdes, return sys_mq_notify(mqdes, p); } -asmlinkage long compat_sys_mq_getsetattr(mqd_t mqdes, - const struct compat_mq_attr __user *u_mqstat, - struct compat_mq_attr __user *u_omqstat) +COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, + const struct compat_mq_attr __user *, u_mqstat, + struct compat_mq_attr __user *, u_omqstat) { struct mq_attr mqstat; struct mq_attr __user *p = compat_alloc_user_space(2 * sizeof(*p)); long ret; + memset(&mqstat, 0, sizeof(mqstat)); + if (u_mqstat) { if (get_compat_mq_attr(&mqstat, u_mqstat) || copy_to_user(p, &mqstat, sizeof(mqstat))) diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c index 7f4235bed51..c3f0326e98d 100644 --- a/ipc/ipc_sysctl.c +++ b/ipc/ipc_sysctl.c @@ -15,8 +15,10 @@ #include <linux/sysctl.h> #include <linux/uaccess.h> #include <linux/ipc_namespace.h> +#include <linux/msg.h> +#include "util.h" -static void *get_ipc(ctl_table *table) +static void *get_ipc(struct ctl_table *table) { char *which = table->data; struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; @@ -24,146 +26,248 @@ static void *get_ipc(ctl_table *table) return which; } -#ifdef CONFIG_PROC_FS -static int proc_ipc_dointvec(ctl_table *table, int write, struct file *filp, +#ifdef CONFIG_PROC_SYSCTL +static int proc_ipc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; + memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); - return proc_dointvec(&ipc_table, write, filp, buffer, lenp, ppos); + return proc_dointvec(&ipc_table, write, buffer, lenp, ppos); } -static int proc_ipc_doulongvec_minmax(ctl_table *table, int write, - struct file *filp, void __user *buffer, size_t *lenp, loff_t *ppos) +static int proc_ipc_dointvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table ipc_table; + memcpy(&ipc_table, table, sizeof(ipc_table)); ipc_table.data = get_ipc(table); - return proc_doulongvec_minmax(&ipc_table, write, filp, buffer, - lenp, ppos); + return proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); } -#else -#define proc_ipc_doulongvec_minmax NULL -#define proc_ipc_dointvec NULL -#endif +static int proc_ipc_dointvec_minmax_orphans(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ipc_namespace *ns = current->nsproxy->ipc_ns; + int err = proc_ipc_dointvec_minmax(table, write, buffer, lenp, ppos); + + if (err < 0) + return err; + if (ns->shm_rmid_forced) + shm_destroy_orphaned(ns); + return err; +} + +static int proc_ipc_callback_dointvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table ipc_table; + size_t lenp_bef = *lenp; + int rc; 
-#ifdef CONFIG_SYSCTL_SYSCALL -/* The generic sysctl ipc data routine. */ -static int sysctl_ipc_data(ctl_table *table, int __user *name, int nlen, - void __user *oldval, size_t __user *oldlenp, - void __user *newval, size_t newlen) + memcpy(&ipc_table, table, sizeof(ipc_table)); + ipc_table.data = get_ipc(table); + + rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); + + if (write && !rc && lenp_bef == *lenp) + /* + * Tunable has successfully been changed by hand. Disable its + * automatic adjustment. This simply requires unregistering + * the notifiers that trigger recalculation. + */ + unregister_ipcns_notifier(current->nsproxy->ipc_ns); + + return rc; +} + +static int proc_ipc_doulongvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table ipc_table; + memcpy(&ipc_table, table, sizeof(ipc_table)); + ipc_table.data = get_ipc(table); + + return proc_doulongvec_minmax(&ipc_table, write, buffer, + lenp, ppos); +} + +/* + * Routine that is called when the file "auto_msgmni" has successfully been + * written. + * Two values are allowed: + * 0: unregister msgmni's callback routine from the ipc namespace notifier + * chain. This means that msgmni won't be recomputed anymore upon memory + * add/remove or ipc namespace creation/removal. + * 1: register back the callback routine. + */ +static void ipc_auto_callback(int val) { - size_t len; - void *data; - - /* Get out of I don't have a variable */ - if (!table->data || !table->maxlen) - return -ENOTDIR; - - data = get_ipc(table); - if (!data) - return -ENOTDIR; - - if (oldval && oldlenp) { - if (get_user(len, oldlenp)) - return -EFAULT; - if (len) { - if (len > table->maxlen) - len = table->maxlen; - if (copy_to_user(oldval, data, len)) - return -EFAULT; - if (put_user(len, oldlenp)) - return -EFAULT; - } + if (!val) + unregister_ipcns_notifier(current->nsproxy->ipc_ns); + else { + /* + * Re-enable automatic recomputing only if not already + * enabled. + */ + recompute_msgmni(current->nsproxy->ipc_ns); + cond_register_ipcns_notifier(current->nsproxy->ipc_ns); } +} + +static int proc_ipcauto_dointvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table ipc_table; + size_t lenp_bef = *lenp; + int oldval; + int rc; + + memcpy(&ipc_table, table, sizeof(ipc_table)); + ipc_table.data = get_ipc(table); + oldval = *((int *)(ipc_table.data)); - if (newval && newlen) { - if (newlen > table->maxlen) - newlen = table->maxlen; + rc = proc_dointvec_minmax(&ipc_table, write, buffer, lenp, ppos); - if (copy_from_user(data, newval, newlen)) - return -EFAULT; + if (write && !rc && lenp_bef == *lenp) { + int newval = *((int *)(ipc_table.data)); + /* + * The file "auto_msgmni" has correctly been set. + * React by (un)registering the corresponding tunable, if the + * value has changed. 
+ */ + if (newval != oldval) + ipc_auto_callback(newval); } - return 1; + + return rc; } + #else -#define sysctl_ipc_data NULL +#define proc_ipc_doulongvec_minmax NULL +#define proc_ipc_dointvec NULL +#define proc_ipc_dointvec_minmax NULL +#define proc_ipc_dointvec_minmax_orphans NULL +#define proc_ipc_callback_dointvec_minmax NULL +#define proc_ipcauto_dointvec_minmax NULL #endif +static int zero; +static int one = 1; +static int int_max = INT_MAX; + static struct ctl_table ipc_kern_table[] = { { - .ctl_name = KERN_SHMMAX, .procname = "shmmax", .data = &init_ipc_ns.shm_ctlmax, - .maxlen = sizeof (init_ipc_ns.shm_ctlmax), + .maxlen = sizeof(init_ipc_ns.shm_ctlmax), .mode = 0644, .proc_handler = proc_ipc_doulongvec_minmax, - .strategy = sysctl_ipc_data, }, { - .ctl_name = KERN_SHMALL, .procname = "shmall", .data = &init_ipc_ns.shm_ctlall, - .maxlen = sizeof (init_ipc_ns.shm_ctlall), + .maxlen = sizeof(init_ipc_ns.shm_ctlall), .mode = 0644, .proc_handler = proc_ipc_doulongvec_minmax, - .strategy = sysctl_ipc_data, }, { - .ctl_name = KERN_SHMMNI, .procname = "shmmni", .data = &init_ipc_ns.shm_ctlmni, - .maxlen = sizeof (init_ipc_ns.shm_ctlmni), + .maxlen = sizeof(init_ipc_ns.shm_ctlmni), .mode = 0644, .proc_handler = proc_ipc_dointvec, - .strategy = sysctl_ipc_data, }, { - .ctl_name = KERN_MSGMAX, + .procname = "shm_rmid_forced", + .data = &init_ipc_ns.shm_rmid_forced, + .maxlen = sizeof(init_ipc_ns.shm_rmid_forced), + .mode = 0644, + .proc_handler = proc_ipc_dointvec_minmax_orphans, + .extra1 = &zero, + .extra2 = &one, + }, + { .procname = "msgmax", .data = &init_ipc_ns.msg_ctlmax, - .maxlen = sizeof (init_ipc_ns.msg_ctlmax), + .maxlen = sizeof(init_ipc_ns.msg_ctlmax), .mode = 0644, - .proc_handler = proc_ipc_dointvec, - .strategy = sysctl_ipc_data, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, }, { - .ctl_name = KERN_MSGMNI, .procname = "msgmni", .data = &init_ipc_ns.msg_ctlmni, - .maxlen = sizeof (init_ipc_ns.msg_ctlmni), + .maxlen = sizeof(init_ipc_ns.msg_ctlmni), .mode = 0644, - .proc_handler = proc_ipc_dointvec, - .strategy = sysctl_ipc_data, + .proc_handler = proc_ipc_callback_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, }, { - .ctl_name = KERN_MSGMNB, .procname = "msgmnb", .data = &init_ipc_ns.msg_ctlmnb, - .maxlen = sizeof (init_ipc_ns.msg_ctlmnb), + .maxlen = sizeof(init_ipc_ns.msg_ctlmnb), .mode = 0644, - .proc_handler = proc_ipc_dointvec, - .strategy = sysctl_ipc_data, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, }, { - .ctl_name = KERN_SEM, .procname = "sem", .data = &init_ipc_ns.sem_ctls, - .maxlen = 4*sizeof (int), + .maxlen = 4*sizeof(int), .mode = 0644, .proc_handler = proc_ipc_dointvec, - .strategy = sysctl_ipc_data, }, + { + .procname = "auto_msgmni", + .data = &init_ipc_ns.auto_msgmni, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_ipcauto_dointvec_minmax, + .extra1 = &zero, + .extra2 = &one, + }, +#ifdef CONFIG_CHECKPOINT_RESTORE + { + .procname = "sem_next_id", + .data = &init_ipc_ns.ids[IPC_SEM_IDS].next_id, + .maxlen = sizeof(init_ipc_ns.ids[IPC_SEM_IDS].next_id), + .mode = 0644, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, + }, + { + .procname = "msg_next_id", + .data = &init_ipc_ns.ids[IPC_MSG_IDS].next_id, + .maxlen = sizeof(init_ipc_ns.ids[IPC_MSG_IDS].next_id), + .mode = 0644, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, + }, + { + .procname = "shm_next_id", + .data = 
&init_ipc_ns.ids[IPC_SHM_IDS].next_id, + .maxlen = sizeof(init_ipc_ns.ids[IPC_SHM_IDS].next_id), + .mode = 0644, + .proc_handler = proc_ipc_dointvec_minmax, + .extra1 = &zero, + .extra2 = &int_max, + }, +#endif {} }; static struct ctl_table ipc_root_table[] = { { - .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555, .child = ipc_kern_table, @@ -177,4 +281,4 @@ static int __init ipc_sysctl_init(void) return 0; } -__initcall(ipc_sysctl_init); +device_initcall(ipc_sysctl_init); diff --git a/ipc/ipcns_notifier.c b/ipc/ipcns_notifier.c new file mode 100644 index 00000000000..b9b31a4f77e --- /dev/null +++ b/ipc/ipcns_notifier.c @@ -0,0 +1,92 @@ +/* + * linux/ipc/ipcns_notifier.c + * Copyright (C) 2007 BULL SA. Nadia Derbey + * + * Notification mechanism for ipc namespaces: + * The callback routine registered in the memory chain invokes the ipcns + * notifier chain with the IPCNS_MEMCHANGED event. + * Each callback routine registered in the ipcns namespace recomputes msgmni + * for the owning namespace. + */ + +#include <linux/msg.h> +#include <linux/rcupdate.h> +#include <linux/notifier.h> +#include <linux/nsproxy.h> +#include <linux/ipc_namespace.h> + +#include "util.h" + + + +static BLOCKING_NOTIFIER_HEAD(ipcns_chain); + + +static int ipcns_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + struct ipc_namespace *ns; + + switch (action) { + case IPCNS_MEMCHANGED: /* amount of lowmem has changed */ + case IPCNS_CREATED: + case IPCNS_REMOVED: + /* + * It's time to recompute msgmni + */ + ns = container_of(self, struct ipc_namespace, ipcns_nb); + /* + * No need to get a reference on the ns: the 1st job of + * free_ipc_ns() is to unregister the callback routine. + * blocking_notifier_chain_unregister takes the wr lock to do + * it. + * When this callback routine is called the rd lock is held by + * blocking_notifier_call_chain. + * So the ipc ns cannot be freed while we are here. + */ + recompute_msgmni(ns); + break; + default: + break; + } + + return NOTIFY_OK; +} + +int register_ipcns_notifier(struct ipc_namespace *ns) +{ + int rc; + + memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); + ns->ipcns_nb.notifier_call = ipcns_callback; + ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; + rc = blocking_notifier_chain_register(&ipcns_chain, &ns->ipcns_nb); + if (!rc) + ns->auto_msgmni = 1; + return rc; +} + +int cond_register_ipcns_notifier(struct ipc_namespace *ns) +{ + int rc; + + memset(&ns->ipcns_nb, 0, sizeof(ns->ipcns_nb)); + ns->ipcns_nb.notifier_call = ipcns_callback; + ns->ipcns_nb.priority = IPCNS_CALLBACK_PRI; + rc = blocking_notifier_chain_cond_register(&ipcns_chain, + &ns->ipcns_nb); + if (!rc) + ns->auto_msgmni = 1; + return rc; +} + +void unregister_ipcns_notifier(struct ipc_namespace *ns) +{ + blocking_notifier_chain_unregister(&ipcns_chain, &ns->ipcns_nb); + ns->auto_msgmni = 0; +} + +int ipcns_notify(unsigned long val) +{ + return blocking_notifier_call_chain(&ipcns_chain, val, NULL); +} diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c new file mode 100644 index 00000000000..68d4e953762 --- /dev/null +++ b/ipc/mq_sysctl.c @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2007 IBM Corporation + * + * Author: Cedric Le Goater <clg@fr.ibm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License as + * published by the Free Software Foundation, version 2 of the + * License. 
+ */ + +#include <linux/nsproxy.h> +#include <linux/ipc_namespace.h> +#include <linux/sysctl.h> + +#ifdef CONFIG_PROC_SYSCTL +static void *get_mq(struct ctl_table *table) +{ + char *which = table->data; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + which = (which - (char *)&init_ipc_ns) + (char *)ipc_ns; + return which; +} + +static int proc_mq_dointvec(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table mq_table; + memcpy(&mq_table, table, sizeof(mq_table)); + mq_table.data = get_mq(table); + + return proc_dointvec(&mq_table, write, buffer, lenp, ppos); +} + +static int proc_mq_dointvec_minmax(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, loff_t *ppos) +{ + struct ctl_table mq_table; + memcpy(&mq_table, table, sizeof(mq_table)); + mq_table.data = get_mq(table); + + return proc_dointvec_minmax(&mq_table, write, buffer, + lenp, ppos); +} +#else +#define proc_mq_dointvec NULL +#define proc_mq_dointvec_minmax NULL +#endif + +static int msg_max_limit_min = MIN_MSGMAX; +static int msg_max_limit_max = HARD_MSGMAX; + +static int msg_maxsize_limit_min = MIN_MSGSIZEMAX; +static int msg_maxsize_limit_max = HARD_MSGSIZEMAX; + +static struct ctl_table mq_sysctls[] = { + { + .procname = "queues_max", + .data = &init_ipc_ns.mq_queues_max, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_mq_dointvec, + }, + { + .procname = "msg_max", + .data = &init_ipc_ns.mq_msg_max, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_mq_dointvec_minmax, + .extra1 = &msg_max_limit_min, + .extra2 = &msg_max_limit_max, + }, + { + .procname = "msgsize_max", + .data = &init_ipc_ns.mq_msgsize_max, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_mq_dointvec_minmax, + .extra1 = &msg_maxsize_limit_min, + .extra2 = &msg_maxsize_limit_max, + }, + { + .procname = "msg_default", + .data = &init_ipc_ns.mq_msg_default, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_mq_dointvec_minmax, + .extra1 = &msg_max_limit_min, + .extra2 = &msg_max_limit_max, + }, + { + .procname = "msgsize_default", + .data = &init_ipc_ns.mq_msgsize_default, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = proc_mq_dointvec_minmax, + .extra1 = &msg_maxsize_limit_min, + .extra2 = &msg_maxsize_limit_max, + }, + {} +}; + +static struct ctl_table mq_sysctl_dir[] = { + { + .procname = "mqueue", + .mode = 0555, + .child = mq_sysctls, + }, + {} +}; + +static struct ctl_table mq_sysctl_root[] = { + { + .procname = "fs", + .mode = 0555, + .child = mq_sysctl_dir, + }, + {} +}; + +struct ctl_table_header *mq_register_sysctl_table(void) +{ + return register_sysctl_table(mq_sysctl_root); +} diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 60f7a27f7a9..4fcf39af177 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -6,7 +6,7 @@ * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: - * Manfred Spraul (manfred@colorfullife.com) + * Manfred Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * @@ -24,6 +24,7 @@ #include <linux/mqueue.h> #include <linux/msg.h> #include <linux/skbuff.h> +#include <linux/vmalloc.h> #include <linux/netlink.h> #include <linux/syscalls.h> #include <linux/audit.h> @@ -31,6 +32,9 @@ #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/pid.h> +#include <linux/ipc_namespace.h> +#include <linux/user_namespace.h> +#include <linux/slab.h> #include <net/sock.h> #include "util.h" @@ -46,12 +50,11 @@ #define 
STATE_PENDING 1 #define STATE_READY 2 -/* default values */ -#define DFLT_QUEUESMAX 256 /* max number of message queues */ -#define DFLT_MSGMAX 10 /* max number of messages in each queue */ -#define HARD_MSGMAX (131072/sizeof(void*)) -#define DFLT_MSGSIZEMAX 8192 /* max message size */ - +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; struct ext_wait_queue { /* queue of sleeping tasks */ struct task_struct *task; @@ -65,11 +68,13 @@ struct mqueue_inode_info { struct inode vfs_inode; wait_queue_head_t wait_q; - struct msg_msg **messages; + struct rb_root msg_tree; + struct posix_msg_tree_node *node_cache; struct mq_attr attr; struct sigevent notify; - struct pid* notify_owner; + struct pid *notify_owner; + struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; struct sk_buff *notify_cookie; @@ -82,132 +87,263 @@ struct mqueue_inode_info { static const struct inode_operations mqueue_dir_inode_operations; static const struct file_operations mqueue_file_operations; -static struct super_operations mqueue_super_ops; +static const struct super_operations mqueue_super_ops; static void remove_notification(struct mqueue_inode_info *info); -static spinlock_t mq_lock; static struct kmem_cache *mqueue_inode_cachep; -static struct vfsmount *mqueue_mnt; -static unsigned int queues_count; -static unsigned int queues_max = DFLT_QUEUESMAX; -static unsigned int msg_max = DFLT_MSGMAX; -static unsigned int msgsize_max = DFLT_MSGSIZEMAX; - -static struct ctl_table_header * mq_sysctl_table; +static struct ctl_table_header *mq_sysctl_table; static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { return container_of(inode, struct mqueue_inode_info, vfs_inode); } -static struct inode *mqueue_get_inode(struct super_block *sb, int mode, - struct mq_attr *attr) +/* + * This routine should be called with the mq_lock held. 
+ */ +static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) +{ + return get_ipc_ns(inode->i_sb->s_fs_info); +} + +static struct ipc_namespace *get_ns_from_inode(struct inode *inode) +{ + struct ipc_namespace *ns; + + spin_lock(&mq_lock); + ns = __get_ns_from_inode(inode); + spin_unlock(&mq_lock); + return ns; +} + +/* Auxiliary functions to manipulate messages' list */ +static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) +{ + struct rb_node **p, *parent = NULL; + struct posix_msg_tree_node *leaf; + + p = &info->msg_tree.rb_node; + while (*p) { + parent = *p; + leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); + + if (likely(leaf->priority == msg->m_type)) + goto insert_msg; + else if (msg->m_type < leaf->priority) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + if (info->node_cache) { + leaf = info->node_cache; + info->node_cache = NULL; + } else { + leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC); + if (!leaf) + return -ENOMEM; + INIT_LIST_HEAD(&leaf->msg_list); + info->qsize += sizeof(*leaf); + } + leaf->priority = msg->m_type; + rb_link_node(&leaf->rb_node, parent, p); + rb_insert_color(&leaf->rb_node, &info->msg_tree); +insert_msg: + info->attr.mq_curmsgs++; + info->qsize += msg->m_ts; + list_add_tail(&msg->m_list, &leaf->msg_list); + return 0; +} + +static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) +{ + struct rb_node **p, *parent = NULL; + struct posix_msg_tree_node *leaf; + struct msg_msg *msg; + +try_again: + p = &info->msg_tree.rb_node; + while (*p) { + parent = *p; + /* + * During insert, low priorities go to the left and high to the + * right. On receive, we want the highest priorities first, so + * walk all the way to the right. + */ + p = &(*p)->rb_right; + } + if (!parent) { + if (info->attr.mq_curmsgs) { + pr_warn_once("Inconsistency in POSIX message queue, " + "no tree element, but supposedly messages " + "should exist!\n"); + info->attr.mq_curmsgs = 0; + } + return NULL; + } + leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); + if (unlikely(list_empty(&leaf->msg_list))) { + pr_warn_once("Inconsistency in POSIX message queue, " + "empty leaf node but we haven't implemented " + "lazy leaf delete!\n"); + rb_erase(&leaf->rb_node, &info->msg_tree); + if (info->node_cache) { + info->qsize -= sizeof(*leaf); + kfree(leaf); + } else { + info->node_cache = leaf; + } + goto try_again; + } else { + msg = list_first_entry(&leaf->msg_list, + struct msg_msg, m_list); + list_del(&msg->m_list); + if (list_empty(&leaf->msg_list)) { + rb_erase(&leaf->rb_node, &info->msg_tree); + if (info->node_cache) { + info->qsize -= sizeof(*leaf); + kfree(leaf); + } else { + info->node_cache = leaf; + } + } + } + info->attr.mq_curmsgs--; + info->qsize -= msg->m_ts; + return msg; +} + +static struct inode *mqueue_get_inode(struct super_block *sb, + struct ipc_namespace *ipc_ns, umode_t mode, + struct mq_attr *attr) { + struct user_struct *u = current_user(); struct inode *inode; + int ret = -ENOMEM; inode = new_inode(sb); - if (inode) { - inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; - inode->i_blocks = 0; - inode->i_mtime = inode->i_ctime = inode->i_atime = - CURRENT_TIME; + if (!inode) + goto err; + + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); + inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME; + + if (S_ISREG(mode)) { + struct mqueue_inode_info *info; + unsigned long mq_bytes, 
mq_treesize; + + inode->i_fop = &mqueue_file_operations; + inode->i_size = FILENT_SIZE; + /* mqueue specific info */ + info = MQUEUE_I(inode); + spin_lock_init(&info->lock); + init_waitqueue_head(&info->wait_q); + INIT_LIST_HEAD(&info->e_wait_q[0].list); + INIT_LIST_HEAD(&info->e_wait_q[1].list); + info->notify_owner = NULL; + info->notify_user_ns = NULL; + info->qsize = 0; + info->user = NULL; /* set when all is ok */ + info->msg_tree = RB_ROOT; + info->node_cache = NULL; + memset(&info->attr, 0, sizeof(info->attr)); + info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, + ipc_ns->mq_msg_default); + info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, + ipc_ns->mq_msgsize_default); + if (attr) { + info->attr.mq_maxmsg = attr->mq_maxmsg; + info->attr.mq_msgsize = attr->mq_msgsize; + } + /* + * We used to allocate a static array of pointers and account + * the size of that array as well as one msg_msg struct per + * possible message into the queue size. That's no longer + * accurate as the queue is now an rbtree and will grow and + * shrink depending on usage patterns. We can, however, still + * account one msg_msg struct per message, but the nodes are + * allocated depending on priority usage, and most programs + * only use one, or a handful, of priorities. However, since + * this is pinned memory, we need to assume worst case, so + * that means the min(mq_maxmsg, max_priorities) * struct + * posix_msg_tree_node. + */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); - if (S_ISREG(mode)) { - struct mqueue_inode_info *info; - struct task_struct *p = current; - struct user_struct *u = p->user; - unsigned long mq_bytes, mq_msg_tblsz; - - inode->i_fop = &mqueue_file_operations; - inode->i_size = FILENT_SIZE; - /* mqueue specific info */ - info = MQUEUE_I(inode); - spin_lock_init(&info->lock); - init_waitqueue_head(&info->wait_q); - INIT_LIST_HEAD(&info->e_wait_q[0].list); - INIT_LIST_HEAD(&info->e_wait_q[1].list); - info->messages = NULL; - info->notify_owner = NULL; - info->qsize = 0; - info->user = NULL; /* set when all is ok */ - memset(&info->attr, 0, sizeof(info->attr)); - info->attr.mq_maxmsg = DFLT_MSGMAX; - info->attr.mq_msgsize = DFLT_MSGSIZEMAX; - if (attr) { - info->attr.mq_maxmsg = attr->mq_maxmsg; - info->attr.mq_msgsize = attr->mq_msgsize; - } - mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *); - mq_bytes = (mq_msg_tblsz + - (info->attr.mq_maxmsg * info->attr.mq_msgsize)); - - spin_lock(&mq_lock); - if (u->mq_bytes + mq_bytes < u->mq_bytes || - u->mq_bytes + mq_bytes > - p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) { - spin_unlock(&mq_lock); - goto out_inode; - } - u->mq_bytes += mq_bytes; + spin_lock(&mq_lock); + if (u->mq_bytes + mq_bytes < u->mq_bytes || + u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { spin_unlock(&mq_lock); - - info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL); - if (!info->messages) { - spin_lock(&mq_lock); - u->mq_bytes -= mq_bytes; - spin_unlock(&mq_lock); - goto out_inode; - } - /* all is ok */ - info->user = get_uid(u); - } else if (S_ISDIR(mode)) { - inc_nlink(inode); - /* Some things misbehave if size == 0 on a directory */ - inode->i_size = 2 * DIRENT_SIZE; - inode->i_op = &mqueue_dir_inode_operations; - inode->i_fop = &simple_dir_operations; + /* mqueue_evict_inode() releases info->messages */ + ret = -EMFILE; + goto out_inode; } + u->mq_bytes += 
mq_bytes; + spin_unlock(&mq_lock); + + /* all is ok */ + info->user = get_uid(u); + } else if (S_ISDIR(mode)) { + inc_nlink(inode); + /* Some things misbehave if size == 0 on a directory */ + inode->i_size = 2 * DIRENT_SIZE; + inode->i_op = &mqueue_dir_inode_operations; + inode->i_fop = &simple_dir_operations; } + return inode; out_inode: - make_bad_inode(inode); iput(inode); - return NULL; +err: + return ERR_PTR(ret); } static int mqueue_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; + struct ipc_namespace *ns = data; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; - inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); - if (!inode) - return -ENOMEM; + inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); + if (IS_ERR(inode)) + return PTR_ERR(inode); - sb->s_root = d_alloc_root(inode); - if (!sb->s_root) { - iput(inode); + sb->s_root = d_make_root(inode); + if (!sb->s_root) return -ENOMEM; - } - return 0; } -static int mqueue_get_sb(struct file_system_type *fs_type, +static struct dentry *mqueue_mount(struct file_system_type *fs_type, int flags, const char *dev_name, - void *data, struct vfsmount *mnt) + void *data) { - return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt); + if (!(flags & MS_KERNMOUNT)) { + struct ipc_namespace *ns = current->nsproxy->ipc_ns; + /* Don't allow mounting unless the caller has CAP_SYS_ADMIN + * over the ipc namespace. + */ + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + data = ns; + } + return mount_ns(fs_type, flags, data, mqueue_fill_super); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; @@ -224,86 +360,119 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb) return &ei->vfs_inode; } -static void mqueue_destroy_inode(struct inode *inode) +static void mqueue_i_callback(struct rcu_head *head) { + struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } -static void mqueue_delete_inode(struct inode *inode) +static void mqueue_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, mqueue_i_callback); +} + +static void mqueue_evict_inode(struct inode *inode) { struct mqueue_inode_info *info; struct user_struct *user; - unsigned long mq_bytes; - int i; + unsigned long mq_bytes, mq_treesize; + struct ipc_namespace *ipc_ns; + struct msg_msg *msg; + + clear_inode(inode); - if (S_ISDIR(inode->i_mode)) { - clear_inode(inode); + if (S_ISDIR(inode->i_mode)) return; - } + + ipc_ns = get_ns_from_inode(inode); info = MQUEUE_I(inode); spin_lock(&info->lock); - for (i = 0; i < info->attr.mq_curmsgs; i++) - free_msg(info->messages[i]); - kfree(info->messages); + while ((msg = msg_get(info)) != NULL) + free_msg(msg); + kfree(info->node_cache); spin_unlock(&info->lock); - clear_inode(inode); + /* Total amount of bytes accounted for the mqueue */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); - mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) + - (info->attr.mq_maxmsg * info->attr.mq_msgsize)); user = info->user; if (user) { spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; - 
queues_count--; + /* + * get_ns_from_inode() ensures that the + * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns + * to which we now hold a reference, or it is NULL. + * We can't put it here under mq_lock, though. + */ + if (ipc_ns) + ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); free_uid(user); } + if (ipc_ns) + put_ipc_ns(ipc_ns); } static int mqueue_create(struct inode *dir, struct dentry *dentry, - int mode, struct nameidata *nd) + umode_t mode, bool excl) { struct inode *inode; struct mq_attr *attr = dentry->d_fsdata; int error; + struct ipc_namespace *ipc_ns; spin_lock(&mq_lock); - if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) { + ipc_ns = __get_ns_from_inode(dir); + if (!ipc_ns) { + error = -EACCES; + goto out_unlock; + } + + if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && + !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; - goto out_lock; + goto out_unlock; } - queues_count++; + ipc_ns->mq_queues_count++; spin_unlock(&mq_lock); - inode = mqueue_get_inode(dir->i_sb, mode, attr); - if (!inode) { - error = -ENOMEM; + inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); + if (IS_ERR(inode)) { + error = PTR_ERR(inode); spin_lock(&mq_lock); - queues_count--; - goto out_lock; + ipc_ns->mq_queues_count--; + goto out_unlock; } + put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; d_instantiate(dentry, inode); dget(dentry); return 0; -out_lock: +out_unlock: spin_unlock(&mq_lock); + if (ipc_ns) + put_ipc_ns(ipc_ns); return error; } static int mqueue_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = dentry->d_inode; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; dir->i_size -= DIRENT_SIZE; - drop_nlink(inode); - dput(dentry); - return 0; + drop_nlink(inode); + dput(dentry); + return 0; } /* @@ -314,15 +483,11 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry) * through std routines) */ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, - size_t count, loff_t * off) + size_t count, loff_t *off) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); char buffer[FILENT_SIZE]; - size_t slen; - loff_t o; - - if (!count) - return 0; + ssize_t ret; spin_lock(&info->lock); snprintf(buffer, sizeof(buffer), @@ -335,26 +500,19 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, pid_vnr(info->notify_owner)); spin_unlock(&info->lock); buffer[sizeof(buffer)-1] = '\0'; - slen = strlen(buffer)+1; - - o = *off; - if (o > slen) - return 0; - - if (o + count > slen) - count = slen - o; - if (copy_to_user(u_data, buffer + o, count)) - return -EFAULT; + ret = simple_read_from_buffer(u_data, count, off, buffer, + strlen(buffer)); + if (ret <= 0) + return ret; - *off = o + count; - filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME; - return count; + file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME; + return ret; } static int mqueue_flush_file(struct file *filp, fl_owner_t id) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); spin_lock(&info->lock); if (task_tgid(current) == info->notify_owner) @@ -366,7 +524,7 @@ static int mqueue_flush_file(struct file *filp, fl_owner_t id) static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct 
*poll_tab) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); int retval = 0; poll_wait(filp, &info->wait_q, poll_tab); @@ -405,7 +563,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr, * sr: SEND or RECV */ static int wq_sleep(struct mqueue_inode_info *info, int sr, - long timeout, struct ext_wait_queue *ewp) + ktime_t *timeout, struct ext_wait_queue *ewp) { int retval; signed long time; @@ -416,7 +574,8 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr, set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); - time = schedule_timeout(timeout); + time = schedule_hrtimeout_range_clock(timeout, 0, + HRTIMER_MODE_ABS, CLOCK_REALTIME); while (ewp->state == STATE_PENDING) cpu_relax(); @@ -460,30 +619,10 @@ static struct ext_wait_queue *wq_get_first_waiter( return list_entry(ptr, struct ext_wait_queue, list); } -/* Auxiliary functions to manipulate messages' list */ -static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info) -{ - int k; - - k = info->attr.mq_curmsgs - 1; - while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) { - info->messages[k + 1] = info->messages[k]; - k--; - } - info->attr.mq_curmsgs++; - info->qsize += ptr->m_ts; - info->messages[k + 1] = ptr; -} - -static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) -{ - info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts; - return info->messages[info->attr.mq_curmsgs]; -} static inline void set_cookie(struct sk_buff *skb, char code) { - ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; + ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* @@ -509,8 +648,12 @@ static void __do_notify(struct mqueue_inode_info *info) sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; - sig_i.si_pid = task_tgid_vnr(current); - sig_i.si_uid = current->uid; + /* map current pid/uid into info->owner's namespaces */ + rcu_read_lock(); + sig_i.si_pid = task_tgid_nr_ns(current, + ns_of_pid(info->notify_owner)); + sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); + rcu_read_unlock(); kill_pid_info(info->notify.sigev_signo, &sig_i, info->notify_owner); @@ -522,40 +665,23 @@ static void __do_notify(struct mqueue_inode_info *info) } /* after notification unregisters process */ put_pid(info->notify_owner); + put_user_ns(info->notify_user_ns); info->notify_owner = NULL; + info->notify_user_ns = NULL; } wake_up(&info->wait_q); } -static long prepare_timeout(const struct timespec __user *u_arg) +static int prepare_timeout(const struct timespec __user *u_abs_timeout, + ktime_t *expires, struct timespec *ts) { - struct timespec ts, nowts; - long timeout; - - if (u_arg) { - if (unlikely(copy_from_user(&ts, u_arg, - sizeof(struct timespec)))) - return -EFAULT; - - if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0 - || ts.tv_nsec >= NSEC_PER_SEC)) - return -EINVAL; - nowts = CURRENT_TIME; - /* first subtract as jiffies can't be too big */ - ts.tv_sec -= nowts.tv_sec; - if (ts.tv_nsec < nowts.tv_nsec) { - ts.tv_nsec += NSEC_PER_SEC; - ts.tv_sec--; - } - ts.tv_nsec -= nowts.tv_nsec; - if (ts.tv_sec < 0) - return 0; - - timeout = timespec_to_jiffies(&ts) + 1; - } else - return MAX_SCHEDULE_TIMEOUT; + if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) + return -EFAULT; + if (!timespec_valid(ts)) + return -EINVAL; - return timeout; + *expires = timespec_to_ktime(*ts); + return 0; } static void remove_notification(struct mqueue_inode_info *info) 
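Throughout the mqueue.c changes, the worst-case memory charged against RLIMIT_MSGQUEUE is computed from the same expression in mqueue_get_inode() and mqueue_evict_inode(): one struct msg_msg per possible message, plus one rbtree leaf per distinct priority (bounded by min(mq_maxmsg, MQ_PRIO_MAX)), plus the payload bytes. A standalone sketch of that accounting follows; the two structure sizes are illustrative placeholders, since the real sizeof values are architecture-dependent.

#include <stdio.h>

#define MQ_PRIO_MAX_DEMO	32768	/* Linux's MQ_PRIO_MAX */

/* Illustrative sizes; the real sizeof(struct msg_msg) and
 * sizeof(struct posix_msg_tree_node) depend on the architecture. */
#define MSG_MSG_SIZE		48UL
#define TREE_NODE_SIZE		56UL

/* Worst-case pinned memory charged for one queue, following
 * mqueue_get_inode(): every message needs a msg_msg, and in the worst
 * case every message sits at a distinct priority, each costing one
 * rbtree leaf (capped by MQ_PRIO_MAX). */
static unsigned long mq_bytes(unsigned long maxmsg, unsigned long msgsize)
{
	unsigned long prios = maxmsg < MQ_PRIO_MAX_DEMO ?
			      maxmsg : MQ_PRIO_MAX_DEMO;
	unsigned long treesize = maxmsg * MSG_MSG_SIZE +
				 prios * TREE_NODE_SIZE;

	return treesize + maxmsg * msgsize;
}

int main(void)
{
	/* A queue sized like the removed DFLT_MSGMAX/DFLT_MSGSIZEMAX
	 * values: 10 messages of up to 8192 bytes each. */
	printf("%lu bytes\n", mq_bytes(10, 8192));
	return 0;
}

As the comment in mqueue_get_inode() notes, the queue no longer preallocates the old static pointer array, so this per-message-plus-per-priority bound is deliberately pessimistic: most programs use only a handful of priorities, but pinned memory must be accounted at its maximum.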
@@ -566,192 +692,211 @@ static void remove_notification(struct mqueue_inode_info *info) netlink_sendskb(info->notify_sock, info->notify_cookie); } put_pid(info->notify_owner); + put_user_ns(info->notify_user_ns); info->notify_owner = NULL; + info->notify_user_ns = NULL; } -static int mq_attr_ok(struct mq_attr *attr) +static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) { + int mq_treesize; + unsigned long total_size; + if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) - return 0; + return -EINVAL; if (capable(CAP_SYS_RESOURCE)) { - if (attr->mq_maxmsg > HARD_MSGMAX) - return 0; + if (attr->mq_maxmsg > HARD_MSGMAX || + attr->mq_msgsize > HARD_MSGSIZEMAX) + return -EINVAL; } else { - if (attr->mq_maxmsg > msg_max || - attr->mq_msgsize > msgsize_max) - return 0; + if (attr->mq_maxmsg > ipc_ns->mq_msg_max || + attr->mq_msgsize > ipc_ns->mq_msgsize_max) + return -EINVAL; } /* check for overflow */ if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) - return 0; - if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) + - (attr->mq_maxmsg * sizeof (struct msg_msg *)) < - (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize)) - return 0; - return 1; + return -EOVERFLOW; + mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + total_size = attr->mq_maxmsg * attr->mq_msgsize; + if (total_size + mq_treesize < total_size) + return -EOVERFLOW; + return 0; } /* * Invoked when creating a new queue via sys_mq_open */ -static struct file *do_create(struct dentry *dir, struct dentry *dentry, - int oflag, mode_t mode, struct mq_attr __user *u_attr) +static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, + struct path *path, int oflag, umode_t mode, + struct mq_attr *attr) { - struct mq_attr attr; + const struct cred *cred = current_cred(); int ret; - if (u_attr) { - ret = -EFAULT; - if (copy_from_user(&attr, u_attr, sizeof(attr))) - goto out; - ret = -EINVAL; - if (!mq_attr_ok(&attr)) - goto out; + if (attr) { + ret = mq_attr_ok(ipc_ns, attr); + if (ret) + return ERR_PTR(ret); /* store for use during create */ - dentry->d_fsdata = &attr; + path->dentry->d_fsdata = attr; + } else { + struct mq_attr def_attr; + + def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, + ipc_ns->mq_msg_default); + def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, + ipc_ns->mq_msgsize_default); + ret = mq_attr_ok(ipc_ns, &def_attr); + if (ret) + return ERR_PTR(ret); } - mode &= ~current->fs->umask; - ret = vfs_create(dir->d_inode, dentry, mode, NULL); - dentry->d_fsdata = NULL; + mode &= ~current_umask(); + ret = vfs_create(dir, path->dentry, mode, true); + path->dentry->d_fsdata = NULL; if (ret) - goto out; - - return dentry_open(dentry, mqueue_mnt, oflag); - -out: - dput(dentry); - mntput(mqueue_mnt); - return ERR_PTR(ret); + return ERR_PTR(ret); + return dentry_open(path, oflag, cred); } /* Opens existing queue */ -static struct file *do_open(struct dentry *dentry, int oflag) +static struct file *do_open(struct path *path, int oflag) { -static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, - MAY_READ | MAY_WRITE }; - - if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) { - dput(dentry); - mntput(mqueue_mnt); + static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, + MAY_READ | MAY_WRITE }; + int acc; + if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) return ERR_PTR(-EINVAL); - } - - if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) { - dput(dentry); - mntput(mqueue_mnt); + 
acc = oflag2acc[oflag & O_ACCMODE]; + if (inode_permission(path->dentry->d_inode, acc)) return ERR_PTR(-EACCES); - } - - return dentry_open(dentry, mqueue_mnt, oflag); + return dentry_open(path, oflag, current_cred()); } -asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode, - struct mq_attr __user *u_attr) +SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, + struct mq_attr __user *, u_attr) { - struct dentry *dentry; + struct path path; struct file *filp; - char *name; + struct filename *name; + struct mq_attr attr; int fd, error; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + struct vfsmount *mnt = ipc_ns->mq_mnt; + struct dentry *root = mnt->mnt_root; + int ro; - error = audit_mq_open(oflag, mode, u_attr); - if (error != 0) - return error; + if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) + return -EFAULT; + + audit_mq_open(oflag, mode, u_attr ? &attr : NULL); if (IS_ERR(name = getname(u_name))) return PTR_ERR(name); - fd = get_unused_fd(); + fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) goto out_putname; - mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex); - dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_err; + ro = mnt_want_write(mnt); /* we'll drop it in any case */ + error = 0; + mutex_lock(&root->d_inode->i_mutex); + path.dentry = lookup_one_len(name->name, root, strlen(name->name)); + if (IS_ERR(path.dentry)) { + error = PTR_ERR(path.dentry); + goto out_putfd; } - mntget(mqueue_mnt); + path.mnt = mntget(mnt); if (oflag & O_CREAT) { - if (dentry->d_inode) { /* entry already exists */ - audit_inode(name, dentry); - error = -EEXIST; - if (oflag & O_EXCL) + if (path.dentry->d_inode) { /* entry already exists */ + audit_inode(name, path.dentry, 0); + if (oflag & O_EXCL) { + error = -EEXIST; goto out; - filp = do_open(dentry, oflag); + } + filp = do_open(&path, oflag); } else { - filp = do_create(mqueue_mnt->mnt_root, dentry, - oflag, mode, u_attr); + if (ro) { + error = ro; + goto out; + } + audit_inode_parent_hidden(name, root); + filp = do_create(ipc_ns, root->d_inode, + &path, oflag, mode, + u_attr ? 
&attr : NULL); } } else { - error = -ENOENT; - if (!dentry->d_inode) + if (!path.dentry->d_inode) { + error = -ENOENT; goto out; - audit_inode(name, dentry); - filp = do_open(dentry, oflag); + } + audit_inode(name, path.dentry, 0); + filp = do_open(&path, oflag); } - if (IS_ERR(filp)) { + if (!IS_ERR(filp)) + fd_install(fd, filp); + else error = PTR_ERR(filp); - goto out_putfd; - } - - set_close_on_exec(fd, 1); - fd_install(fd, filp); - goto out_upsem; - out: - dput(dentry); - mntput(mqueue_mnt); + path_put(&path); out_putfd: - put_unused_fd(fd); -out_err: - fd = error; -out_upsem: - mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex); + if (error) { + put_unused_fd(fd); + fd = error; + } + mutex_unlock(&root->d_inode->i_mutex); + if (!ro) + mnt_drop_write(mnt); out_putname: putname(name); return fd; } -asmlinkage long sys_mq_unlink(const char __user *u_name) +SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) { int err; - char *name; + struct filename *name; struct dentry *dentry; struct inode *inode = NULL; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + struct vfsmount *mnt = ipc_ns->mq_mnt; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); - mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex, - I_MUTEX_PARENT); - dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); + audit_inode_parent_hidden(name, mnt->mnt_root); + err = mnt_want_write(mnt); + if (err) + goto out_name; + mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); + dentry = lookup_one_len(name->name, mnt->mnt_root, + strlen(name->name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } - if (!dentry->d_inode) { + inode = dentry->d_inode; + if (!inode) { err = -ENOENT; - goto out_err; + } else { + ihold(inode); + err = vfs_unlink(dentry->d_parent->d_inode, dentry, NULL); } - - inode = dentry->d_inode; - if (inode) - atomic_inc(&inode->i_count); - - err = vfs_unlink(dentry->d_parent->d_inode, dentry); -out_err: dput(dentry); out_unlock: - mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex); - putname(name); + mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); if (inode) iput(inode); + mnt_drop_write(mnt); +out_name: + putname(name); return err; } @@ -797,7 +942,8 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) wake_up_interruptible(&info->wait_q); return; } - msg_insert(sender->msg, info); + if (msg_insert(sender->msg, info)) + return; list_del(&sender->list); sender->state = STATE_PENDING; wake_up_process(sender->task); @@ -805,41 +951,51 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) sender->state = STATE_READY; } -asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, - size_t msg_len, unsigned int msg_prio, - const struct timespec __user *u_abs_timeout) +SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, + size_t, msg_len, unsigned int, msg_prio, + const struct timespec __user *, u_abs_timeout) { - struct file *filp; + struct fd f; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; - long timeout; - int ret; - - ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); - if (ret != 0) - return ret; + ktime_t expires, *timeout = NULL; + struct timespec ts; + struct posix_msg_tree_node *new_leaf = NULL; + int ret = 0; + + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &expires, &ts); + if (res) + return res; + timeout = &expires; + } if 
(unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; - timeout = prepare_timeout(u_abs_timeout); + audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); - ret = -EBADF; - filp = fget(mqdes); - if (unlikely(!filp)) + f = fdget(mqdes); + if (unlikely(!f.file)) { + ret = -EBADF; goto out; + } - inode = filp->f_path.dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); - audit_inode(NULL, filp->f_path.dentry); + audit_inode(NULL, f.file->f_path.dentry, 0); - if (unlikely(!(filp->f_mode & FMODE_WRITE))) + if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { + ret = -EBADF; goto out_fput; + } if (unlikely(msg_len > info->attr.mq_msgsize)) { ret = -EMSGSIZE; @@ -856,74 +1012,106 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; + /* + * msg_insert really wants us to have a valid, spare node struct so + * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will + * fall back to that if necessary. + */ + if (!info->node_cache) + new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); + spin_lock(&info->lock); + if (!info->node_cache && new_leaf) { + /* Save our speculative allocation into the cache */ + INIT_LIST_HEAD(&new_leaf->msg_list); + info->node_cache = new_leaf; + info->qsize += sizeof(*new_leaf); + new_leaf = NULL; + } else { + kfree(new_leaf); + } + if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { - if (filp->f_flags & O_NONBLOCK) { - spin_unlock(&info->lock); + if (f.file->f_flags & O_NONBLOCK) { ret = -EAGAIN; - } else if (unlikely(timeout < 0)) { - spin_unlock(&info->lock); - ret = timeout; } else { wait.task = current; wait.msg = (void *) msg_ptr; wait.state = STATE_NONE; ret = wq_sleep(info, SEND, timeout, &wait); + /* + * wq_sleep must be called with info->lock held, and + * returns with the lock released + */ + goto out_free; } - if (ret < 0) - free_msg(msg_ptr); } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { pipelined_send(info, msg_ptr, receiver); } else { /* adds message to the queue */ - msg_insert(msg_ptr, info); + ret = msg_insert(msg_ptr, info); + if (ret) + goto out_unlock; __do_notify(info); } inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - spin_unlock(&info->lock); - ret = 0; } +out_unlock: + spin_unlock(&info->lock); +out_free: + if (ret) + free_msg(msg_ptr); out_fput: - fput(filp); + fdput(f); out: return ret; } -asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, - size_t msg_len, unsigned int __user *u_msg_prio, - const struct timespec __user *u_abs_timeout) +SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, + size_t, msg_len, unsigned int __user *, u_msg_prio, + const struct timespec __user *, u_abs_timeout) { - long timeout; ssize_t ret; struct msg_msg *msg_ptr; - struct file *filp; + struct fd f; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; + ktime_t expires, *timeout = NULL; + struct timespec ts; + struct posix_msg_tree_node *new_leaf = NULL; + + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &expires, &ts); + if (res) + return res; + timeout = &expires; + } - ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); - if (ret != 0) - return ret; - - timeout = prepare_timeout(u_abs_timeout); + audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? 
&ts : NULL); - ret = -EBADF; - filp = fget(mqdes); - if (unlikely(!filp)) + f = fdget(mqdes); + if (unlikely(!f.file)) { + ret = -EBADF; goto out; + } - inode = filp->f_path.dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); - audit_inode(NULL, filp->f_path.dentry); + audit_inode(NULL, f.file->f_path.dentry, 0); - if (unlikely(!(filp->f_mode & FMODE_READ))) + if (unlikely(!(f.file->f_mode & FMODE_READ))) { + ret = -EBADF; goto out_fput; + } /* checks if buffer is big enough */ if (unlikely(msg_len < info->attr.mq_msgsize)) { @@ -931,16 +1119,29 @@ asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, goto out_fput; } + /* + * msg_insert really wants us to have a valid, spare node struct so + * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will + * fall back to that if necessary. + */ + if (!info->node_cache) + new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); + spin_lock(&info->lock); + + if (!info->node_cache && new_leaf) { + /* Save our speculative allocation into the cache */ + INIT_LIST_HEAD(&new_leaf->msg_list); + info->node_cache = new_leaf; + info->qsize += sizeof(*new_leaf); + } else { + kfree(new_leaf); + } + if (info->attr.mq_curmsgs == 0) { - if (filp->f_flags & O_NONBLOCK) { + if (f.file->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; - msg_ptr = NULL; - } else if (unlikely(timeout < 0)) { - spin_unlock(&info->lock); - ret = timeout; - msg_ptr = NULL; } else { wait.task = current; wait.state = STATE_NONE; @@ -968,7 +1169,7 @@ asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, free_msg(msg_ptr); } out_fput: - fput(filp); + fdput(f); out: return ret; } @@ -978,28 +1179,28 @@ out: * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. */ -asmlinkage long sys_mq_notify(mqd_t mqdes, - const struct sigevent __user *u_notification) +SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, + const struct sigevent __user *, u_notification) { int ret; - struct file *filp; + struct fd f; struct sock *sock; struct inode *inode; struct sigevent notification; struct mqueue_inode_info *info; struct sk_buff *nc; - ret = audit_mq_notify(mqdes, u_notification); - if (ret != 0) - return ret; - - nc = NULL; - sock = NULL; - if (u_notification != NULL) { + if (u_notification) { if (copy_from_user(¬ification, u_notification, sizeof(struct sigevent))) return -EFAULT; + } + + audit_mq_notify(mqdes, u_notification ? 
¬ification : NULL); + nc = NULL; + sock = NULL; + if (u_notification != NULL) { if (unlikely(notification.sigev_notify != SIGEV_NONE && notification.sigev_notify != SIGEV_SIGNAL && notification.sigev_notify != SIGEV_THREAD)) @@ -1013,13 +1214,14 @@ asmlinkage long sys_mq_notify(mqd_t mqdes, /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); - ret = -ENOMEM; - if (!nc) + if (!nc) { + ret = -ENOMEM; goto out; - ret = -EFAULT; + } if (copy_from_user(nc->data, notification.sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { + ret = -EFAULT; goto out; } @@ -1027,12 +1229,13 @@ asmlinkage long sys_mq_notify(mqd_t mqdes, skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: - filp = fget(notification.sigev_signo); - ret = -EBADF; - if (!filp) + f = fdget(notification.sigev_signo); + if (!f.file) { + ret = -EBADF; goto out; - sock = netlink_getsockbyfilp(filp); - fput(filp); + } + sock = netlink_getsockbyfilp(f.file); + fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; @@ -1040,9 +1243,9 @@ retry: } timeo = MAX_SCHEDULE_TIMEOUT; - ret = netlink_attachskb(sock, nc, 0, &timeo, NULL); + ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) - goto retry; + goto retry; if (ret) { sock = NULL; nc = NULL; @@ -1051,14 +1254,17 @@ retry: } } - ret = -EBADF; - filp = fget(mqdes); - if (!filp) + f = fdget(mqdes); + if (!f.file) { + ret = -EBADF; goto out; + } - inode = filp->f_path.dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); ret = 0; @@ -1090,27 +1296,28 @@ retry: } info->notify_owner = get_pid(task_tgid(current)); + info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = CURRENT_TIME; } spin_unlock(&info->lock); out_fput: - fput(filp); + fdput(f); out: - if (sock) { + if (sock) netlink_detachskb(sock, nc); - } else if (nc) { + else if (nc) dev_kfree_skb(nc); - } + return ret; } -asmlinkage long sys_mq_getsetattr(mqd_t mqdes, - const struct mq_attr __user *u_mqstat, - struct mq_attr __user *u_omqstat) +SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, + const struct mq_attr __user *, u_mqstat, + struct mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; - struct file *filp; + struct fd f; struct inode *inode; struct mqueue_inode_info *info; @@ -1121,30 +1328,31 @@ asmlinkage long sys_mq_getsetattr(mqd_t mqdes, return -EINVAL; } - ret = -EBADF; - filp = fget(mqdes); - if (!filp) + f = fdget(mqdes); + if (!f.file) { + ret = -EBADF; goto out; + } - inode = filp->f_path.dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); spin_lock(&info->lock); omqstat = info->attr; - omqstat.mq_flags = filp->f_flags & O_NONBLOCK; + omqstat.mq_flags = f.file->f_flags & O_NONBLOCK; if (u_mqstat) { - ret = audit_mq_getsetattr(mqdes, &mqstat); - if (ret != 0) { - spin_unlock(&info->lock); - goto out_fput; - } + audit_mq_getsetattr(mqdes, &mqstat); + spin_lock(&f.file->f_lock); if (mqstat.mq_flags & O_NONBLOCK) - filp->f_flags |= O_NONBLOCK; + f.file->f_flags |= O_NONBLOCK; else - filp->f_flags &= ~O_NONBLOCK; + f.file->f_flags &= ~O_NONBLOCK; + spin_unlock(&f.file->f_lock); inode->i_atime = inode->i_ctime = CURRENT_TIME; } @@ -1157,7 +1365,7 @@ asmlinkage long sys_mq_getsetattr(mqd_t 
mqdes, ret = -EFAULT; out_fput: - fput(filp); + fdput(f); out: return ret; } @@ -1172,75 +1380,50 @@ static const struct file_operations mqueue_file_operations = { .flush = mqueue_flush_file, .poll = mqueue_poll_file, .read = mqueue_read_file, + .llseek = default_llseek, }; -static struct super_operations mqueue_super_ops = { +static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, .destroy_inode = mqueue_destroy_inode, + .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, - .delete_inode = mqueue_delete_inode, - .drop_inode = generic_delete_inode, }; static struct file_system_type mqueue_fs_type = { .name = "mqueue", - .get_sb = mqueue_get_sb, + .mount = mqueue_mount, .kill_sb = kill_litter_super, + .fs_flags = FS_USERNS_MOUNT, }; -static int msg_max_limit_min = DFLT_MSGMAX; -static int msg_max_limit_max = HARD_MSGMAX; - -static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX; -static int msg_maxsize_limit_max = INT_MAX; - -static ctl_table mq_sysctls[] = { - { - .procname = "queues_max", - .data = &queues_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .procname = "msg_max", - .data = &msg_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .extra1 = &msg_max_limit_min, - .extra2 = &msg_max_limit_max, - }, - { - .procname = "msgsize_max", - .data = &msgsize_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .extra1 = &msg_maxsize_limit_min, - .extra2 = &msg_maxsize_limit_max, - }, - { .ctl_name = 0 } -}; +int mq_init_ns(struct ipc_namespace *ns) +{ + ns->mq_queues_count = 0; + ns->mq_queues_max = DFLT_QUEUESMAX; + ns->mq_msg_max = DFLT_MSGMAX; + ns->mq_msgsize_max = DFLT_MSGSIZEMAX; + ns->mq_msg_default = DFLT_MSG; + ns->mq_msgsize_default = DFLT_MSGSIZE; + + ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); + if (IS_ERR(ns->mq_mnt)) { + int err = PTR_ERR(ns->mq_mnt); + ns->mq_mnt = NULL; + return err; + } + return 0; +} -static ctl_table mq_sysctl_dir[] = { - { - .procname = "mqueue", - .mode = 0555, - .child = mq_sysctls, - }, - { .ctl_name = 0 } -}; +void mq_clear_sbinfo(struct ipc_namespace *ns) +{ + ns->mq_mnt->mnt_sb->s_fs_info = NULL; +} -static ctl_table mq_sysctl_root[] = { - { - .ctl_name = CTL_FS, - .procname = "fs", - .mode = 0555, - .child = mq_sysctl_dir, - }, - { .ctl_name = 0 } -}; +void mq_put_mnt(struct ipc_namespace *ns) +{ + kern_unmount(ns->mq_mnt); +} static int __init init_mqueue_fs(void) { @@ -1252,22 +1435,19 @@ static int __init init_mqueue_fs(void) if (mqueue_inode_cachep == NULL) return -ENOMEM; - /* ignore failues - they are not fatal */ - mq_sysctl_table = register_sysctl_table(mq_sysctl_root); + /* ignore failures - they are not fatal */ + mq_sysctl_table = mq_register_sysctl_table(); error = register_filesystem(&mqueue_fs_type); if (error) goto out_sysctl; - if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) { - error = PTR_ERR(mqueue_mnt); - goto out_filesystem; - } - - /* internal initialization - not common for vfs */ - queues_count = 0; spin_lock_init(&mq_lock); + error = mq_init_ns(&init_ipc_ns); + if (error) + goto out_filesystem; + return 0; out_filesystem: @@ -1279,4 +1459,4 @@ out_sysctl: return error; } -__initcall(init_mqueue_fs); +device_initcall(init_mqueue_fs); diff --git a/ipc/msg.c b/ipc/msg.c index 46585a05473..c5d8e374998 100644 --- a/ipc/msg.c +++ b/ipc/msg.c @@ -23,10 +23,10 @@ */ #include <linux/capability.h> -#include <linux/slab.h> #include <linux/msg.h> #include 
<linux/spinlock.h> #include <linux/init.h> +#include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/list.h> #include <linux/security.h> @@ -39,12 +39,10 @@ #include <linux/ipc_namespace.h> #include <asm/current.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" -/* - * one msg_receiver structure for each sleeping receiver: - */ +/* one msg_receiver structure for each sleeping receiver */ struct msg_receiver { struct list_head r_list; struct task_struct *r_tsk; @@ -53,6 +51,12 @@ struct msg_receiver { long r_msgtype; long r_maxsize; + /* + * Mark r_msg volatile so that the compiler + * does not try to get smart and optimize + * it. We rely on this for the lockless + * receive algorithm. + */ struct msg_msg *volatile r_msg; }; @@ -66,86 +70,43 @@ struct msg_sender { #define SEARCH_EQUAL 2 #define SEARCH_NOTEQUAL 3 #define SEARCH_LESSEQUAL 4 +#define SEARCH_NUMBER 5 #define msg_ids(ns) ((ns)->ids[IPC_MSG_IDS]) -#define msg_unlock(msq) ipc_unlock(&(msq)->q_perm) -#define msg_buildid(id, seq) ipc_buildid(id, seq) - -static void freeque(struct ipc_namespace *, struct kern_ipc_perm *); -static int newque(struct ipc_namespace *, struct ipc_params *); -#ifdef CONFIG_PROC_FS -static int sysvipc_msg_proc_show(struct seq_file *s, void *it); -#endif - -void msg_init_ns(struct ipc_namespace *ns) -{ - ns->msg_ctlmax = MSGMAX; - ns->msg_ctlmnb = MSGMNB; - ns->msg_ctlmni = MSGMNI; - atomic_set(&ns->msg_bytes, 0); - atomic_set(&ns->msg_hdrs, 0); - ipc_init_ids(&ns->ids[IPC_MSG_IDS]); -} - -#ifdef CONFIG_IPC_NS -void msg_exit_ns(struct ipc_namespace *ns) -{ - free_ipcs(ns, &msg_ids(ns), freeque); -} -#endif - -void __init msg_init(void) -{ - msg_init_ns(&init_ipc_ns); - ipc_init_proc_interface("sysvipc/msg", - " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", - IPC_MSG_IDS, sysvipc_msg_proc_show); -} - -/* - * This routine is called in the paths where the rw_mutex is held to protect - * access to the idr tree. - */ -static inline struct msg_queue *msg_lock_check_down(struct ipc_namespace *ns, - int id) +static inline struct msg_queue *msq_obtain_object(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_lock_check_down(&msg_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object(&msg_ids(ns), id); if (IS_ERR(ipcp)) - return (struct msg_queue *)ipcp; + return ERR_CAST(ipcp); return container_of(ipcp, struct msg_queue, q_perm); } -/* - * msg_lock_(check_) routines are called in the paths where the rw_mutex - * is not held. 
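The comment on r_msg above underpins the lockless receive spelled out later in do_msgrcv(): the waker publishes the final pointer only after issuing the wakeup, and the sleeper spins on a sentinel value. A minimal userspace analogue of that handshake, using C11 atomics where the kernel relies on the volatile pointer plus smp_mb(); every name and value here is illustrative, not kernel API.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define STILL_WAITING 0L                /* plays the role of a NULL r_msg */
#define PAYLOAD       42L

static _Atomic long r_msg = STILL_WAITING;

static void *receiver(void *arg)
{
        long msg;
        (void)arg;
        /* lockless receive, reduced to its core: spin until the
         * sender publishes a final value */
        while ((msg = atomic_load_explicit(&r_msg,
                        memory_order_acquire)) == STILL_WAITING)
                ;
        printf("woken with %ld\n", msg);
        return NULL;
}

int main(void)
{
        pthread_t t;

        pthread_create(&t, NULL, receiver, NULL);
        /* sender side: the release store stands in for the kernel's
         * wake_up_process() followed by smp_mb() and the r_msg store */
        atomic_store_explicit(&r_msg, PAYLOAD, memory_order_release);
        pthread_join(&t, NULL);
        return 0;
}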
- */ -static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id) +static inline struct msg_queue *msq_obtain_object_check(struct ipc_namespace *ns, + int id) { - struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&msg_ids(ns), id); if (IS_ERR(ipcp)) - return (struct msg_queue *)ipcp; + return ERR_CAST(ipcp); return container_of(ipcp, struct msg_queue, q_perm); } -static inline struct msg_queue *msg_lock_check(struct ipc_namespace *ns, - int id) +static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) { - struct kern_ipc_perm *ipcp = ipc_lock_check(&msg_ids(ns), id); - - if (IS_ERR(ipcp)) - return (struct msg_queue *)ipcp; - - return container_of(ipcp, struct msg_queue, q_perm); + ipc_rmid(&msg_ids(ns), &s->q_perm); } -static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) +static void msg_rcu_free(struct rcu_head *head) { - ipc_rmid(&msg_ids(ns), &s->q_perm); + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + struct msg_queue *msq = ipc_rcu_to_struct(p); + + security_msg_queue_free(msq); + ipc_rcu_free(head); } /** @@ -153,7 +114,7 @@ static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s) * @ns: namespace * @params: ptr to the structure that contains the key and msgflg * - * Called with msg_ids.rw_mutex held (writer) + * Called with msg_ids.rwsem held (writer) */ static int newque(struct ipc_namespace *ns, struct ipc_params *params) { @@ -172,21 +133,17 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) msq->q_perm.security = NULL; retval = security_msg_queue_alloc(msq); if (retval) { - ipc_rcu_putref(msq); + ipc_rcu_putref(msq, ipc_rcu_free); return retval; } - /* - * ipc_addid() locks msq - */ + /* ipc_addid() locks msq upon success. 
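msg_rcu_free() above has the standard shape of an RCU-deferred destructor: the callback is handed only the embedded rcu_head and must recover the enclosing object before tearing it down. A standalone sketch of that recovery step; the structs are stand-ins, and the callback is invoked directly instead of through call_rcu().

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

struct rcu_head_like { void (*func)(struct rcu_head_like *); };

struct msg_queue_like {
        int id;
        struct rcu_head_like rcu;       /* embedded callback handle */
};

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void msg_free_cb(struct rcu_head_like *head)
{
        /* recover the outer object from the member pointer */
        struct msg_queue_like *msq =
                container_of(head, struct msg_queue_like, rcu);

        printf("freeing queue %d\n", msq->id);
        free(msq);
}

int main(void)
{
        struct msg_queue_like *msq = malloc(sizeof(*msq));

        msq->id = 7;
        msq->rcu.func = msg_free_cb;
        msq->rcu.func(&msq->rcu);       /* a kernel would call_rcu() here */
        return 0;
}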
*/ id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni); if (id < 0) { - security_msg_queue_free(msq); - ipc_rcu_putref(msq); + ipc_rcu_putref(msq, msg_rcu_free); return id; } - msq->q_perm.id = msg_buildid(id, msq->q_perm.seq); msq->q_stime = msq->q_rtime = 0; msq->q_ctime = get_seconds(); msq->q_cbytes = msq->q_qnum = 0; @@ -196,7 +153,8 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) INIT_LIST_HEAD(&msq->q_receivers); INIT_LIST_HEAD(&msq->q_senders); - msg_unlock(msq); + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); return msq->q_perm.id; } @@ -204,7 +162,7 @@ static int newque(struct ipc_namespace *ns, struct ipc_params *params) static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss) { mss->tsk = current; - current->state = TASK_INTERRUPTIBLE; + __set_current_state(TASK_INTERRUPTIBLE); list_add_tail(&mss->list, &msq->q_senders); } @@ -216,14 +174,9 @@ static inline void ss_del(struct msg_sender *mss) static void ss_wakeup(struct list_head *h, int kill) { - struct list_head *tmp; + struct msg_sender *mss, *t; - tmp = h->next; - while (tmp != h) { - struct msg_sender *mss; - - mss = list_entry(tmp, struct msg_sender, list); - tmp = tmp->next; + list_for_each_entry_safe(mss, t, h, list) { if (kill) mss->list.next = NULL; wake_up_process(mss->tsk); @@ -232,16 +185,17 @@ static void ss_wakeup(struct list_head *h, int kill) static void expunge_all(struct msg_queue *msq, int res) { - struct list_head *tmp; - - tmp = msq->q_receivers.next; - while (tmp != &msq->q_receivers) { - struct msg_receiver *msr; + struct msg_receiver *msr, *t; - msr = list_entry(tmp, struct msg_receiver, r_list); - tmp = tmp->next; - msr->r_msg = NULL; + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { + msr->r_msg = NULL; /* initialize expunge ordering */ wake_up_process(msr->r_tsk); + /* + * Ensure that the wakeup is visible before setting r_msg as + * the receiving end depends on it: either spinning on a nil, + * or dealing with -EAGAIN cases. See lockless receive part 1 + * and 2 in do_msgrcv(). + */ smp_mb(); msr->r_msg = ERR_PTR(res); } @@ -252,34 +206,30 @@ static void expunge_all(struct msg_queue *msq, int res) * removes the message queue from message queue ID IDR, and cleans up all the * messages associated with this queue. * - * msg_ids.rw_mutex (writer) and the spinlock for this message queue are held - * before freeque() is called. msg_ids.rw_mutex remains locked on exit. + * msg_ids.rwsem (writer) and the spinlock for this message queue are held + * before freeque() is called. msg_ids.rwsem remains locked on exit. */ static void freeque(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { - struct list_head *tmp; + struct msg_msg *msg, *t; struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm); expunge_all(msq, -EIDRM); ss_wakeup(&msq->q_senders, 1); msg_rmid(ns, msq); - msg_unlock(msq); - - tmp = msq->q_messages.next; - while (tmp != &msq->q_messages) { - struct msg_msg *msg = list_entry(tmp, struct msg_msg, m_list); + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); - tmp = tmp->next; + list_for_each_entry_safe(msg, t, &msq->q_messages, m_list) { atomic_dec(&ns->msg_hdrs); free_msg(msg); } atomic_sub(msq->q_cbytes, &ns->msg_bytes); - security_msg_queue_free(msq); - ipc_rcu_putref(msq); + ipc_rcu_putref(msq, msg_rcu_free); } /* - * Called with msg_ids.rw_mutex and ipcp locked. + * Called with msg_ids.rwsem and ipcp locked. 
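ss_wakeup() and expunge_all() above trade open-coded next-pointer walks for list_for_each_entry_safe(), which is required whenever the loop body may unlink or free the entry it is visiting. The same discipline in plain C, with a throwaway singly linked list standing in for the kernel's list.h.

#include <stdio.h>
#include <stdlib.h>

struct node {
        struct node *next;
        int val;
};

int main(void)
{
        struct node *head = NULL, *n, *next;
        int i;

        for (i = 0; i < 3; i++) {
                n = malloc(sizeof(*n));
                n->val = i;
                n->next = head;
                head = n;
        }
        /* the "_safe" shape: sample the next pointer before the body
         * runs, exactly what the second cursor in the macro does */
        for (n = head; n; n = next) {
                next = n->next;
                printf("waking %d\n", n->val);
                free(n);                /* safe: next was saved first */
        }
        return 0;
}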
*/ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) { @@ -288,18 +238,17 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg) return security_msg_queue_associate(msq, msgflg); } -asmlinkage long sys_msgget(key_t key, int msgflg) +SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg) { struct ipc_namespace *ns; - struct ipc_ops msg_ops; + static const struct ipc_ops msg_ops = { + .getnew = newque, + .associate = msg_security, + }; struct ipc_params msg_params; ns = current->nsproxy->ipc_ns; - msg_ops.getnew = newque; - msg_ops.associate = msg_security; - msg_ops.more_checks = NULL; - msg_params.key = key; msg_params.flg = msgflg; @@ -309,7 +258,7 @@ asmlinkage long sys_msgget(key_t key, int msgflg) static inline unsigned long copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: @@ -351,31 +300,14 @@ copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version) } } -struct msq_setbuf { - unsigned long qbytes; - uid_t uid; - gid_t gid; - mode_t mode; -}; - static inline unsigned long -copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) +copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: - { - struct msqid64_ds tbuf; - - if (copy_from_user(&tbuf, buf, sizeof(tbuf))) + if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; - - out->qbytes = tbuf.msg_qbytes; - out->uid = tbuf.msg_perm.uid; - out->gid = tbuf.msg_perm.gid; - out->mode = tbuf.msg_perm.mode; - return 0; - } case IPC_OLD: { struct msqid_ds tbuf_old; @@ -383,14 +315,14 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->uid = tbuf_old.msg_perm.uid; - out->gid = tbuf_old.msg_perm.gid; - out->mode = tbuf_old.msg_perm.mode; + out->msg_perm.uid = tbuf_old.msg_perm.uid; + out->msg_perm.gid = tbuf_old.msg_perm.gid; + out->msg_perm.mode = tbuf_old.msg_perm.mode; if (tbuf_old.msg_qbytes == 0) - out->qbytes = tbuf_old.msg_lqbytes; + out->msg_qbytes = tbuf_old.msg_lqbytes; else - out->qbytes = tbuf_old.msg_qbytes; + out->msg_qbytes = tbuf_old.msg_qbytes; return 0; } @@ -399,19 +331,89 @@ copy_msqid_from_user(struct msq_setbuf *out, void __user *buf, int version) } } -asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) +/* + * This function handles some msgctl commands which require the rwsem + * to be held in write mode. + * NOTE: no locks must be held, the rwsem is taken inside this function. 
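msgget() above now points at a single static const table of callbacks instead of filling a struct on the stack at every call. The idiom with stand-in types and functions: designated initializers leave unnamed members NULL, and the object lands in read-only data once, at build time.

#include <stdio.h>

struct ipc_ops_like {
        int (*getnew)(int key);
        int (*associate)(int key, int flg);
        int (*more_checks)(int key);    /* stays NULL, as in the hunk */
};

static int newque_stub(int key)                 { return key + 1; }
static int msg_security_stub(int key, int flg)  { return key & flg; }

static const struct ipc_ops_like msg_ops = {
        .getnew    = newque_stub,
        .associate = msg_security_stub,
};

int main(void)
{
        printf("%d\n", msg_ops.getnew(41));
        return 0;
}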
+ */ +static int msgctl_down(struct ipc_namespace *ns, int msqid, int cmd, + struct msqid_ds __user *buf, int version) { struct kern_ipc_perm *ipcp; - struct msq_setbuf uninitialized_var(setbuf); + struct msqid64_ds uninitialized_var(msqid64); struct msg_queue *msq; - int err, version; - struct ipc_namespace *ns; + int err; - if (msqid < 0 || cmd < 0) - return -EINVAL; + if (cmd == IPC_SET) { + if (copy_msqid_from_user(&msqid64, buf, version)) + return -EFAULT; + } - version = ipc_parse_version(&cmd); - ns = current->nsproxy->ipc_ns; + down_write(&msg_ids(ns).rwsem); + rcu_read_lock(); + + ipcp = ipcctl_pre_down_nolock(ns, &msg_ids(ns), msqid, cmd, + &msqid64.msg_perm, msqid64.msg_qbytes); + if (IS_ERR(ipcp)) { + err = PTR_ERR(ipcp); + goto out_unlock1; + } + + msq = container_of(ipcp, struct msg_queue, q_perm); + + err = security_msg_queue_msgctl(msq, cmd); + if (err) + goto out_unlock1; + + switch (cmd) { + case IPC_RMID: + ipc_lock_object(&msq->q_perm); + /* freeque unlocks the ipc object and rcu */ + freeque(ns, ipcp); + goto out_up; + case IPC_SET: + if (msqid64.msg_qbytes > ns->msg_ctlmnb && + !capable(CAP_SYS_RESOURCE)) { + err = -EPERM; + goto out_unlock1; + } + + ipc_lock_object(&msq->q_perm); + err = ipc_update_perm(&msqid64.msg_perm, ipcp); + if (err) + goto out_unlock0; + + msq->q_qbytes = msqid64.msg_qbytes; + + msq->q_ctime = get_seconds(); + /* sleeping receivers might be excluded by + * stricter permissions. + */ + expunge_all(msq, -EAGAIN); + /* sleeping senders might be able to send + * due to a larger queue size. + */ + ss_wakeup(&msq->q_senders, 0); + break; + default: + err = -EINVAL; + goto out_unlock1; + } + +out_unlock0: + ipc_unlock_object(&msq->q_perm); +out_unlock1: + rcu_read_unlock(); +out_up: + up_write(&msg_ids(ns).rwsem); + return err; +} + +static int msgctl_nolock(struct ipc_namespace *ns, int msqid, + int cmd, int version, void __user *buf) +{ + int err; + struct msg_queue *msq; switch (cmd) { case IPC_INFO: @@ -422,6 +424,7 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) if (!buf) return -EFAULT; + /* * We must not return kernel stack data. * due to padding, it's not enough @@ -437,7 +440,7 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) msginfo.msgmnb = ns->msg_ctlmnb; msginfo.msgssz = MSGSSZ; msginfo.msgseg = MSGSEG; - down_read(&msg_ids(ns).rw_mutex); + down_read(&msg_ids(ns).rwsem); if (cmd == MSG_INFO) { msginfo.msgpool = msg_ids(ns).in_use; msginfo.msgmap = atomic_read(&ns->msg_hdrs); @@ -448,12 +451,13 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) msginfo.msgtql = MSGTQL; } max_id = ipc_get_maxid(&msg_ids(ns)); - up_read(&msg_ids(ns).rw_mutex); + up_read(&msg_ids(ns).rwsem); if (copy_to_user(buf, &msginfo, sizeof(struct msginfo))) return -EFAULT; return (max_id < 0) ? 
0 : max_id; } - case MSG_STAT: /* msqid is an index rather than a msg queue id */ + + case MSG_STAT: case IPC_STAT: { struct msqid64_ds tbuf; @@ -462,27 +466,33 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) if (!buf) return -EFAULT; + memset(&tbuf, 0, sizeof(tbuf)); + + rcu_read_lock(); if (cmd == MSG_STAT) { - msq = msg_lock(ns, msqid); - if (IS_ERR(msq)) - return PTR_ERR(msq); + msq = msq_obtain_object(ns, msqid); + if (IS_ERR(msq)) { + err = PTR_ERR(msq); + goto out_unlock; + } success_return = msq->q_perm.id; } else { - msq = msg_lock_check(ns, msqid); - if (IS_ERR(msq)) - return PTR_ERR(msq); + msq = msq_obtain_object_check(ns, msqid); + if (IS_ERR(msq)) { + err = PTR_ERR(msq); + goto out_unlock; + } success_return = 0; } + err = -EACCES; - if (ipcperms(&msq->q_perm, S_IRUGO)) + if (ipcperms(ns, &msq->q_perm, S_IRUGO)) goto out_unlock; err = security_msg_queue_msgctl(msq, cmd); if (err) goto out_unlock; - memset(&tbuf, 0, sizeof(tbuf)); - kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm); tbuf.msg_stime = msq->q_stime; tbuf.msg_rtime = msq->q_rtime; @@ -492,140 +502,97 @@ asmlinkage long sys_msgctl(int msqid, int cmd, struct msqid_ds __user *buf) tbuf.msg_qbytes = msq->q_qbytes; tbuf.msg_lspid = msq->q_lspid; tbuf.msg_lrpid = msq->q_lrpid; - msg_unlock(msq); + rcu_read_unlock(); + if (copy_msqid_to_user(buf, &tbuf, version)) return -EFAULT; return success_return; } - case IPC_SET: - if (!buf) - return -EFAULT; - if (copy_msqid_from_user(&setbuf, buf, version)) - return -EFAULT; - break; - case IPC_RMID: - break; - default: - return -EINVAL; - } - down_write(&msg_ids(ns).rw_mutex); - msq = msg_lock_check_down(ns, msqid); - if (IS_ERR(msq)) { - err = PTR_ERR(msq); - goto out_up; + default: + return -EINVAL; } - ipcp = &msq->q_perm; + return err; +out_unlock: + rcu_read_unlock(); + return err; +} - err = audit_ipc_obj(ipcp); - if (err) - goto out_unlock_up; - if (cmd == IPC_SET) { - err = audit_ipc_set_perm(setbuf.qbytes, setbuf.uid, setbuf.gid, - setbuf.mode); - if (err) - goto out_unlock_up; - } +SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf) +{ + int version; + struct ipc_namespace *ns; - err = -EPERM; - if (current->euid != ipcp->cuid && - current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) - /* We _could_ check for CAP_CHOWN above, but we don't */ - goto out_unlock_up; + if (msqid < 0 || cmd < 0) + return -EINVAL; - err = security_msg_queue_msgctl(msq, cmd); - if (err) - goto out_unlock_up; + version = ipc_parse_version(&cmd); + ns = current->nsproxy->ipc_ns; switch (cmd) { + case IPC_INFO: + case MSG_INFO: + case MSG_STAT: /* msqid is an index rather than a msg queue id */ + case IPC_STAT: + return msgctl_nolock(ns, msqid, cmd, version, buf); case IPC_SET: - { - err = -EPERM; - if (setbuf.qbytes > ns->msg_ctlmnb && !capable(CAP_SYS_RESOURCE)) - goto out_unlock_up; - - msq->q_qbytes = setbuf.qbytes; - - ipcp->uid = setbuf.uid; - ipcp->gid = setbuf.gid; - ipcp->mode = (ipcp->mode & ~S_IRWXUGO) | - (S_IRWXUGO & setbuf.mode); - msq->q_ctime = get_seconds(); - /* sleeping receivers might be excluded by - * stricter permissions. - */ - expunge_all(msq, -EAGAIN); - /* sleeping senders might be able to send - * due to a larger queue size. 
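MSG_STAT taking an index while IPC_STAT takes a full id, and the split between msq_obtain_object() and msq_obtain_object_check() above, both follow from how SysV ids are encoded: a slot index plus a reuse sequence, so a stale id whose slot was recycled fails the check. Illustrative arithmetic below; 32768 mirrors the kernel's SEQ_MULTIPLIER, but treat the constants as assumptions.

#include <stdio.h>

#define SEQ_MULTIPLIER 32768

static int build_id(int index, int seq) { return seq * SEQ_MULTIPLIER + index; }
static int id_to_index(int id)          { return id % SEQ_MULTIPLIER; }
static int id_to_seq(int id)            { return id / SEQ_MULTIPLIER; }

int main(void)
{
        int id = build_id(3, 2);        /* slot 3, second reuse */

        printf("id=%d index=%d seq=%d\n", id, id_to_index(id), id_to_seq(id));
        return 0;
}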
- */ - ss_wakeup(&msq->q_senders, 0); - msg_unlock(msq); - break; - } case IPC_RMID: - freeque(ns, &msq->q_perm); - break; + return msgctl_down(ns, msqid, cmd, buf, version); + default: + return -EINVAL; } - err = 0; -out_up: - up_write(&msg_ids(ns).rw_mutex); - return err; -out_unlock_up: - msg_unlock(msq); - goto out_up; -out_unlock: - msg_unlock(msq); - return err; } static int testmsg(struct msg_msg *msg, long type, int mode) { - switch(mode) - { - case SEARCH_ANY: + switch (mode) { + case SEARCH_ANY: + case SEARCH_NUMBER: + return 1; + case SEARCH_LESSEQUAL: + if (msg->m_type <= type) return 1; - case SEARCH_LESSEQUAL: - if (msg->m_type <=type) - return 1; - break; - case SEARCH_EQUAL: - if (msg->m_type == type) - return 1; - break; - case SEARCH_NOTEQUAL: - if (msg->m_type != type) - return 1; - break; + break; + case SEARCH_EQUAL: + if (msg->m_type == type) + return 1; + break; + case SEARCH_NOTEQUAL: + if (msg->m_type != type) + return 1; + break; } return 0; } static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) { - struct list_head *tmp; + struct msg_receiver *msr, *t; - tmp = msq->q_receivers.next; - while (tmp != &msq->q_receivers) { - struct msg_receiver *msr; - - msr = list_entry(tmp, struct msg_receiver, r_list); - tmp = tmp->next; + list_for_each_entry_safe(msr, t, &msq->q_receivers, r_list) { if (testmsg(msg, msr->r_msgtype, msr->r_mode) && !security_msg_queue_msgrcv(msq, msg, msr->r_tsk, msr->r_msgtype, msr->r_mode)) { list_del(&msr->r_list); if (msr->r_maxsize < msg->m_ts) { + /* initialize pipelined send ordering */ msr->r_msg = NULL; wake_up_process(msr->r_tsk); - smp_mb(); + smp_mb(); /* see barrier comment below */ msr->r_msg = ERR_PTR(-E2BIG); } else { msr->r_msg = NULL; msq->q_lrpid = task_pid_vnr(msr->r_tsk); msq->q_rtime = get_seconds(); wake_up_process(msr->r_tsk); + /* + * Ensure that the wakeup is visible before + * setting r_msg, as the receiving end depends + * on it. See lockless receive part 1 and 2 in + * do_msgrcv(). + */ smp_mb(); msr->r_msg = msg; @@ -633,6 +600,7 @@ static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg) } } } + return 0; } @@ -658,22 +626,31 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, msg->m_type = mtype; msg->m_ts = msgsz; - msq = msg_lock_check(ns, msqid); + rcu_read_lock(); + msq = msq_obtain_object_check(ns, msqid); if (IS_ERR(msq)) { err = PTR_ERR(msq); - goto out_free; + goto out_unlock1; } + ipc_lock_object(&msq->q_perm); + for (;;) { struct msg_sender s; err = -EACCES; - if (ipcperms(&msq->q_perm, S_IWUGO)) - goto out_unlock_free; + if (ipcperms(ns, &msq->q_perm, S_IWUGO)) + goto out_unlock0; + + /* raced with RMID? 
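The "raced with RMID?" test above is the price of sleeping without the object lock: after relocking, the queue may already have been torn down by msgctl(IPC_RMID). A userspace analogue of the recheck loop, with a plain flag standing in for ipc_valid_object() and sched_yield() for the schedule() sleep point.

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>
#include <stdio.h>

#define EIDRM_LIKE (-43)

struct queue_like {
        pthread_mutex_t lock;
        bool full;
        bool deleted;                   /* set by the RMID side */
        int msgs;
};

static int send_one(struct queue_like *q)
{
        pthread_mutex_lock(&q->lock);
        while (q->full) {
                pthread_mutex_unlock(&q->lock);
                sched_yield();          /* sleep while the queue is full */
                pthread_mutex_lock(&q->lock);
                if (q->deleted) {       /* raced with RMID? */
                        pthread_mutex_unlock(&q->lock);
                        return EIDRM_LIKE;
                }
        }
        q->msgs++;
        pthread_mutex_unlock(&q->lock);
        return 0;
}

int main(void)
{
        struct queue_like q = { PTHREAD_MUTEX_INITIALIZER, false, false, 0 };

        printf("send: %d\n", send_one(&q));
        return 0;
}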
*/ + if (!ipc_valid_object(&msq->q_perm)) { + err = -EIDRM; + goto out_unlock0; + } err = security_msg_queue_msgsnd(msq, msg, msgflg); if (err) - goto out_unlock_free; + goto out_unlock0; if (msgsz + msq->q_cbytes <= msq->q_qbytes && 1 + msq->q_qnum <= msq->q_qbytes) { @@ -683,32 +660,44 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, /* queue full, wait: */ if (msgflg & IPC_NOWAIT) { err = -EAGAIN; - goto out_unlock_free; + goto out_unlock0; } + + /* enqueue the sender and prepare to block */ ss_add(msq, &s); - ipc_rcu_getref(msq); - msg_unlock(msq); + + if (!ipc_rcu_getref(msq)) { + err = -EIDRM; + goto out_unlock0; + } + + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); schedule(); - ipc_lock_by_ptr(&msq->q_perm); - ipc_rcu_putref(msq); - if (msq->q_perm.deleted) { + rcu_read_lock(); + ipc_lock_object(&msq->q_perm); + + ipc_rcu_putref(msq, ipc_rcu_free); + /* raced with RMID? */ + if (!ipc_valid_object(&msq->q_perm)) { err = -EIDRM; - goto out_unlock_free; + goto out_unlock0; } + ss_del(&s); if (signal_pending(current)) { err = -ERESTARTNOHAND; - goto out_unlock_free; + goto out_unlock0; } - } + } msq->q_lspid = task_tgid_vnr(current); msq->q_stime = get_seconds(); if (!pipelined_send(msq, msg)) { - /* noone is waiting for this message, enqueue it */ + /* no one is waiting for this message, enqueue it */ list_add_tail(&msg->m_list, &msq->q_messages); msq->q_cbytes += msgsz; msq->q_qnum++; @@ -719,16 +708,17 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext, err = 0; msg = NULL; -out_unlock_free: - msg_unlock(msq); -out_free: +out_unlock0: + ipc_unlock_object(&msq->q_perm); +out_unlock1: + rcu_read_unlock(); if (msg != NULL) free_msg(msg); return err; } -asmlinkage long -sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) +SYSCALL_DEFINE4(msgsnd, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, + int, msgflg) { long mtype; @@ -739,6 +729,8 @@ sys_msgsnd(int msqid, struct msgbuf __user *msgp, size_t msgsz, int msgflg) static inline int convert_mode(long *msgtyp, int msgflg) { + if (msgflg & MSG_COPY) + return SEARCH_NUMBER; /* * find message of correct type. * msgtyp = 0 => get first. @@ -756,62 +748,142 @@ static inline int convert_mode(long *msgtyp, int msgflg) return SEARCH_EQUAL; } -long do_msgrcv(int msqid, long *pmtype, void __user *mtext, - size_t msgsz, long msgtyp, int msgflg) +static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz) +{ + struct msgbuf __user *msgp = dest; + size_t msgsz; + + if (put_user(msg->m_type, &msgp->mtype)) + return -EFAULT; + + msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz; + if (store_msg(msgp->mtext, msg, msgsz)) + return -EFAULT; + return msgsz; +} + +#ifdef CONFIG_CHECKPOINT_RESTORE +/* + * This function creates new kernel message structure, large enough to store + * bufsz message bytes. + */ +static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) +{ + struct msg_msg *copy; + + /* + * Create dummy message to copy real message to. 
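prepare_copy() and the SEARCH_NUMBER mode serve the checkpoint/restore MSG_COPY flag, which peeks at the message in a given queue position without dequeueing it. A hedged userspace example: the fallback #define is assumed to mirror the UAPI value, and the call fails with ENOSYS on kernels built without CONFIG_CHECKPOINT_RESTORE.

#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/msg.h>
#include <sys/types.h>

#ifndef MSG_COPY
#define MSG_COPY 040000                 /* assumed UAPI value */
#endif

struct mbuf { long mtype; char mtext[64]; };

int main(void)
{
        int id = msgget(IPC_PRIVATE, IPC_CREAT | 0600);
        struct mbuf m = { .mtype = 5 }, peek;
        ssize_t n;

        strcpy(m.mtext, "hello");
        msgsnd(id, &m, sizeof(m.mtext), 0);

        /* msgtyp doubles as the 0-based position; IPC_NOWAIT is
         * mandatory, as enforced in do_msgrcv() */
        n = msgrcv(id, &peek, sizeof(peek.mtext), 0,
                   MSG_COPY | IPC_NOWAIT);
        if (n < 0)
                perror("msgrcv(MSG_COPY)");
        else
                printf("copied: type=%ld text=%s\n", peek.mtype, peek.mtext);

        msgctl(id, IPC_RMID, NULL);
        return 0;
}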
+ */ + copy = load_msg(buf, bufsz); + if (!IS_ERR(copy)) + copy->m_ts = bufsz; + return copy; +} + +static inline void free_copy(struct msg_msg *copy) +{ + if (copy) + free_msg(copy); +} +#else +static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz) +{ + return ERR_PTR(-ENOSYS); +} + +static inline void free_copy(struct msg_msg *copy) +{ +} +#endif + +static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode) +{ + struct msg_msg *msg, *found = NULL; + long count = 0; + + list_for_each_entry(msg, &msq->q_messages, m_list) { + if (testmsg(msg, *msgtyp, mode) && + !security_msg_queue_msgrcv(msq, msg, current, + *msgtyp, mode)) { + if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) { + *msgtyp = msg->m_type - 1; + found = msg; + } else if (mode == SEARCH_NUMBER) { + if (*msgtyp == count) + return msg; + } else + return msg; + count++; + } + } + + return found ?: ERR_PTR(-EAGAIN); +} + +long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp, int msgflg, + long (*msg_handler)(void __user *, struct msg_msg *, size_t)) { - struct msg_queue *msq; - struct msg_msg *msg; int mode; + struct msg_queue *msq; struct ipc_namespace *ns; + struct msg_msg *msg, *copy = NULL; - if (msqid < 0 || (long) msgsz < 0) + ns = current->nsproxy->ipc_ns; + + if (msqid < 0 || (long) bufsz < 0) return -EINVAL; + + if (msgflg & MSG_COPY) { + if ((msgflg & MSG_EXCEPT) || !(msgflg & IPC_NOWAIT)) + return -EINVAL; + copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax)); + if (IS_ERR(copy)) + return PTR_ERR(copy); + } mode = convert_mode(&msgtyp, msgflg); - ns = current->nsproxy->ipc_ns; - msq = msg_lock_check(ns, msqid); - if (IS_ERR(msq)) + rcu_read_lock(); + msq = msq_obtain_object_check(ns, msqid); + if (IS_ERR(msq)) { + rcu_read_unlock(); + free_copy(copy); return PTR_ERR(msq); + } for (;;) { struct msg_receiver msr_d; - struct list_head *tmp; msg = ERR_PTR(-EACCES); - if (ipcperms(&msq->q_perm, S_IRUGO)) - goto out_unlock; + if (ipcperms(ns, &msq->q_perm, S_IRUGO)) + goto out_unlock1; - msg = ERR_PTR(-EAGAIN); - tmp = msq->q_messages.next; - while (tmp != &msq->q_messages) { - struct msg_msg *walk_msg; - - walk_msg = list_entry(tmp, struct msg_msg, m_list); - if (testmsg(walk_msg, msgtyp, mode) && - !security_msg_queue_msgrcv(msq, walk_msg, current, - msgtyp, mode)) { - - msg = walk_msg; - if (mode == SEARCH_LESSEQUAL && - walk_msg->m_type != 1) { - msg = walk_msg; - msgtyp = walk_msg->m_type - 1; - } else { - msg = walk_msg; - break; - } - } - tmp = tmp->next; + ipc_lock_object(&msq->q_perm); + + /* raced with RMID? */ + if (!ipc_valid_object(&msq->q_perm)) { + msg = ERR_PTR(-EIDRM); + goto out_unlock0; } + + msg = find_msg(msq, &msgtyp, mode); if (!IS_ERR(msg)) { /* * Found a suitable message. * Unlink it from the queue. */ - if ((msgsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { + if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) { msg = ERR_PTR(-E2BIG); - goto out_unlock; + goto out_unlock0; } + /* + * If we are copying, then do not unlink message and do + * not update queue parameters. + */ + if (msgflg & MSG_COPY) { + msg = copy_msg(msg, copy); + goto out_unlock0; + } + list_del(&msg->m_list); msq->q_qnum--; msq->q_rtime = get_seconds(); @@ -820,14 +892,16 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext, atomic_sub(msg->m_ts, &ns->msg_bytes); atomic_dec(&ns->msg_hdrs); ss_wakeup(&msq->q_senders, 0); - msg_unlock(msq); - break; + + goto out_unlock0; } + /* No message waiting. 
Wait for a message */ if (msgflg & IPC_NOWAIT) { msg = ERR_PTR(-ENOMSG); - goto out_unlock; + goto out_unlock0; } + list_add_tail(&msr_d.r_list, &msq->q_receivers); msr_d.r_tsk = current; msr_d.r_msgtype = msgtyp; @@ -835,22 +909,23 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext, if (msgflg & MSG_NOERROR) msr_d.r_maxsize = INT_MAX; else - msr_d.r_maxsize = msgsz; + msr_d.r_maxsize = bufsz; msr_d.r_msg = ERR_PTR(-EAGAIN); - current->state = TASK_INTERRUPTIBLE; - msg_unlock(msq); + __set_current_state(TASK_INTERRUPTIBLE); + ipc_unlock_object(&msq->q_perm); + rcu_read_unlock(); schedule(); /* Lockless receive, part 1: * Disable preemption. We don't hold a reference to the queue * and getting a reference would defeat the idea of a lockless * operation, thus the code relies on rcu to guarantee the - * existance of msq: + * existence of msq: * Prior to destruction, expunge_all(-EIRDM) changes r_msg. * Thus if r_msg is -EAGAIN, then the queue not yet destroyed. * rcu_read_lock() prevents preemption between reading r_msg - * and the spin_lock() inside ipc_lock_by_ptr(). + * and acquiring the q_perm.lock in ipc_lock_object(). */ rcu_read_lock(); @@ -859,7 +934,7 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext, * wake_up_process(). There is a race with exit(), see * ipc/mqueue.c for the details. */ - msg = (struct msg_msg*)msr_d.r_msg; + msg = (struct msg_msg *)msr_d.r_msg; while (msg == NULL) { cpu_relax(); msg = (struct msg_msg *)msr_d.r_msg; @@ -869,63 +944,106 @@ long do_msgrcv(int msqid, long *pmtype, void __user *mtext, * If there is a message or an error then accept it without * locking. */ - if (msg != ERR_PTR(-EAGAIN)) { - rcu_read_unlock(); - break; - } + if (msg != ERR_PTR(-EAGAIN)) + goto out_unlock1; /* Lockless receive, part 3: * Acquire the queue spinlock. */ - ipc_lock_by_ptr(&msq->q_perm); - rcu_read_unlock(); + ipc_lock_object(&msq->q_perm); /* Lockless receive, part 4: * Repeat test after acquiring the spinlock. */ - msg = (struct msg_msg*)msr_d.r_msg; + msg = (struct msg_msg *)msr_d.r_msg; if (msg != ERR_PTR(-EAGAIN)) - goto out_unlock; + goto out_unlock0; list_del(&msr_d.r_list); if (signal_pending(current)) { msg = ERR_PTR(-ERESTARTNOHAND); -out_unlock: - msg_unlock(msq); - break; + goto out_unlock0; } + + ipc_unlock_object(&msq->q_perm); } - if (IS_ERR(msg)) - return PTR_ERR(msg); - msgsz = (msgsz > msg->m_ts) ? msg->m_ts : msgsz; - *pmtype = msg->m_type; - if (store_msg(mtext, msg, msgsz)) - msgsz = -EFAULT; +out_unlock0: + ipc_unlock_object(&msq->q_perm); +out_unlock1: + rcu_read_unlock(); + if (IS_ERR(msg)) { + free_copy(copy); + return PTR_ERR(msg); + } + bufsz = msg_handler(buf, msg, bufsz); free_msg(msg); - return msgsz; + return bufsz; } -asmlinkage long sys_msgrcv(int msqid, struct msgbuf __user *msgp, size_t msgsz, - long msgtyp, int msgflg) +SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz, + long, msgtyp, int, msgflg) { - long err, mtype; + return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill); +} - err = do_msgrcv(msqid, &mtype, msgp->mtext, msgsz, msgtyp, msgflg); - if (err < 0) - goto out; +/* + * Scale msgmni with the available lowmem size: the memory dedicated to msg + * queues should occupy at most 1/MSG_MEM_SCALE of lowmem. + * Also take into account the number of nsproxies created so far. + * This should be done staying within the (MSGMNI , IPCMNI/nr_ipc_ns) range. 
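The scaling rule above works out as follows. The constants are the historical defaults (MSG_MEM_SCALE 32, MSGMNB 16384, MSGMNI 16, IPCMNI 32768), and the helper simplifies by working in bytes rather than pages; with 1 GiB of lowmem and a single namespace it yields 2048 queue ids.

#include <stdio.h>

#define MSG_MEM_SCALE 32
#define MSGMNB 16384
#define MSGMNI 16
#define IPCMNI 32768

static int scaled_msgmni(unsigned long lowmem_bytes, int nr_ipc_ns)
{
        unsigned long allowed = lowmem_bytes / MSG_MEM_SCALE / MSGMNB;

        allowed /= nr_ipc_ns;
        if (allowed < MSGMNI)
                return MSGMNI;          /* never below the old default */
        if (allowed > IPCMNI / nr_ipc_ns)
                return IPCMNI / nr_ipc_ns;
        return (int)allowed;
}

int main(void)
{
        printf("%d\n", scaled_msgmni(1UL << 30, 1));    /* 2048 */
        return 0;
}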
+ */ +void recompute_msgmni(struct ipc_namespace *ns) +{ + struct sysinfo i; + unsigned long allowed; + int nb_ns; + + si_meminfo(&i); + allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit) + / MSGMNB; + nb_ns = atomic_read(&nr_ipc_ns); + allowed /= nb_ns; + + if (allowed < MSGMNI) { + ns->msg_ctlmni = MSGMNI; + return; + } - if (put_user(mtype, &msgp->mtype)) - err = -EFAULT; -out: - return err; + if (allowed > IPCMNI / nb_ns) { + ns->msg_ctlmni = IPCMNI / nb_ns; + return; + } + + ns->msg_ctlmni = allowed; } +void msg_init_ns(struct ipc_namespace *ns) +{ + ns->msg_ctlmax = MSGMAX; + ns->msg_ctlmnb = MSGMNB; + + recompute_msgmni(ns); + + atomic_set(&ns->msg_bytes, 0); + atomic_set(&ns->msg_hdrs, 0); + ipc_init_ids(&ns->ids[IPC_MSG_IDS]); +} + +#ifdef CONFIG_IPC_NS +void msg_exit_ns(struct ipc_namespace *ns) +{ + free_ipcs(ns, &msg_ids(ns), freeque); + idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr); +} +#endif + #ifdef CONFIG_PROC_FS static int sysvipc_msg_proc_show(struct seq_file *s, void *it) { + struct user_namespace *user_ns = seq_user_ns(s); struct msg_queue *msq = it; return seq_printf(s, @@ -937,12 +1055,24 @@ static int sysvipc_msg_proc_show(struct seq_file *s, void *it) msq->q_qnum, msq->q_lspid, msq->q_lrpid, - msq->q_perm.uid, - msq->q_perm.gid, - msq->q_perm.cuid, - msq->q_perm.cgid, + from_kuid_munged(user_ns, msq->q_perm.uid), + from_kgid_munged(user_ns, msq->q_perm.gid), + from_kuid_munged(user_ns, msq->q_perm.cuid), + from_kgid_munged(user_ns, msq->q_perm.cgid), msq->q_stime, msq->q_rtime, msq->q_ctime); } #endif + +void __init msg_init(void) +{ + msg_init_ns(&init_ipc_ns); + + printk(KERN_INFO "msgmni has been set to %d\n", + init_ipc_ns.msg_ctlmni); + + ipc_init_proc_interface("sysvipc/msg", + " key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n", + IPC_MSG_IDS, sysvipc_msg_proc_show); +} diff --git a/ipc/msgutil.c b/ipc/msgutil.c index c82c215693d..7e7095974d5 100644 --- a/ipc/msgutil.c +++ b/ipc/msgutil.c @@ -13,64 +13,94 @@ #include <linux/security.h> #include <linux/slab.h> #include <linux/ipc.h> -#include <asm/uaccess.h> +#include <linux/msg.h> +#include <linux/ipc_namespace.h> +#include <linux/utsname.h> +#include <linux/proc_ns.h> +#include <linux/uaccess.h> #include "util.h" +DEFINE_SPINLOCK(mq_lock); + +/* + * The next 2 defines are here bc this is the only file + * compiled when either CONFIG_SYSVIPC and CONFIG_POSIX_MQUEUE + * and not CONFIG_IPC_NS. 
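The from_kuid_munged() calls in sysvipc_msg_proc_show() above translate kernel ids into the reading task's user namespace, degrading anything unmappable to the overflow id instead of leaking a raw value. A toy single-range mapping conveys the effect; real namespaces carry several ranges and separate uid/gid maps.

#include <stdio.h>

#define OVERFLOW_UID 65534

struct uid_range {
        unsigned int first;             /* first id inside the namespace */
        unsigned int lower_first;       /* first backing kernel id */
        unsigned int count;
};

static unsigned int from_kuid_munged_like(const struct uid_range *map,
                                          unsigned int kuid)
{
        if (kuid >= map->lower_first && kuid < map->lower_first + map->count)
                return map->first + (kuid - map->lower_first);
        return OVERFLOW_UID;            /* unmapped id */
}

int main(void)
{
        /* a container mapping its uids 0..999 onto kernel uids 100000.. */
        struct uid_range map = { 0, 100000, 1000 };

        printf("%u\n", from_kuid_munged_like(&map, 100042));    /* 42 */
        printf("%u\n", from_kuid_munged_like(&map, 0));         /* 65534 */
        return 0;
}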
+ */ +struct ipc_namespace init_ipc_ns = { + .count = ATOMIC_INIT(1), + .user_ns = &init_user_ns, + .proc_inum = PROC_IPC_INIT_INO, +}; + +atomic_t nr_ipc_ns = ATOMIC_INIT(1); + struct msg_msgseg { - struct msg_msgseg* next; + struct msg_msgseg *next; /* the next part of the message follows immediately */ }; -#define DATALEN_MSG (PAGE_SIZE-sizeof(struct msg_msg)) -#define DATALEN_SEG (PAGE_SIZE-sizeof(struct msg_msgseg)) +#define DATALEN_MSG ((size_t)PAGE_SIZE-sizeof(struct msg_msg)) +#define DATALEN_SEG ((size_t)PAGE_SIZE-sizeof(struct msg_msgseg)) -struct msg_msg *load_msg(const void __user *src, int len) + +static struct msg_msg *alloc_msg(size_t len) { struct msg_msg *msg; struct msg_msgseg **pseg; - int err; - int alen; - - alen = len; - if (alen > DATALEN_MSG) - alen = DATALEN_MSG; + size_t alen; + alen = min(len, DATALEN_MSG); msg = kmalloc(sizeof(*msg) + alen, GFP_KERNEL); if (msg == NULL) - return ERR_PTR(-ENOMEM); + return NULL; msg->next = NULL; msg->security = NULL; - if (copy_from_user(msg + 1, src, alen)) { - err = -EFAULT; - goto out_err; - } - len -= alen; - src = ((char __user *)src) + alen; pseg = &msg->next; while (len > 0) { struct msg_msgseg *seg; - alen = len; - if (alen > DATALEN_SEG) - alen = DATALEN_SEG; - seg = kmalloc(sizeof(*seg) + alen, - GFP_KERNEL); - if (seg == NULL) { - err = -ENOMEM; + alen = min(len, DATALEN_SEG); + seg = kmalloc(sizeof(*seg) + alen, GFP_KERNEL); + if (seg == NULL) goto out_err; - } *pseg = seg; seg->next = NULL; - if (copy_from_user(seg + 1, src, alen)) { - err = -EFAULT; - goto out_err; - } pseg = &seg->next; len -= alen; - src = ((char __user *)src) + alen; + } + + return msg; + +out_err: + free_msg(msg); + return NULL; +} + +struct msg_msg *load_msg(const void __user *src, size_t len) +{ + struct msg_msg *msg; + struct msg_msgseg *seg; + int err = -EFAULT; + size_t alen; + + msg = alloc_msg(len); + if (msg == NULL) + return ERR_PTR(-ENOMEM); + + alen = min(len, DATALEN_MSG); + if (copy_from_user(msg + 1, src, alen)) + goto out_err; + + for (seg = msg->next; seg != NULL; seg = seg->next) { + len -= alen; + src = (char __user *)src + alen; + alen = min(len, DATALEN_SEG); + if (copy_from_user(seg + 1, src, alen)) + goto out_err; } err = security_msg_msg_alloc(msg); @@ -83,30 +113,55 @@ out_err: free_msg(msg); return ERR_PTR(err); } +#ifdef CONFIG_CHECKPOINT_RESTORE +struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst) +{ + struct msg_msgseg *dst_pseg, *src_pseg; + size_t len = src->m_ts; + size_t alen; + + BUG_ON(dst == NULL); + if (src->m_ts > dst->m_ts) + return ERR_PTR(-EINVAL); + + alen = min(len, DATALEN_MSG); + memcpy(dst + 1, src + 1, alen); + + for (dst_pseg = dst->next, src_pseg = src->next; + src_pseg != NULL; + dst_pseg = dst_pseg->next, src_pseg = src_pseg->next) { -int store_msg(void __user *dest, struct msg_msg *msg, int len) + len -= alen; + alen = min(len, DATALEN_SEG); + memcpy(dst_pseg + 1, src_pseg + 1, alen); + } + + dst->m_type = src->m_type; + dst->m_ts = src->m_ts; + + return dst; +} +#else +struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst) +{ + return ERR_PTR(-ENOSYS); +} +#endif +int store_msg(void __user *dest, struct msg_msg *msg, size_t len) { - int alen; + size_t alen; struct msg_msgseg *seg; - alen = len; - if (alen > DATALEN_MSG) - alen = DATALEN_MSG; + alen = min(len, DATALEN_MSG); if (copy_to_user(dest, msg + 1, alen)) return -1; - len -= alen; - dest = ((char __user *)dest) + alen; - seg = msg->next; - while (len > 0) { - alen = len; - if (alen > DATALEN_SEG) - alen = 
DATALEN_SEG; + for (seg = msg->next; seg != NULL; seg = seg->next) { + len -= alen; + dest = (char __user *)dest + alen; + alen = min(len, DATALEN_SEG); if (copy_to_user(dest, seg + 1, alen)) return -1; - len -= alen; - dest = ((char __user *)dest) + alen; - seg = seg->next; } return 0; } diff --git a/ipc/namespace.c b/ipc/namespace.c index 1b967655eb3..59451c1e214 100644 --- a/ipc/namespace.c +++ b/ipc/namespace.c @@ -9,39 +9,61 @@ #include <linux/rcupdate.h> #include <linux/nsproxy.h> #include <linux/slab.h> +#include <linux/fs.h> +#include <linux/mount.h> +#include <linux/user_namespace.h> +#include <linux/proc_ns.h> #include "util.h" -static struct ipc_namespace *clone_ipc_ns(struct ipc_namespace *old_ns) +static struct ipc_namespace *create_ipc_ns(struct user_namespace *user_ns, + struct ipc_namespace *old_ns) { struct ipc_namespace *ns; + int err; ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL); if (ns == NULL) return ERR_PTR(-ENOMEM); + err = proc_alloc_inum(&ns->proc_inum); + if (err) { + kfree(ns); + return ERR_PTR(err); + } + + atomic_set(&ns->count, 1); + err = mq_init_ns(ns); + if (err) { + proc_free_inum(ns->proc_inum); + kfree(ns); + return ERR_PTR(err); + } + atomic_inc(&nr_ipc_ns); + sem_init_ns(ns); msg_init_ns(ns); shm_init_ns(ns); - kref_init(&ns->kref); + /* + * msgmni has already been computed for the new ipc ns. + * Thus, do the ipcns creation notification before registering that + * new ipcns in the chain. + */ + ipcns_notify(IPCNS_CREATED); + register_ipcns_notifier(ns); + + ns->user_ns = get_user_ns(user_ns); + return ns; } -struct ipc_namespace *copy_ipcs(unsigned long flags, struct ipc_namespace *ns) +struct ipc_namespace *copy_ipcs(unsigned long flags, + struct user_namespace *user_ns, struct ipc_namespace *ns) { - struct ipc_namespace *new_ns; - - BUG_ON(!ns); - get_ipc_ns(ns); - if (!(flags & CLONE_NEWIPC)) - return ns; - - new_ns = clone_ipc_ns(ns); - - put_ipc_ns(ns); - return new_ns; + return get_ipc_ns(ns); + return create_ipc_ns(user_ns, ns); } /* @@ -59,7 +81,7 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, int next_id; int total, in_use; - down_write(&ids->rw_mutex); + down_write(&ids->rwsem); in_use = ids->in_use; @@ -67,20 +89,111 @@ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, perm = idr_find(&ids->ipcs_idr, next_id); if (perm == NULL) continue; - ipc_lock_by_ptr(perm); + rcu_read_lock(); + ipc_lock_object(perm); free(ns, perm); total++; } - up_write(&ids->rw_mutex); + up_write(&ids->rwsem); } -void free_ipc_ns(struct kref *kref) +static void free_ipc_ns(struct ipc_namespace *ns) { - struct ipc_namespace *ns; - - ns = container_of(kref, struct ipc_namespace, kref); + /* + * Unregistering the hotplug notifier at the beginning guarantees + * that the ipc namespace won't be freed while we are inside the + * callback routine. Since the blocking_notifier_chain_XXX routines + * hold a rw lock on the notifier list, unregister_ipcns_notifier() + * won't take the rw lock before blocking_notifier_call_chain() has + * released the rd lock. + */ + unregister_ipcns_notifier(ns); sem_exit_ns(ns); msg_exit_ns(ns); shm_exit_ns(ns); + atomic_dec(&nr_ipc_ns); + + /* + * Do the ipcns removal notification after decrementing nr_ipc_ns in + * order to have a correct value when recomputing msgmni. + */ + ipcns_notify(IPCNS_REMOVED); + put_user_ns(ns->user_ns); + proc_free_inum(ns->proc_inum); kfree(ns); } + +/* + * put_ipc_ns - drop a reference to an ipc namespace. 
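The final drop described here has to take mq_lock atomically with the 1->0 transition, which is what atomic_dec_and_lock() gives put_ipc_ns() below. A userspace sketch of that shape; the helper is a simplified stand-in for the kernel primitive, not a faithful reimplementation.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static pthread_mutex_t mq_lock_like = PTHREAD_MUTEX_INITIALIZER;
static atomic_int count = 1;

static int dec_and_lock(atomic_int *c, pthread_mutex_t *m)
{
        int old = atomic_load(c);

        /* fast path: a non-final reference drops without the lock */
        while (old > 1)
                if (atomic_compare_exchange_weak(c, &old, old - 1))
                        return 0;
        /* slow path: take the lock, then do the final decrement */
        pthread_mutex_lock(m);
        if (atomic_fetch_sub(c, 1) == 1)
                return 1;               /* caller tears down, then unlocks */
        pthread_mutex_unlock(m);
        return 0;
}

int main(void)
{
        if (dec_and_lock(&count, &mq_lock_like)) {
                printf("last ref: clear sbinfo, unlock, unmount, free\n");
                pthread_mutex_unlock(&mq_lock_like);
        }
        return 0;
}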
+ * @ns: the namespace to put + * + * If this is the last task in the namespace exiting, and + * it is dropping the refcount to 0, then it can race with + * a task in another ipc namespace but in a mounts namespace + * which has this ipcns's mqueuefs mounted, doing some action + * with one of the mqueuefs files. That can raise the refcount. + * So dropping the refcount, and raising the refcount when + * accessing it through the VFS, are protected with mq_lock. + * + * (Clearly, a task raising the refcount on its own ipc_ns + * needn't take mq_lock since it can't race with the last task + * in the ipcns exiting). + */ +void put_ipc_ns(struct ipc_namespace *ns) +{ + if (atomic_dec_and_lock(&ns->count, &mq_lock)) { + mq_clear_sbinfo(ns); + spin_unlock(&mq_lock); + mq_put_mnt(ns); + free_ipc_ns(ns); + } +} + +static void *ipcns_get(struct task_struct *task) +{ + struct ipc_namespace *ns = NULL; + struct nsproxy *nsproxy; + + rcu_read_lock(); + nsproxy = task_nsproxy(task); + if (nsproxy) + ns = get_ipc_ns(nsproxy->ipc_ns); + rcu_read_unlock(); + + return ns; +} + +static void ipcns_put(void *ns) +{ + return put_ipc_ns(ns); +} + +static int ipcns_install(struct nsproxy *nsproxy, void *new) +{ + struct ipc_namespace *ns = new; + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN) || + !ns_capable(current_user_ns(), CAP_SYS_ADMIN)) + return -EPERM; + + /* Ditch state from the old ipc namespace */ + exit_sem(current); + put_ipc_ns(nsproxy->ipc_ns); + nsproxy->ipc_ns = get_ipc_ns(ns); + return 0; +} + +static unsigned int ipcns_inum(void *vp) +{ + struct ipc_namespace *ns = vp; + + return ns->proc_inum; +} + +const struct proc_ns_operations ipcns_operations = { + .name = "ipc", + .type = CLONE_NEWIPC, + .get = ipcns_get, + .put = ipcns_put, + .install = ipcns_install, + .inum = ipcns_inum, +}; diff --git a/ipc/sem.c b/ipc/sem.c index 0b45a4d383c..454f6c6020a 100644 --- a/ipc/sem.c +++ b/ipc/sem.c @@ -3,64 +3,16 @@ * Copyright (C) 1992 Krishna Balasubramanian * Copyright (C) 1995 Eric Schenk, Bruno Haible * - * IMPLEMENTATION NOTES ON CODE REWRITE (Eric Schenk, January 1995): - * This code underwent a massive rewrite in order to solve some problems - * with the original code. In particular the original code failed to - * wake up processes that were waiting for semval to go to 0 if the - * value went to 0 and was then incremented rapidly enough. In solving - * this problem I have also modified the implementation so that it - * processes pending operations in a FIFO manner, thus give a guarantee - * that processes waiting for a lock on the semaphore won't starve - * unless another locking process fails to unlock. - * In addition the following two changes in behavior have been introduced: - * - The original implementation of semop returned the value - * last semaphore element examined on success. This does not - * match the manual page specifications, and effectively - * allows the user to read the semaphore even if they do not - * have read permissions. The implementation now returns 0 - * on success as stated in the manual page. - * - There is some confusion over whether the set of undo adjustments - * to be performed at exit should be done in an atomic manner. - * That is, if we are attempting to decrement the semval should we queue - * up and wait until we can do so legally? - * The original implementation attempted to do this. - * The current implementation does not do so. 
This is because I don't
- *	think it is the right thing (TM) to do, and because I couldn't
- *	see a clean way to get the old behavior with the new design.
- *	The POSIX standard and SVID should be consulted to determine
- *	what behavior is mandated.
- *
- * Further notes on refinement (Christoph Rohland, December 1998):
- * - The POSIX standard says, that the undo adjustments simply should
- *   redo. So the current implementation is o.K.
- * - The previous code had two flaws:
- *   1) It actively gave the semaphore to the next waiting process
- *      sleeping on the semaphore. Since this process did not have the
- *      cpu this led to many unnecessary context switches and bad
- *      performance. Now we only check which process should be able to
- *      get the semaphore and if this process wants to reduce some
- *      semaphore value we simply wake it up without doing the
- *      operation. So it has to try to get it later. Thus e.g. the
- *      running process may reacquire the semaphore during the current
- *      time slice. If it only waits for zero or increases the semaphore,
- *      we do the operation in advance and wake it up.
- *   2) It did not wake up all zero waiting processes. We try to do
- *      better but only get the semops right which only wait for zero or
- *      increase. If there are decrement operations in the operations
- *      array we do the same as before.
- *
- * With the incarnation of O(1) scheduler, it becomes unnecessary to perform
- * check/retry algorithm for waking up blocked processes as the new scheduler
- * is better at handling thread switch than the old one.
- *
 * /proc/sysvipc/sem support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 *
 * SMP-threaded, sysctl's added
 * (c) 1999 Manfred Spraul <manfred@colorfullife.com>
 * Enforced range limit on SEM_UNDO
- * (c) 2001 Red Hat Inc <alan@redhat.com>
+ * (c) 2001 Red Hat Inc
 * Lockless wakeup
 * (c) 2003 Manfred Spraul <manfred@colorfullife.com>
+ * Further wakeup optimizations, documentation
+ * (c) 2010 Manfred Spraul <manfred@colorfullife.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
@@ -68,6 +20,56 @@
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
+ *
+ * Implementation notes: (May 2010)
+ * This file implements System V semaphores.
+ *
+ * User space visible behavior:
+ * - FIFO ordering for semop() operations (just FIFO, not starvation
+ *   protection)
+ * - multiple semaphore operations that alter the same semaphore in
+ *   one semop() are handled.
+ * - sem_ctime (time of last semctl()) is updated in the IPC_SET, SETVAL and
+ *   SETALL calls.
+ * - two Linux specific semctl() commands: SEM_STAT, SEM_INFO.
+ * - undo adjustments at process exit are limited to 0..SEMVMX.
+ * - namespaces are supported.
+ * - SEMMSL, SEMMNS, SEMOPM and SEMMNI can be configured at runtime by writing
+ *   to /proc/sys/kernel/sem.
+ * - statistics about the usage are reported in /proc/sysvipc/sem.
+ *
+ * Internals:
+ * - scalability:
+ *   - all global variables are read-mostly.
+ *   - semop() calls and semctl(RMID) are synchronized by RCU.
+ *   - most operations do write operations (actually: spin_lock calls) to
+ *     the per-semaphore array structure.
+ *     Thus: Perfect SMP scaling between independent semaphore arrays.
+ *     If multiple semaphores in one array are used, then cache line
+ *     thrashing on the semaphore array spinlock will limit the scaling.
+ * - semncnt and semzcnt are calculated on demand in count_semcnt()
+ * - the task that performs a successful semop() scans the list of all
+ *   sleeping tasks and completes any pending operations that can be fulfilled.
+ *   Semaphores are actively given to waiting tasks (necessary for FIFO).
+ *   (see update_queue())
+ * - To improve the scalability, the actual wake-up calls are performed after
+ *   dropping all locks. (see wake_up_sem_queue_prepare(),
+ *   wake_up_sem_queue_do())
+ * - All work is done by the waker, the woken up task does not have to do
+ *   anything - not even acquiring a lock or dropping a refcount.
+ * - A woken up task may not even touch the semaphore array anymore, it may
+ *   have been destroyed already by a semctl(RMID).
+ * - The synchronization between wake-ups due to a timeout/signal and a
+ *   wake-up due to a completed semaphore operation is achieved by using an
+ *   intermediate state (IN_WAKEUP).
+ * - UNDO values are stored in an array (one per process and per
+ *   semaphore array, lazily allocated). For backwards compatibility, multiple
+ *   modes for the UNDO variables are supported (per process, per thread)
+ *   (see copy_semundo, CLONE_SYSVSEM)
+ * - There are two lists of the pending operations: a per-array list
+ *   and per-semaphore list (stored in the array). This makes it possible to
+ *   achieve FIFO ordering without always scanning all pending operations.
+ *   The worst-case behavior is nevertheless O(N^2) for N wakeups.
 */
 
 #include <linux/slab.h>
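As a concrete illustration of the user-visible rules listed above, here is a minimal (hypothetical) test program: the first semop() is a single-sop operation, which the locking scheme below can serve with only the per-semaphore lock, while the second passes two sembufs and is therefore a "complex" operation that is applied all-or-nothing. The union semun definition is the caller's responsibility on glibc.

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>

	/* glibc does not define union semun; the caller must provide it */
	union semun { int val; struct semid_ds *buf; unsigned short *array; };

	int main(void)
	{
		unsigned short init[2] = { 1, 1 };
		union semun arg = { .array = init };
		struct sembuf dec0 = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
		struct sembuf swap[2] = {
			{ .sem_num = 0, .sem_op = +1, .sem_flg = 0 },
			{ .sem_num = 1, .sem_op = -1, .sem_flg = 0 },
		};
		int id = semget(IPC_PRIVATE, 2, IPC_CREAT | 0600);

		if (id < 0)
			return 1;
		semctl(id, 0, SETALL, arg);
		semop(id, &dec0, 1);	/* single sop: per-semaphore fast path */
		semop(id, swap, 2);	/* two sops: "complex" op, atomic as a whole */
		printf("sem0=%d sem1=%d\n",	/* prints sem0=1 sem1=0 */
		       semctl(id, 0, GETVAL), semctl(id, 1, GETVAL));
		semctl(id, 0, IPC_RMID, arg);
		return 0;
	}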
@@ -84,14 +86,63 @@
 #include <linux/nsproxy.h>
 #include <linux/ipc_namespace.h>
 
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include "util.h"
 
+/* One semaphore structure for each semaphore in the system. */
+struct sem {
+	int	semval;		/* current value */
+	int	sempid;		/* pid of last operation */
+	spinlock_t	lock;	/* spinlock for fine-grained semtimedop */
+	struct list_head pending_alter; /* pending single-sop operations */
+					/* that alter the semaphore */
+	struct list_head pending_const; /* pending single-sop operations */
+					/* that do not alter the semaphore */
+	time_t	sem_otime;	/* candidate for sem_otime */
+} ____cacheline_aligned_in_smp;
+
+/* One queue for each sleeping process in the system. */
+struct sem_queue {
+	struct list_head	list;	 /* queue of pending operations */
+	struct task_struct	*sleeper; /* this process */
+	struct sem_undo		*undo;	 /* undo structure */
+	int			pid;	 /* process id of requesting process */
+	int			status;	 /* completion status of operation */
+	struct sembuf		*sops;	 /* array of pending operations */
+	struct sembuf		*blocking; /* the operation that blocked */
+	int			nsops;	 /* number of operations */
+	int			alter;	 /* does *sops alter the array? */
+};
+
+/* Each task has a list of undo requests. They are executed automatically
+ * when the process exits.
+ */
+struct sem_undo {
+	struct list_head	list_proc;	/* per-process list: *
+						 * all undos from one process
+						 * rcu protected */
+	struct rcu_head		rcu;		/* rcu struct for sem_undo */
+	struct sem_undo_list	*ulp;		/* back ptr to sem_undo_list */
+	struct list_head	list_id;	/* per semaphore array list:
+						 * all undos for one array */
+	int			semid;		/* semaphore set identifier */
+	short			*semadj;	/* array of adjustments */
+						/* one per semaphore */
+};
+
+/* sem_undo_list controls shared access to the list of sem_undo structures
+ * that may be shared among all tasks in a CLONE_SYSVSEM task group.
+ */
+struct sem_undo_list {
+	atomic_t		refcnt;
+	spinlock_t		lock;
+	struct list_head	list_proc;
+};
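The SEM_UNDO bookkeeping that these three structures implement is directly observable from user space: an adjustment recorded in sem_undo.semadj is replayed when the task exits. A small runnable demo (error handling omitted for brevity):

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>
	#include <sys/wait.h>
	#include <unistd.h>

	union semun { int val; struct semid_ds *buf; unsigned short *array; };

	int main(void)
	{
		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
		union semun arg = { .val = 1 };
		struct sembuf dec = { .sem_num = 0, .sem_op = -1, .sem_flg = SEM_UNDO };

		semctl(id, 0, SETVAL, arg);
		if (fork() == 0) {
			semop(id, &dec, 1);	/* records semadj[0] = +1 for the child */
			_exit(0);		/* exit_sem() replays the adjustment */
		}
		wait(NULL);
		/* the child's decrement was undone: this prints 1, not 0 */
		printf("semval after child exit: %d\n", semctl(id, 0, GETVAL));
		semctl(id, 0, IPC_RMID, arg);
		return 0;
	}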
+
 #define sem_ids(ns)	((ns)->ids[IPC_SEM_IDS])
 
-#define sem_unlock(sma)	ipc_unlock(&(sma)->sem_perm)
 #define sem_checkid(sma, semid)	ipc_checkid(&sma->sem_perm, semid)
-#define sem_buildid(id, seq)	ipc_buildid(id, seq)
 
 static int newary(struct ipc_namespace *, struct ipc_params *);
 static void freeary(struct ipc_namespace *, struct kern_ipc_perm *);
@@ -103,12 +154,15 @@ static int sysvipc_sem_proc_show(struct seq_file *s, void *it);
 #define SEMOPM_FAST	64  /* ~ 372 bytes on stack */
 
 /*
- * linked list protection:
+ * Locking:
 *	sem_undo.id_next,
- *	sem_array.sem_pending{,last},
+ *	sem_array.complex_count,
+ *	sem_array.pending{_alter,_const},
- *	sem_array.sem_undo: sem_lock() for read/write
+ *	sem_array.sem_undo: global sem_lock() for read/write
 *	sem_undo.proc_next: only "current" is allowed to read/write that field.
- *
+ *
+ *	sem_array.sem_base[i].pending_{const,alter}:
+ *		global or semaphore sem_lock() for read/write
 */
 
 #define sc_semmsl	sem_ctls[0]
@@ -130,10 +184,11 @@ void sem_init_ns(struct ipc_namespace *ns)
 void sem_exit_ns(struct ipc_namespace *ns)
 {
 	free_ipcs(ns, &sem_ids(ns), freeary);
+	idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
 }
 #endif
 
-void __init sem_init (void)
+void __init sem_init(void)
 {
 	sem_init_ns(&init_ipc_ns);
 	ipc_init_proc_interface("sysvipc/sem",
@@ -141,46 +196,238 @@ void __init sem_init (void)
 				IPC_SEM_IDS, sysvipc_sem_proc_show);
 }
 
+/**
+ * unmerge_queues - unmerge queues, if possible.
+ * @sma: semaphore array
+ *
+ * The function unmerges the wait queues if complex_count is 0.
+ * It must be called prior to dropping the global semaphore array lock.
+ */
+static void unmerge_queues(struct sem_array *sma)
+{
+	struct sem_queue *q, *tq;
+
+	/* complex operations still around? */
+	if (sma->complex_count)
+		return;
+	/*
+	 * We will switch back to simple mode.
+	 * Move all pending operations back into the per-semaphore
+	 * queues.
+	 */
+	list_for_each_entry_safe(q, tq, &sma->pending_alter, list) {
+		struct sem *curr;
+		curr = &sma->sem_base[q->sops[0].sem_num];
+
+		list_add_tail(&q->list, &curr->pending_alter);
+	}
+	INIT_LIST_HEAD(&sma->pending_alter);
+}
+
+/**
+ * merge_queues - merge single semop queues into global queue
+ * @sma: semaphore array
+ *
+ * This function merges all per-semaphore queues into the global queue.
+ * It is necessary to achieve FIFO ordering for the pending single-sop
+ * operations when a multi-semop operation must sleep.
+ * Only the alter operations must be moved, the const operations can stay.
+ */
+static void merge_queues(struct sem_array *sma)
+{
+	int i;
+	for (i = 0; i < sma->sem_nsems; i++) {
+		struct sem *sem = sma->sem_base + i;
+
+		list_splice_init(&sem->pending_alter, &sma->pending_alter);
+	}
+}
+
+static void sem_rcu_free(struct rcu_head *head)
+{
+	struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu);
+	struct sem_array *sma = ipc_rcu_to_struct(p);
+
+	security_sem_free(sma);
+	ipc_rcu_free(head);
+}
+
 /*
- * This routine is called in the paths where the rw_mutex is held to protect
- * access to the idr tree.
+ * Wait until all currently ongoing simple ops have completed.
+ * Caller must own sem_perm.lock.
+ * New simple ops cannot start, because simple ops first check
+ * that a) sem_perm.lock is free and b) complex_count is 0.
*/ -static inline struct sem_array *sem_lock_check_down(struct ipc_namespace *ns, - int id) +static void sem_wait_array(struct sem_array *sma) { - struct kern_ipc_perm *ipcp = ipc_lock_check_down(&sem_ids(ns), id); + int i; + struct sem *sem; - if (IS_ERR(ipcp)) - return (struct sem_array *)ipcp; + if (sma->complex_count) { + /* The thread that increased sma->complex_count waited on + * all sem->lock locks. Thus we don't need to wait again. + */ + return; + } - return container_of(ipcp, struct sem_array, sem_perm); + for (i = 0; i < sma->sem_nsems; i++) { + sem = sma->sem_base + i; + spin_unlock_wait(&sem->lock); + } } /* - * sem_lock_(check_) routines are called in the paths where the rw_mutex + * If the request contains only one semaphore operation, and there are + * no complex transactions pending, lock only the semaphore involved. + * Otherwise, lock the entire semaphore array, since we either have + * multiple semaphores in our own semops, or we need to look at + * semaphores from other pending complex operations. + */ +static inline int sem_lock(struct sem_array *sma, struct sembuf *sops, + int nsops) +{ + struct sem *sem; + + if (nsops != 1) { + /* Complex operation - acquire a full lock */ + ipc_lock_object(&sma->sem_perm); + + /* And wait until all simple ops that are processed + * right now have dropped their locks. + */ + sem_wait_array(sma); + return -1; + } + + /* + * Only one semaphore affected - try to optimize locking. + * The rules are: + * - optimized locking is possible if no complex operation + * is either enqueued or processed right now. + * - The test for enqueued complex ops is simple: + * sma->complex_count != 0 + * - Testing for complex ops that are processed right now is + * a bit more difficult. Complex ops acquire the full lock + * and first wait that the running simple ops have completed. + * (see above) + * Thus: If we own a simple lock and the global lock is free + * and complex_count is now 0, then it will stay 0 and + * thus just locking sem->lock is sufficient. + */ + sem = sma->sem_base + sops->sem_num; + + if (sma->complex_count == 0) { + /* + * It appears that no complex operation is around. + * Acquire the per-semaphore lock. + */ + spin_lock(&sem->lock); + + /* Then check that the global lock is free */ + if (!spin_is_locked(&sma->sem_perm.lock)) { + /* spin_is_locked() is not a memory barrier */ + smp_mb(); + + /* Now repeat the test of complex_count: + * It can't change anymore until we drop sem->lock. + * Thus: if is now 0, then it will stay 0. + */ + if (sma->complex_count == 0) { + /* fast path successful! */ + return sops->sem_num; + } + } + spin_unlock(&sem->lock); + } + + /* slow path: acquire the full lock */ + ipc_lock_object(&sma->sem_perm); + + if (sma->complex_count == 0) { + /* False alarm: + * There is no complex operation, thus we can switch + * back to the fast path. + */ + spin_lock(&sem->lock); + ipc_unlock_object(&sma->sem_perm); + return sops->sem_num; + } else { + /* Not a false alarm, thus complete the sequence for a + * full lock. + */ + sem_wait_array(sma); + return -1; + } +} + +static inline void sem_unlock(struct sem_array *sma, int locknum) +{ + if (locknum == -1) { + unmerge_queues(sma); + ipc_unlock_object(&sma->sem_perm); + } else { + struct sem *sem = sma->sem_base + locknum; + spin_unlock(&sem->lock); + } +} + +/* + * sem_lock_(check_) routines are called in the paths where the rwsem * is not held. + * + * The caller holds the RCU read lock. 
*/ -static inline struct sem_array *sem_lock(struct ipc_namespace *ns, int id) +static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, + int id, struct sembuf *sops, int nsops, int *locknum) { - struct kern_ipc_perm *ipcp = ipc_lock(&sem_ids(ns), id); + struct kern_ipc_perm *ipcp; + struct sem_array *sma; + ipcp = ipc_obtain_object(&sem_ids(ns), id); if (IS_ERR(ipcp)) - return (struct sem_array *)ipcp; + return ERR_CAST(ipcp); + + sma = container_of(ipcp, struct sem_array, sem_perm); + *locknum = sem_lock(sma, sops, nsops); + + /* ipc_rmid() may have already freed the ID while sem_lock + * was spinning: verify that the structure is still valid + */ + if (ipc_valid_object(ipcp)) + return container_of(ipcp, struct sem_array, sem_perm); + + sem_unlock(sma, *locknum); + return ERR_PTR(-EINVAL); +} + +static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id) +{ + struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id); + + if (IS_ERR(ipcp)) + return ERR_CAST(ipcp); return container_of(ipcp, struct sem_array, sem_perm); } -static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns, - int id) +static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns, + int id) { - struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id); if (IS_ERR(ipcp)) - return (struct sem_array *)ipcp; + return ERR_CAST(ipcp); return container_of(ipcp, struct sem_array, sem_perm); } +static inline void sem_lock_and_putref(struct sem_array *sma) +{ + sem_lock(sma, NULL, -1); + ipc_rcu_putref(sma, ipc_rcu_free); +} + static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) { ipc_rmid(&sem_ids(ns), &s->sem_perm); @@ -191,18 +438,18 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) * Without the check/retry algorithm a lockless wakeup is possible: * - queue.status is initialized to -EINTR before blocking. * - wakeup is performed by - * * unlinking the queue entry from sma->sem_pending + * * unlinking the queue entry from the pending list * * setting queue.status to IN_WAKEUP * This is the notification for the blocked thread that a * result value is imminent. * * call wake_up_process * * set queue.status to the final value. * - the previously blocked thread checks queue.status: - * * if it's IN_WAKEUP, then it must wait until the value changes - * * if it's not -EINTR, then the operation was completed by - * update_queue. semtimedop can return queue.status without - * performing any operation on the sem array. - * * otherwise it must acquire the spinlock and check what's up. + * * if it's IN_WAKEUP, then it must wait until the value changes + * * if it's not -EINTR, then the operation was completed by + * update_queue. semtimedop can return queue.status without + * performing any operation on the sem array. + * * otherwise it must acquire the spinlock and check what's up. 
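The check/retry description above boils down to a two-stage status handshake between waker and sleeper. A userspace sketch of the same idea with C11 atomics (publish_result, wait_result and the IN_WAKEUP value are illustrative stand-ins, not the kernel's symbols):

	#include <sched.h>
	#include <stdatomic.h>

	#define IN_WAKEUP	1	/* transient: the final result is imminent */

	/* waker: publish the result in two steps, cf. wake_up_sem_queue_prepare() */
	static void publish_result(atomic_int *status, int error)
	{
		atomic_store(status, IN_WAKEUP);
		/* ... wake_up_process(sleeper) would happen here ... */
		atomic_store_explicit(status, error, memory_order_release);
	}

	/* sleeper: cf. get_queue_result() later in this patch */
	static int wait_result(atomic_int *status)
	{
		int error = atomic_load_explicit(status, memory_order_acquire);

		while (error == IN_WAKEUP) {	/* busy-wait: the value changes shortly */
			sched_yield();		/* stand-in for cpu_relax() */
			error = atomic_load_explicit(status, memory_order_acquire);
		}
		return error;
	}

The intermediate IN_WAKEUP state is what lets the sleeper return without taking any lock: once it reads a final value, the waker is guaranteed to be done touching the queue entry.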
* * The two-stage algorithm is necessary to protect against the following * races: @@ -225,9 +472,8 @@ static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s) * @ns: namespace * @params: ptr to the structure that contains key, semflg and nsems * - * Called with sem_ids.rw_mutex held (as a writer) + * Called with sem_ids.rwsem held (as a writer) */ - static int newary(struct ipc_namespace *ns, struct ipc_params *params) { int id; @@ -237,18 +483,19 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) key_t key = params->key; int nsems = params->u.nsems; int semflg = params->flg; + int i; if (!nsems) return -EINVAL; if (ns->used_sems + nsems > ns->sc_semmns) return -ENOSPC; - size = sizeof (*sma) + nsems * sizeof (struct sem); + size = sizeof(*sma) + nsems * sizeof(struct sem); sma = ipc_rcu_alloc(size); - if (!sma) { + if (!sma) return -ENOMEM; - } - memset (sma, 0, size); + + memset(sma, 0, size); sma->sem_perm.mode = (semflg & S_IRWXUGO); sma->sem_perm.key = key; @@ -256,33 +503,40 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params) sma->sem_perm.security = NULL; retval = security_sem_alloc(sma); if (retval) { - ipc_rcu_putref(sma); + ipc_rcu_putref(sma, ipc_rcu_free); return retval; } id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni); if (id < 0) { - security_sem_free(sma); - ipc_rcu_putref(sma); + ipc_rcu_putref(sma, sem_rcu_free); return id; } ns->used_sems += nsems; - sma->sem_perm.id = sem_buildid(id, sma->sem_perm.seq); sma->sem_base = (struct sem *) &sma[1]; - /* sma->sem_pending = NULL; */ - sma->sem_pending_last = &sma->sem_pending; - /* sma->undo = NULL; */ + + for (i = 0; i < nsems; i++) { + INIT_LIST_HEAD(&sma->sem_base[i].pending_alter); + INIT_LIST_HEAD(&sma->sem_base[i].pending_const); + spin_lock_init(&sma->sem_base[i].lock); + } + + sma->complex_count = 0; + INIT_LIST_HEAD(&sma->pending_alter); + INIT_LIST_HEAD(&sma->pending_const); + INIT_LIST_HEAD(&sma->list_id); sma->sem_nsems = nsems; sma->sem_ctime = get_seconds(); - sem_unlock(sma); + sem_unlock(sma, -1); + rcu_read_unlock(); return sma->sem_perm.id; } /* - * Called with sem_ids.rw_mutex and ipcp locked. + * Called with sem_ids.rwsem and ipcp locked. */ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg) { @@ -293,7 +547,7 @@ static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg) } /* - * Called with sem_ids.rw_mutex and ipcp locked. + * Called with sem_ids.rwsem and ipcp locked. 
*/ static inline int sem_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) @@ -307,10 +561,14 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp, return 0; } -asmlinkage long sys_semget(key_t key, int nsems, int semflg) +SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg) { struct ipc_namespace *ns; - struct ipc_ops sem_ops; + static const struct ipc_ops sem_ops = { + .getnew = newary, + .associate = sem_security, + .more_checks = sem_more_checks, + }; struct ipc_params sem_params; ns = current->nsproxy->ipc_ns; @@ -318,10 +576,6 @@ asmlinkage long sys_semget(key_t key, int nsems, int semflg) if (nsems < 0 || nsems > ns->sc_semmsl) return -EINVAL; - sem_ops.getnew = newary; - sem_ops.associate = sem_security; - sem_ops.more_checks = sem_more_checks; - sem_params.key = key; sem_params.flg = semflg; sem_params.u.nsems = nsems; @@ -329,55 +583,32 @@ asmlinkage long sys_semget(key_t key, int nsems, int semflg) return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params); } -/* Manage the doubly linked list sma->sem_pending as a FIFO: - * insert new queue elements at the tail sma->sem_pending_last. - */ -static inline void append_to_queue (struct sem_array * sma, - struct sem_queue * q) -{ - *(q->prev = sma->sem_pending_last) = q; - *(sma->sem_pending_last = &q->next) = NULL; -} - -static inline void prepend_to_queue (struct sem_array * sma, - struct sem_queue * q) -{ - q->next = sma->sem_pending; - *(q->prev = &sma->sem_pending) = q; - if (q->next) - q->next->prev = &q->next; - else /* sma->sem_pending_last == &sma->sem_pending */ - sma->sem_pending_last = &q->next; -} - -static inline void remove_from_queue (struct sem_array * sma, - struct sem_queue * q) -{ - *(q->prev) = q->next; - if (q->next) - q->next->prev = q->prev; - else /* sma->sem_pending_last == &q->next */ - sma->sem_pending_last = q->prev; - q->prev = NULL; /* mark as removed */ -} - -/* - * Determine whether a sequence of semaphore operations would succeed - * all at once. Return 0 if yes, 1 if need to sleep, else return error code. +/** + * perform_atomic_semop - Perform (if possible) a semaphore operation + * @sma: semaphore array + * @q: struct sem_queue that describes the operation + * + * Returns 0 if the operation was possible. + * Returns 1 if the operation is impossible, the caller must sleep. + * Negative values are error codes. */ - -static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops, - int nsops, struct sem_undo *un, int pid) +static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q) { - int result, sem_op; + int result, sem_op, nsops, pid; struct sembuf *sop; - struct sem * curr; + struct sem *curr; + struct sembuf *sops; + struct sem_undo *un; + + sops = q->sops; + nsops = q->nsops; + un = q->undo; for (sop = sops; sop < sops + nsops; sop++) { curr = sma->sem_base + sop->sem_num; sem_op = sop->sem_op; result = curr->semval; - + if (!sem_op && result) goto would_block; @@ -386,26 +617,25 @@ static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops, goto would_block; if (result > SEMVMX) goto out_of_range; + if (sop->sem_flg & SEM_UNDO) { int undo = un->semadj[sop->sem_num] - sem_op; - /* - * Exceeding the undo range is an error. - */ + /* Exceeding the undo range is an error. 
*/ if (undo < (-SEMAEM - 1) || undo > SEMAEM) goto out_of_range; + un->semadj[sop->sem_num] = undo; } + curr->semval = result; } sop--; + pid = q->pid; while (sop >= sops) { sma->sem_base[sop->sem_num].sempid = pid; - if (sop->sem_flg & SEM_UNDO) - un->semadj[sop->sem_num] -= sop->sem_op; sop--; } - - sma->sem_otime = get_seconds(); + return 0; out_of_range: @@ -413,6 +643,8 @@ out_of_range: goto undo; would_block: + q->blocking = sop; + if (sop->sem_flg & IPC_NOWAIT) result = -EAGAIN; else @@ -421,154 +653,482 @@ would_block: undo: sop--; while (sop >= sops) { - sma->sem_base[sop->sem_num].semval -= sop->sem_op; + sem_op = sop->sem_op; + sma->sem_base[sop->sem_num].semval -= sem_op; + if (sop->sem_flg & SEM_UNDO) + un->semadj[sop->sem_num] += sem_op; sop--; } return result; } -/* Go through the pending queue for the indicated semaphore - * looking for tasks that can be completed. +/** wake_up_sem_queue_prepare(q, error): Prepare wake-up + * @q: queue entry that must be signaled + * @error: Error value for the signal + * + * Prepare the wake-up of the queue entry q. */ -static void update_queue (struct sem_array * sma) +static void wake_up_sem_queue_prepare(struct list_head *pt, + struct sem_queue *q, int error) { - int error; - struct sem_queue * q; + if (list_empty(pt)) { + /* + * Hold preempt off so that we don't get preempted and have the + * wakee busy-wait until we're scheduled back on. + */ + preempt_disable(); + } + q->status = IN_WAKEUP; + q->pid = error; - q = sma->sem_pending; - while(q) { - error = try_atomic_semop(sma, q->sops, q->nsops, - q->undo, q->pid); + list_add_tail(&q->list, pt); +} + +/** + * wake_up_sem_queue_do - do the actual wake-up + * @pt: list of tasks to be woken up + * + * Do the actual wake-up. + * The function is called without any locks held, thus the semaphore array + * could be destroyed already and the tasks can disappear as soon as the + * status is set to the actual return code. + */ +static void wake_up_sem_queue_do(struct list_head *pt) +{ + struct sem_queue *q, *t; + int did_something; + + did_something = !list_empty(pt); + list_for_each_entry_safe(q, t, pt, list) { + wake_up_process(q->sleeper); + /* q can disappear immediately after writing q->status. */ + smp_wmb(); + q->status = q->pid; + } + if (did_something) + preempt_enable(); +} + +static void unlink_queue(struct sem_array *sma, struct sem_queue *q) +{ + list_del(&q->list); + if (q->nsops > 1) + sma->complex_count--; +} + +/** check_restart(sma, q) + * @sma: semaphore array + * @q: the operation that just completed + * + * update_queue is O(N^2) when it restarts scanning the whole queue of + * waiting operations. Therefore this function checks if the restart is + * really necessary. It is called after a previously waiting operation + * modified the array. + * Note that wait-for-zero operations are handled without restart. + */ +static int check_restart(struct sem_array *sma, struct sem_queue *q) +{ + /* pending complex alter operations are too difficult to analyse */ + if (!list_empty(&sma->pending_alter)) + return 1; + + /* we were a sleeping complex operation. Too difficult */ + if (q->nsops > 1) + return 1; + + /* It is impossible that someone waits for the new value: + * - complex operations always restart. + * - wait-for-zero are handled seperately. + * - q is a previously sleeping simple operation that + * altered the array. It must be a decrement, because + * simple increments never sleep. 
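Stripped of the undo bookkeeping, SEMVMX range checks and IPC_NOWAIT handling, the all-or-nothing apply/roll-back loop of perform_atomic_semop() above reduces to the following self-contained sketch (struct op and apply_atomically are made-up names for illustration):

	struct op { int num; int delta; };

	/* Returns 0 if all ops applied, 1 if the caller would have to sleep. */
	static int apply_atomically(int *vals, struct op *ops, int nops)
	{
		int i;

		for (i = 0; i < nops; i++) {
			int result = vals[ops[i].num] + ops[i].delta;

			/* decrement below zero, or wait-for-zero on nonzero value */
			if (result < 0 || (ops[i].delta == 0 && vals[ops[i].num] != 0))
				goto would_block;
			vals[ops[i].num] = result;
		}
		return 0;

	would_block:
		while (--i >= 0)
			vals[ops[i].num] -= ops[i].delta;	/* roll back in reverse */
		return 1;
	}

Nothing is committed unless every operation in the array can proceed, which is the property the FIFO queues above depend on.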
+ * - If there are older (higher priority) decrements + * in the queue, then they have observed the original + * semval value and couldn't proceed. The operation + * decremented to value - thus they won't proceed either. + */ + return 0; +} + +/** + * wake_const_ops - wake up non-alter tasks + * @sma: semaphore array. + * @semnum: semaphore that was modified. + * @pt: list head for the tasks that must be woken up. + * + * wake_const_ops must be called after a semaphore in a semaphore array + * was set to 0. If complex const operations are pending, wake_const_ops must + * be called with semnum = -1, as well as with the number of each modified + * semaphore. + * The tasks that must be woken up are added to @pt. The return code + * is stored in q->pid. + * The function returns 1 if at least one operation was completed successfully. + */ +static int wake_const_ops(struct sem_array *sma, int semnum, + struct list_head *pt) +{ + struct sem_queue *q; + struct list_head *walk; + struct list_head *pending_list; + int semop_completed = 0; + + if (semnum == -1) + pending_list = &sma->pending_const; + else + pending_list = &sma->sem_base[semnum].pending_const; + + walk = pending_list->next; + while (walk != pending_list) { + int error; + + q = container_of(walk, struct sem_queue, list); + walk = walk->next; + + error = perform_atomic_semop(sma, q); - /* Does q->sleeper still need to sleep? */ if (error <= 0) { - struct sem_queue *n; - remove_from_queue(sma,q); - q->status = IN_WAKEUP; + /* operation completed, remove from queue & wakeup */ + + unlink_queue(sma, q); + + wake_up_sem_queue_prepare(pt, q, error); + if (error == 0) + semop_completed = 1; + } + } + return semop_completed; +} + +/** + * do_smart_wakeup_zero - wakeup all wait for zero tasks + * @sma: semaphore array + * @sops: operations that were performed + * @nsops: number of operations + * @pt: list head of the tasks that must be woken up. + * + * Checks all required queue for wait-for-zero operations, based + * on the actual changes that were performed on the semaphore array. + * The function returns 1 if at least one operation was completed successfully. + */ +static int do_smart_wakeup_zero(struct sem_array *sma, struct sembuf *sops, + int nsops, struct list_head *pt) +{ + int i; + int semop_completed = 0; + int got_zero = 0; + + /* first: the per-semaphore queues, if known */ + if (sops) { + for (i = 0; i < nsops; i++) { + int num = sops[i].sem_num; + + if (sma->sem_base[num].semval == 0) { + got_zero = 1; + semop_completed |= wake_const_ops(sma, num, pt); + } + } + } else { + /* + * No sops means modified semaphores not known. + * Assume all were changed. + */ + for (i = 0; i < sma->sem_nsems; i++) { + if (sma->sem_base[i].semval == 0) { + got_zero = 1; + semop_completed |= wake_const_ops(sma, i, pt); + } + } + } + /* + * If one of the modified semaphores got 0, + * then check the global queue, too. + */ + if (got_zero) + semop_completed |= wake_const_ops(sma, -1, pt); + + return semop_completed; +} + + +/** + * update_queue - look for tasks that can be completed. + * @sma: semaphore array. + * @semnum: semaphore that was modified. + * @pt: list head for the tasks that must be woken up. + * + * update_queue must be called after a semaphore in a semaphore array + * was modified. If multiple semaphores were modified, update_queue must + * be called with semnum = -1, as well as with the number of each modified + * semaphore. + * The tasks that must be woken up are added to @pt. The return code + * is stored in q->pid. 
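wake_const_ops() above services the "wait-for-zero" sleepers kept on the pending_const lists. At the syscall level that looks as follows (runnable demo, error handling omitted; the sleep() is only a crude way to let the child block first):

	#include <stdio.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>
	#include <sys/wait.h>
	#include <unistd.h>

	union semun { int val; struct semid_ds *buf; unsigned short *array; };

	int main(void)
	{
		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
		struct sembuf wait_zero = { .sem_num = 0, .sem_op = 0, .sem_flg = 0 };
		struct sembuf dec = { .sem_num = 0, .sem_op = -1, .sem_flg = 0 };
		union semun arg = { .val = 1 };

		semctl(id, 0, SETVAL, arg);
		if (fork() == 0) {
			/* sem_op == 0: queued on pending_const until semval == 0 */
			semop(id, &wait_zero, 1);
			printf("child: semaphore reached zero\n");
			_exit(0);
		}
		sleep(1);		/* let the child block first */
		semop(id, &dec, 1);	/* 1 -> 0 wakes the wait-for-zero sleeper */
		wait(NULL);
		semctl(id, 0, IPC_RMID, arg);
		return 0;
	}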
+ * The function internally checks if const operations can now succeed. + * + * The function return 1 if at least one semop was completed successfully. + */ +static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt) +{ + struct sem_queue *q; + struct list_head *walk; + struct list_head *pending_list; + int semop_completed = 0; + + if (semnum == -1) + pending_list = &sma->pending_alter; + else + pending_list = &sma->sem_base[semnum].pending_alter; + +again: + walk = pending_list->next; + while (walk != pending_list) { + int error, restart; + + q = container_of(walk, struct sem_queue, list); + walk = walk->next; + + /* If we are scanning the single sop, per-semaphore list of + * one semaphore and that semaphore is 0, then it is not + * necessary to scan further: simple increments + * that affect only one entry succeed immediately and cannot + * be in the per semaphore pending queue, and decrements + * cannot be successful if the value is already 0. + */ + if (semnum != -1 && sma->sem_base[semnum].semval == 0) + break; + + error = perform_atomic_semop(sma, q); + + /* Does q->sleeper still need to sleep? */ + if (error > 0) + continue; + + unlink_queue(sma, q); + + if (error) { + restart = 0; + } else { + semop_completed = 1; + do_smart_wakeup_zero(sma, q->sops, q->nsops, pt); + restart = check_restart(sma, q); + } + + wake_up_sem_queue_prepare(pt, q, error); + if (restart) + goto again; + } + return semop_completed; +} + +/** + * set_semotime - set sem_otime + * @sma: semaphore array + * @sops: operations that modified the array, may be NULL + * + * sem_otime is replicated to avoid cache line trashing. + * This function sets one instance to the current time. + */ +static void set_semotime(struct sem_array *sma, struct sembuf *sops) +{ + if (sops == NULL) { + sma->sem_base[0].sem_otime = get_seconds(); + } else { + sma->sem_base[sops[0].sem_num].sem_otime = + get_seconds(); + } +} + +/** + * do_smart_update - optimized update_queue + * @sma: semaphore array + * @sops: operations that were performed + * @nsops: number of operations + * @otime: force setting otime + * @pt: list head of the tasks that must be woken up. + * + * do_smart_update() does the required calls to update_queue and wakeup_zero, + * based on the actual changes that were performed on the semaphore array. + * Note that the function does not do the actual wake-up: the caller is + * responsible for calling wake_up_sem_queue_do(@pt). + * It is safe to perform this call after dropping all locks. + */ +static void do_smart_update(struct sem_array *sma, struct sembuf *sops, int nsops, + int otime, struct list_head *pt) +{ + int i; + + otime |= do_smart_wakeup_zero(sma, sops, nsops, pt); + + if (!list_empty(&sma->pending_alter)) { + /* semaphore array uses the global queue - just process it. */ + otime |= update_queue(sma, -1, pt); + } else { + if (!sops) { /* - * Continue scanning. The next operation - * that must be checked depends on the type of the - * completed operation: - * - if the operation modified the array, then - * restart from the head of the queue and - * check for threads that might be waiting - * for semaphore values to become 0. - * - if the operation didn't modify the array, - * then just continue. - */ - if (q->alter) - n = sma->sem_pending; - else - n = q->next; - wake_up_process(q->sleeper); - /* hands-off: q will disappear immediately after - * writing q->status. + * No sops, thus the modified semaphores are not + * known. Check all. 
*/ - smp_wmb(); - q->status = error; - q = n; + for (i = 0; i < sma->sem_nsems; i++) + otime |= update_queue(sma, i, pt); } else { - q = q->next; + /* + * Check the semaphores that were increased: + * - No complex ops, thus all sleeping ops are + * decrease. + * - if we decreased the value, then any sleeping + * semaphore ops wont be able to run: If the + * previous value was too small, then the new + * value will be too small, too. + */ + for (i = 0; i < nsops; i++) { + if (sops[i].sem_op > 0) { + otime |= update_queue(sma, + sops[i].sem_num, pt); + } + } } } + if (otime) + set_semotime(sma, sops); +} + +/* + * check_qop: Test if a queued operation sleeps on the semaphore semnum + */ +static int check_qop(struct sem_array *sma, int semnum, struct sem_queue *q, + bool count_zero) +{ + struct sembuf *sop = q->blocking; + + /* + * Linux always (since 0.99.10) reported a task as sleeping on all + * semaphores. This violates SUS, therefore it was changed to the + * standard compliant behavior. + * Give the administrators a chance to notice that an application + * might misbehave because it relies on the Linux behavior. + */ + pr_info_once("semctl(GETNCNT/GETZCNT) is since 3.16 Single Unix Specification compliant.\n" + "The task %s (%d) triggered the difference, watch for misbehavior.\n", + current->comm, task_pid_nr(current)); + + if (sop->sem_num != semnum) + return 0; + + if (count_zero && sop->sem_op == 0) + return 1; + if (!count_zero && sop->sem_op < 0) + return 1; + + return 0; } /* The following counts are associated to each semaphore: * semncnt number of tasks waiting on semval being nonzero * semzcnt number of tasks waiting on semval being zero - * This model assumes that a task waits on exactly one semaphore. - * Since semaphore operations are to be performed atomically, tasks actually - * wait on a whole sequence of semaphores simultaneously. - * The counts we return here are a rough approximation, but still - * warrant that semncnt+semzcnt>0 if the task is on the pending queue. + * + * Per definition, a task waits only on the semaphore of the first semop + * that cannot proceed, even if additional operation would block, too. */ -static int count_semncnt (struct sem_array * sma, ushort semnum) +static int count_semcnt(struct sem_array *sma, ushort semnum, + bool count_zero) { - int semncnt; - struct sem_queue * q; + struct list_head *l; + struct sem_queue *q; + int semcnt; - semncnt = 0; - for (q = sma->sem_pending; q; q = q->next) { - struct sembuf * sops = q->sops; - int nsops = q->nsops; - int i; - for (i = 0; i < nsops; i++) - if (sops[i].sem_num == semnum - && (sops[i].sem_op < 0) - && !(sops[i].sem_flg & IPC_NOWAIT)) - semncnt++; + semcnt = 0; + /* First: check the simple operations. They are easy to evaluate */ + if (count_zero) + l = &sma->sem_base[semnum].pending_const; + else + l = &sma->sem_base[semnum].pending_alter; + + list_for_each_entry(q, l, list) { + /* all task on a per-semaphore list sleep on exactly + * that semaphore + */ + semcnt++; } - return semncnt; -} -static int count_semzcnt (struct sem_array * sma, ushort semnum) -{ - int semzcnt; - struct sem_queue * q; - semzcnt = 0; - for (q = sma->sem_pending; q; q = q->next) { - struct sembuf * sops = q->sops; - int nsops = q->nsops; - int i; - for (i = 0; i < nsops; i++) - if (sops[i].sem_num == semnum - && (sops[i].sem_op == 0) - && !(sops[i].sem_flg & IPC_NOWAIT)) - semzcnt++; + /* Then: check the complex operations. 
*/ + list_for_each_entry(q, &sma->pending_alter, list) { + semcnt += check_qop(sma, semnum, q, count_zero); } - return semzcnt; + if (count_zero) { + list_for_each_entry(q, &sma->pending_const, list) { + semcnt += check_qop(sma, semnum, q, count_zero); + } + } + return semcnt; } -/* Free a semaphore set. freeary() is called with sem_ids.rw_mutex locked - * as a writer and the spinlock for this semaphore set hold. sem_ids.rw_mutex +/* Free a semaphore set. freeary() is called with sem_ids.rwsem locked + * as a writer and the spinlock for this semaphore set hold. sem_ids.rwsem * remains locked on exit. */ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { - struct sem_undo *un; - struct sem_queue *q; + struct sem_undo *un, *tu; + struct sem_queue *q, *tq; struct sem_array *sma = container_of(ipcp, struct sem_array, sem_perm); - - /* Invalidate the existing undo structures for this semaphore set. - * (They will be freed without any further action in exit_sem() - * or during the next semop.) - */ - for (un = sma->undo; un; un = un->id_next) + struct list_head tasks; + int i; + + /* Free the existing undo structures for this semaphore set. */ + ipc_assert_locked_object(&sma->sem_perm); + list_for_each_entry_safe(un, tu, &sma->list_id, list_id) { + list_del(&un->list_id); + spin_lock(&un->ulp->lock); un->semid = -1; + list_del_rcu(&un->list_proc); + spin_unlock(&un->ulp->lock); + kfree_rcu(un, rcu); + } /* Wake up all pending processes and let them fail with EIDRM. */ - q = sma->sem_pending; - while(q) { - struct sem_queue *n; - /* lazy remove_from_queue: we are killing the whole queue */ - q->prev = NULL; - n = q->next; - q->status = IN_WAKEUP; - wake_up_process(q->sleeper); /* doesn't sleep */ - smp_wmb(); - q->status = -EIDRM; /* hands-off q */ - q = n; + INIT_LIST_HEAD(&tasks); + list_for_each_entry_safe(q, tq, &sma->pending_const, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + } + + list_for_each_entry_safe(q, tq, &sma->pending_alter, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + } + for (i = 0; i < sma->sem_nsems; i++) { + struct sem *sem = sma->sem_base + i; + list_for_each_entry_safe(q, tq, &sem->pending_const, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + } + list_for_each_entry_safe(q, tq, &sem->pending_alter, list) { + unlink_queue(sma, q); + wake_up_sem_queue_prepare(&tasks, q, -EIDRM); + } } /* Remove the semaphore set from the IDR */ sem_rmid(ns, sma); - sem_unlock(sma); + sem_unlock(sma, -1); + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); ns->used_sems -= sma->sem_nsems; - security_sem_free(sma); - ipc_rcu_putref(sma); + ipc_rcu_putref(sma, sem_rcu_free); } static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct semid_ds out; + memset(&out, 0, sizeof(out)); + ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm); out.sem_otime = in->sem_otime; @@ -582,13 +1142,28 @@ static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, } } +static time_t get_semotime(struct sem_array *sma) +{ + int i; + time_t res; + + res = sma->sem_base[0].sem_otime; + for (i = 1; i < sma->sem_nsems; i++) { + time_t to = sma->sem_base[i].sem_otime; + + if (to > res) + res = to; + } + return res; +} + static int semctl_nolock(struct ipc_namespace *ns, int semid, - int cmd, int version, union 
semun arg) + int cmd, int version, void __user *p) { - int err = -EINVAL; + int err; struct sem_array *sma; - switch(cmd) { + switch (cmd) { case IPC_INFO: case SEM_INFO: { @@ -598,8 +1173,8 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, err = security_sem_semctl(NULL, cmd); if (err) return err; - - memset(&seminfo,0,sizeof(seminfo)); + + memset(&seminfo, 0, sizeof(seminfo)); seminfo.semmni = ns->sc_semmni; seminfo.semmns = ns->sc_semmns; seminfo.semmsl = ns->sc_semmsl; @@ -608,7 +1183,7 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, seminfo.semmnu = SEMMNU; seminfo.semmap = SEMMAP; seminfo.semume = SEMUME; - down_read(&sem_ids(ns).rw_mutex); + down_read(&sem_ids(ns).rwsem); if (cmd == SEM_INFO) { seminfo.semusz = sem_ids(ns).in_use; seminfo.semaem = ns->used_sems; @@ -617,114 +1192,198 @@ static int semctl_nolock(struct ipc_namespace *ns, int semid, seminfo.semaem = SEMAEM; } max_id = ipc_get_maxid(&sem_ids(ns)); - up_read(&sem_ids(ns).rw_mutex); - if (copy_to_user (arg.__buf, &seminfo, sizeof(struct seminfo))) + up_read(&sem_ids(ns).rwsem); + if (copy_to_user(p, &seminfo, sizeof(struct seminfo))) return -EFAULT; - return (max_id < 0) ? 0: max_id; + return (max_id < 0) ? 0 : max_id; } case IPC_STAT: case SEM_STAT: { struct semid64_ds tbuf; - int id; + int id = 0; + + memset(&tbuf, 0, sizeof(tbuf)); + rcu_read_lock(); if (cmd == SEM_STAT) { - sma = sem_lock(ns, semid); - if (IS_ERR(sma)) - return PTR_ERR(sma); + sma = sem_obtain_object(ns, semid); + if (IS_ERR(sma)) { + err = PTR_ERR(sma); + goto out_unlock; + } id = sma->sem_perm.id; } else { - sma = sem_lock_check(ns, semid); - if (IS_ERR(sma)) - return PTR_ERR(sma); - id = 0; + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + err = PTR_ERR(sma); + goto out_unlock; + } } err = -EACCES; - if (ipcperms (&sma->sem_perm, S_IRUGO)) + if (ipcperms(ns, &sma->sem_perm, S_IRUGO)) goto out_unlock; err = security_sem_semctl(sma, cmd); if (err) goto out_unlock; - memset(&tbuf, 0, sizeof(tbuf)); - kernel_to_ipc64_perm(&sma->sem_perm, &tbuf.sem_perm); - tbuf.sem_otime = sma->sem_otime; - tbuf.sem_ctime = sma->sem_ctime; - tbuf.sem_nsems = sma->sem_nsems; - sem_unlock(sma); - if (copy_semid_to_user (arg.buf, &tbuf, version)) + tbuf.sem_otime = get_semotime(sma); + tbuf.sem_ctime = sma->sem_ctime; + tbuf.sem_nsems = sma->sem_nsems; + rcu_read_unlock(); + if (copy_semid_to_user(p, &tbuf, version)) return -EFAULT; return id; } default: return -EINVAL; } - return err; out_unlock: - sem_unlock(sma); + rcu_read_unlock(); return err; } -static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, - int cmd, int version, union semun arg) +static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum, + unsigned long arg) { + struct sem_undo *un; struct sem_array *sma; - struct sem* curr; + struct sem *curr; int err; + struct list_head tasks; + int val; +#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN) + /* big-endian 64bit */ + val = arg >> 32; +#else + /* 32bit or little-endian 64bit */ + val = arg; +#endif + + if (val > SEMVMX || val < 0) + return -ERANGE; + + INIT_LIST_HEAD(&tasks); + + rcu_read_lock(); + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + rcu_read_unlock(); + return PTR_ERR(sma); + } + + if (semnum < 0 || semnum >= sma->sem_nsems) { + rcu_read_unlock(); + return -EINVAL; + } + + + if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) { + rcu_read_unlock(); + return -EACCES; + } + + err = security_sem_semctl(sma, SETVAL); + if (err) { + rcu_read_unlock(); + return 
-EACCES; + } + + sem_lock(sma, NULL, -1); + + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); + return -EIDRM; + } + + curr = &sma->sem_base[semnum]; + + ipc_assert_locked_object(&sma->sem_perm); + list_for_each_entry(un, &sma->list_id, list_id) + un->semadj[semnum] = 0; + + curr->semval = val; + curr->sempid = task_tgid_vnr(current); + sma->sem_ctime = get_seconds(); + /* maybe some queued-up processes were waiting for this */ + do_smart_update(sma, NULL, 0, 0, &tasks); + sem_unlock(sma, -1); + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); + return 0; +} + +static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, + int cmd, void __user *p) +{ + struct sem_array *sma; + struct sem *curr; + int err, nsems; ushort fast_sem_io[SEMMSL_FAST]; - ushort* sem_io = fast_sem_io; - int nsems; + ushort *sem_io = fast_sem_io; + struct list_head tasks; - sma = sem_lock_check(ns, semid); - if (IS_ERR(sma)) + INIT_LIST_HEAD(&tasks); + + rcu_read_lock(); + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + rcu_read_unlock(); return PTR_ERR(sma); + } nsems = sma->sem_nsems; err = -EACCES; - if (ipcperms (&sma->sem_perm, (cmd==SETVAL||cmd==SETALL)?S_IWUGO:S_IRUGO)) - goto out_unlock; + if (ipcperms(ns, &sma->sem_perm, cmd == SETALL ? S_IWUGO : S_IRUGO)) + goto out_rcu_wakeup; err = security_sem_semctl(sma, cmd); if (err) - goto out_unlock; + goto out_rcu_wakeup; err = -EACCES; switch (cmd) { case GETALL: { - ushort __user *array = arg.array; + ushort __user *array = p; int i; - if(nsems > SEMMSL_FAST) { - ipc_rcu_getref(sma); - sem_unlock(sma); - + sem_lock(sma, NULL, -1); + if (!ipc_valid_object(&sma->sem_perm)) { + err = -EIDRM; + goto out_unlock; + } + if (nsems > SEMMSL_FAST) { + if (!ipc_rcu_getref(sma)) { + err = -EIDRM; + goto out_unlock; + } + sem_unlock(sma, -1); + rcu_read_unlock(); sem_io = ipc_alloc(sizeof(ushort)*nsems); - if(sem_io == NULL) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + if (sem_io == NULL) { + ipc_rcu_putref(sma, ipc_rcu_free); return -ENOMEM; } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - if (sma->sem_perm.deleted) { - sem_unlock(sma); + rcu_read_lock(); + sem_lock_and_putref(sma); + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; - goto out_free; + goto out_unlock; } } - for (i = 0; i < sma->sem_nsems; i++) sem_io[i] = sma->sem_base[i].semval; - sem_unlock(sma); + sem_unlock(sma, -1); + rcu_read_unlock(); err = 0; - if(copy_to_user(array, sem_io, nsems*sizeof(ushort))) + if (copy_to_user(array, sem_io, nsems*sizeof(ushort))) err = -EFAULT; goto out_free; } @@ -733,61 +1392,65 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, int i; struct sem_undo *un; - ipc_rcu_getref(sma); - sem_unlock(sma); + if (!ipc_rcu_getref(sma)) { + err = -EIDRM; + goto out_rcu_wakeup; + } + rcu_read_unlock(); - if(nsems > SEMMSL_FAST) { + if (nsems > SEMMSL_FAST) { sem_io = ipc_alloc(sizeof(ushort)*nsems); - if(sem_io == NULL) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + if (sem_io == NULL) { + ipc_rcu_putref(sma, ipc_rcu_free); return -ENOMEM; } } - if (copy_from_user (sem_io, arg.array, nsems*sizeof(ushort))) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + if (copy_from_user(sem_io, p, nsems*sizeof(ushort))) { + ipc_rcu_putref(sma, ipc_rcu_free); err = -EFAULT; goto out_free; } for (i = 0; i < nsems; i++) { if (sem_io[i] > SEMVMX) { - ipc_lock_by_ptr(&sma->sem_perm); - 
ipc_rcu_putref(sma); - sem_unlock(sma); + ipc_rcu_putref(sma, ipc_rcu_free); err = -ERANGE; goto out_free; } } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - if (sma->sem_perm.deleted) { - sem_unlock(sma); + rcu_read_lock(); + sem_lock_and_putref(sma); + if (!ipc_valid_object(&sma->sem_perm)) { err = -EIDRM; - goto out_free; + goto out_unlock; } for (i = 0; i < nsems; i++) sma->sem_base[i].semval = sem_io[i]; - for (un = sma->undo; un; un = un->id_next) + + ipc_assert_locked_object(&sma->sem_perm); + list_for_each_entry(un, &sma->list_id, list_id) { for (i = 0; i < nsems; i++) un->semadj[i] = 0; + } sma->sem_ctime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - update_queue(sma); + do_smart_update(sma, NULL, 0, 0, &tasks); err = 0; goto out_unlock; } - /* GETVAL, GETPID, GETNCTN, GETZCNT, SETVAL: fall-through */ + /* GETVAL, GETPID, GETNCTN, GETZCNT: fall-through */ } err = -EINVAL; - if(semnum < 0 || semnum >= nsems) - goto out_unlock; + if (semnum < 0 || semnum >= nsems) + goto out_rcu_wakeup; + sem_lock(sma, NULL, -1); + if (!ipc_valid_object(&sma->sem_perm)) { + err = -EIDRM; + goto out_unlock; + } curr = &sma->sem_base[semnum]; switch (cmd) { @@ -798,70 +1461,42 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum, err = curr->sempid; goto out_unlock; case GETNCNT: - err = count_semncnt(sma,semnum); + err = count_semcnt(sma, semnum, 0); goto out_unlock; case GETZCNT: - err = count_semzcnt(sma,semnum); - goto out_unlock; - case SETVAL: - { - int val = arg.val; - struct sem_undo *un; - err = -ERANGE; - if (val > SEMVMX || val < 0) - goto out_unlock; - - for (un = sma->undo; un; un = un->id_next) - un->semadj[semnum] = 0; - curr->semval = val; - curr->sempid = task_tgid_vnr(current); - sma->sem_ctime = get_seconds(); - /* maybe some queued-up processes were waiting for this */ - update_queue(sma); - err = 0; + err = count_semcnt(sma, semnum, 1); goto out_unlock; } - } + out_unlock: - sem_unlock(sma); + sem_unlock(sma, -1); +out_rcu_wakeup: + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); out_free: - if(sem_io != fast_sem_io) + if (sem_io != fast_sem_io) ipc_free(sem_io, sizeof(ushort)*nsems); return err; } -struct sem_setbuf { - uid_t uid; - gid_t gid; - mode_t mode; -}; - -static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __user *buf, int version) +static inline unsigned long +copy_semid_from_user(struct semid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: - { - struct semid64_ds tbuf; - - if(copy_from_user(&tbuf, buf, sizeof(tbuf))) + if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; - - out->uid = tbuf.sem_perm.uid; - out->gid = tbuf.sem_perm.gid; - out->mode = tbuf.sem_perm.mode; - return 0; - } case IPC_OLD: { struct semid_ds tbuf_old; - if(copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) + if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->uid = tbuf_old.sem_perm.uid; - out->gid = tbuf_old.sem_perm.gid; - out->mode = tbuf_old.sem_perm.mode; + out->sem_perm.uid = tbuf_old.sem_perm.uid; + out->sem_perm.gid = tbuf_old.sem_perm.gid; + out->sem_perm.mode = tbuf_old.sem_perm.mode; return 0; } @@ -870,74 +1505,72 @@ static inline unsigned long copy_semid_from_user(struct sem_setbuf *out, void __ } } -static int semctl_down(struct ipc_namespace *ns, int semid, int semnum, - int cmd, int version, union semun arg) +/* + * This function handles some semctl commands which require the rwsem + * to be held in write 
mode. + * NOTE: no locks must be held, the rwsem is taken inside this function. + */ +static int semctl_down(struct ipc_namespace *ns, int semid, + int cmd, int version, void __user *p) { struct sem_array *sma; int err; - struct sem_setbuf uninitialized_var(setbuf); + struct semid64_ds semid64; struct kern_ipc_perm *ipcp; - if(cmd == IPC_SET) { - if(copy_semid_from_user (&setbuf, arg.buf, version)) + if (cmd == IPC_SET) { + if (copy_semid_from_user(&semid64, p, version)) return -EFAULT; } - sma = sem_lock_check_down(ns, semid); - if (IS_ERR(sma)) - return PTR_ERR(sma); - ipcp = &sma->sem_perm; + down_write(&sem_ids(ns).rwsem); + rcu_read_lock(); - err = audit_ipc_obj(ipcp); - if (err) - goto out_unlock; - - if (cmd == IPC_SET) { - err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode); - if (err) - goto out_unlock; - } - if (current->euid != ipcp->cuid && - current->euid != ipcp->uid && !capable(CAP_SYS_ADMIN)) { - err=-EPERM; - goto out_unlock; + ipcp = ipcctl_pre_down_nolock(ns, &sem_ids(ns), semid, cmd, + &semid64.sem_perm, 0); + if (IS_ERR(ipcp)) { + err = PTR_ERR(ipcp); + goto out_unlock1; } + sma = container_of(ipcp, struct sem_array, sem_perm); + err = security_sem_semctl(sma, cmd); if (err) - goto out_unlock; + goto out_unlock1; - switch(cmd){ + switch (cmd) { case IPC_RMID: + sem_lock(sma, NULL, -1); + /* freeary unlocks the ipc object and rcu */ freeary(ns, ipcp); - err = 0; - break; + goto out_up; case IPC_SET: - ipcp->uid = setbuf.uid; - ipcp->gid = setbuf.gid; - ipcp->mode = (ipcp->mode & ~S_IRWXUGO) - | (setbuf.mode & S_IRWXUGO); + sem_lock(sma, NULL, -1); + err = ipc_update_perm(&semid64.sem_perm, ipcp); + if (err) + goto out_unlock0; sma->sem_ctime = get_seconds(); - sem_unlock(sma); - err = 0; break; default: - sem_unlock(sma); err = -EINVAL; - break; + goto out_unlock1; } - return err; -out_unlock: - sem_unlock(sma); +out_unlock0: + sem_unlock(sma, -1); +out_unlock1: + rcu_read_unlock(); +out_up: + up_write(&sem_ids(ns).rwsem); return err; } -asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) +SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg) { - int err = -EINVAL; int version; struct ipc_namespace *ns; + void __user *p = (void __user *)arg; if (semid < 0) return -EINVAL; @@ -945,28 +1578,24 @@ asmlinkage long sys_semctl (int semid, int semnum, int cmd, union semun arg) version = ipc_parse_version(&cmd); ns = current->nsproxy->ipc_ns; - switch(cmd) { + switch (cmd) { case IPC_INFO: case SEM_INFO: case IPC_STAT: case SEM_STAT: - err = semctl_nolock(ns, semid, cmd, version, arg); - return err; + return semctl_nolock(ns, semid, cmd, version, p); case GETALL: case GETVAL: case GETPID: case GETNCNT: case GETZCNT: - case SETVAL: case SETALL: - err = semctl_main(ns,semid,semnum,cmd,version,arg); - return err; + return semctl_main(ns, semid, semnum, cmd, p); + case SETVAL: + return semctl_setval(ns, semid, semnum, arg); case IPC_RMID: case IPC_SET: - down_write(&sem_ids(ns).rw_mutex); - err = semctl_down(ns,semid,semnum,cmd,version,arg); - up_write(&sem_ids(ns).rw_mutex); - return err; + return semctl_down(ns, semid, cmd, version, p); default: return -EINVAL; } @@ -994,111 +1623,167 @@ static inline int get_undo_list(struct sem_undo_list **undo_listp) return -ENOMEM; spin_lock_init(&undo_list->lock); atomic_set(&undo_list->refcnt, 1); + INIT_LIST_HEAD(&undo_list->list_proc); + current->sysvsem.undo_list = undo_list; } *undo_listp = undo_list; return 0; } +static struct sem_undo *__lookup_undo(struct sem_undo_list 
*ulp, int semid) +{ + struct sem_undo *un; + + list_for_each_entry_rcu(un, &ulp->list_proc, list_proc) { + if (un->semid == semid) + return un; + } + return NULL; +} + static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid) { - struct sem_undo **last, *un; + struct sem_undo *un; - last = &ulp->proc_list; - un = *last; - while(un != NULL) { - if(un->semid==semid) - break; - if(un->semid==-1) { - *last=un->proc_next; - kfree(un); - } else { - last=&un->proc_next; - } - un=*last; + assert_spin_locked(&ulp->lock); + + un = __lookup_undo(ulp, semid); + if (un) { + list_del_rcu(&un->list_proc); + list_add_rcu(&un->list_proc, &ulp->list_proc); } return un; } -static struct sem_undo *find_undo(struct ipc_namespace *ns, int semid) +/** + * find_alloc_undo - lookup (and if not present create) undo array + * @ns: namespace + * @semid: semaphore array id + * + * The function looks up (and if not present creates) the undo structure. + * The size of the undo structure depends on the size of the semaphore + * array, thus the alloc path is not that straightforward. + * Lifetime-rules: sem_undo is rcu-protected, on success, the function + * performs a rcu_read_lock(). + */ +static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid) { struct sem_array *sma; struct sem_undo_list *ulp; struct sem_undo *un, *new; - int nsems; - int error; + int nsems, error; error = get_undo_list(&ulp); if (error) return ERR_PTR(error); + rcu_read_lock(); spin_lock(&ulp->lock); un = lookup_undo(ulp, semid); spin_unlock(&ulp->lock); - if (likely(un!=NULL)) + if (likely(un != NULL)) goto out; /* no undo structure around - allocate one. */ - sma = sem_lock_check(ns, semid); - if (IS_ERR(sma)) - return ERR_PTR(PTR_ERR(sma)); + /* step 1: figure out the size of the semaphore array */ + sma = sem_obtain_object_check(ns, semid); + if (IS_ERR(sma)) { + rcu_read_unlock(); + return ERR_CAST(sma); + } nsems = sma->sem_nsems; - ipc_rcu_getref(sma); - sem_unlock(sma); + if (!ipc_rcu_getref(sma)) { + rcu_read_unlock(); + un = ERR_PTR(-EIDRM); + goto out; + } + rcu_read_unlock(); + /* step 2: allocate new undo structure */ new = kzalloc(sizeof(struct sem_undo) + sizeof(short)*nsems, GFP_KERNEL); if (!new) { - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + ipc_rcu_putref(sma, ipc_rcu_free); return ERR_PTR(-ENOMEM); } - new->semadj = (short *) &new[1]; - new->semid = semid; - spin_lock(&ulp->lock); - un = lookup_undo(ulp, semid); - if (un) { - spin_unlock(&ulp->lock); + /* step 3: Acquire the lock on semaphore array */ + rcu_read_lock(); + sem_lock_and_putref(sma); + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); kfree(new); - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - sem_unlock(sma); + un = ERR_PTR(-EIDRM); goto out; } - ipc_lock_by_ptr(&sma->sem_perm); - ipc_rcu_putref(sma); - if (sma->sem_perm.deleted) { - sem_unlock(sma); - spin_unlock(&ulp->lock); + spin_lock(&ulp->lock); + + /* + * step 4: check for races: did someone else allocate the undo struct? 
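The race check being set up in step 4 is the classic allocate-outside-the-lock pattern: the potentially expensive allocation happens with no locks held, the lookup is then repeated under the lock, and the loser of the race frees its copy. A generic userspace rendition, with lookup, find_alloc and table as illustrative names:

	#include <pthread.h>
	#include <stdlib.h>

	struct entry { int key; struct entry *next; };

	static struct entry *table;
	static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

	static struct entry *lookup(int key)
	{
		struct entry *e;

		for (e = table; e; e = e->next)
			if (e->key == key)
				return e;
		return NULL;
	}

	static struct entry *find_alloc(int key)
	{
		struct entry *e, *new;

		pthread_mutex_lock(&table_lock);
		e = lookup(key);
		pthread_mutex_unlock(&table_lock);
		if (e)
			return e;

		new = malloc(sizeof(*new));	/* allocate without holding the lock */
		if (!new)
			return NULL;
		new->key = key;

		pthread_mutex_lock(&table_lock);
		e = lookup(key);		/* someone may have beaten us to it */
		if (e) {
			free(new);		/* lost the race: drop our copy */
		} else {
			new->next = table;
			table = new;
			e = new;
		}
		pthread_mutex_unlock(&table_lock);
		return e;
	}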
+ */ + un = lookup_undo(ulp, semid); + if (un) { kfree(new); - un = ERR_PTR(-EIDRM); - goto out; + goto success; } - new->proc_next = ulp->proc_list; - ulp->proc_list = new; - new->id_next = sma->undo; - sma->undo = new; - sem_unlock(sma); + /* step 5: initialize & link new undo structure */ + new->semadj = (short *) &new[1]; + new->ulp = ulp; + new->semid = semid; + assert_spin_locked(&ulp->lock); + list_add_rcu(&new->list_proc, &ulp->list_proc); + ipc_assert_locked_object(&sma->sem_perm); + list_add(&new->list_id, &sma->list_id); un = new; + +success: spin_unlock(&ulp->lock); + sem_unlock(sma, -1); out: return un; } -asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, - unsigned nsops, const struct timespec __user *timeout) + +/** + * get_queue_result - retrieve the result code from sem_queue + * @q: Pointer to queue structure + * + * Retrieve the return code from the pending queue. If IN_WAKEUP is found in + * q->status, then we must loop until the value is replaced with the final + * value: This may happen if a task is woken up by an unrelated event (e.g. + * signal) and in parallel the task is woken up by another task because it got + * the requested semaphores. + * + * The function can be called with or without holding the semaphore spinlock. + */ +static int get_queue_result(struct sem_queue *q) +{ + int error; + + error = q->status; + while (unlikely(error == IN_WAKEUP)) { + cpu_relax(); + error = q->status; + } + + return error; +} + +SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops, + unsigned, nsops, const struct timespec __user *, timeout) { int error = -EINVAL; struct sem_array *sma; struct sembuf fast_sops[SEMOPM_FAST]; - struct sembuf* sops = fast_sops, *sop; + struct sembuf *sops = fast_sops, *sop; struct sem_undo *un; - int undos = 0, alter = 0, max; + int undos = 0, alter = 0, max, locknum; struct sem_queue queue; unsigned long jiffies_left = 0; struct ipc_namespace *ns; + struct list_head tasks; ns = current->nsproxy->ipc_ns; @@ -1106,13 +1791,13 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, return -EINVAL; if (nsops > ns->sc_semopm) return -E2BIG; - if(nsops > SEMOPM_FAST) { - sops = kmalloc(sizeof(*sops)*nsops,GFP_KERNEL); - if(sops==NULL) + if (nsops > SEMOPM_FAST) { + sops = kmalloc(sizeof(*sops)*nsops, GFP_KERNEL); + if (sops == NULL) return -ENOMEM; } - if (copy_from_user (sops, tsops, nsops * sizeof(*tsops))) { - error=-EFAULT; + if (copy_from_user(sops, tsops, nsops * sizeof(*tsops))) { + error = -EFAULT; goto out_free; } if (timeout) { @@ -1138,120 +1823,191 @@ asmlinkage long sys_semtimedop(int semid, struct sembuf __user *tsops, alter = 1; } -retry_undos: + INIT_LIST_HEAD(&tasks); + if (undos) { - un = find_undo(ns, semid); + /* On success, find_alloc_undo takes the rcu_read_lock */ + un = find_alloc_undo(ns, semid); if (IS_ERR(un)) { error = PTR_ERR(un); goto out_free; } - } else + } else { un = NULL; + rcu_read_lock(); + } - sma = sem_lock_check(ns, semid); + sma = sem_obtain_object_check(ns, semid); if (IS_ERR(sma)) { + rcu_read_unlock(); error = PTR_ERR(sma); goto out_free; } - /* - * semid identifiers are not unique - find_undo may have - * allocated an undo structure, it was invalidated by an RMID - * and now a new array with received the same id. Check and retry. - */ - if (un && un->semid == -1) { - sem_unlock(sma); - goto retry_undos; - } error = -EFBIG; if (max >= sma->sem_nsems) - goto out_unlock_free; + goto out_rcu_wakeup; error = -EACCES; - if (ipcperms(&sma->sem_perm, alter ? 
S_IWUGO : S_IRUGO)) - goto out_unlock_free; + if (ipcperms(ns, &sma->sem_perm, alter ? S_IWUGO : S_IRUGO)) + goto out_rcu_wakeup; error = security_sem_semop(sma, sops, nsops, alter); if (error) - goto out_unlock_free; + goto out_rcu_wakeup; - error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current)); - if (error <= 0) { - if (alter && error == 0) - update_queue (sma); + error = -EIDRM; + locknum = sem_lock(sma, sops, nsops); + /* + * We eventually might perform the following check in a lockless + * fashion, considering ipc_valid_object() locking constraints. + * If nsops == 1 and there is no contention for sem_perm.lock, then + * only a per-semaphore lock is held and it's OK to proceed with the + * check below. More details on the fine grained locking scheme + * entangled here and why it's RMID race safe on comments at sem_lock() + */ + if (!ipc_valid_object(&sma->sem_perm)) goto out_unlock_free; - } - - /* We need to sleep on this operation, so we put the current - * task into the pending queue and go to sleep. + /* + * semid identifiers are not unique - find_alloc_undo may have + * allocated an undo structure, it was invalidated by an RMID + * and now a new array with received the same id. Check and fail. + * This case can be detected checking un->semid. The existence of + * "un" itself is guaranteed by rcu. */ - - queue.sma = sma; + if (un && un->semid == -1) + goto out_unlock_free; + queue.sops = sops; queue.nsops = nsops; queue.undo = un; queue.pid = task_tgid_vnr(current); - queue.id = semid; queue.alter = alter; - if (alter) - append_to_queue(sma ,&queue); - else - prepend_to_queue(sma ,&queue); + + error = perform_atomic_semop(sma, &queue); + if (error == 0) { + /* If the operation was successful, then do + * the required updates. + */ + if (alter) + do_smart_update(sma, sops, nsops, 1, &tasks); + else + set_semotime(sma, sops); + } + if (error <= 0) + goto out_unlock_free; + + /* We need to sleep on this operation, so we put the current + * task into the pending queue and go to sleep. + */ + + if (nsops == 1) { + struct sem *curr; + curr = &sma->sem_base[sops->sem_num]; + + if (alter) { + if (sma->complex_count) { + list_add_tail(&queue.list, + &sma->pending_alter); + } else { + + list_add_tail(&queue.list, + &curr->pending_alter); + } + } else { + list_add_tail(&queue.list, &curr->pending_const); + } + } else { + if (!sma->complex_count) + merge_queues(sma); + + if (alter) + list_add_tail(&queue.list, &sma->pending_alter); + else + list_add_tail(&queue.list, &sma->pending_const); + + sma->complex_count++; + } queue.status = -EINTR; queue.sleeper = current; + +sleep_again: current->state = TASK_INTERRUPTIBLE; - sem_unlock(sma); + sem_unlock(sma, locknum); + rcu_read_unlock(); if (timeout) jiffies_left = schedule_timeout(jiffies_left); else schedule(); - error = queue.status; - while(unlikely(error == IN_WAKEUP)) { - cpu_relax(); - error = queue.status; - } + error = get_queue_result(&queue); if (error != -EINTR) { /* fast path: update_queue already obtained all requested - * resources */ + * resources. + * Perform a smp_mb(): User space could assume that semop() + * is a memory barrier: Without the mb(), the cpu could + * speculatively read in user space stale data that was + * overwritten by the previous owner of the semaphore. + */ + smp_mb(); + goto out_free; } - sma = sem_lock(ns, semid); + rcu_read_lock(); + sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum); + + /* + * Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing. 
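+	 * (A sketch of the handshake relied on here; it assumes the waker
+	 * side publishes IN_WAKEUP before the final status, which is what
+	 * get_queue_result() above spins on:
+	 *
+	 *	waker				sleeper
+	 *	q->status = IN_WAKEUP;		error = q->status;
+	 *	wake_up_process(q->sleeper);	while (error == IN_WAKEUP) {
+	 *	smp_wmb();			        cpu_relax();
+	 *	q->status = final_error;	        error = q->status;
+	 *					}
+	 *
+	 * IN_WAKEUP is therefore only ever a transient value.)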
+ */ + error = get_queue_result(&queue); + + /* + * Array removed? If yes, leave without sem_unlock(). + */ if (IS_ERR(sma)) { - BUG_ON(queue.prev != NULL); - error = -EIDRM; + rcu_read_unlock(); goto out_free; } + /* - * If queue.status != -EINTR we are woken up by another process + * If queue.status != -EINTR we are woken up by another process. + * Leave without unlink_queue(), but with sem_unlock(). */ - error = queue.status; - if (error != -EINTR) { + if (error != -EINTR) goto out_unlock_free; - } /* * If an interrupt occurred we have to clean up the queue */ if (timeout && jiffies_left == 0) error = -EAGAIN; - remove_from_queue(sma,&queue); - goto out_unlock_free; + + /* + * If the wakeup was spurious, just retry + */ + if (error == -EINTR && !signal_pending(current)) + goto sleep_again; + + unlink_queue(sma, &queue); out_unlock_free: - sem_unlock(sma); + sem_unlock(sma, locknum); +out_rcu_wakeup: + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); out_free: - if(sops != fast_sops) + if (sops != fast_sops) kfree(sops); return error; } -asmlinkage long sys_semop (int semid, struct sembuf __user *tsops, unsigned nsops) +SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops, + unsigned, nsops) { return sys_semtimedop(semid, tsops, nsops, NULL); } @@ -1271,7 +2027,7 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) return error; atomic_inc(&undo_list->refcnt); tsk->sysvsem.undo_list = undo_list; - } else + } else tsk->sysvsem.undo_list = NULL; return 0; @@ -1291,55 +2047,72 @@ int copy_semundo(unsigned long clone_flags, struct task_struct *tsk) */ void exit_sem(struct task_struct *tsk) { - struct sem_undo_list *undo_list; - struct sem_undo *u, **up; - struct ipc_namespace *ns; + struct sem_undo_list *ulp; - undo_list = tsk->sysvsem.undo_list; - if (!undo_list) + ulp = tsk->sysvsem.undo_list; + if (!ulp) return; + tsk->sysvsem.undo_list = NULL; - if (!atomic_dec_and_test(&undo_list->refcnt)) + if (!atomic_dec_and_test(&ulp->refcnt)) return; - ns = tsk->nsproxy->ipc_ns; - /* There's no need to hold the semundo list lock, as current - * is the last task exiting for this undo list. - */ - for (up = &undo_list->proc_list; (u = *up); *up = u->proc_next, kfree(u)) { + for (;;) { struct sem_array *sma; - int nsems, i; - struct sem_undo *un, **unp; - int semid; - - semid = u->semid; + struct sem_undo *un; + struct list_head tasks; + int semid, i; + + rcu_read_lock(); + un = list_entry_rcu(ulp->list_proc.next, + struct sem_undo, list_proc); + if (&un->list_proc == &ulp->list_proc) + semid = -1; + else + semid = un->semid; + + if (semid == -1) { + rcu_read_unlock(); + break; + } + + sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); + /* exit_sem raced with IPC_RMID, nothing to do */ + if (IS_ERR(sma)) { + rcu_read_unlock(); + continue; + } - if(semid == -1) + sem_lock(sma, NULL, -1); + /* exit_sem raced with IPC_RMID, nothing to do */ + if (!ipc_valid_object(&sma->sem_perm)) { + sem_unlock(sma, -1); + rcu_read_unlock(); continue; - sma = sem_lock(ns, semid); - if (IS_ERR(sma)) + } + un = __lookup_undo(ulp, semid); + if (un == NULL) { + /* exit_sem raced with IPC_RMID+semget() that created + * exactly the same semid. Nothing to do. 
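+			 * (Concrete interleaving, for illustration:
+			 *	A: exit_sem() reads un for semid 5 from ulp
+			 *	B: semctl(5, IPC_RMID) frees the array, unlinks un
+			 *	B: semget(...) creates a new array reusing id 5
+			 *	A: sem_obtain_object_check(5) now succeeds on the
+			 *	   new array, but __lookup_undo(ulp, 5) no longer
+			 *	   finds un, so the stale adjustment is, correctly,
+			 *	   never applied.)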
+ */ + sem_unlock(sma, -1); + rcu_read_unlock(); continue; + } - if (u->semid == -1) - goto next_entry; + /* remove un from the linked lists */ + ipc_assert_locked_object(&sma->sem_perm); + list_del(&un->list_id); - BUG_ON(sem_checkid(sma, u->semid)); + spin_lock(&ulp->lock); + list_del_rcu(&un->list_proc); + spin_unlock(&ulp->lock); - /* remove u from the sma->undo list */ - for (unp = &sma->undo; (un = *unp); unp = &un->id_next) { - if (u == un) - goto found; - } - printk ("exit_sem undo list error id=%d\n", u->semid); - goto next_entry; -found: - *unp = un->id_next; - /* perform adjustments registered in u */ - nsems = sma->sem_nsems; - for (i = 0; i < nsems; i++) { - struct sem * semaphore = &sma->sem_base[i]; - if (u->semadj[i]) { - semaphore->semval += u->semadj[i]; + /* perform adjustments registered in un */ + for (i = 0; i < sma->sem_nsems; i++) { + struct sem *semaphore = &sma->sem_base[i]; + if (un->semadj[i]) { + semaphore->semval += un->semadj[i]; /* * Range checks of the new semaphore value, * not defined by sus: @@ -1351,7 +2124,7 @@ found: * Linux caps the semaphore value, both at 0 * and at SEMVMX. * - * Manfred <manfred@colorfullife.com> + * Manfred <manfred@colorfullife.com> */ if (semaphore->semval < 0) semaphore->semval = 0; @@ -1360,31 +2133,46 @@ found: semaphore->sempid = task_tgid_vnr(current); } } - sma->sem_otime = get_seconds(); /* maybe some queued-up processes were waiting for this */ - update_queue(sma); -next_entry: - sem_unlock(sma); + INIT_LIST_HEAD(&tasks); + do_smart_update(sma, NULL, 0, 1, &tasks); + sem_unlock(sma, -1); + rcu_read_unlock(); + wake_up_sem_queue_do(&tasks); + + kfree_rcu(un, rcu); } - kfree(undo_list); + kfree(ulp); } #ifdef CONFIG_PROC_FS static int sysvipc_sem_proc_show(struct seq_file *s, void *it) { + struct user_namespace *user_ns = seq_user_ns(s); struct sem_array *sma = it; + time_t sem_otime; + + /* + * The proc interface isn't aware of sem_lock(), it calls + * ipc_lock_object() directly (in sysvipc_find_ipc). + * In order to stay compatible with sem_lock(), we must wait until + * all simple semop() calls have left their critical regions. + */ + sem_wait_array(sma); + + sem_otime = get_semotime(sma); return seq_printf(s, - "%10d %10d %4o %10lu %5u %5u %5u %5u %10lu %10lu\n", + "%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n", sma->sem_perm.key, sma->sem_perm.id, sma->sem_perm.mode, sma->sem_nsems, - sma->sem_perm.uid, - sma->sem_perm.gid, - sma->sem_perm.cuid, - sma->sem_perm.cgid, - sma->sem_otime, + from_kuid_munged(user_ns, sma->sem_perm.uid), + from_kgid_munged(user_ns, sma->sem_perm.gid), + from_kuid_munged(user_ns, sma->sem_perm.cuid), + from_kgid_munged(user_ns, sma->sem_perm.cgid), + sem_otime, sma->sem_ctime); } #endif diff --git a/ipc/shm.c b/ipc/shm.c index cc63fae02f0..89fc354156c 100644 --- a/ipc/shm.c +++ b/ipc/shm.c @@ -19,6 +19,9 @@ * namespaces support * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> + * + * Better ipc lock (kern_ipc_perm.lock) handling + * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013. 
*/ #include <linux/slab.h> @@ -40,7 +43,7 @@ #include <linux/mount.h> #include <linux/ipc_namespace.h> -#include <asm/uaccess.h> +#include <linux/uaccess.h> #include "util.h" @@ -54,18 +57,17 @@ struct shm_file_data { #define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data)) static const struct file_operations shm_file_operations; -static struct vm_operations_struct shm_vm_ops; +static const struct vm_operations_struct shm_vm_ops; #define shm_ids(ns) ((ns)->ids[IPC_SHM_IDS]) #define shm_unlock(shp) \ ipc_unlock(&(shp)->shm_perm) -#define shm_buildid(id, seq) ipc_buildid(id, seq) static int newseg(struct ipc_namespace *, struct ipc_params *); static void shm_open(struct vm_area_struct *vma); static void shm_close(struct vm_area_struct *vma); -static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp); +static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp); #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it); #endif @@ -75,20 +77,21 @@ void shm_init_ns(struct ipc_namespace *ns) ns->shm_ctlmax = SHMMAX; ns->shm_ctlall = SHMALL; ns->shm_ctlmni = SHMMNI; + ns->shm_rmid_forced = 0; ns->shm_tot = 0; - ipc_init_ids(&ns->ids[IPC_SHM_IDS]); + ipc_init_ids(&shm_ids(ns)); } /* - * Called with shm_ids.rw_mutex (writer) and the shp structure locked. - * Only shm_ids.rw_mutex remains locked on exit. + * Called with shm_ids.rwsem (writer) and the shp structure locked. + * Only shm_ids.rwsem remains locked on exit. */ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) { struct shmid_kernel *shp; shp = container_of(ipcp, struct shmid_kernel, shm_perm); - if (shp->shm_nattch){ + if (shp->shm_nattch) { shp->shm_perm.mode |= SHM_DEST; /* Do not find it any more */ shp->shm_perm.key = IPC_PRIVATE; @@ -101,47 +104,52 @@ static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp) void shm_exit_ns(struct ipc_namespace *ns) { free_ipcs(ns, &shm_ids(ns), do_shm_rmid); + idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr); } #endif -void __init shm_init (void) +static int __init ipc_ns_init(void) { shm_init_ns(&init_ipc_ns); + return 0; +} + +pure_initcall(ipc_ns_init); + +void __init shm_init(void) +{ ipc_init_proc_interface("sysvipc/shm", - " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n", +#if BITS_PER_LONG <= 32 + " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", +#else + " key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime rss swap\n", +#endif IPC_SHM_IDS, sysvipc_shm_proc_show); } -/* - * shm_lock_(check_)down routines are called in the paths where the rw_mutex - * is held to protect access to the idr tree. 
- */ -static inline struct shmid_kernel *shm_lock_down(struct ipc_namespace *ns, - int id) +static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_lock_down(&shm_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object(&shm_ids(ns), id); if (IS_ERR(ipcp)) - return (struct shmid_kernel *)ipcp; + return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } -static inline struct shmid_kernel *shm_lock_check_down( - struct ipc_namespace *ns, - int id) +static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id) { - struct kern_ipc_perm *ipcp = ipc_lock_check_down(&shm_ids(ns), id); + struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id); if (IS_ERR(ipcp)) - return (struct shmid_kernel *)ipcp; + return ERR_CAST(ipcp); return container_of(ipcp, struct shmid_kernel, shm_perm); } /* - * shm_lock_(check_) routines are called in the paths where the rw_mutex - * is not held. + * shm_lock_(check_) routines are called in the paths where the rwsem + * is not necessarily held. */ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) { @@ -153,15 +161,19 @@ static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id) return container_of(ipcp, struct shmid_kernel, shm_perm); } -static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns, - int id) +static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp) { - struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id); + rcu_read_lock(); + ipc_lock_object(&ipcp->shm_perm); +} - if (IS_ERR(ipcp)) - return (struct shmid_kernel *)ipcp; +static void shm_rcu_free(struct rcu_head *head) +{ + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); + struct shmid_kernel *shp = ipc_rcu_to_struct(p); - return container_of(ipcp, struct shmid_kernel, shm_perm); + security_shm_free(shp); + ipc_rcu_free(head); } static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) @@ -169,12 +181,6 @@ static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s) ipc_rmid(&shm_ids(ns), &s->shm_perm); } -static inline int shm_addid(struct ipc_namespace *ns, struct shmid_kernel *shp) -{ - return ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); -} - - /* This is called by fork, once for every shm attach. */ static void shm_open(struct vm_area_struct *vma) @@ -197,22 +203,41 @@ static void shm_open(struct vm_area_struct *vma) * @ns: namespace * @shp: struct to free * - * It has to be called with shp and shm_ids.rw_mutex (writer) locked, + * It has to be called with shp and shm_ids.rwsem (writer) locked, * but returns with shp unlocked and freed. 
*/ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) { + struct file *shm_file; + + shm_file = shp->shm_file; + shp->shm_file = NULL; ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT; shm_rmid(ns, shp); shm_unlock(shp); - if (!is_file_hugepages(shp->shm_file)) - shmem_lock(shp->shm_file, 0, shp->mlock_user); - else - user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size, - shp->mlock_user); - fput (shp->shm_file); - security_shm_free(shp); - ipc_rcu_putref(shp); + if (!is_file_hugepages(shm_file)) + shmem_lock(shm_file, 0, shp->mlock_user); + else if (shp->mlock_user) + user_shm_unlock(file_inode(shm_file)->i_size, shp->mlock_user); + fput(shm_file); + ipc_rcu_putref(shp, shm_rcu_free); +} + +/* + * shm_may_destroy - identifies whether shm segment should be destroyed now + * + * Returns true if and only if there are no active users of the segment and + * one of the following is true: + * + * 1) shmctl(id, IPC_RMID, NULL) was called for this shp + * + * 2) sysctl kernel.shm_rmid_forced is set to 1. + */ +static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) +{ + return (shp->shm_nattch == 0) && + (ns->shm_rmid_forced || + (shp->shm_perm.mode & SHM_DEST)); } /* @@ -223,24 +248,100 @@ static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp) */ static void shm_close(struct vm_area_struct *vma) { - struct file * file = vma->vm_file; + struct file *file = vma->vm_file; struct shm_file_data *sfd = shm_file_data(file); struct shmid_kernel *shp; struct ipc_namespace *ns = sfd->ns; - down_write(&shm_ids(ns).rw_mutex); + down_write(&shm_ids(ns).rwsem); /* remove from the list of attaches of the shm segment */ - shp = shm_lock_down(ns, sfd->id); + shp = shm_lock(ns, sfd->id); BUG_ON(IS_ERR(shp)); shp->shm_lprid = task_tgid_vnr(current); shp->shm_dtim = get_seconds(); shp->shm_nattch--; - if(shp->shm_nattch == 0 && - shp->shm_perm.mode & SHM_DEST) + if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); - up_write(&shm_ids(ns).rw_mutex); + up_write(&shm_ids(ns).rwsem); +} + +/* Called with ns->shm_ids(ns).rwsem locked */ +static int shm_try_destroy_current(int id, void *p, void *data) +{ + struct ipc_namespace *ns = data; + struct kern_ipc_perm *ipcp = p; + struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); + + if (shp->shm_creator != current) + return 0; + + /* + * Mark it as orphaned to destroy the segment when + * kernel.shm_rmid_forced is changed. + * It is noop if the following shm_may_destroy() returns true. + */ + shp->shm_creator = NULL; + + /* + * Don't even try to destroy it. If shm_rmid_forced=0 and IPC_RMID + * is not set, it shouldn't be deleted here. + */ + if (!ns->shm_rmid_forced) + return 0; + + if (shm_may_destroy(ns, shp)) { + shm_lock_by_ptr(shp); + shm_destroy(ns, shp); + } + return 0; +} + +/* Called with ns->shm_ids(ns).rwsem locked */ +static int shm_try_destroy_orphaned(int id, void *p, void *data) +{ + struct ipc_namespace *ns = data; + struct kern_ipc_perm *ipcp = p; + struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm); + + /* + * We want to destroy segments without users and with already + * exit'ed originating process. + * + * As shp->* are changed under rwsem, it's safe to skip shp locking. 
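+ * (Context, as a sketch of the intended use: with
+ *	sysctl kernel.shm_rmid_forced = 1
+ * this scan reclaims segments whose creator has exited and whose
+ * shm_nattch is 0, instead of leaving them pinned until an explicit
+ * shmctl(id, IPC_RMID, NULL).)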
+ */ + if (shp->shm_creator != NULL) + return 0; + + if (shm_may_destroy(ns, shp)) { + shm_lock_by_ptr(shp); + shm_destroy(ns, shp); + } + return 0; +} + +void shm_destroy_orphaned(struct ipc_namespace *ns) +{ + down_write(&shm_ids(ns).rwsem); + if (shm_ids(ns).in_use) + idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns); + up_write(&shm_ids(ns).rwsem); +} + + +void exit_shm(struct task_struct *task) +{ + struct ipc_namespace *ns = task->nsproxy->ipc_ns; + + if (shm_ids(ns).in_use == 0) + return; + + /* Destroy all already created segments, but not mapped yet */ + down_write(&shm_ids(ns).rwsem); + if (shm_ids(ns).in_use) + idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_current, ns); + up_write(&shm_ids(ns).rwsem); } static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) @@ -271,16 +372,14 @@ static struct mempolicy *shm_get_policy(struct vm_area_struct *vma, if (sfd->vm_ops->get_policy) pol = sfd->vm_ops->get_policy(vma, addr); - else if (vma->vm_policy) { + else if (vma->vm_policy) pol = vma->vm_policy; - mpol_get(pol); /* get_vma_policy() expects this */ - } else - pol = current->mempolicy; + return pol; } #endif -static int shm_mmap(struct file * file, struct vm_area_struct * vma) +static int shm_mmap(struct file *file, struct vm_area_struct *vma) { struct shm_file_data *sfd = shm_file_data(file); int ret; @@ -308,46 +407,60 @@ static int shm_release(struct inode *ino, struct file *file) return 0; } -static int shm_fsync(struct file *file, struct dentry *dentry, int datasync) +static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync) { - int (*fsync) (struct file *, struct dentry *, int datasync); struct shm_file_data *sfd = shm_file_data(file); - int ret = -EINVAL; - fsync = sfd->file->f_op->fsync; - if (fsync) - ret = fsync(sfd->file, sfd->file->f_path.dentry, datasync); - return ret; + if (!sfd->file->f_op->fsync) + return -EINVAL; + return sfd->file->f_op->fsync(sfd->file, start, end, datasync); } -static unsigned long shm_get_unmapped_area(struct file *file, - unsigned long addr, unsigned long len, unsigned long pgoff, - unsigned long flags) +static long shm_fallocate(struct file *file, int mode, loff_t offset, + loff_t len) { struct shm_file_data *sfd = shm_file_data(file); - return get_unmapped_area(sfd->file, addr, len, pgoff, flags); + + if (!sfd->file->f_op->fallocate) + return -EOPNOTSUPP; + return sfd->file->f_op->fallocate(file, mode, offset, len); } -int is_file_shm_hugepages(struct file *file) +static unsigned long shm_get_unmapped_area(struct file *file, + unsigned long addr, unsigned long len, unsigned long pgoff, + unsigned long flags) { - int ret = 0; - - if (file->f_op == &shm_file_operations) { - struct shm_file_data *sfd; - sfd = shm_file_data(file); - ret = is_file_hugepages(sfd->file); - } - return ret; + struct shm_file_data *sfd = shm_file_data(file); + return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len, + pgoff, flags); } static const struct file_operations shm_file_operations = { .mmap = shm_mmap, .fsync = shm_fsync, .release = shm_release, +#ifndef CONFIG_MMU .get_unmapped_area = shm_get_unmapped_area, +#endif + .llseek = noop_llseek, + .fallocate = shm_fallocate, }; -static struct vm_operations_struct shm_vm_ops = { +static const struct file_operations shm_file_operations_huge = { + .mmap = shm_mmap, + .fsync = shm_fsync, + .release = shm_release, + .get_unmapped_area = shm_get_unmapped_area, + .llseek = noop_llseek, + .fallocate = shm_fallocate, +}; + +int is_file_shm_hugepages(struct 
file *file) +{ + return file->f_op == &shm_file_operations_huge; +} + +static const struct vm_operations_struct shm_vm_ops = { .open = shm_open, /* callback for a new vm-area open */ .close = shm_close, /* callback for when the vm-area is released */ .fault = shm_fault, @@ -362,9 +475,8 @@ static struct vm_operations_struct shm_vm_ops = { * @ns: namespace * @params: ptr to the structure that contains key, size and shmflg * - * Called with shm_ids.rw_mutex held as a writer. + * Called with shm_ids.rwsem held as a writer. */ - static int newseg(struct ipc_namespace *ns, struct ipc_params *params) { key_t key = params->key; @@ -372,15 +484,20 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) size_t size = params->u.size; int error; struct shmid_kernel *shp; - int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT; - struct file * file; + size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + struct file *file; char name[13]; int id; + vm_flags_t acctflag = 0; if (size < SHMMIN || size > ns->shm_ctlmax) return -EINVAL; - if (ns->shm_tot + numpages > ns->shm_ctlall) + if (numpages << PAGE_SHIFT < size) + return -ENOSPC; + + if (ns->shm_tot + numpages < ns->shm_tot || + ns->shm_tot + numpages > ns->shm_ctlall) return -ENOSPC; shp = ipc_rcu_alloc(sizeof(*shp)); @@ -394,31 +511,43 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_perm.security = NULL; error = security_shm_alloc(shp); if (error) { - ipc_rcu_putref(shp); + ipc_rcu_putref(shp, ipc_rcu_free); return error; } - sprintf (name, "SYSV%08x", key); + sprintf(name, "SYSV%08x", key); if (shmflg & SHM_HUGETLB) { - /* hugetlb_file_setup takes care of mlock user accounting */ - file = hugetlb_file_setup(name, size); - shp->mlock_user = current->user; + struct hstate *hs; + size_t hugesize; + + hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); + if (!hs) { + error = -EINVAL; + goto no_file; + } + hugesize = ALIGN(size, huge_page_size(hs)); + + /* hugetlb_file_setup applies strict accounting */ + if (shmflg & SHM_NORESERVE) + acctflag = VM_NORESERVE; + file = hugetlb_file_setup(name, hugesize, acctflag, + &shp->mlock_user, HUGETLB_SHMFS_INODE, + (shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK); } else { - int acctflag = VM_ACCOUNT; /* * Do not allow no accounting for OVERCOMMIT_NEVER, even - * if it's asked for. + * if it's asked for. */ if ((shmflg & SHM_NORESERVE) && sysctl_overcommit_memory != OVERCOMMIT_NEVER) - acctflag = 0; + acctflag = VM_NORESERVE; file = shmem_file_setup(name, size, acctflag); } error = PTR_ERR(file); if (IS_ERR(file)) goto no_file; - id = shm_addid(ns, shp); + id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni); if (id < 0) { error = id; goto no_id; @@ -430,29 +559,33 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) shp->shm_ctim = get_seconds(); shp->shm_segsz = size; shp->shm_nattch = 0; - shp->shm_perm.id = shm_buildid(id, shp->shm_perm.seq); shp->shm_file = file; + shp->shm_creator = current; + /* * shmid gets reported as "inode#" in /proc/pid/maps. * proc-ps tools use this. Changing this will break them. 
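	 * (Illustration: for a segment whose shm_perm.id is 65538, an
	 * attached mapping appears in /proc/<pid>/maps roughly as
	 *	7f2c4a800000-7f2c4a801000 rw-s 00000000 00:04 65538 /SYSV0000002a
	 * where 65538 is the i_ino set below; the address, device and key
	 * values here are a made-up example.)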
*/ - file->f_dentry->d_inode->i_ino = shp->shm_perm.id; + file_inode(file)->i_ino = shp->shm_perm.id; ns->shm_tot += numpages; error = shp->shm_perm.id; - shm_unlock(shp); + + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); return error; no_id: + if (is_file_hugepages(file) && shp->mlock_user) + user_shm_unlock(size, shp->mlock_user); fput(file); no_file: - security_shm_free(shp); - ipc_rcu_putref(shp); + ipc_rcu_putref(shp, shm_rcu_free); return error; } /* - * Called with shm_ids.rw_mutex and ipcp locked. + * Called with shm_ids.rwsem and ipcp locked. */ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) { @@ -463,7 +596,7 @@ static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg) } /* - * Called with shm_ids.rw_mutex and ipcp locked. + * Called with shm_ids.rwsem and ipcp locked. */ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, struct ipc_params *params) @@ -477,18 +610,18 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp, return 0; } -asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) +SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg) { struct ipc_namespace *ns; - struct ipc_ops shm_ops; + static const struct ipc_ops shm_ops = { + .getnew = newseg, + .associate = shm_security, + .more_checks = shm_more_checks, + }; struct ipc_params shm_params; ns = current->nsproxy->ipc_ns; - shm_ops.getnew = newseg; - shm_ops.associate = shm_security; - shm_ops.more_checks = shm_more_checks; - shm_params.key = key; shm_params.flg = shmflg; shm_params.u.size = size; @@ -498,13 +631,14 @@ asmlinkage long sys_shmget (key_t key, size_t size, int shmflg) static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct shmid_ds out; + memset(&out, 0, sizeof(out)); ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm); out.shm_segsz = in->shm_segsz; out.shm_atime = in->shm_atime; @@ -521,28 +655,14 @@ static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ } } -struct shm_setbuf { - uid_t uid; - gid_t gid; - mode_t mode; -}; - -static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __user *buf, int version) +static inline unsigned long +copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version) { - switch(version) { + switch (version) { case IPC_64: - { - struct shmid64_ds tbuf; - - if (copy_from_user(&tbuf, buf, sizeof(tbuf))) + if (copy_from_user(out, buf, sizeof(*out))) return -EFAULT; - - out->uid = tbuf.shm_perm.uid; - out->gid = tbuf.shm_perm.gid; - out->mode = tbuf.shm_perm.mode; - return 0; - } case IPC_OLD: { struct shmid_ds tbuf_old; @@ -550,9 +670,9 @@ static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __ if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old))) return -EFAULT; - out->uid = tbuf_old.shm_perm.uid; - out->gid = tbuf_old.shm_perm.gid; - out->mode = tbuf_old.shm_perm.mode; + out->shm_perm.uid = tbuf_old.shm_perm.uid; + out->shm_perm.gid = tbuf_old.shm_perm.gid; + out->shm_perm.mode = tbuf_old.shm_perm.mode; return 0; } @@ -563,14 +683,14 @@ static inline unsigned long copy_shmid_from_user(struct shm_setbuf *out, void __ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version) { - switch(version) { + switch (version) { case IPC_64: return copy_to_user(buf, in, sizeof(*in)); case IPC_OLD: { struct 
shminfo out; - if(in->shmmax > INT_MAX) + if (in->shmmax > INT_MAX) out.shmmax = INT_MAX; else out.shmmax = (int)in->shmmax; @@ -578,7 +698,7 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf out.shmmin = in->shmmin; out.shmmni = in->shmmni; out.shmseg = in->shmseg; - out.shmall = in->shmall; + out.shmall = in->shmall; return copy_to_user(buf, &out, sizeof(out)); } @@ -588,7 +708,35 @@ static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminf } /* - * Called with shm_ids.rw_mutex held as a reader + * Calculate and add used RSS and swap pages of a shm. + * Called with shm_ids.rwsem held as a reader + */ +static void shm_add_rss_swap(struct shmid_kernel *shp, + unsigned long *rss_add, unsigned long *swp_add) +{ + struct inode *inode; + + inode = file_inode(shp->shm_file); + + if (is_file_hugepages(shp->shm_file)) { + struct address_space *mapping = inode->i_mapping; + struct hstate *h = hstate_file(shp->shm_file); + *rss_add += pages_per_huge_page(h) * mapping->nrpages; + } else { +#ifdef CONFIG_SHMEM + struct shmem_inode_info *info = SHMEM_I(inode); + spin_lock(&info->lock); + *rss_add += inode->i_mapping->nrpages; + *swp_add += info->swapped; + spin_unlock(&info->lock); +#else + *rss_add += inode->i_mapping->nrpages; +#endif + } +} + +/* + * Called with shm_ids.rwsem held as a reader */ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, unsigned long *swp) @@ -602,68 +750,113 @@ static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss, in_use = shm_ids(ns).in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { + struct kern_ipc_perm *ipc; struct shmid_kernel *shp; - struct inode *inode; - shp = idr_find(&shm_ids(ns).ipcs_idr, next_id); - if (shp == NULL) + ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id); + if (ipc == NULL) continue; + shp = container_of(ipc, struct shmid_kernel, shm_perm); - inode = shp->shm_file->f_path.dentry->d_inode; - - if (is_file_hugepages(shp->shm_file)) { - struct address_space *mapping = inode->i_mapping; - *rss += (HPAGE_SIZE/PAGE_SIZE)*mapping->nrpages; - } else { - struct shmem_inode_info *info = SHMEM_I(inode); - spin_lock(&info->lock); - *rss += inode->i_mapping->nrpages; - *swp += info->swapped; - spin_unlock(&info->lock); - } + shm_add_rss_swap(shp, rss, swp); total++; } } -asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) +/* + * This function handles some shmctl commands which require the rwsem + * to be held in write mode. + * NOTE: no locks must be held, the rwsem is taken inside this function. 
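+ * Condensed shape of the locking performed below, mirroring
+ * semctl_down() above:
+ *
+ *	down_write(&shm_ids(ns).rwsem);
+ *	rcu_read_lock();
+ *	ipcp = ipcctl_pre_down_nolock(...);	/* perm + audit checks */
+ *	ipc_lock_object(&shp->shm_perm);	/* only around updates */
+ *	... IPC_SET / IPC_RMID work ...
+ *	ipc_unlock_object(&shp->shm_perm);
+ *	rcu_read_unlock();
+ *	up_write(&shm_ids(ns).rwsem);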
+ */ +static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd, + struct shmid_ds __user *buf, int version) { - struct shm_setbuf setbuf; + struct kern_ipc_perm *ipcp; + struct shmid64_ds shmid64; struct shmid_kernel *shp; - int err, version; - struct ipc_namespace *ns; + int err; - if (cmd < 0 || shmid < 0) { + if (cmd == IPC_SET) { + if (copy_shmid_from_user(&shmid64, buf, version)) + return -EFAULT; + } + + down_write(&shm_ids(ns).rwsem); + rcu_read_lock(); + + ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd, + &shmid64.shm_perm, 0); + if (IS_ERR(ipcp)) { + err = PTR_ERR(ipcp); + goto out_unlock1; + } + + shp = container_of(ipcp, struct shmid_kernel, shm_perm); + + err = security_shm_shmctl(shp, cmd); + if (err) + goto out_unlock1; + + switch (cmd) { + case IPC_RMID: + ipc_lock_object(&shp->shm_perm); + /* do_shm_rmid unlocks the ipc object and rcu */ + do_shm_rmid(ns, ipcp); + goto out_up; + case IPC_SET: + ipc_lock_object(&shp->shm_perm); + err = ipc_update_perm(&shmid64.shm_perm, ipcp); + if (err) + goto out_unlock0; + shp->shm_ctim = get_seconds(); + break; + default: err = -EINVAL; - goto out; + goto out_unlock1; } - version = ipc_parse_version(&cmd); - ns = current->nsproxy->ipc_ns; +out_unlock0: + ipc_unlock_object(&shp->shm_perm); +out_unlock1: + rcu_read_unlock(); +out_up: + up_write(&shm_ids(ns).rwsem); + return err; +} - switch (cmd) { /* replace with proc interface ? */ - case IPC_INFO: - { - struct shminfo64 shminfo; +static int shmctl_nolock(struct ipc_namespace *ns, int shmid, + int cmd, int version, void __user *buf) +{ + int err; + struct shmid_kernel *shp; + /* preliminary security checks for *_INFO */ + if (cmd == IPC_INFO || cmd == SHM_INFO) { err = security_shm_shmctl(NULL, cmd); if (err) return err; + } - memset(&shminfo,0,sizeof(shminfo)); + switch (cmd) { + case IPC_INFO: + { + struct shminfo64 shminfo; + + memset(&shminfo, 0, sizeof(shminfo)); shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni; shminfo.shmmax = ns->shm_ctlmax; shminfo.shmall = ns->shm_ctlall; shminfo.shmmin = SHMMIN; - if(copy_shminfo_to_user (buf, &shminfo, version)) + if (copy_shminfo_to_user(buf, &shminfo, version)) return -EFAULT; - down_read(&shm_ids(ns).rw_mutex); + down_read(&shm_ids(ns).rwsem); err = ipc_get_maxid(&shm_ids(ns)); - up_read(&shm_ids(ns).rw_mutex); + up_read(&shm_ids(ns).rwsem); - if(err<0) + if (err < 0) err = 0; goto out; } @@ -671,20 +864,16 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) { struct shm_info shm_info; - err = security_shm_shmctl(NULL, cmd); - if (err) - return err; - - memset(&shm_info,0,sizeof(shm_info)); - down_read(&shm_ids(ns).rw_mutex); + memset(&shm_info, 0, sizeof(shm_info)); + down_read(&shm_ids(ns).rwsem); shm_info.used_ids = shm_ids(ns).in_use; - shm_get_stat (ns, &shm_info.shm_rss, &shm_info.shm_swp); + shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp); shm_info.shm_tot = ns->shm_tot; shm_info.swap_attempts = 0; shm_info.swap_successes = 0; err = ipc_get_maxid(&shm_ids(ns)); - up_read(&shm_ids(ns).rw_mutex); - if(copy_to_user (buf, &shm_info, sizeof(shm_info))) { + up_read(&shm_ids(ns).rwsem); + if (copy_to_user(buf, &shm_info, sizeof(shm_info))) { err = -EFAULT; goto out; } @@ -698,32 +887,31 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) struct shmid64_ds tbuf; int result; - if (!buf) { - err = -EFAULT; - goto out; - } - + rcu_read_lock(); if (cmd == SHM_STAT) { - shp = shm_lock(ns, shmid); + shp = shm_obtain_object(ns, shmid); if (IS_ERR(shp)) { err = 
PTR_ERR(shp); - goto out; + goto out_unlock; } result = shp->shm_perm.id; } else { - shp = shm_lock_check(ns, shmid); + shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); - goto out; + goto out_unlock; } result = 0; } - err=-EACCES; - if (ipcperms (&shp->shm_perm, S_IRUGO)) + + err = -EACCES; + if (ipcperms(ns, &shp->shm_perm, S_IRUGO)) goto out_unlock; + err = security_shm_shmctl(shp, cmd); if (err) goto out_unlock; + memset(&tbuf, 0, sizeof(tbuf)); kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm); tbuf.shm_segsz = shp->shm_segsz; @@ -733,152 +921,119 @@ asmlinkage long sys_shmctl (int shmid, int cmd, struct shmid_ds __user *buf) tbuf.shm_cpid = shp->shm_cprid; tbuf.shm_lpid = shp->shm_lprid; tbuf.shm_nattch = shp->shm_nattch; - shm_unlock(shp); - if(copy_shmid_to_user (buf, &tbuf, version)) + rcu_read_unlock(); + + if (copy_shmid_to_user(buf, &tbuf, version)) err = -EFAULT; else err = result; goto out; } - case SHM_LOCK: - case SHM_UNLOCK: - { - shp = shm_lock_check(ns, shmid); - if (IS_ERR(shp)) { - err = PTR_ERR(shp); - goto out; - } + default: + return -EINVAL; + } - err = audit_ipc_obj(&(shp->shm_perm)); - if (err) - goto out_unlock; +out_unlock: + rcu_read_unlock(); +out: + return err; +} - if (!capable(CAP_IPC_LOCK)) { - err = -EPERM; - if (current->euid != shp->shm_perm.uid && - current->euid != shp->shm_perm.cuid) - goto out_unlock; - if (cmd == SHM_LOCK && - !current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur) - goto out_unlock; - } +SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf) +{ + struct shmid_kernel *shp; + int err, version; + struct ipc_namespace *ns; - err = security_shm_shmctl(shp, cmd); - if (err) - goto out_unlock; - - if(cmd==SHM_LOCK) { - struct user_struct * user = current->user; - if (!is_file_hugepages(shp->shm_file)) { - err = shmem_lock(shp->shm_file, 1, user); - if (!err && !(shp->shm_perm.mode & SHM_LOCKED)){ - shp->shm_perm.mode |= SHM_LOCKED; - shp->mlock_user = user; - } - } - } else if (!is_file_hugepages(shp->shm_file)) { - shmem_lock(shp->shm_file, 0, shp->mlock_user); - shp->shm_perm.mode &= ~SHM_LOCKED; - shp->mlock_user = NULL; - } - shm_unlock(shp); - goto out; - } + if (cmd < 0 || shmid < 0) + return -EINVAL; + + version = ipc_parse_version(&cmd); + ns = current->nsproxy->ipc_ns; + + switch (cmd) { + case IPC_INFO: + case SHM_INFO: + case SHM_STAT: + case IPC_STAT: + return shmctl_nolock(ns, shmid, cmd, version, buf); case IPC_RMID: + case IPC_SET: + return shmctl_down(ns, shmid, cmd, buf, version); + case SHM_LOCK: + case SHM_UNLOCK: { - /* - * We cannot simply remove the file. The SVID states - * that the block remains until the last person - * detaches from it, then is deleted. A shmat() on - * an RMID segment is legal in older Linux and if - * we change it apps break... - * - * Instead we set a destroyed flag, and then blow - * the name away when the usage hits zero. 
- */ - down_write(&shm_ids(ns).rw_mutex); - shp = shm_lock_check_down(ns, shmid); + struct file *shm_file; + + rcu_read_lock(); + shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); - goto out_up; - } - - err = audit_ipc_obj(&(shp->shm_perm)); - if (err) - goto out_unlock_up; - - if (current->euid != shp->shm_perm.uid && - current->euid != shp->shm_perm.cuid && - !capable(CAP_SYS_ADMIN)) { - err=-EPERM; - goto out_unlock_up; + goto out_unlock1; } + audit_ipc_obj(&(shp->shm_perm)); err = security_shm_shmctl(shp, cmd); if (err) - goto out_unlock_up; + goto out_unlock1; - do_shm_rmid(ns, &shp->shm_perm); - up_write(&shm_ids(ns).rw_mutex); - goto out; - } + ipc_lock_object(&shp->shm_perm); - case IPC_SET: - { - if (!buf) { - err = -EFAULT; - goto out; + /* check if shm_destroy() is tearing down shp */ + if (!ipc_valid_object(&shp->shm_perm)) { + err = -EIDRM; + goto out_unlock0; } - if (copy_shmid_from_user (&setbuf, buf, version)) { - err = -EFAULT; - goto out; - } - down_write(&shm_ids(ns).rw_mutex); - shp = shm_lock_check_down(ns, shmid); - if (IS_ERR(shp)) { - err = PTR_ERR(shp); - goto out_up; + if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) { + kuid_t euid = current_euid(); + if (!uid_eq(euid, shp->shm_perm.uid) && + !uid_eq(euid, shp->shm_perm.cuid)) { + err = -EPERM; + goto out_unlock0; + } + if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) { + err = -EPERM; + goto out_unlock0; + } } - err = audit_ipc_obj(&(shp->shm_perm)); - if (err) - goto out_unlock_up; - err = audit_ipc_set_perm(0, setbuf.uid, setbuf.gid, setbuf.mode); - if (err) - goto out_unlock_up; - err=-EPERM; - if (current->euid != shp->shm_perm.uid && - current->euid != shp->shm_perm.cuid && - !capable(CAP_SYS_ADMIN)) { - goto out_unlock_up; + + shm_file = shp->shm_file; + if (is_file_hugepages(shm_file)) + goto out_unlock0; + + if (cmd == SHM_LOCK) { + struct user_struct *user = current_user(); + err = shmem_lock(shm_file, 1, user); + if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) { + shp->shm_perm.mode |= SHM_LOCKED; + shp->mlock_user = user; + } + goto out_unlock0; } - err = security_shm_shmctl(shp, cmd); - if (err) - goto out_unlock_up; - - shp->shm_perm.uid = setbuf.uid; - shp->shm_perm.gid = setbuf.gid; - shp->shm_perm.mode = (shp->shm_perm.mode & ~S_IRWXUGO) - | (setbuf.mode & S_IRWXUGO); - shp->shm_ctim = get_seconds(); - break; + /* SHM_UNLOCK */ + if (!(shp->shm_perm.mode & SHM_LOCKED)) + goto out_unlock0; + shmem_lock(shm_file, 0, shp->mlock_user); + shp->shm_perm.mode &= ~SHM_LOCKED; + shp->mlock_user = NULL; + get_file(shm_file); + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); + shmem_unlock_mapping(shm_file->f_mapping); + + fput(shm_file); + return err; } - default: - err = -EINVAL; - goto out; + return -EINVAL; } - err = 0; -out_unlock_up: - shm_unlock(shp); -out_up: - up_write(&shm_ids(ns).rw_mutex); - goto out; -out_unlock: - shm_unlock(shp); -out: +out_unlock0: + ipc_unlock_object(&shp->shm_perm); +out_unlock1: + rcu_read_unlock(); return err; } @@ -889,29 +1044,30 @@ out: * "raddr" thing points to kernel space, and there has to be a wrapper around * this. 
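 * (Worked example of the SHM_RND rounding performed below, assuming
 * shmlba == 0x1000: an addr of 0x40001234 becomes
 *	0x40001234 & ~(0x1000 - 1) == 0x40001000;
 * without SHM_RND the same unaligned addr fails with -EINVAL.)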
*/ -long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) +long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr, + unsigned long shmlba) { struct shmid_kernel *shp; unsigned long addr; unsigned long size; - struct file * file; + struct file *file; int err; unsigned long flags; unsigned long prot; int acc_mode; - unsigned long user_addr; struct ipc_namespace *ns; struct shm_file_data *sfd; struct path path; - mode_t f_mode; + fmode_t f_mode; + unsigned long populate = 0; err = -EINVAL; if (shmid < 0) goto out; else if ((addr = (ulong)shmaddr)) { - if (addr & (SHMLBA-1)) { + if (addr & (shmlba - 1)) { if (shmflg & SHM_RND) - addr &= ~(SHMLBA-1); /* round down */ + addr &= ~(shmlba - 1); /* round down */ else #ifndef __ARCH_FORCE_SHMLBA if (addr & ~PAGE_MASK) @@ -945,36 +1101,54 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) * additional creator id... */ ns = current->nsproxy->ipc_ns; - shp = shm_lock_check(ns, shmid); + rcu_read_lock(); + shp = shm_obtain_object_check(ns, shmid); if (IS_ERR(shp)) { err = PTR_ERR(shp); - goto out; + goto out_unlock; } err = -EACCES; - if (ipcperms(&shp->shm_perm, acc_mode)) + if (ipcperms(ns, &shp->shm_perm, acc_mode)) goto out_unlock; err = security_shm_shmat(shp, shmaddr, shmflg); if (err) goto out_unlock; - path.dentry = dget(shp->shm_file->f_path.dentry); - path.mnt = shp->shm_file->f_path.mnt; + ipc_lock_object(&shp->shm_perm); + + /* check if shm_destroy() is tearing down shp */ + if (!ipc_valid_object(&shp->shm_perm)) { + ipc_unlock_object(&shp->shm_perm); + err = -EIDRM; + goto out_unlock; + } + + path = shp->shm_file->f_path; + path_get(&path); shp->shm_nattch++; size = i_size_read(path.dentry->d_inode); - shm_unlock(shp); + ipc_unlock_object(&shp->shm_perm); + rcu_read_unlock(); err = -ENOMEM; sfd = kzalloc(sizeof(*sfd), GFP_KERNEL); - if (!sfd) - goto out_put_dentry; - - err = -ENOMEM; + if (!sfd) { + path_put(&path); + goto out_nattch; + } - file = alloc_file(path.mnt, path.dentry, f_mode, &shm_file_operations); - if (!file) - goto out_free; + file = alloc_file(&path, f_mode, + is_file_hugepages(shp->shm_file) ? 
+ &shm_file_operations_huge : + &shm_file_operations); + err = PTR_ERR(file); + if (IS_ERR(file)) { + kfree(sfd); + path_put(&path); + goto out_nattch; + } file->private_data = sfd; file->f_mapping = shp->shm_file->f_mapping; @@ -983,9 +1157,16 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) sfd->file = shp->shm_file; sfd->vm_ops = NULL; + err = security_mmap_file(file, prot, flags); + if (err) + goto out_fput; + down_write(¤t->mm->mmap_sem); if (addr && !(shmflg & SHM_REMAP)) { err = -EINVAL; + if (addr + size < addr) + goto invalid; + if (find_vma_intersection(current->mm, addr, addr + size)) goto invalid; /* @@ -996,49 +1177,44 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr) addr > current->mm->start_stack - size - PAGE_SIZE * 5) goto invalid; } - - user_addr = do_mmap (file, addr, size, prot, flags, 0); - *raddr = user_addr; + + addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate); + *raddr = addr; err = 0; - if (IS_ERR_VALUE(user_addr)) - err = (long)user_addr; + if (IS_ERR_VALUE(addr)) + err = (long)addr; invalid: up_write(¤t->mm->mmap_sem); + if (populate) + mm_populate(addr, populate); +out_fput: fput(file); out_nattch: - down_write(&shm_ids(ns).rw_mutex); - shp = shm_lock_down(ns, shmid); + down_write(&shm_ids(ns).rwsem); + shp = shm_lock(ns, shmid); BUG_ON(IS_ERR(shp)); shp->shm_nattch--; - if(shp->shm_nattch == 0 && - shp->shm_perm.mode & SHM_DEST) + if (shm_may_destroy(ns, shp)) shm_destroy(ns, shp); else shm_unlock(shp); - up_write(&shm_ids(ns).rw_mutex); - -out: + up_write(&shm_ids(ns).rwsem); return err; out_unlock: - shm_unlock(shp); - goto out; - -out_free: - kfree(sfd); -out_put_dentry: - dput(path.dentry); - goto out_nattch; + rcu_read_unlock(); +out: + return err; } -asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg) +SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg) { unsigned long ret; long err; - err = do_shmat(shmid, shmaddr, shmflg, &ret); + err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA); if (err) return err; force_successful_syscall_return(); @@ -1049,13 +1225,16 @@ asmlinkage long sys_shmat(int shmid, char __user *shmaddr, int shmflg) * detach and kill segment if marked destroyed. * The work is done in shm_close. */ -asmlinkage long sys_shmdt(char __user *shmaddr) +SYSCALL_DEFINE1(shmdt, char __user *, shmaddr) { struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *next; + struct vm_area_struct *vma; unsigned long addr = (unsigned long)shmaddr; - loff_t size = 0; int retval = -EINVAL; +#ifdef CONFIG_MMU + loff_t size = 0; + struct vm_area_struct *next; +#endif if (addr & ~PAGE_MASK) return retval; @@ -1084,6 +1263,7 @@ asmlinkage long sys_shmdt(char __user *shmaddr) */ vma = find_vma(mm, addr); +#ifdef CONFIG_MMU while (vma) { next = vma->vm_next; @@ -1096,7 +1276,7 @@ asmlinkage long sys_shmdt(char __user *shmaddr) (vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) { - size = vma->vm_file->f_path.dentry->d_inode->i_size; + size = file_inode(vma->vm_file)->i_size; do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); /* * We discovered the size of the shm segment, so @@ -1114,7 +1294,7 @@ asmlinkage long sys_shmdt(char __user *shmaddr) /* * We need look no further than the maximum address a fragment * could possibly have landed at. Also cast things to loff_t to - * prevent overflows and make comparisions vs. equal-width types. + * prevent overflows and make comparisons vs. equal-width types. 
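	 * (Example of the mismatch this guards against: on 32-bit, size is
	 * a 64-bit loff_t taken from i_size, while vm_end - addr is a
	 * 32-bit unsigned long; comparing them at unsigned long width could
	 * truncate a large size. Widening the left side keeps the
	 * comparison exact.)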
*/ size = PAGE_ALIGN(size); while (vma && (loff_t)(vma->vm_end - addr) <= size) { @@ -1128,6 +1308,16 @@ asmlinkage long sys_shmdt(char __user *shmaddr) vma = next; } +#else /* CONFIG_MMU */ + /* under NOMMU conditions, the exact address to be destroyed must be + * given */ + if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) { + do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start); + retval = 0; + } + +#endif + up_write(&mm->mmap_sem); return retval; } @@ -1135,17 +1325,22 @@ asmlinkage long sys_shmdt(char __user *shmaddr) #ifdef CONFIG_PROC_FS static int sysvipc_shm_proc_show(struct seq_file *s, void *it) { + struct user_namespace *user_ns = seq_user_ns(s); struct shmid_kernel *shp = it; - char *format; + unsigned long rss = 0, swp = 0; -#define SMALL_STRING "%10d %10d %4o %10u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n" -#define BIG_STRING "%10d %10d %4o %21u %5u %5u %5d %5u %5u %5u %5u %10lu %10lu %10lu\n" + shm_add_rss_swap(shp, &rss, &swp); - if (sizeof(size_t) <= sizeof(int)) - format = SMALL_STRING; - else - format = BIG_STRING; - return seq_printf(s, format, +#if BITS_PER_LONG <= 32 +#define SIZE_SPEC "%10lu" +#else +#define SIZE_SPEC "%21lu" +#endif + + return seq_printf(s, + "%10d %10d %4o " SIZE_SPEC " %5u %5u " + "%5lu %5u %5u %5u %5u %10lu %10lu %10lu " + SIZE_SPEC " " SIZE_SPEC "\n", shp->shm_perm.key, shp->shm_perm.id, shp->shm_perm.mode, @@ -1153,12 +1348,14 @@ static int sysvipc_shm_proc_show(struct seq_file *s, void *it) shp->shm_cprid, shp->shm_lprid, shp->shm_nattch, - shp->shm_perm.uid, - shp->shm_perm.gid, - shp->shm_perm.cuid, - shp->shm_perm.cgid, + from_kuid_munged(user_ns, shp->shm_perm.uid), + from_kgid_munged(user_ns, shp->shm_perm.gid), + from_kuid_munged(user_ns, shp->shm_perm.cuid), + from_kgid_munged(user_ns, shp->shm_perm.cgid), shp->shm_atim, shp->shm_dtim, - shp->shm_ctim); + shp->shm_ctim, + rss * PAGE_SIZE, + swp * PAGE_SIZE); } #endif diff --git a/ipc/syscall.c b/ipc/syscall.c new file mode 100644 index 00000000000..52429489cde --- /dev/null +++ b/ipc/syscall.c @@ -0,0 +1,99 @@ +/* + * sys_ipc() is the old de-multiplexer for the SysV IPC calls. + * + * This is really horribly ugly, and new architectures should just wire up + * the individual syscalls instead. 
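+ * Sketch of the encoding handled below: the low 16 bits of "call"
+ * select the operation and the high 16 bits carry a version, so a
+ * hypothetical raw invocation of the version-1 MSGRCV branch is
+ *
+ *	syscall(__NR_ipc, MSGRCV | (1 << 16), id, size, flg, buf, type);
+ *
+ * with the argument order matching this demultiplexer only.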
+ */ +#include <linux/unistd.h> + +#ifdef __ARCH_WANT_SYS_IPC +#include <linux/errno.h> +#include <linux/ipc.h> +#include <linux/shm.h> +#include <linux/syscalls.h> +#include <linux/uaccess.h> + +SYSCALL_DEFINE6(ipc, unsigned int, call, int, first, unsigned long, second, + unsigned long, third, void __user *, ptr, long, fifth) +{ + int version, ret; + + version = call >> 16; /* hack for backward compatibility */ + call &= 0xffff; + + switch (call) { + case SEMOP: + return sys_semtimedop(first, (struct sembuf __user *)ptr, + second, NULL); + case SEMTIMEDOP: + return sys_semtimedop(first, (struct sembuf __user *)ptr, + second, + (const struct timespec __user *)fifth); + + case SEMGET: + return sys_semget(first, second, third); + case SEMCTL: { + unsigned long arg; + if (!ptr) + return -EINVAL; + if (get_user(arg, (unsigned long __user *) ptr)) + return -EFAULT; + return sys_semctl(first, second, third, arg); + } + + case MSGSND: + return sys_msgsnd(first, (struct msgbuf __user *) ptr, + second, third); + case MSGRCV: + switch (version) { + case 0: { + struct ipc_kludge tmp; + if (!ptr) + return -EINVAL; + + if (copy_from_user(&tmp, + (struct ipc_kludge __user *) ptr, + sizeof(tmp))) + return -EFAULT; + return sys_msgrcv(first, tmp.msgp, second, + tmp.msgtyp, third); + } + default: + return sys_msgrcv(first, + (struct msgbuf __user *) ptr, + second, fifth, third); + } + case MSGGET: + return sys_msgget((key_t) first, second); + case MSGCTL: + return sys_msgctl(first, second, (struct msqid_ds __user *)ptr); + + case SHMAT: + switch (version) { + default: { + unsigned long raddr; + ret = do_shmat(first, (char __user *)ptr, + second, &raddr, SHMLBA); + if (ret) + return ret; + return put_user(raddr, (unsigned long __user *) third); + } + case 1: + /* + * This was the entry point for kernel-originating calls + * from iBCS2 in 2.2 days. + */ + return -EINVAL; + } + case SHMDT: + return sys_shmdt((char __user *)ptr); + case SHMGET: + return sys_shmget(first, second, third); + case SHMCTL: + return sys_shmctl(first, second, + (struct shmid_ds __user *) ptr); + default: + return -ENOSYS; + } +} +#endif diff --git a/ipc/util.c b/ipc/util.c index fd1b50da9db..27d74e69fd5 100644 --- a/ipc/util.c +++ b/ipc/util.c @@ -15,6 +15,29 @@ * Jun 2006 - namespaces ssupport * OpenVZ, SWsoft Inc. * Pavel Emelianov <xemul@openvz.org> + * + * General sysv ipc locking scheme: + * rcu_read_lock() + * obtain the ipc object (kern_ipc_perm) by looking up the id in an idr + * tree. + * - perform initial checks (capabilities, auditing and permission, + * etc). + * - perform read-only operations, such as STAT, INFO commands. + * acquire the ipc lock (kern_ipc_perm.lock) through + * ipc_lock_object() + * - perform data updates, such as SET, RMID commands and + * mechanism-specific operations (semop/semtimedop, + * msgsnd/msgrcv, shmat/shmdt). + * drop the ipc lock, through ipc_unlock_object(). + * rcu_read_unlock() + * + * The ids->rwsem must be taken when: + * - creating, removing and iterating the existing entries in ipc + * identifier sets. + * - iterating through files under /proc/sysvipc/ + * + * Note that sems have a special fast path that avoids kern_ipc_perm.lock - + * see sem_lock(). 
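+ * A condensed sketch of the read-mostly pattern described above:
+ *
+ *	rcu_read_lock();
+ *	ipcp = ipc_obtain_object_check(ids, id);
+ *	if (!IS_ERR(ipcp) && !ipcperms(ns, ipcp, S_IRUGO)) {
+ *		ipc_lock_object(ipcp);		/* writers only */
+ *		if (ipc_valid_object(ipcp))
+ *			/* ... update ... */;
+ *		ipc_unlock_object(ipcp);
+ *	}
+ *	rcu_read_unlock();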
*/ #include <linux/mm.h> @@ -23,6 +46,7 @@ #include <linux/msg.h> #include <linux/vmalloc.h> #include <linux/slab.h> +#include <linux/notifier.h> #include <linux/capability.h> #include <linux/highuid.h> #include <linux/security.h> @@ -33,6 +57,7 @@ #include <linux/audit.h> #include <linux/nsproxy.h> #include <linux/rwsem.h> +#include <linux/memory.h> #include <linux/ipc_namespace.h> #include <asm/unistd.h> @@ -46,61 +71,89 @@ struct ipc_proc_iface { int (*show)(struct seq_file *, void *); }; -struct ipc_namespace init_ipc_ns = { - .kref = { - .refcount = ATOMIC_INIT(2), - }, +static void ipc_memory_notifier(struct work_struct *work) +{ + ipcns_notify(IPCNS_MEMCHANGED); +} + +static int ipc_memory_callback(struct notifier_block *self, + unsigned long action, void *arg) +{ + static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier); + + switch (action) { + case MEM_ONLINE: /* memory successfully brought online */ + case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */ + /* + * This is done by invoking the ipcns notifier chain with the + * IPC_MEMCHANGED event. + * In order not to keep the lock on the hotplug memory chain + * for too long, queue a work item that will, when waken up, + * activate the ipcns notification chain. + */ + schedule_work(&ipc_memory_wq); + break; + case MEM_GOING_ONLINE: + case MEM_GOING_OFFLINE: + case MEM_CANCEL_ONLINE: + case MEM_CANCEL_OFFLINE: + default: + break; + } + + return NOTIFY_OK; +} + +static struct notifier_block ipc_memory_nb = { + .notifier_call = ipc_memory_callback, + .priority = IPC_CALLBACK_PRI, }; /** - * ipc_init - initialise IPC subsystem + * ipc_init - initialise ipc subsystem + * + * The various sysv ipc resources (semaphores, messages and shared + * memory) are initialised. * - * The various system5 IPC resources (semaphores, messages and shared - * memory) are initialised + * A callback routine is registered into the memory hotplug notifier + * chain: since msgmni scales to lowmem this callback routine will be + * called upon successful memory add / remove to recompute msmgni. */ - static int __init ipc_init(void) { sem_init(); msg_init(); shm_init(); + register_hotmemory_notifier(&ipc_memory_nb); + register_ipcns_notifier(&init_ipc_ns); return 0; } -__initcall(ipc_init); +device_initcall(ipc_init); /** - * ipc_init_ids - initialise IPC identifiers - * @ids: Identifier set + * ipc_init_ids - initialise ipc identifiers + * @ids: ipc identifier set * - * Set up the sequence range to use for the ipc identifier range (limited - * below IPCMNI) then initialise the ids idr. + * Set up the sequence range to use for the ipc identifier range (limited + * below IPCMNI) then initialise the ids idr. */ - void ipc_init_ids(struct ipc_ids *ids) { - init_rwsem(&ids->rw_mutex); - ids->in_use = 0; ids->seq = 0; - { - int seq_limit = INT_MAX/SEQ_MULTIPLIER; - if(seq_limit > USHRT_MAX) - ids->seq_max = USHRT_MAX; - else - ids->seq_max = seq_limit; - } - + ids->next_id = -1; + init_rwsem(&ids->rwsem); idr_init(&ids->ipcs_idr); } #ifdef CONFIG_PROC_FS static const struct file_operations sysvipc_proc_fops; /** - * ipc_init_proc_interface - Create a proc interface for sysipc types using a seq_file interface. - * @path: Path in procfs - * @header: Banner to be printed at the beginning of the file. - * @ids: ipc id table to iterate. - * @show: show routine. + * ipc_init_proc_interface - create a proc interface for sysipc types using a seq_file interface. + * @path: Path in procfs + * @header: Banner to be printed at the beginning of the file. 
+ * @ids: ipc id table to iterate. + * @show: show routine. */ void __init ipc_init_proc_interface(const char *path, const char *header, int ids, int (*show)(struct seq_file *, void *)) @@ -116,29 +169,26 @@ void __init ipc_init_proc_interface(const char *path, const char *header, iface->ids = ids; iface->show = show; - pde = create_proc_entry(path, - S_IRUGO, /* world readable */ - NULL /* parent dir */); - if (pde) { - pde->data = iface; - pde->proc_fops = &sysvipc_proc_fops; - } else { + pde = proc_create_data(path, + S_IRUGO, /* world readable */ + NULL, /* parent dir */ + &sysvipc_proc_fops, + iface); + if (!pde) kfree(iface); - } } #endif /** - * ipc_findkey - find a key in an ipc identifier set - * @ids: Identifier set - * @key: The key to find - * - * Requires ipc_ids.rw_mutex locked. - * Returns the LOCKED pointer to the ipc structure if found or NULL - * if not. - * If key is found ipc points to the owning ipc structure + * ipc_findkey - find a key in an ipc identifier set + * @ids: ipc identifier set + * @key: key to find + * + * Returns the locked pointer to the ipc structure if found or NULL + * otherwise. If key is found ipc points to the owning ipc structure + * + * Called with ipc_ids.rwsem held. */ - static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) { struct kern_ipc_perm *ipc; @@ -156,7 +206,8 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) continue; } - ipc_lock_by_ptr(ipc); + rcu_read_lock(); + ipc_lock_object(ipc); return ipc; } @@ -164,12 +215,11 @@ static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key) } /** - * ipc_get_maxid - get the last assigned id - * @ids: IPC identifier set + * ipc_get_maxid - get the last assigned id + * @ids: ipc identifier set * - * Called with ipc_ids.rw_mutex held. + * Called with ipc_ids.rwsem held. */ - int ipc_get_maxid(struct ipc_ids *ids) { struct kern_ipc_perm *ipc; @@ -195,22 +245,24 @@ int ipc_get_maxid(struct ipc_ids *ids) } /** - * ipc_addid - add an IPC identifier - * @ids: IPC identifier set - * @new: new IPC permission set - * @size: limit for the number of used ids + * ipc_addid - add an ipc identifier + * @ids: ipc identifier set + * @new: new ipc permission set + * @size: limit for the number of used ids * - * Add an entry 'new' to the IPC ids idr. The permissions object is - * initialised and the first free entry is set up and the id assigned - * is returned. The 'new' entry is returned in a locked state on success. - * On failure the entry is not locked and a negative err-code is returned. + * Add an entry 'new' to the ipc ids idr. The permissions object is + * initialised and the first free entry is set up and the id assigned + * is returned. The 'new' entry is returned in a locked state on success. + * On failure the entry is not locked and a negative err-code is returned. * - * Called with ipc_ids.rw_mutex held as a writer. + * Called with writer ipc_ids.rwsem held. 
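ipc_findkey() above is what turns a user-supplied key into an existing object. A small user-space sketch of the visible semantics, assuming only the POSIX SysV APIs and that /tmp exists for ftok(): the same key resolves to one identifier, while IPC_PRIVATE never matches and always allocates a new set.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	key_t key = ftok("/tmp", 'u');
	int a = semget(key, 1, IPC_CREAT | 0600);
	int b = semget(key, 1, 0600);		/* same key -> same id */
	int c = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	printf("a=%d b=%d c=%d\n", a, b, c);
	semctl(a, 0, IPC_RMID);
	semctl(c, 0, IPC_RMID);
	return 0;
}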
*/
-
-int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
+int ipc_addid(struct ipc_ids *ids, struct kern_ipc_perm *new, int size)
{
-	int id, err;
+	kuid_t euid;
+	kgid_t egid;
+	int id;
+	int next_id = ids->next_id;

	if (size > IPCMNI)
		size = IPCMNI;
@@ -218,76 +270,86 @@
	if (ids->in_use >= size)
		return -ENOSPC;

-	err = idr_get_new(&ids->ipcs_idr, new, &id);
-	if (err)
-		return err;
+	idr_preload(GFP_KERNEL);
+
+	spin_lock_init(&new->lock);
+	new->deleted = false;
+	rcu_read_lock();
+	spin_lock(&new->lock);
+
+	id = idr_alloc(&ids->ipcs_idr, new,
+		       (next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
+		       GFP_NOWAIT);
+	idr_preload_end();
+	if (id < 0) {
+		spin_unlock(&new->lock);
+		rcu_read_unlock();
+		return id;
+	}

	ids->in_use++;

-	new->cuid = new->uid = current->euid;
-	new->gid = new->cgid = current->egid;
+	current_euid_egid(&euid, &egid);
+	new->cuid = new->uid = euid;
+	new->gid = new->cgid = egid;

-	new->seq = ids->seq++;
-	if(ids->seq > ids->seq_max)
-		ids->seq = 0;
+	if (next_id < 0) {
+		new->seq = ids->seq++;
+		if (ids->seq > IPCID_SEQ_MAX)
+			ids->seq = 0;
+	} else {
+		new->seq = ipcid_to_seqx(next_id);
+		ids->next_id = -1;
+	}

-	spin_lock_init(&new->lock);
-	new->deleted = 0;
-	rcu_read_lock();
-	spin_lock(&new->lock);
+	new->id = ipc_buildid(id, new->seq);
	return id;
}

/**
- * ipcget_new - create a new ipc object
- * @ns: namespace
- * @ids: IPC identifer set
- * @ops: the actual creation routine to call
- * @params: its parameters
- *
- * This routine is called by sys_msgget, sys_semget() and sys_shmget()
- * when the key is IPC_PRIVATE.
+ * ipcget_new - create a new ipc object
+ * @ns: ipc namespace
+ * @ids: ipc identifier set
+ * @ops: the actual creation routine to call
+ * @params: its parameters
+ *
+ * This routine is called by sys_msgget, sys_semget() and sys_shmget()
+ * when the key is IPC_PRIVATE.
 */
static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
-		struct ipc_ops *ops, struct ipc_params *params)
+		const struct ipc_ops *ops, struct ipc_params *params)
{
	int err;
-retry:
-	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);
-
-	if (!err)
-		return -ENOMEM;

-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
	err = ops->getnew(ns, params);
-	up_write(&ids->rw_mutex);
-
-	if (err == -EAGAIN)
-		goto retry;
-
+	up_write(&ids->rwsem);
	return err;
}

/**
- * ipc_check_perms - check security and permissions for an IPC
- * @ipcp: ipc permission set
- * @ops: the actual security routine to call
- * @params: its parameters
+ * ipc_check_perms - check security and permissions for an ipc object
+ * @ns: ipc namespace
+ * @ipcp: ipc permission set
+ * @ops: the actual security routine to call
+ * @params: its parameters
 *
- * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
- * when the key is not IPC_PRIVATE and that key already exists in the
- * ids IDR.
+ * This routine is called by sys_msgget(), sys_semget() and sys_shmget()
+ * when the key is not IPC_PRIVATE and that key already exists in the
+ * ids IDR.
 *
- * On success, the IPC id is returned.
+ * On success, the ipc id is returned.
 *
- * It is called with ipc_ids.rw_mutex and ipcp->lock held.
+ * It is called with ipc_ids.rwsem and ipcp->lock held.
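ipc_addid() splices the idr slot and the per-slot sequence counter into one identifier via ipc_buildid(). A sketch of the decode, assuming the default IPCMNI of 32768 for SEQ_MULTIPLIER; it mirrors ipcid_to_idx() and ipcid_to_seqx() from ipc/util.h.

#include <stdio.h>
#include <sys/ipc.h>
#include <sys/sem.h>

#define SEQ_MULTIPLIER 32768	/* IPCMNI, default value assumed */

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	printf("id=%d: idr slot=%d, sequence=%d\n",
	       id, id % SEQ_MULTIPLIER, id / SEQ_MULTIPLIER);
	semctl(id, 0, IPC_RMID);
	return 0;
}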
*/
-static int ipc_check_perms(struct kern_ipc_perm *ipcp, struct ipc_ops *ops,
-			struct ipc_params *params)
+static int ipc_check_perms(struct ipc_namespace *ns,
+			   struct kern_ipc_perm *ipcp,
+			   const struct ipc_ops *ops,
+			   struct ipc_params *params)
{
	int err;

-	if (ipcperms(ipcp, params->flg))
+	if (ipcperms(ns, ipcp, params->flg))
		err = -EACCES;
	else {
		err = ops->associate(ipcp, params->flg);
@@ -299,40 +361,36 @@
}

/**
- * ipcget_public - get an ipc object or create a new one
- * @ns: namespace
- * @ids: IPC identifer set
- * @ops: the actual creation routine to call
- * @params: its parameters
- *
- * This routine is called by sys_msgget, sys_semget() and sys_shmget()
- * when the key is not IPC_PRIVATE.
- * It adds a new entry if the key is not found and does some permission
- * / security checkings if the key is found.
- *
- * On success, the ipc id is returned.
+ * ipcget_public - get an ipc object or create a new one
+ * @ns: ipc namespace
+ * @ids: ipc identifier set
+ * @ops: the actual creation routine to call
+ * @params: its parameters
+ *
+ * This routine is called by sys_msgget, sys_semget() and sys_shmget()
+ * when the key is not IPC_PRIVATE.
+ * It adds a new entry if the key is not found and does some permission
+ * / security checks if the key is found.
+ *
+ * On success, the ipc id is returned.
 */
static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
-		struct ipc_ops *ops, struct ipc_params *params)
+		const struct ipc_ops *ops, struct ipc_params *params)
{
	struct kern_ipc_perm *ipcp;
	int flg = params->flg;
	int err;
-retry:
-	err = idr_pre_get(&ids->ipcs_idr, GFP_KERNEL);

	/*
	 * Take the lock as a writer since we are potentially going to add
	 * a new entry + read locks are not "upgradable"
	 */
-	down_write(&ids->rw_mutex);
+	down_write(&ids->rwsem);
	ipcp = ipc_findkey(ids, params->key);
	if (ipcp == NULL) {
		/* key not used */
		if (!(flg & IPC_CREAT))
			err = -ENOENT;
-		else if (!err)
-			err = -ENOMEM;
		else
			err = ops->getnew(ns, params);
	} else {
@@ -349,53 +407,44 @@ retry:
			 * ipc_check_perms returns the IPC id on
			 * success
			 */
-			err = ipc_check_perms(ipcp, ops, params);
+			err = ipc_check_perms(ns, ipcp, ops, params);
		}
		ipc_unlock(ipcp);
	}
-	up_write(&ids->rw_mutex);
-
-	if (err == -EAGAIN)
-		goto retry;
+	up_write(&ids->rwsem);

	return err;
}

/**
- * ipc_rmid - remove an IPC identifier
- * @ids: IPC identifier set
- * @ipcp: ipc perm structure containing the identifier to remove
+ * ipc_rmid - remove an ipc identifier
+ * @ids: ipc identifier set
+ * @ipcp: ipc perm structure containing the identifier to remove
 *
- * ipc_ids.rw_mutex (as a writer) and the spinlock for this ID are held
- * before this function is called, and remain locked on the exit.
+ * ipc_ids.rwsem (as a writer) and the spinlock for this ID are held
+ * before this function is called, and remain locked on the exit.
 */
-
void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
	int lid = ipcid_to_idx(ipcp->id);

	idr_remove(&ids->ipcs_idr, lid);
-
	ids->in_use--;
-
-	ipcp->deleted = 1;
-
-	return;
+	ipcp->deleted = true;
}

/**
- * ipc_alloc - allocate ipc space
- * @size: size desired
+ * ipc_alloc - allocate ipc space
+ * @size: size desired
 *
- * Allocate memory from the appropriate pools and return a pointer to it.
- * NULL is returned if the allocation fails
+ * Allocate memory from the appropriate pools and return a pointer to it.
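The branchy middle of ipcget_public() is exactly the IPC_CREAT / IPC_EXCL contract visible from user space. A short sketch (the key value is an arbitrary example): exclusive creation of an existing key fails with EEXIST, and plain lookup of a removed key fails with ENOENT.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	key_t key = 0x1234;	/* arbitrary example key */
	int id = semget(key, 1, IPC_CREAT | 0600);

	if (semget(key, 1, IPC_CREAT | IPC_EXCL | 0600) < 0)
		printf("exclusive create: %s\n", strerror(errno));	/* EEXIST */
	semctl(id, 0, IPC_RMID);
	if (semget(key, 1, 0600) < 0)
		printf("plain lookup: %s\n", strerror(errno));		/* ENOENT */
	return 0;
}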
+ * NULL is returned if the allocation fails */ - -void* ipc_alloc(int size) +void *ipc_alloc(int size) { - void* out; - if(size > PAGE_SIZE) + void *out; + if (size > PAGE_SIZE) out = vmalloc(size); else out = kmalloc(size, GFP_KERNEL); @@ -403,183 +452,94 @@ void* ipc_alloc(int size) } /** - * ipc_free - free ipc space - * @ptr: pointer returned by ipc_alloc - * @size: size of block + * ipc_free - free ipc space + * @ptr: pointer returned by ipc_alloc + * @size: size of block * - * Free a block created with ipc_alloc(). The caller must know the size - * used in the allocation call. + * Free a block created with ipc_alloc(). The caller must know the size + * used in the allocation call. */ - -void ipc_free(void* ptr, int size) +void ipc_free(void *ptr, int size) { - if(size > PAGE_SIZE) + if (size > PAGE_SIZE) vfree(ptr); else kfree(ptr); } -/* - * rcu allocations: - * There are three headers that are prepended to the actual allocation: - * - during use: ipc_rcu_hdr. - * - during the rcu grace period: ipc_rcu_grace. - * - [only if vmalloc]: ipc_rcu_sched. - * Their lifetime doesn't overlap, thus the headers share the same memory. - * Unlike a normal union, they are right-aligned, thus some container_of - * forward/backward casting is necessary: - */ -struct ipc_rcu_hdr -{ - int refcount; - int is_vmalloc; - void *data[0]; -}; - - -struct ipc_rcu_grace -{ - struct rcu_head rcu; - /* "void *" makes sure alignment of following data is sane. */ - void *data[0]; -}; - -struct ipc_rcu_sched -{ - struct work_struct work; - /* "void *" makes sure alignment of following data is sane. */ - void *data[0]; -}; - -#define HDRLEN_KMALLOC (sizeof(struct ipc_rcu_grace) > sizeof(struct ipc_rcu_hdr) ? \ - sizeof(struct ipc_rcu_grace) : sizeof(struct ipc_rcu_hdr)) -#define HDRLEN_VMALLOC (sizeof(struct ipc_rcu_sched) > HDRLEN_KMALLOC ? \ - sizeof(struct ipc_rcu_sched) : HDRLEN_KMALLOC) - -static inline int rcu_use_vmalloc(int size) -{ - /* Too big for a single page? */ - if (HDRLEN_KMALLOC + size > PAGE_SIZE) - return 1; - return 0; -} - /** - * ipc_rcu_alloc - allocate ipc and rcu space - * @size: size desired + * ipc_rcu_alloc - allocate ipc and rcu space + * @size: size desired * - * Allocate memory for the rcu header structure + the object. - * Returns the pointer to the object. - * NULL is returned if the allocation fails. + * Allocate memory for the rcu header structure + the object. + * Returns the pointer to the object or NULL upon failure. */ - -void* ipc_rcu_alloc(int size) +void *ipc_rcu_alloc(int size) { - void* out; - /* - * We prepend the allocation with the rcu struct, and - * workqueue if necessary (for vmalloc). 
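ipc_alloc() and ipc_free() above pick an allocator by size and make the caller remember that size on the free path. A rough user-space analogue of the same pattern, assuming nothing about the kernel allocators: heap allocation below a page, mmap() beyond it.

#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

static void *example_alloc(size_t size)
{
	if (size > (size_t)sysconf(_SC_PAGESIZE)) {
		void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
			       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
		return p == MAP_FAILED ? NULL : p;
	}
	return malloc(size);
}

static void example_free(void *ptr, size_t size)
{
	/* like ipc_free(), the caller must supply the original size */
	if (size > (size_t)sysconf(_SC_PAGESIZE))
		munmap(ptr, size);
	else
		free(ptr);
}

int main(void)
{
	void *small = example_alloc(64);
	void *big = example_alloc(1 << 20);

	example_free(small, 64);
	example_free(big, 1 << 20);
	return 0;
}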
+ /* + * We prepend the allocation with the rcu struct */ - if (rcu_use_vmalloc(size)) { - out = vmalloc(HDRLEN_VMALLOC + size); - if (out) { - out += HDRLEN_VMALLOC; - container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1; - container_of(out, struct ipc_rcu_hdr, data)->refcount = 1; - } - } else { - out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL); - if (out) { - out += HDRLEN_KMALLOC; - container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0; - container_of(out, struct ipc_rcu_hdr, data)->refcount = 1; - } - } - - return out; + struct ipc_rcu *out = ipc_alloc(sizeof(struct ipc_rcu) + size); + if (unlikely(!out)) + return NULL; + atomic_set(&out->refcount, 1); + return out + 1; } -void ipc_rcu_getref(void *ptr) +int ipc_rcu_getref(void *ptr) { - container_of(ptr, struct ipc_rcu_hdr, data)->refcount++; -} + struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; -static void ipc_do_vfree(struct work_struct *work) -{ - vfree(container_of(work, struct ipc_rcu_sched, work)); + return atomic_inc_not_zero(&p->refcount); } -/** - * ipc_schedule_free - free ipc + rcu space - * @head: RCU callback structure for queued work - * - * Since RCU callback function is called in bh, - * we need to defer the vfree to schedule_work(). - */ -static void ipc_schedule_free(struct rcu_head *head) +void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)) { - struct ipc_rcu_grace *grace; - struct ipc_rcu_sched *sched; + struct ipc_rcu *p = ((struct ipc_rcu *)ptr) - 1; - grace = container_of(head, struct ipc_rcu_grace, rcu); - sched = container_of(&(grace->data[0]), struct ipc_rcu_sched, - data[0]); + if (!atomic_dec_and_test(&p->refcount)) + return; - INIT_WORK(&sched->work, ipc_do_vfree); - schedule_work(&sched->work); + call_rcu(&p->rcu, func); } -/** - * ipc_immediate_free - free ipc + rcu space - * @head: RCU callback structure that contains pointer to be freed - * - * Free from the RCU callback context. - */ -static void ipc_immediate_free(struct rcu_head *head) +void ipc_rcu_free(struct rcu_head *head) { - struct ipc_rcu_grace *free = - container_of(head, struct ipc_rcu_grace, rcu); - kfree(free); -} + struct ipc_rcu *p = container_of(head, struct ipc_rcu, rcu); -void ipc_rcu_putref(void *ptr) -{ - if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0) - return; - - if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { - call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu, - ipc_schedule_free); - } else { - call_rcu(&container_of(ptr, struct ipc_rcu_grace, data)->rcu, - ipc_immediate_free); - } + if (is_vmalloc_addr(p)) + vfree(p); + else + kfree(p); } /** - * ipcperms - check IPC permissions - * @ipcp: IPC permission set - * @flag: desired permission set. + * ipcperms - check ipc permissions + * @ns: ipc namespace + * @ipcp: ipc permission set + * @flag: desired permission set + * + * Check user, group, other permissions for access + * to ipc resources. return 0 if allowed * - * Check user, group, other permissions for access - * to ipc resources. 
return 0 if allowed + * @flag will most probably be 0 or S_...UGO from <linux/stat.h> */ - -int ipcperms (struct kern_ipc_perm *ipcp, short flag) -{ /* flag will most probably be 0 or S_...UGO from <linux/stat.h> */ - int requested_mode, granted_mode, err; +int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag) +{ + kuid_t euid = current_euid(); + int requested_mode, granted_mode; - if (unlikely((err = audit_ipc_obj(ipcp)))) - return err; + audit_ipc_obj(ipcp); requested_mode = (flag >> 6) | (flag >> 3) | flag; granted_mode = ipcp->mode; - if (current->euid == ipcp->cuid || current->euid == ipcp->uid) + if (uid_eq(euid, ipcp->cuid) || + uid_eq(euid, ipcp->uid)) granted_mode >>= 6; else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid)) granted_mode >>= 3; /* is there some bit set in requested_mode but not in granted_mode? */ - if ((requested_mode & ~granted_mode & 0007) && - !capable(CAP_IPC_OWNER)) + if ((requested_mode & ~granted_mode & 0007) && + !ns_capable(ns->user_ns, CAP_IPC_OWNER)) return -1; return security_ipc_permission(ipcp, flag); @@ -591,36 +551,33 @@ int ipcperms (struct kern_ipc_perm *ipcp, short flag) */ /** - * kernel_to_ipc64_perm - convert kernel ipc permissions to user - * @in: kernel permissions - * @out: new style IPC permissions + * kernel_to_ipc64_perm - convert kernel ipc permissions to user + * @in: kernel permissions + * @out: new style ipc permissions * - * Turn the kernel object @in into a set of permissions descriptions - * for returning to userspace (@out). + * Turn the kernel object @in into a set of permissions descriptions + * for returning to userspace (@out). */ - - -void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out) +void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out) { out->key = in->key; - out->uid = in->uid; - out->gid = in->gid; - out->cuid = in->cuid; - out->cgid = in->cgid; + out->uid = from_kuid_munged(current_user_ns(), in->uid); + out->gid = from_kgid_munged(current_user_ns(), in->gid); + out->cuid = from_kuid_munged(current_user_ns(), in->cuid); + out->cgid = from_kgid_munged(current_user_ns(), in->cgid); out->mode = in->mode; out->seq = in->seq; } /** - * ipc64_perm_to_ipc_perm - convert new ipc permissions to old - * @in: new style IPC permissions - * @out: old style IPC permissions + * ipc64_perm_to_ipc_perm - convert new ipc permissions to old + * @in: new style ipc permissions + * @out: old style ipc permissions * - * Turn the new style permissions object @in into a compatibility - * object and store it into the @out pointer. + * Turn the new style permissions object @in into a compatibility + * object and store it into the @out pointer. */ - -void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) +void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out) { out->key = in->key; SET_UID(out->uid, in->uid); @@ -632,128 +589,96 @@ void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out) } /** - * ipc_lock - Lock an ipc structure without rw_mutex held - * @ids: IPC identifier set + * ipc_obtain_object + * @ids: ipc identifier set * @id: ipc id to look for * - * Look for an id in the ipc ids idr and lock the associated ipc object. - * - * The ipc object is locked on exit. + * Look for an id in the ipc ids idr and return associated ipc object. * - * This is the routine that should be called when the rw_mutex is not already - * held, i.e. 
idr tree not protected: it protects the idr tree in read mode
- * during the idr_find().
+ * Call inside the RCU critical section.
+ * The ipc object is *not* locked on exit.
 */
-
-struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
+struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
	int lid = ipcid_to_idx(id);

-	down_read(&ids->rw_mutex);
-
-	rcu_read_lock();
	out = idr_find(&ids->ipcs_idr, lid);
-	if (out == NULL) {
-		rcu_read_unlock();
-		up_read(&ids->rw_mutex);
+	if (!out)
		return ERR_PTR(-EINVAL);
-	}
-
-	up_read(&ids->rw_mutex);
-
-	spin_lock(&out->lock);
-
-	/* ipc_rmid() may have already freed the ID while ipc_lock
-	 * was spinning: here verify that the structure is still valid
-	 */
-	if (out->deleted) {
-		spin_unlock(&out->lock);
-		rcu_read_unlock();
-		return ERR_PTR(-EINVAL);
-	}

	return out;
}

/**
- * ipc_lock_down - Lock an ipc structure with rw_sem held
- * @ids: IPC identifier set
+ * ipc_lock - lock an ipc structure without rwsem held
+ * @ids: ipc identifier set
 * @id: ipc id to look for
 *
 * Look for an id in the ipc ids idr and lock the associated ipc object.
 *
- * The ipc object is locked on exit.
- *
- * This is the routine that should be called when the rw_mutex is already
- * held, i.e. idr tree protected.
+ * The ipc object is locked on successful exit.
 */
-
-struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *ids, int id)
+struct kern_ipc_perm *ipc_lock(struct ipc_ids *ids, int id)
{
	struct kern_ipc_perm *out;
-	int lid = ipcid_to_idx(id);

	rcu_read_lock();
-	out = idr_find(&ids->ipcs_idr, lid);
-	if (out == NULL) {
-		rcu_read_unlock();
-		return ERR_PTR(-EINVAL);
-	}
+	out = ipc_obtain_object(ids, id);
+	if (IS_ERR(out))
+		goto err1;

	spin_lock(&out->lock);

-	/*
-	 * No need to verify that the structure is still valid since the
-	 * rw_mutex is held.
+	/* ipc_rmid() may have already freed the ID while ipc_lock
+	 * was spinning: here verify that the structure is still valid
	 */
-	return out;
-}
-
-struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id)
-{
-	struct kern_ipc_perm *out;
-
-	out = ipc_lock_down(ids, id);
-	if (IS_ERR(out))
+	if (ipc_valid_object(out))
		return out;

-	if (ipc_checkid(out, id)) {
-		ipc_unlock(out);
-		return ERR_PTR(-EIDRM);
-	}
-
+	spin_unlock(&out->lock);
+	out = ERR_PTR(-EINVAL);
+err1:
+	rcu_read_unlock();
	return out;
}

-struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
+/**
+ * ipc_obtain_object_check
+ * @ids: ipc identifier set
+ * @id: ipc id to look for
+ *
+ * Similar to ipc_obtain_object() but also checks
+ * the ipc object reference counter.
+ *
+ * Call inside the RCU critical section.
+ * The ipc object is *not* locked on exit.
+ */
+struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
{
-	struct kern_ipc_perm *out;
+	struct kern_ipc_perm *out = ipc_obtain_object(ids, id);

-	out = ipc_lock(ids, id);
	if (IS_ERR(out))
-		return out;
+		goto out;

-	if (ipc_checkid(out, id)) {
-		ipc_unlock(out);
+	if (ipc_checkid(out, id))
		return ERR_PTR(-EIDRM);
-	}
-
+out:
	return out;
}

/**
 * ipcget - Common sys_*get() code
- * @ns : namsepace
- * @ids : IPC identifier set
- * @ops : operations to be called on ipc object creation, permission checks
- *	and further checks
- * @params : the parameters needed by the previous operations.
+ * @ns: namespace
+ * @ids: ipc identifier set
+ * @ops: operations to be called on ipc object creation, permission checks
+ *	 and further checks
+ * @params: the parameters needed by the previous operations.
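The sequence test in ipc_checkid() is what turns use of a removed identifier into a clean error instead of access to a recycled slot. Observed from user space: an id kept across IPC_RMID goes stale, and a later semop() fails with EIDRM (or EINVAL once the slot has been reused).

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };

	semctl(id, 0, IPC_RMID);
	if (semop(id, &op, 1) < 0)
		printf("stale id: %s\n", strerror(errno));
	return 0;
}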
*
* Common routine called by sys_msgget(), sys_semget() and sys_shmget().
*/
int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
-			struct ipc_ops *ops, struct ipc_params *params)
+			const struct ipc_ops *ops, struct ipc_params *params)
{
	if (params->key == IPC_PRIVATE)
		return ipcget_new(ns, ids, ops, params);
@@ -761,19 +686,83 @@ int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
		return ipcget_public(ns, ids, ops, params);
}

-#ifdef __ARCH_WANT_IPC_PARSE_VERSION
+/**
+ * ipc_update_perm - update the permissions of an ipc object
+ * @in: the permission given as input.
+ * @out: the permission of the ipc to set.
+ */
+int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
+{
+	kuid_t uid = make_kuid(current_user_ns(), in->uid);
+	kgid_t gid = make_kgid(current_user_ns(), in->gid);
+	if (!uid_valid(uid) || !gid_valid(gid))
+		return -EINVAL;
+
+	out->uid = uid;
+	out->gid = gid;
+	out->mode = (out->mode & ~S_IRWXUGO)
+		| (in->mode & S_IRWXUGO);
+
+	return 0;
+}
+
+/**
+ * ipcctl_pre_down_nolock - retrieve an ipc and check permissions for some IPC_XXX cmd
+ * @ns: ipc namespace
+ * @ids: the table of ids where to look for the ipc
+ * @id: the id of the ipc to retrieve
+ * @cmd: the cmd to check
+ * @perm: the permission to set
+ * @extra_perm: one extra permission parameter used by msq
+ *
+ * This function does some common audit and permissions check for some IPC_XXX
+ * cmd and is called from semctl_down, shmctl_down and msgctl_down.
+ * It must be called without any lock held and
+ *  - retrieves the ipc with the given id in the given table.
+ *  - performs some audit and permission check, depending on the given cmd
+ *  - returns a pointer to the ipc object, or the corresponding error otherwise.
+ *
+ * Call holding both the rwsem and the rcu read lock.
+ */
+struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
+					struct ipc_ids *ids, int id, int cmd,
+					struct ipc64_perm *perm, int extra_perm)
+{
+	kuid_t euid;
+	int err = -EPERM;
+	struct kern_ipc_perm *ipcp;
+
+	ipcp = ipc_obtain_object_check(ids, id);
+	if (IS_ERR(ipcp)) {
+		err = PTR_ERR(ipcp);
+		goto err;
+	}
+
+	audit_ipc_obj(ipcp);
+	if (cmd == IPC_SET)
+		audit_ipc_set_perm(extra_perm, perm->uid,
+				   perm->gid, perm->mode);
+
+	euid = current_euid();
+	if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid)  ||
+	    ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+		return ipcp; /* successful lookup */
+err:
+	return ERR_PTR(err);
+}
+
+#ifdef CONFIG_ARCH_WANT_IPC_PARSE_VERSION

/**
- * ipc_parse_version - IPC call version
- * @cmd: pointer to command
+ * ipc_parse_version - ipc call version
+ * @cmd: pointer to command
 *
- * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
- * The @cmd value is turned from an encoding command and version into
- * just the command code.
+ * Return IPC_64 for new style IPC and IPC_OLD for old style IPC.
+ * The @cmd value is turned from an encoding command and version into
+ * just the command code.
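ipcctl_pre_down_nolock() and ipc_update_perm() sit under the IPC_SET command. A user-space sketch that drives that path: fetch the current semid_ds, tighten the mode bits, and write it back. On Linux the caller declares the semun union itself.

#include <sys/ipc.h>
#include <sys/sem.h>

union semun {
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	struct semid_ds ds;
	union semun arg = { .buf = &ds };
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0666);

	semctl(id, 0, IPC_STAT, arg);
	ds.sem_perm.mode = (ds.sem_perm.mode & ~0777) | 0600;
	semctl(id, 0, IPC_SET, arg);	/* exercises ipc_update_perm() */
	semctl(id, 0, IPC_RMID);
	return 0;
}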
*/ - -int ipc_parse_version (int *cmd) +int ipc_parse_version(int *cmd) { if (*cmd & IPC_64) { *cmd ^= IPC_64; @@ -783,7 +772,7 @@ int ipc_parse_version (int *cmd) } } -#endif /* __ARCH_WANT_IPC_PARSE_VERSION */ +#endif /* CONFIG_ARCH_WANT_IPC_PARSE_VERSION */ #ifdef CONFIG_PROC_FS struct ipc_proc_iter { @@ -810,11 +799,12 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos, if (total >= ids->in_use) return NULL; - for ( ; pos < IPCMNI; pos++) { + for (; pos < IPCMNI; pos++) { ipc = idr_find(&ids->ipcs_idr, pos); if (ipc != NULL) { *new_pos = pos + 1; - ipc_lock_by_ptr(ipc); + rcu_read_lock(); + ipc_lock_object(ipc); return ipc; } } @@ -852,7 +842,7 @@ static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos) * Take the lock - this will be released by the corresponding * call to stop(). */ - down_read(&ids->rw_mutex); + down_read(&ids->rwsem); /* pos < 0 is invalid */ if (*pos < 0) @@ -879,7 +869,7 @@ static void sysvipc_proc_stop(struct seq_file *s, void *it) ids = &iter->ns->ids[iface->ids]; /* Release the lock we took in start() */ - up_read(&ids->rw_mutex); + up_read(&ids->rwsem); } static int sysvipc_proc_show(struct seq_file *s, void *it) @@ -893,7 +883,7 @@ static int sysvipc_proc_show(struct seq_file *s, void *it) return iface->show(s, it); } -static struct seq_operations sysvipc_proc_seqops = { +static const struct seq_operations sysvipc_proc_seqops = { .start = sysvipc_proc_start, .stop = sysvipc_proc_stop, .next = sysvipc_proc_next, @@ -912,19 +902,18 @@ static int sysvipc_proc_open(struct inode *inode, struct file *file) goto out; ret = seq_open(file, &sysvipc_proc_seqops); - if (ret) - goto out_kfree; + if (ret) { + kfree(iter); + goto out; + } seq = file->private_data; seq->private = iter; - iter->iface = PDE(inode)->data; + iter->iface = PDE_DATA(inode); iter->ns = get_ipc_ns(current->nsproxy->ipc_ns); out: return ret; -out_kfree: - kfree(iter); - goto out; } static int sysvipc_proc_release(struct inode *inode, struct file *file) diff --git a/ipc/util.h b/ipc/util.h index f37d160c98f..1a5a0fcd099 100644 --- a/ipc/util.h +++ b/ipc/util.h @@ -10,17 +10,26 @@ #ifndef _IPC_UTIL_H #define _IPC_UTIL_H +#include <linux/unistd.h> #include <linux/err.h> -#define USHRT_MAX 0xffff #define SEQ_MULTIPLIER (IPCMNI) -void sem_init (void); -void msg_init (void); -void shm_init (void); +void sem_init(void); +void msg_init(void); +void shm_init(void); struct ipc_namespace; +#ifdef CONFIG_POSIX_MQUEUE +extern void mq_clear_sbinfo(struct ipc_namespace *ns); +extern void mq_put_mnt(struct ipc_namespace *ns); +#else +static inline void mq_clear_sbinfo(struct ipc_namespace *ns) { } +static inline void mq_put_mnt(struct ipc_namespace *ns) { } +#endif + +#ifdef CONFIG_SYSVIPC void sem_init_ns(struct ipc_namespace *ns); void msg_init_ns(struct ipc_namespace *ns); void shm_init_ns(struct ipc_namespace *ns); @@ -28,6 +37,22 @@ void shm_init_ns(struct ipc_namespace *ns); void sem_exit_ns(struct ipc_namespace *ns); void msg_exit_ns(struct ipc_namespace *ns); void shm_exit_ns(struct ipc_namespace *ns); +#else +static inline void sem_init_ns(struct ipc_namespace *ns) { } +static inline void msg_init_ns(struct ipc_namespace *ns) { } +static inline void shm_init_ns(struct ipc_namespace *ns) { } + +static inline void sem_exit_ns(struct ipc_namespace *ns) { } +static inline void msg_exit_ns(struct ipc_namespace *ns) { } +static inline void shm_exit_ns(struct ipc_namespace *ns) { } +#endif + +struct ipc_rcu { + struct rcu_head rcu; + atomic_t refcount; +} 
____cacheline_aligned_in_smp; + +#define ipc_rcu_to_struct(p) ((void *)(p+1)) /* * Structure that holds the parameters needed by the ipc operations @@ -53,9 +78,9 @@ struct ipc_params { * . routine to call for an extra check if needed */ struct ipc_ops { - int (*getnew) (struct ipc_namespace *, struct ipc_params *); - int (*associate) (struct kern_ipc_perm *, int); - int (*more_checks) (struct kern_ipc_perm *, struct ipc_params *); + int (*getnew)(struct ipc_namespace *, struct ipc_params *); + int (*associate)(struct kern_ipc_perm *, int); + int (*more_checks)(struct kern_ipc_perm *, struct ipc_params *); }; struct seq_file; @@ -74,24 +99,26 @@ void __init ipc_init_proc_interface(const char *path, const char *header, #define IPC_SHM_IDS 2 #define ipcid_to_idx(id) ((id) % SEQ_MULTIPLIER) +#define ipcid_to_seqx(id) ((id) / SEQ_MULTIPLIER) +#define IPCID_SEQ_MAX min_t(int, INT_MAX/SEQ_MULTIPLIER, USHRT_MAX) -/* must be called with ids->rw_mutex acquired for writing */ +/* must be called with ids->rwsem acquired for writing */ int ipc_addid(struct ipc_ids *, struct kern_ipc_perm *, int); -/* must be called with ids->rw_mutex acquired for reading */ +/* must be called with ids->rwsem acquired for reading */ int ipc_get_maxid(struct ipc_ids *); /* must be called with both locks acquired. */ void ipc_rmid(struct ipc_ids *, struct kern_ipc_perm *); /* must be called with ipcp locked */ -int ipcperms(struct kern_ipc_perm *ipcp, short flg); +int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flg); /* for rare, potentially huge allocations. * both function can sleep */ -void* ipc_alloc(int size); -void ipc_free(void* ptr, int size); +void *ipc_alloc(int size); +void ipc_free(void *ptr, int size); /* * For allocation that need to be freed by RCU. @@ -99,61 +126,82 @@ void ipc_free(void* ptr, int size); * getref increases the refcount, the putref call that reduces the recount * to 0 schedules the rcu destruction. Caller must guarantee locking. */ -void* ipc_rcu_alloc(int size); -void ipc_rcu_getref(void *ptr); -void ipc_rcu_putref(void *ptr); +void *ipc_rcu_alloc(int size); +int ipc_rcu_getref(void *ptr); +void ipc_rcu_putref(void *ptr, void (*func)(struct rcu_head *head)); +void ipc_rcu_free(struct rcu_head *head); -/* - * ipc_lock_down: called with rw_mutex held - * ipc_lock: called without that lock held - */ -struct kern_ipc_perm *ipc_lock_down(struct ipc_ids *, int); struct kern_ipc_perm *ipc_lock(struct ipc_ids *, int); +struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id); void kernel_to_ipc64_perm(struct kern_ipc_perm *in, struct ipc64_perm *out); void ipc64_perm_to_ipc_perm(struct ipc64_perm *in, struct ipc_perm *out); +int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out); +struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns, + struct ipc_ids *ids, int id, int cmd, + struct ipc64_perm *perm, int extra_perm); -#if defined(__ia64__) || defined(__x86_64__) || defined(__hppa__) || defined(__XTENSA__) - /* On IA-64, we always use the "64-bit version" of the IPC structures. */ +#ifndef CONFIG_ARCH_WANT_IPC_PARSE_VERSION +/* On IA-64, we always use the "64-bit version" of the IPC structures. 
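ipc_parse_version() strips the IPC_64 flag out of the command word before the command is dispatched. A tiny sketch of that split, with the IPC_64 value (0x0100) and the IPC_STAT command number (2) assumed from the uapi headers rather than included:

#include <stdio.h>

#define EXAMPLE_IPC_64 0x0100	/* IPC_64, assumed uapi value */

int main(void)
{
	int cmd = 2 | EXAMPLE_IPC_64;	/* e.g. IPC_STAT with IPC_64 set */

	if (cmd & EXAMPLE_IPC_64) {
		cmd ^= EXAMPLE_IPC_64;	/* strip the flag, keep the command */
		printf("new-style call, cmd=%d\n", cmd);
	} else {
		printf("old-style call, cmd=%d\n", cmd);
	}
	return 0;
}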
*/ # define ipc_parse_version(cmd) IPC_64 #else -int ipc_parse_version (int *cmd); +int ipc_parse_version(int *cmd); #endif extern void free_msg(struct msg_msg *msg); -extern struct msg_msg *load_msg(const void __user *src, int len); -extern int store_msg(void __user *dest, struct msg_msg *msg, int len); +extern struct msg_msg *load_msg(const void __user *src, size_t len); +extern struct msg_msg *copy_msg(struct msg_msg *src, struct msg_msg *dst); +extern int store_msg(void __user *dest, struct msg_msg *msg, size_t len); + +extern void recompute_msgmni(struct ipc_namespace *); static inline int ipc_buildid(int id, int seq) { return SEQ_MULTIPLIER * seq + id; } -/* - * Must be called with ipcp locked - */ static inline int ipc_checkid(struct kern_ipc_perm *ipcp, int uid) { - if (uid / SEQ_MULTIPLIER != ipcp->seq) - return 1; - return 0; + return uid / SEQ_MULTIPLIER != ipcp->seq; } -static inline void ipc_lock_by_ptr(struct kern_ipc_perm *perm) +static inline void ipc_lock_object(struct kern_ipc_perm *perm) { - rcu_read_lock(); spin_lock(&perm->lock); } -static inline void ipc_unlock(struct kern_ipc_perm *perm) +static inline void ipc_unlock_object(struct kern_ipc_perm *perm) { spin_unlock(&perm->lock); +} + +static inline void ipc_assert_locked_object(struct kern_ipc_perm *perm) +{ + assert_spin_locked(&perm->lock); +} + +static inline void ipc_unlock(struct kern_ipc_perm *perm) +{ + ipc_unlock_object(perm); rcu_read_unlock(); } -struct kern_ipc_perm *ipc_lock_check_down(struct ipc_ids *ids, int id); -struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id); -int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, - struct ipc_ops *ops, struct ipc_params *params); +/* + * ipc_valid_object() - helper to sort out IPC_RMID races for codepaths + * where the respective ipc_ids.rwsem is not being held down. + * Checks whether the ipc object is still around or if it's gone already, as + * ipc_rmid() may have already freed the ID while the ipc lock was spinning. + * Needs to be called with kern_ipc_perm.lock held -- exception made for one + * checkpoint case at sys_semtimedop() as noted in code commentary. + */ +static inline bool ipc_valid_object(struct kern_ipc_perm *perm) +{ + return !perm->deleted; +} +struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id); +int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids, + const struct ipc_ops *ops, struct ipc_params *params); +void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, + void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)); #endif |
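Finally, the seq_file plumbing registered through ipc_init_proc_interface() is what backs the /proc/sysvipc files iterated under ids->rwsem above. A short reader to see its output; the column layout is the business of each mechanism's show routine and differs between sem, msg and shm.

#include <stdio.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/sysvipc/sem", "r");

	if (!f) {
		perror("/proc/sysvipc/sem");
		return 1;
	}
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}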
