Diffstat (limited to 'ipc/mqueue.c')
| -rw-r--r-- | ipc/mqueue.c | 1116 |
1 file changed, 648 insertions, 468 deletions
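The diff below converts the per-queue message store from a flat array of struct msg_msg pointers to an rb-tree keyed on message priority, moves the global limits into struct ipc_namespace, and switches the syscalls over to SYSCALL_DEFINEx()/fdget(). As a point of reference only (none of this code is part of the diff), here is a minimal userspace sketch of the behaviour the new msg_insert()/msg_get() paths implement: higher-priority messages are received first. The queue name, sizes and priorities are arbitrary; link with -lrt.

```c
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
	char buf[64];
	unsigned int prio;
	mqd_t mq;

	/* O_CREAT takes a mode and an optional mq_attr */
	mq = mq_open("/mq_demo", O_CREAT | O_RDWR, 0600, &attr);
	if (mq == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}

	mq_send(mq, "low", 4, 1);	/* low priority: left side of the rb-tree */
	mq_send(mq, "high", 5, 5);	/* high priority: right side of the rb-tree */

	/* msg_get() walks to the rightmost leaf, so "high" comes out first */
	mq_receive(mq, buf, sizeof(buf), &prio);
	printf("got \"%s\" at priority %u\n", buf, prio);

	mq_close(mq);
	mq_unlink("/mq_demo");
	return 0;
}
```

With the old array implementation this ordering came from an O(n) insertion shift on every send; the rb-tree makes insertion logarithmic in the number of distinct priorities in use, with messages of equal priority appended to a per-node list.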
diff --git a/ipc/mqueue.c b/ipc/mqueue.c index 60f7a27f7a9..4fcf39af177 100644 --- a/ipc/mqueue.c +++ b/ipc/mqueue.c @@ -6,7 +6,7 @@ * * Spinlocks: Mohamed Abbas (abbas.mohamed@intel.com) * Lockless receive & send, fd based notify: - * Manfred Spraul (manfred@colorfullife.com) + * Manfred Spraul (manfred@colorfullife.com) * * Audit: George Wilson (ltcgcw@us.ibm.com) * @@ -24,6 +24,7 @@ #include <linux/mqueue.h> #include <linux/msg.h> #include <linux/skbuff.h> +#include <linux/vmalloc.h> #include <linux/netlink.h> #include <linux/syscalls.h> #include <linux/audit.h> @@ -31,6 +32,9 @@ #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/pid.h> +#include <linux/ipc_namespace.h> +#include <linux/user_namespace.h> +#include <linux/slab.h> #include <net/sock.h> #include "util.h" @@ -46,12 +50,11 @@ #define STATE_PENDING 1 #define STATE_READY 2 -/* default values */ -#define DFLT_QUEUESMAX 256 /* max number of message queues */ -#define DFLT_MSGMAX 10 /* max number of messages in each queue */ -#define HARD_MSGMAX (131072/sizeof(void*)) -#define DFLT_MSGSIZEMAX 8192 /* max message size */ - +struct posix_msg_tree_node { + struct rb_node rb_node; + struct list_head msg_list; + int priority; +}; struct ext_wait_queue { /* queue of sleeping tasks */ struct task_struct *task; @@ -65,11 +68,13 @@ struct mqueue_inode_info { struct inode vfs_inode; wait_queue_head_t wait_q; - struct msg_msg **messages; + struct rb_root msg_tree; + struct posix_msg_tree_node *node_cache; struct mq_attr attr; struct sigevent notify; - struct pid* notify_owner; + struct pid *notify_owner; + struct user_namespace *notify_user_ns; struct user_struct *user; /* user who created, for accounting */ struct sock *notify_sock; struct sk_buff *notify_cookie; @@ -82,132 +87,263 @@ struct mqueue_inode_info { static const struct inode_operations mqueue_dir_inode_operations; static const struct file_operations mqueue_file_operations; -static struct super_operations mqueue_super_ops; +static const struct super_operations mqueue_super_ops; static void remove_notification(struct mqueue_inode_info *info); -static spinlock_t mq_lock; static struct kmem_cache *mqueue_inode_cachep; -static struct vfsmount *mqueue_mnt; -static unsigned int queues_count; -static unsigned int queues_max = DFLT_QUEUESMAX; -static unsigned int msg_max = DFLT_MSGMAX; -static unsigned int msgsize_max = DFLT_MSGSIZEMAX; - -static struct ctl_table_header * mq_sysctl_table; +static struct ctl_table_header *mq_sysctl_table; static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode) { return container_of(inode, struct mqueue_inode_info, vfs_inode); } -static struct inode *mqueue_get_inode(struct super_block *sb, int mode, - struct mq_attr *attr) +/* + * This routine should be called with the mq_lock held. 
+ */ +static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode) +{ + return get_ipc_ns(inode->i_sb->s_fs_info); +} + +static struct ipc_namespace *get_ns_from_inode(struct inode *inode) +{ + struct ipc_namespace *ns; + + spin_lock(&mq_lock); + ns = __get_ns_from_inode(inode); + spin_unlock(&mq_lock); + return ns; +} + +/* Auxiliary functions to manipulate messages' list */ +static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info) +{ + struct rb_node **p, *parent = NULL; + struct posix_msg_tree_node *leaf; + + p = &info->msg_tree.rb_node; + while (*p) { + parent = *p; + leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); + + if (likely(leaf->priority == msg->m_type)) + goto insert_msg; + else if (msg->m_type < leaf->priority) + p = &(*p)->rb_left; + else + p = &(*p)->rb_right; + } + if (info->node_cache) { + leaf = info->node_cache; + info->node_cache = NULL; + } else { + leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC); + if (!leaf) + return -ENOMEM; + INIT_LIST_HEAD(&leaf->msg_list); + info->qsize += sizeof(*leaf); + } + leaf->priority = msg->m_type; + rb_link_node(&leaf->rb_node, parent, p); + rb_insert_color(&leaf->rb_node, &info->msg_tree); +insert_msg: + info->attr.mq_curmsgs++; + info->qsize += msg->m_ts; + list_add_tail(&msg->m_list, &leaf->msg_list); + return 0; +} + +static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) +{ + struct rb_node **p, *parent = NULL; + struct posix_msg_tree_node *leaf; + struct msg_msg *msg; + +try_again: + p = &info->msg_tree.rb_node; + while (*p) { + parent = *p; + /* + * During insert, low priorities go to the left and high to the + * right. On receive, we want the highest priorities first, so + * walk all the way to the right. + */ + p = &(*p)->rb_right; + } + if (!parent) { + if (info->attr.mq_curmsgs) { + pr_warn_once("Inconsistency in POSIX message queue, " + "no tree element, but supposedly messages " + "should exist!\n"); + info->attr.mq_curmsgs = 0; + } + return NULL; + } + leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node); + if (unlikely(list_empty(&leaf->msg_list))) { + pr_warn_once("Inconsistency in POSIX message queue, " + "empty leaf node but we haven't implemented " + "lazy leaf delete!\n"); + rb_erase(&leaf->rb_node, &info->msg_tree); + if (info->node_cache) { + info->qsize -= sizeof(*leaf); + kfree(leaf); + } else { + info->node_cache = leaf; + } + goto try_again; + } else { + msg = list_first_entry(&leaf->msg_list, + struct msg_msg, m_list); + list_del(&msg->m_list); + if (list_empty(&leaf->msg_list)) { + rb_erase(&leaf->rb_node, &info->msg_tree); + if (info->node_cache) { + info->qsize -= sizeof(*leaf); + kfree(leaf); + } else { + info->node_cache = leaf; + } + } + } + info->attr.mq_curmsgs--; + info->qsize -= msg->m_ts; + return msg; +} + +static struct inode *mqueue_get_inode(struct super_block *sb, + struct ipc_namespace *ipc_ns, umode_t mode, + struct mq_attr *attr) { + struct user_struct *u = current_user(); struct inode *inode; + int ret = -ENOMEM; inode = new_inode(sb); - if (inode) { - inode->i_mode = mode; - inode->i_uid = current->fsuid; - inode->i_gid = current->fsgid; - inode->i_blocks = 0; - inode->i_mtime = inode->i_ctime = inode->i_atime = - CURRENT_TIME; + if (!inode) + goto err; + + inode->i_ino = get_next_ino(); + inode->i_mode = mode; + inode->i_uid = current_fsuid(); + inode->i_gid = current_fsgid(); + inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME; + + if (S_ISREG(mode)) { + struct mqueue_inode_info *info; + unsigned long mq_bytes, 
mq_treesize; + + inode->i_fop = &mqueue_file_operations; + inode->i_size = FILENT_SIZE; + /* mqueue specific info */ + info = MQUEUE_I(inode); + spin_lock_init(&info->lock); + init_waitqueue_head(&info->wait_q); + INIT_LIST_HEAD(&info->e_wait_q[0].list); + INIT_LIST_HEAD(&info->e_wait_q[1].list); + info->notify_owner = NULL; + info->notify_user_ns = NULL; + info->qsize = 0; + info->user = NULL; /* set when all is ok */ + info->msg_tree = RB_ROOT; + info->node_cache = NULL; + memset(&info->attr, 0, sizeof(info->attr)); + info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max, + ipc_ns->mq_msg_default); + info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, + ipc_ns->mq_msgsize_default); + if (attr) { + info->attr.mq_maxmsg = attr->mq_maxmsg; + info->attr.mq_msgsize = attr->mq_msgsize; + } + /* + * We used to allocate a static array of pointers and account + * the size of that array as well as one msg_msg struct per + * possible message into the queue size. That's no longer + * accurate as the queue is now an rbtree and will grow and + * shrink depending on usage patterns. We can, however, still + * account one msg_msg struct per message, but the nodes are + * allocated depending on priority usage, and most programs + * only use one, or a handful, of priorities. However, since + * this is pinned memory, we need to assume worst case, so + * that means the min(mq_maxmsg, max_priorities) * struct + * posix_msg_tree_node. + */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); - if (S_ISREG(mode)) { - struct mqueue_inode_info *info; - struct task_struct *p = current; - struct user_struct *u = p->user; - unsigned long mq_bytes, mq_msg_tblsz; - - inode->i_fop = &mqueue_file_operations; - inode->i_size = FILENT_SIZE; - /* mqueue specific info */ - info = MQUEUE_I(inode); - spin_lock_init(&info->lock); - init_waitqueue_head(&info->wait_q); - INIT_LIST_HEAD(&info->e_wait_q[0].list); - INIT_LIST_HEAD(&info->e_wait_q[1].list); - info->messages = NULL; - info->notify_owner = NULL; - info->qsize = 0; - info->user = NULL; /* set when all is ok */ - memset(&info->attr, 0, sizeof(info->attr)); - info->attr.mq_maxmsg = DFLT_MSGMAX; - info->attr.mq_msgsize = DFLT_MSGSIZEMAX; - if (attr) { - info->attr.mq_maxmsg = attr->mq_maxmsg; - info->attr.mq_msgsize = attr->mq_msgsize; - } - mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *); - mq_bytes = (mq_msg_tblsz + - (info->attr.mq_maxmsg * info->attr.mq_msgsize)); - - spin_lock(&mq_lock); - if (u->mq_bytes + mq_bytes < u->mq_bytes || - u->mq_bytes + mq_bytes > - p->signal->rlim[RLIMIT_MSGQUEUE].rlim_cur) { - spin_unlock(&mq_lock); - goto out_inode; - } - u->mq_bytes += mq_bytes; + spin_lock(&mq_lock); + if (u->mq_bytes + mq_bytes < u->mq_bytes || + u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) { spin_unlock(&mq_lock); - - info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL); - if (!info->messages) { - spin_lock(&mq_lock); - u->mq_bytes -= mq_bytes; - spin_unlock(&mq_lock); - goto out_inode; - } - /* all is ok */ - info->user = get_uid(u); - } else if (S_ISDIR(mode)) { - inc_nlink(inode); - /* Some things misbehave if size == 0 on a directory */ - inode->i_size = 2 * DIRENT_SIZE; - inode->i_op = &mqueue_dir_inode_operations; - inode->i_fop = &simple_dir_operations; + /* mqueue_evict_inode() releases info->messages */ + ret = -EMFILE; + goto out_inode; } + u->mq_bytes += 
mq_bytes; + spin_unlock(&mq_lock); + + /* all is ok */ + info->user = get_uid(u); + } else if (S_ISDIR(mode)) { + inc_nlink(inode); + /* Some things misbehave if size == 0 on a directory */ + inode->i_size = 2 * DIRENT_SIZE; + inode->i_op = &mqueue_dir_inode_operations; + inode->i_fop = &simple_dir_operations; } + return inode; out_inode: - make_bad_inode(inode); iput(inode); - return NULL; +err: + return ERR_PTR(ret); } static int mqueue_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; + struct ipc_namespace *ns = data; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = MQUEUE_MAGIC; sb->s_op = &mqueue_super_ops; - inode = mqueue_get_inode(sb, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); - if (!inode) - return -ENOMEM; + inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL); + if (IS_ERR(inode)) + return PTR_ERR(inode); - sb->s_root = d_alloc_root(inode); - if (!sb->s_root) { - iput(inode); + sb->s_root = d_make_root(inode); + if (!sb->s_root) return -ENOMEM; - } - return 0; } -static int mqueue_get_sb(struct file_system_type *fs_type, +static struct dentry *mqueue_mount(struct file_system_type *fs_type, int flags, const char *dev_name, - void *data, struct vfsmount *mnt) + void *data) { - return get_sb_single(fs_type, flags, data, mqueue_fill_super, mnt); + if (!(flags & MS_KERNMOUNT)) { + struct ipc_namespace *ns = current->nsproxy->ipc_ns; + /* Don't allow mounting unless the caller has CAP_SYS_ADMIN + * over the ipc namespace. + */ + if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN)) + return ERR_PTR(-EPERM); + + data = ns; + } + return mount_ns(fs_type, flags, data, mqueue_fill_super); } -static void init_once(struct kmem_cache *cachep, void *foo) +static void init_once(void *foo) { struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo; @@ -224,86 +360,119 @@ static struct inode *mqueue_alloc_inode(struct super_block *sb) return &ei->vfs_inode; } -static void mqueue_destroy_inode(struct inode *inode) +static void mqueue_i_callback(struct rcu_head *head) { + struct inode *inode = container_of(head, struct inode, i_rcu); kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode)); } -static void mqueue_delete_inode(struct inode *inode) +static void mqueue_destroy_inode(struct inode *inode) +{ + call_rcu(&inode->i_rcu, mqueue_i_callback); +} + +static void mqueue_evict_inode(struct inode *inode) { struct mqueue_inode_info *info; struct user_struct *user; - unsigned long mq_bytes; - int i; + unsigned long mq_bytes, mq_treesize; + struct ipc_namespace *ipc_ns; + struct msg_msg *msg; + + clear_inode(inode); - if (S_ISDIR(inode->i_mode)) { - clear_inode(inode); + if (S_ISDIR(inode->i_mode)) return; - } + + ipc_ns = get_ns_from_inode(inode); info = MQUEUE_I(inode); spin_lock(&info->lock); - for (i = 0; i < info->attr.mq_curmsgs; i++) - free_msg(info->messages[i]); - kfree(info->messages); + while ((msg = msg_get(info)) != NULL) + free_msg(msg); + kfree(info->node_cache); spin_unlock(&info->lock); - clear_inode(inode); + /* Total amount of bytes accounted for the mqueue */ + mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + + mq_bytes = mq_treesize + (info->attr.mq_maxmsg * + info->attr.mq_msgsize); - mq_bytes = (info->attr.mq_maxmsg * sizeof(struct msg_msg *) + - (info->attr.mq_maxmsg * info->attr.mq_msgsize)); user = info->user; if (user) { spin_lock(&mq_lock); user->mq_bytes -= mq_bytes; - 
queues_count--; + /* + * get_ns_from_inode() ensures that the + * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns + * to which we now hold a reference, or it is NULL. + * We can't put it here under mq_lock, though. + */ + if (ipc_ns) + ipc_ns->mq_queues_count--; spin_unlock(&mq_lock); free_uid(user); } + if (ipc_ns) + put_ipc_ns(ipc_ns); } static int mqueue_create(struct inode *dir, struct dentry *dentry, - int mode, struct nameidata *nd) + umode_t mode, bool excl) { struct inode *inode; struct mq_attr *attr = dentry->d_fsdata; int error; + struct ipc_namespace *ipc_ns; spin_lock(&mq_lock); - if (queues_count >= queues_max && !capable(CAP_SYS_RESOURCE)) { + ipc_ns = __get_ns_from_inode(dir); + if (!ipc_ns) { + error = -EACCES; + goto out_unlock; + } + + if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && + !capable(CAP_SYS_RESOURCE)) { error = -ENOSPC; - goto out_lock; + goto out_unlock; } - queues_count++; + ipc_ns->mq_queues_count++; spin_unlock(&mq_lock); - inode = mqueue_get_inode(dir->i_sb, mode, attr); - if (!inode) { - error = -ENOMEM; + inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr); + if (IS_ERR(inode)) { + error = PTR_ERR(inode); spin_lock(&mq_lock); - queues_count--; - goto out_lock; + ipc_ns->mq_queues_count--; + goto out_unlock; } + put_ipc_ns(ipc_ns); dir->i_size += DIRENT_SIZE; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; d_instantiate(dentry, inode); dget(dentry); return 0; -out_lock: +out_unlock: spin_unlock(&mq_lock); + if (ipc_ns) + put_ipc_ns(ipc_ns); return error; } static int mqueue_unlink(struct inode *dir, struct dentry *dentry) { - struct inode *inode = dentry->d_inode; + struct inode *inode = dentry->d_inode; dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME; dir->i_size -= DIRENT_SIZE; - drop_nlink(inode); - dput(dentry); - return 0; + drop_nlink(inode); + dput(dentry); + return 0; } /* @@ -314,15 +483,11 @@ static int mqueue_unlink(struct inode *dir, struct dentry *dentry) * through std routines) */ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, - size_t count, loff_t * off) + size_t count, loff_t *off) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); char buffer[FILENT_SIZE]; - size_t slen; - loff_t o; - - if (!count) - return 0; + ssize_t ret; spin_lock(&info->lock); snprintf(buffer, sizeof(buffer), @@ -335,26 +500,19 @@ static ssize_t mqueue_read_file(struct file *filp, char __user *u_data, pid_vnr(info->notify_owner)); spin_unlock(&info->lock); buffer[sizeof(buffer)-1] = '\0'; - slen = strlen(buffer)+1; - - o = *off; - if (o > slen) - return 0; - - if (o + count > slen) - count = slen - o; - if (copy_to_user(u_data, buffer + o, count)) - return -EFAULT; + ret = simple_read_from_buffer(u_data, count, off, buffer, + strlen(buffer)); + if (ret <= 0) + return ret; - *off = o + count; - filp->f_path.dentry->d_inode->i_atime = filp->f_path.dentry->d_inode->i_ctime = CURRENT_TIME; - return count; + file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME; + return ret; } static int mqueue_flush_file(struct file *filp, fl_owner_t id) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); spin_lock(&info->lock); if (task_tgid(current) == info->notify_owner) @@ -366,7 +524,7 @@ static int mqueue_flush_file(struct file *filp, fl_owner_t id) static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct 
*poll_tab) { - struct mqueue_inode_info *info = MQUEUE_I(filp->f_path.dentry->d_inode); + struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp)); int retval = 0; poll_wait(filp, &info->wait_q, poll_tab); @@ -405,7 +563,7 @@ static void wq_add(struct mqueue_inode_info *info, int sr, * sr: SEND or RECV */ static int wq_sleep(struct mqueue_inode_info *info, int sr, - long timeout, struct ext_wait_queue *ewp) + ktime_t *timeout, struct ext_wait_queue *ewp) { int retval; signed long time; @@ -416,7 +574,8 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr, set_current_state(TASK_INTERRUPTIBLE); spin_unlock(&info->lock); - time = schedule_timeout(timeout); + time = schedule_hrtimeout_range_clock(timeout, 0, + HRTIMER_MODE_ABS, CLOCK_REALTIME); while (ewp->state == STATE_PENDING) cpu_relax(); @@ -460,30 +619,10 @@ static struct ext_wait_queue *wq_get_first_waiter( return list_entry(ptr, struct ext_wait_queue, list); } -/* Auxiliary functions to manipulate messages' list */ -static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info) -{ - int k; - - k = info->attr.mq_curmsgs - 1; - while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) { - info->messages[k + 1] = info->messages[k]; - k--; - } - info->attr.mq_curmsgs++; - info->qsize += ptr->m_ts; - info->messages[k + 1] = ptr; -} - -static inline struct msg_msg *msg_get(struct mqueue_inode_info *info) -{ - info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts; - return info->messages[info->attr.mq_curmsgs]; -} static inline void set_cookie(struct sk_buff *skb, char code) { - ((char*)skb->data)[NOTIFY_COOKIE_LEN-1] = code; + ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code; } /* @@ -509,8 +648,12 @@ static void __do_notify(struct mqueue_inode_info *info) sig_i.si_errno = 0; sig_i.si_code = SI_MESGQ; sig_i.si_value = info->notify.sigev_value; - sig_i.si_pid = task_tgid_vnr(current); - sig_i.si_uid = current->uid; + /* map current pid/uid into info->owner's namespaces */ + rcu_read_lock(); + sig_i.si_pid = task_tgid_nr_ns(current, + ns_of_pid(info->notify_owner)); + sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid()); + rcu_read_unlock(); kill_pid_info(info->notify.sigev_signo, &sig_i, info->notify_owner); @@ -522,40 +665,23 @@ static void __do_notify(struct mqueue_inode_info *info) } /* after notification unregisters process */ put_pid(info->notify_owner); + put_user_ns(info->notify_user_ns); info->notify_owner = NULL; + info->notify_user_ns = NULL; } wake_up(&info->wait_q); } -static long prepare_timeout(const struct timespec __user *u_arg) +static int prepare_timeout(const struct timespec __user *u_abs_timeout, + ktime_t *expires, struct timespec *ts) { - struct timespec ts, nowts; - long timeout; - - if (u_arg) { - if (unlikely(copy_from_user(&ts, u_arg, - sizeof(struct timespec)))) - return -EFAULT; - - if (unlikely(ts.tv_nsec < 0 || ts.tv_sec < 0 - || ts.tv_nsec >= NSEC_PER_SEC)) - return -EINVAL; - nowts = CURRENT_TIME; - /* first subtract as jiffies can't be too big */ - ts.tv_sec -= nowts.tv_sec; - if (ts.tv_nsec < nowts.tv_nsec) { - ts.tv_nsec += NSEC_PER_SEC; - ts.tv_sec--; - } - ts.tv_nsec -= nowts.tv_nsec; - if (ts.tv_sec < 0) - return 0; - - timeout = timespec_to_jiffies(&ts) + 1; - } else - return MAX_SCHEDULE_TIMEOUT; + if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec))) + return -EFAULT; + if (!timespec_valid(ts)) + return -EINVAL; - return timeout; + *expires = timespec_to_ktime(*ts); + return 0; } static void remove_notification(struct mqueue_inode_info *info) 
@@ -566,192 +692,211 @@ static void remove_notification(struct mqueue_inode_info *info) netlink_sendskb(info->notify_sock, info->notify_cookie); } put_pid(info->notify_owner); + put_user_ns(info->notify_user_ns); info->notify_owner = NULL; + info->notify_user_ns = NULL; } -static int mq_attr_ok(struct mq_attr *attr) +static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) { + int mq_treesize; + unsigned long total_size; + if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) - return 0; + return -EINVAL; if (capable(CAP_SYS_RESOURCE)) { - if (attr->mq_maxmsg > HARD_MSGMAX) - return 0; + if (attr->mq_maxmsg > HARD_MSGMAX || + attr->mq_msgsize > HARD_MSGSIZEMAX) + return -EINVAL; } else { - if (attr->mq_maxmsg > msg_max || - attr->mq_msgsize > msgsize_max) - return 0; + if (attr->mq_maxmsg > ipc_ns->mq_msg_max || + attr->mq_msgsize > ipc_ns->mq_msgsize_max) + return -EINVAL; } /* check for overflow */ if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) - return 0; - if ((unsigned long)(attr->mq_maxmsg * attr->mq_msgsize) + - (attr->mq_maxmsg * sizeof (struct msg_msg *)) < - (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize)) - return 0; - return 1; + return -EOVERFLOW; + mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) + + min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) * + sizeof(struct posix_msg_tree_node); + total_size = attr->mq_maxmsg * attr->mq_msgsize; + if (total_size + mq_treesize < total_size) + return -EOVERFLOW; + return 0; } /* * Invoked when creating a new queue via sys_mq_open */ -static struct file *do_create(struct dentry *dir, struct dentry *dentry, - int oflag, mode_t mode, struct mq_attr __user *u_attr) +static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir, + struct path *path, int oflag, umode_t mode, + struct mq_attr *attr) { - struct mq_attr attr; + const struct cred *cred = current_cred(); int ret; - if (u_attr) { - ret = -EFAULT; - if (copy_from_user(&attr, u_attr, sizeof(attr))) - goto out; - ret = -EINVAL; - if (!mq_attr_ok(&attr)) - goto out; + if (attr) { + ret = mq_attr_ok(ipc_ns, attr); + if (ret) + return ERR_PTR(ret); /* store for use during create */ - dentry->d_fsdata = &attr; + path->dentry->d_fsdata = attr; + } else { + struct mq_attr def_attr; + + def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max, + ipc_ns->mq_msg_default); + def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max, + ipc_ns->mq_msgsize_default); + ret = mq_attr_ok(ipc_ns, &def_attr); + if (ret) + return ERR_PTR(ret); } - mode &= ~current->fs->umask; - ret = vfs_create(dir->d_inode, dentry, mode, NULL); - dentry->d_fsdata = NULL; + mode &= ~current_umask(); + ret = vfs_create(dir, path->dentry, mode, true); + path->dentry->d_fsdata = NULL; if (ret) - goto out; - - return dentry_open(dentry, mqueue_mnt, oflag); - -out: - dput(dentry); - mntput(mqueue_mnt); - return ERR_PTR(ret); + return ERR_PTR(ret); + return dentry_open(path, oflag, cred); } /* Opens existing queue */ -static struct file *do_open(struct dentry *dentry, int oflag) +static struct file *do_open(struct path *path, int oflag) { -static int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, - MAY_READ | MAY_WRITE }; - - if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) { - dput(dentry); - mntput(mqueue_mnt); + static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE, + MAY_READ | MAY_WRITE }; + int acc; + if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY)) return ERR_PTR(-EINVAL); - } - - if (permission(dentry->d_inode, oflag2acc[oflag & O_ACCMODE], NULL)) { - dput(dentry); - mntput(mqueue_mnt); + 
acc = oflag2acc[oflag & O_ACCMODE]; + if (inode_permission(path->dentry->d_inode, acc)) return ERR_PTR(-EACCES); - } - - return dentry_open(dentry, mqueue_mnt, oflag); + return dentry_open(path, oflag, current_cred()); } -asmlinkage long sys_mq_open(const char __user *u_name, int oflag, mode_t mode, - struct mq_attr __user *u_attr) +SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode, + struct mq_attr __user *, u_attr) { - struct dentry *dentry; + struct path path; struct file *filp; - char *name; + struct filename *name; + struct mq_attr attr; int fd, error; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + struct vfsmount *mnt = ipc_ns->mq_mnt; + struct dentry *root = mnt->mnt_root; + int ro; - error = audit_mq_open(oflag, mode, u_attr); - if (error != 0) - return error; + if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr))) + return -EFAULT; + + audit_mq_open(oflag, mode, u_attr ? &attr : NULL); if (IS_ERR(name = getname(u_name))) return PTR_ERR(name); - fd = get_unused_fd(); + fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) goto out_putname; - mutex_lock(&mqueue_mnt->mnt_root->d_inode->i_mutex); - dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); - if (IS_ERR(dentry)) { - error = PTR_ERR(dentry); - goto out_err; + ro = mnt_want_write(mnt); /* we'll drop it in any case */ + error = 0; + mutex_lock(&root->d_inode->i_mutex); + path.dentry = lookup_one_len(name->name, root, strlen(name->name)); + if (IS_ERR(path.dentry)) { + error = PTR_ERR(path.dentry); + goto out_putfd; } - mntget(mqueue_mnt); + path.mnt = mntget(mnt); if (oflag & O_CREAT) { - if (dentry->d_inode) { /* entry already exists */ - audit_inode(name, dentry); - error = -EEXIST; - if (oflag & O_EXCL) + if (path.dentry->d_inode) { /* entry already exists */ + audit_inode(name, path.dentry, 0); + if (oflag & O_EXCL) { + error = -EEXIST; goto out; - filp = do_open(dentry, oflag); + } + filp = do_open(&path, oflag); } else { - filp = do_create(mqueue_mnt->mnt_root, dentry, - oflag, mode, u_attr); + if (ro) { + error = ro; + goto out; + } + audit_inode_parent_hidden(name, root); + filp = do_create(ipc_ns, root->d_inode, + &path, oflag, mode, + u_attr ? 
&attr : NULL); } } else { - error = -ENOENT; - if (!dentry->d_inode) + if (!path.dentry->d_inode) { + error = -ENOENT; goto out; - audit_inode(name, dentry); - filp = do_open(dentry, oflag); + } + audit_inode(name, path.dentry, 0); + filp = do_open(&path, oflag); } - if (IS_ERR(filp)) { + if (!IS_ERR(filp)) + fd_install(fd, filp); + else error = PTR_ERR(filp); - goto out_putfd; - } - - set_close_on_exec(fd, 1); - fd_install(fd, filp); - goto out_upsem; - out: - dput(dentry); - mntput(mqueue_mnt); + path_put(&path); out_putfd: - put_unused_fd(fd); -out_err: - fd = error; -out_upsem: - mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex); + if (error) { + put_unused_fd(fd); + fd = error; + } + mutex_unlock(&root->d_inode->i_mutex); + if (!ro) + mnt_drop_write(mnt); out_putname: putname(name); return fd; } -asmlinkage long sys_mq_unlink(const char __user *u_name) +SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name) { int err; - char *name; + struct filename *name; struct dentry *dentry; struct inode *inode = NULL; + struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns; + struct vfsmount *mnt = ipc_ns->mq_mnt; name = getname(u_name); if (IS_ERR(name)) return PTR_ERR(name); - mutex_lock_nested(&mqueue_mnt->mnt_root->d_inode->i_mutex, - I_MUTEX_PARENT); - dentry = lookup_one_len(name, mqueue_mnt->mnt_root, strlen(name)); + audit_inode_parent_hidden(name, mnt->mnt_root); + err = mnt_want_write(mnt); + if (err) + goto out_name; + mutex_lock_nested(&mnt->mnt_root->d_inode->i_mutex, I_MUTEX_PARENT); + dentry = lookup_one_len(name->name, mnt->mnt_root, + strlen(name->name)); if (IS_ERR(dentry)) { err = PTR_ERR(dentry); goto out_unlock; } - if (!dentry->d_inode) { + inode = dentry->d_inode; + if (!inode) { err = -ENOENT; - goto out_err; + } else { + ihold(inode); + err = vfs_unlink(dentry->d_parent->d_inode, dentry, NULL); } - - inode = dentry->d_inode; - if (inode) - atomic_inc(&inode->i_count); - - err = vfs_unlink(dentry->d_parent->d_inode, dentry); -out_err: dput(dentry); out_unlock: - mutex_unlock(&mqueue_mnt->mnt_root->d_inode->i_mutex); - putname(name); + mutex_unlock(&mnt->mnt_root->d_inode->i_mutex); if (inode) iput(inode); + mnt_drop_write(mnt); +out_name: + putname(name); return err; } @@ -797,7 +942,8 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) wake_up_interruptible(&info->wait_q); return; } - msg_insert(sender->msg, info); + if (msg_insert(sender->msg, info)) + return; list_del(&sender->list); sender->state = STATE_PENDING; wake_up_process(sender->task); @@ -805,41 +951,51 @@ static inline void pipelined_receive(struct mqueue_inode_info *info) sender->state = STATE_READY; } -asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, - size_t msg_len, unsigned int msg_prio, - const struct timespec __user *u_abs_timeout) +SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr, + size_t, msg_len, unsigned int, msg_prio, + const struct timespec __user *, u_abs_timeout) { - struct file *filp; + struct fd f; struct inode *inode; struct ext_wait_queue wait; struct ext_wait_queue *receiver; struct msg_msg *msg_ptr; struct mqueue_inode_info *info; - long timeout; - int ret; - - ret = audit_mq_timedsend(mqdes, msg_len, msg_prio, u_abs_timeout); - if (ret != 0) - return ret; + ktime_t expires, *timeout = NULL; + struct timespec ts; + struct posix_msg_tree_node *new_leaf = NULL; + int ret = 0; + + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &expires, &ts); + if (res) + return res; + timeout = &expires; + } if 
(unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX)) return -EINVAL; - timeout = prepare_timeout(u_abs_timeout); + audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL); - ret = -EBADF; - filp = fget(mqdes); - if (unlikely(!filp)) + f = fdget(mqdes); + if (unlikely(!f.file)) { + ret = -EBADF; goto out; + } - inode = filp->f_path.dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); - audit_inode(NULL, filp->f_path.dentry); + audit_inode(NULL, f.file->f_path.dentry, 0); - if (unlikely(!(filp->f_mode & FMODE_WRITE))) + if (unlikely(!(f.file->f_mode & FMODE_WRITE))) { + ret = -EBADF; goto out_fput; + } if (unlikely(msg_len > info->attr.mq_msgsize)) { ret = -EMSGSIZE; @@ -856,74 +1012,106 @@ asmlinkage long sys_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr, msg_ptr->m_ts = msg_len; msg_ptr->m_type = msg_prio; + /* + * msg_insert really wants us to have a valid, spare node struct so + * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will + * fall back to that if necessary. + */ + if (!info->node_cache) + new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); + spin_lock(&info->lock); + if (!info->node_cache && new_leaf) { + /* Save our speculative allocation into the cache */ + INIT_LIST_HEAD(&new_leaf->msg_list); + info->node_cache = new_leaf; + info->qsize += sizeof(*new_leaf); + new_leaf = NULL; + } else { + kfree(new_leaf); + } + if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { - if (filp->f_flags & O_NONBLOCK) { - spin_unlock(&info->lock); + if (f.file->f_flags & O_NONBLOCK) { ret = -EAGAIN; - } else if (unlikely(timeout < 0)) { - spin_unlock(&info->lock); - ret = timeout; } else { wait.task = current; wait.msg = (void *) msg_ptr; wait.state = STATE_NONE; ret = wq_sleep(info, SEND, timeout, &wait); + /* + * wq_sleep must be called with info->lock held, and + * returns with the lock released + */ + goto out_free; } - if (ret < 0) - free_msg(msg_ptr); } else { receiver = wq_get_first_waiter(info, RECV); if (receiver) { pipelined_send(info, msg_ptr, receiver); } else { /* adds message to the queue */ - msg_insert(msg_ptr, info); + ret = msg_insert(msg_ptr, info); + if (ret) + goto out_unlock; __do_notify(info); } inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; - spin_unlock(&info->lock); - ret = 0; } +out_unlock: + spin_unlock(&info->lock); +out_free: + if (ret) + free_msg(msg_ptr); out_fput: - fput(filp); + fdput(f); out: return ret; } -asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, - size_t msg_len, unsigned int __user *u_msg_prio, - const struct timespec __user *u_abs_timeout) +SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr, + size_t, msg_len, unsigned int __user *, u_msg_prio, + const struct timespec __user *, u_abs_timeout) { - long timeout; ssize_t ret; struct msg_msg *msg_ptr; - struct file *filp; + struct fd f; struct inode *inode; struct mqueue_inode_info *info; struct ext_wait_queue wait; + ktime_t expires, *timeout = NULL; + struct timespec ts; + struct posix_msg_tree_node *new_leaf = NULL; + + if (u_abs_timeout) { + int res = prepare_timeout(u_abs_timeout, &expires, &ts); + if (res) + return res; + timeout = &expires; + } - ret = audit_mq_timedreceive(mqdes, msg_len, u_msg_prio, u_abs_timeout); - if (ret != 0) - return ret; - - timeout = prepare_timeout(u_abs_timeout); + audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? 
&ts : NULL); - ret = -EBADF; - filp = fget(mqdes); - if (unlikely(!filp)) + f = fdget(mqdes); + if (unlikely(!f.file)) { + ret = -EBADF; goto out; + } - inode = filp->f_path.dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); - audit_inode(NULL, filp->f_path.dentry); + audit_inode(NULL, f.file->f_path.dentry, 0); - if (unlikely(!(filp->f_mode & FMODE_READ))) + if (unlikely(!(f.file->f_mode & FMODE_READ))) { + ret = -EBADF; goto out_fput; + } /* checks if buffer is big enough */ if (unlikely(msg_len < info->attr.mq_msgsize)) { @@ -931,16 +1119,29 @@ asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, goto out_fput; } + /* + * msg_insert really wants us to have a valid, spare node struct so + * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will + * fall back to that if necessary. + */ + if (!info->node_cache) + new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL); + spin_lock(&info->lock); + + if (!info->node_cache && new_leaf) { + /* Save our speculative allocation into the cache */ + INIT_LIST_HEAD(&new_leaf->msg_list); + info->node_cache = new_leaf; + info->qsize += sizeof(*new_leaf); + } else { + kfree(new_leaf); + } + if (info->attr.mq_curmsgs == 0) { - if (filp->f_flags & O_NONBLOCK) { + if (f.file->f_flags & O_NONBLOCK) { spin_unlock(&info->lock); ret = -EAGAIN; - msg_ptr = NULL; - } else if (unlikely(timeout < 0)) { - spin_unlock(&info->lock); - ret = timeout; - msg_ptr = NULL; } else { wait.task = current; wait.state = STATE_NONE; @@ -968,7 +1169,7 @@ asmlinkage ssize_t sys_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr, free_msg(msg_ptr); } out_fput: - fput(filp); + fdput(f); out: return ret; } @@ -978,28 +1179,28 @@ out: * and he isn't currently owner of notification, will be silently discarded. * It isn't explicitly defined in the POSIX. */ -asmlinkage long sys_mq_notify(mqd_t mqdes, - const struct sigevent __user *u_notification) +SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes, + const struct sigevent __user *, u_notification) { int ret; - struct file *filp; + struct fd f; struct sock *sock; struct inode *inode; struct sigevent notification; struct mqueue_inode_info *info; struct sk_buff *nc; - ret = audit_mq_notify(mqdes, u_notification); - if (ret != 0) - return ret; - - nc = NULL; - sock = NULL; - if (u_notification != NULL) { + if (u_notification) { if (copy_from_user(¬ification, u_notification, sizeof(struct sigevent))) return -EFAULT; + } + + audit_mq_notify(mqdes, u_notification ? 
&notification : NULL); + nc = NULL; + sock = NULL; + if (u_notification != NULL) { if (unlikely(notification.sigev_notify != SIGEV_NONE && notification.sigev_notify != SIGEV_SIGNAL && notification.sigev_notify != SIGEV_THREAD)) @@ -1013,13 +1214,14 @@ asmlinkage long sys_mq_notify(mqd_t mqdes, /* create the notify skb */ nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL); - ret = -ENOMEM; - if (!nc) + if (!nc) { + ret = -ENOMEM; goto out; - ret = -EFAULT; + } if (copy_from_user(nc->data, notification.sigev_value.sival_ptr, NOTIFY_COOKIE_LEN)) { + ret = -EFAULT; goto out; } @@ -1027,12 +1229,13 @@ asmlinkage long sys_mq_notify(mqd_t mqdes, skb_put(nc, NOTIFY_COOKIE_LEN); /* and attach it to the socket */ retry: - filp = fget(notification.sigev_signo); - ret = -EBADF; - if (!filp) + f = fdget(notification.sigev_signo); + if (!f.file) { + ret = -EBADF; goto out; - sock = netlink_getsockbyfilp(filp); - fput(filp); + } + sock = netlink_getsockbyfilp(f.file); + fdput(f); if (IS_ERR(sock)) { ret = PTR_ERR(sock); sock = NULL; @@ -1040,9 +1243,9 @@ retry: } timeo = MAX_SCHEDULE_TIMEOUT; - ret = netlink_attachskb(sock, nc, 0, &timeo, NULL); + ret = netlink_attachskb(sock, nc, &timeo, NULL); if (ret == 1) - goto retry; + goto retry; if (ret) { sock = NULL; nc = NULL; @@ -1051,14 +1254,17 @@ retry: } } - ret = -EBADF; - filp = fget(mqdes); - if (!filp) + f = fdget(mqdes); + if (!f.file) { + ret = -EBADF; goto out; + } - inode = filp->f_path.dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); ret = 0; @@ -1090,27 +1296,28 @@ retry: } info->notify_owner = get_pid(task_tgid(current)); + info->notify_user_ns = get_user_ns(current_user_ns()); inode->i_atime = inode->i_ctime = CURRENT_TIME; } spin_unlock(&info->lock); out_fput: - fput(filp); + fdput(f); out: - if (sock) { + if (sock) netlink_detachskb(sock, nc); - } else if (nc) { + else if (nc) dev_kfree_skb(nc); - } + return ret; } -asmlinkage long sys_mq_getsetattr(mqd_t mqdes, - const struct mq_attr __user *u_mqstat, - struct mq_attr __user *u_omqstat) +SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes, + const struct mq_attr __user *, u_mqstat, + struct mq_attr __user *, u_omqstat) { int ret; struct mq_attr mqstat, omqstat; - struct file *filp; + struct fd f; struct inode *inode; struct mqueue_inode_info *info; @@ -1121,30 +1328,31 @@ asmlinkage long sys_mq_getsetattr(mqd_t mqdes, return -EINVAL; } - ret = -EBADF; - filp = fget(mqdes); - if (!filp) + f = fdget(mqdes); + if (!f.file) { + ret = -EBADF; goto out; + } - inode = filp->f_path.dentry->d_inode; - if (unlikely(filp->f_op != &mqueue_file_operations)) + inode = file_inode(f.file); + if (unlikely(f.file->f_op != &mqueue_file_operations)) { + ret = -EBADF; goto out_fput; + } info = MQUEUE_I(inode); spin_lock(&info->lock); omqstat = info->attr; - omqstat.mq_flags = filp->f_flags & O_NONBLOCK; + omqstat.mq_flags = f.file->f_flags & O_NONBLOCK; if (u_mqstat) { - ret = audit_mq_getsetattr(mqdes, &mqstat); - if (ret != 0) { - spin_unlock(&info->lock); - goto out_fput; - } + audit_mq_getsetattr(mqdes, &mqstat); + spin_lock(&f.file->f_lock); if (mqstat.mq_flags & O_NONBLOCK) - filp->f_flags |= O_NONBLOCK; + f.file->f_flags |= O_NONBLOCK; else - filp->f_flags &= ~O_NONBLOCK; + f.file->f_flags &= ~O_NONBLOCK; + spin_unlock(&f.file->f_lock); inode->i_atime = inode->i_ctime = CURRENT_TIME; } @@ -1157,7 +1365,7 @@ asmlinkage long sys_mq_getsetattr(mqd_t 
mqdes, ret = -EFAULT; out_fput: - fput(filp); + fdput(f); out: return ret; } @@ -1172,75 +1380,50 @@ static const struct file_operations mqueue_file_operations = { .flush = mqueue_flush_file, .poll = mqueue_poll_file, .read = mqueue_read_file, + .llseek = default_llseek, }; -static struct super_operations mqueue_super_ops = { +static const struct super_operations mqueue_super_ops = { .alloc_inode = mqueue_alloc_inode, .destroy_inode = mqueue_destroy_inode, + .evict_inode = mqueue_evict_inode, .statfs = simple_statfs, - .delete_inode = mqueue_delete_inode, - .drop_inode = generic_delete_inode, }; static struct file_system_type mqueue_fs_type = { .name = "mqueue", - .get_sb = mqueue_get_sb, + .mount = mqueue_mount, .kill_sb = kill_litter_super, + .fs_flags = FS_USERNS_MOUNT, }; -static int msg_max_limit_min = DFLT_MSGMAX; -static int msg_max_limit_max = HARD_MSGMAX; - -static int msg_maxsize_limit_min = DFLT_MSGSIZEMAX; -static int msg_maxsize_limit_max = INT_MAX; - -static ctl_table mq_sysctls[] = { - { - .procname = "queues_max", - .data = &queues_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec, - }, - { - .procname = "msg_max", - .data = &msg_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .extra1 = &msg_max_limit_min, - .extra2 = &msg_max_limit_max, - }, - { - .procname = "msgsize_max", - .data = &msgsize_max, - .maxlen = sizeof(int), - .mode = 0644, - .proc_handler = &proc_dointvec_minmax, - .extra1 = &msg_maxsize_limit_min, - .extra2 = &msg_maxsize_limit_max, - }, - { .ctl_name = 0 } -}; +int mq_init_ns(struct ipc_namespace *ns) +{ + ns->mq_queues_count = 0; + ns->mq_queues_max = DFLT_QUEUESMAX; + ns->mq_msg_max = DFLT_MSGMAX; + ns->mq_msgsize_max = DFLT_MSGSIZEMAX; + ns->mq_msg_default = DFLT_MSG; + ns->mq_msgsize_default = DFLT_MSGSIZE; + + ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); + if (IS_ERR(ns->mq_mnt)) { + int err = PTR_ERR(ns->mq_mnt); + ns->mq_mnt = NULL; + return err; + } + return 0; +} -static ctl_table mq_sysctl_dir[] = { - { - .procname = "mqueue", - .mode = 0555, - .child = mq_sysctls, - }, - { .ctl_name = 0 } -}; +void mq_clear_sbinfo(struct ipc_namespace *ns) +{ + ns->mq_mnt->mnt_sb->s_fs_info = NULL; +} -static ctl_table mq_sysctl_root[] = { - { - .ctl_name = CTL_FS, - .procname = "fs", - .mode = 0555, - .child = mq_sysctl_dir, - }, - { .ctl_name = 0 } -}; +void mq_put_mnt(struct ipc_namespace *ns) +{ + kern_unmount(ns->mq_mnt); +} static int __init init_mqueue_fs(void) { @@ -1252,22 +1435,19 @@ static int __init init_mqueue_fs(void) if (mqueue_inode_cachep == NULL) return -ENOMEM; - /* ignore failues - they are not fatal */ - mq_sysctl_table = register_sysctl_table(mq_sysctl_root); + /* ignore failures - they are not fatal */ + mq_sysctl_table = mq_register_sysctl_table(); error = register_filesystem(&mqueue_fs_type); if (error) goto out_sysctl; - if (IS_ERR(mqueue_mnt = kern_mount(&mqueue_fs_type))) { - error = PTR_ERR(mqueue_mnt); - goto out_filesystem; - } - - /* internal initialization - not common for vfs */ - queues_count = 0; spin_lock_init(&mq_lock); + error = mq_init_ns(&init_ipc_ns); + if (error) + goto out_filesystem; + return 0; out_filesystem: @@ -1279,4 +1459,4 @@ out_sysctl: return error; } -__initcall(init_mqueue_fs); +device_initcall(init_mqueue_fs); |
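One detail worth pulling out of the hunks above: because the rb-tree grows and shrinks at run time, mqueue_get_inode() and mqueue_evict_inode() now charge a worst-case estimate against RLIMIT_MSGQUEUE instead of the size of a fixed pointer array. A rough sketch of that arithmetic follows (not from the commit); the struct sizes are placeholders, not real kernel values, which depend on architecture and config.

```c
#include <stdio.h>

#define MQ_PRIO_MAX		32768	/* as in include/uapi/linux/mqueue.h */
#define SIZEOF_MSG_MSG		48UL	/* placeholder for sizeof(struct msg_msg) */
#define SIZEOF_TREE_NODE	40UL	/* placeholder for sizeof(struct posix_msg_tree_node) */

/* Mirrors the mq_treesize/mq_bytes computation in mqueue_get_inode():
 * one msg_msg per possible message, plus one tree node per priority that
 * could be in use, capped at min(mq_maxmsg, MQ_PRIO_MAX), plus the
 * message payload itself.
 */
static unsigned long mq_worst_case_bytes(unsigned long maxmsg, unsigned long msgsize)
{
	unsigned long prios = maxmsg < MQ_PRIO_MAX ? maxmsg : MQ_PRIO_MAX;
	unsigned long treesize = maxmsg * SIZEOF_MSG_MSG + prios * SIZEOF_TREE_NODE;

	return treesize + maxmsg * msgsize;
}

int main(void)
{
	/* e.g. 10 messages of 8192 bytes (the old DFLT_MSGMAX / DFLT_MSGSIZEMAX) */
	printf("%lu bytes charged against RLIMIT_MSGQUEUE\n",
	       mq_worst_case_bytes(10, 8192));
	return 0;
}
```

The accounting is pessimistic on purpose: as the comment in the diff notes, tree nodes are only allocated for priorities actually in use, but pinned memory has to be charged for the worst case.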
