Diffstat (limited to 'fs/notify/fanotify/fanotify_user.c')
 -rw-r--r--   fs/notify/fanotify/fanotify_user.c   498
 1 file changed, 262 insertions, 236 deletions
diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
index 3568c8a8b13..3fdc8a3e113 100644
--- a/fs/notify/fanotify/fanotify_user.c
+++ b/fs/notify/fanotify/fanotify_user.c
@@ -13,25 +13,36 @@
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/uaccess.h>
+#include <linux/compat.h>
 
 #include <asm/ioctls.h>
 
 #include "../../mount.h"
+#include "../fdinfo.h"
+#include "fanotify.h"
 
 #define FANOTIFY_DEFAULT_MAX_EVENTS	16384
 #define FANOTIFY_DEFAULT_MAX_MARKS	8192
 #define FANOTIFY_DEFAULT_MAX_LISTENERS	128
 
+/*
+ * All flags that may be specified in parameter event_f_flags of fanotify_init.
+ *
+ * Internal and external open flags are stored together in field f_flags of
+ * struct file. Only external open flags shall be allowed in event_f_flags.
+ * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be
+ * excluded.
+ */
+#define	FANOTIFY_INIT_ALL_EVENT_F_BITS	( \
+		O_ACCMODE | O_APPEND | O_NONBLOCK | \
+		__O_SYNC | O_DSYNC | O_CLOEXEC | \
+		O_LARGEFILE | O_NOATIME )
+
 extern const struct fsnotify_ops fanotify_fsnotify_ops;
 
 static struct kmem_cache *fanotify_mark_cache __read_mostly;
-static struct kmem_cache *fanotify_response_event_cache __read_mostly;
-
-struct fanotify_response_event {
-	struct list_head list;
-	__s32 fd;
-	struct fsnotify_event *event;
-};
+struct kmem_cache *fanotify_event_cachep __read_mostly;
+struct kmem_cache *fanotify_perm_event_cachep __read_mostly;
 
 /*
  * Get an fsnotify notification event if one exists and is small
@@ -58,11 +69,11 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,
 	return fsnotify_remove_notify_event(group);
 }
 
-static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)
+static int create_fd(struct fsnotify_group *group,
+		     struct fanotify_event_info *event,
+		     struct file **file)
 {
 	int client_fd;
-	struct dentry *dentry;
-	struct vfsmount *mnt;
 	struct file *new_file;
 
 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
@@ -71,22 +82,14 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event
 	if (client_fd < 0)
 		return client_fd;
 
-	if (event->data_type != FSNOTIFY_EVENT_PATH) {
-		WARN_ON(1);
-		put_unused_fd(client_fd);
-		return -EINVAL;
-	}
-
 	/*
 	 * we need a new file handle for the userspace program so it can read even if it was
 	 * originally opened O_WRONLY.
 	 */
-	dentry = dget(event->path.dentry);
-	mnt = mntget(event->path.mnt);
 	/* it's possible this event was an overflow event.  in that case dentry and mnt
 	 * are NULL;  That's fine, just don't call dentry open */
-	if (dentry && mnt)
-		new_file = dentry_open(dentry, mnt,
+	if (event->path.dentry && event->path.mnt)
+		new_file = dentry_open(&event->path,
 				       group->fanotify_data.f_flags | FMODE_NONOTIFY,
 				       current_cred());
 	else
@@ -102,30 +105,35 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event
 		put_unused_fd(client_fd);
 		client_fd = PTR_ERR(new_file);
 	} else {
-		fd_install(client_fd, new_file);
+		*file = new_file;
 	}
 
 	return client_fd;
 }
 
 static int fill_event_metadata(struct fsnotify_group *group,
-				   struct fanotify_event_metadata *metadata,
-				   struct fsnotify_event *event)
+			       struct fanotify_event_metadata *metadata,
+			       struct fsnotify_event *fsn_event,
+			       struct file **file)
 {
 	int ret = 0;
+	struct fanotify_event_info *event;
 
 	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__,
-		 group, metadata, event);
+		 group, metadata, fsn_event);
 
+	*file = NULL;
+	event = container_of(fsn_event, struct fanotify_event_info, fse);
 	metadata->event_len = FAN_EVENT_METADATA_LEN;
 	metadata->metadata_len = FAN_EVENT_METADATA_LEN;
 	metadata->vers = FANOTIFY_METADATA_VERSION;
-	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
+	metadata->reserved = 0;
+	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;
 	metadata->pid = pid_vnr(event->tgid);
-	if (unlikely(event->mask & FAN_Q_OVERFLOW))
+	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW))
 		metadata->fd = FAN_NOFD;
 	else {
-		metadata->fd = create_fd(group, event);
+		metadata->fd = create_fd(group, event, file);
 		if (metadata->fd < 0)
 			ret = metadata->fd;
 	}
@@ -134,33 +142,34 @@ static int fill_event_metadata(struct fsnotify_group *group,
 }
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group,
-						  __s32 fd)
+static struct fanotify_perm_event_info *dequeue_event(
+				struct fsnotify_group *group, int fd)
 {
-	struct fanotify_response_event *re, *return_re = NULL;
+	struct fanotify_perm_event_info *event, *return_e = NULL;
 
-	mutex_lock(&group->fanotify_data.access_mutex);
-	list_for_each_entry(re, &group->fanotify_data.access_list, list) {
-		if (re->fd != fd)
+	spin_lock(&group->fanotify_data.access_lock);
+	list_for_each_entry(event, &group->fanotify_data.access_list,
+			    fae.fse.list) {
+		if (event->fd != fd)
 			continue;
 
-		list_del_init(&re->list);
-		return_re = re;
+		list_del_init(&event->fae.fse.list);
+		return_e = event;
 		break;
 	}
-	mutex_unlock(&group->fanotify_data.access_mutex);
+	spin_unlock(&group->fanotify_data.access_lock);
 
-	pr_debug("%s: found return_re=%p\n", __func__, return_re);
+	pr_debug("%s: found return_re=%p\n", __func__, return_e);
 
-	return return_re;
+	return return_e;
 }
 
 static int process_access_response(struct fsnotify_group *group,
 				   struct fanotify_response *response_struct)
 {
-	struct fanotify_response_event *re;
-	__s32 fd = response_struct->fd;
-	__u32 response = response_struct->response;
+	struct fanotify_perm_event_info *event;
+	int fd = response_struct->fd;
+	int response = response_struct->response;
 
 	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,
 		 fd, response);
@@ -180,83 +189,15 @@ static int process_access_response(struct fsnotify_group *group,
 	if (fd < 0)
 		return -EINVAL;
 
-	re = dequeue_re(group, fd);
-	if (!re)
+	event = dequeue_event(group, fd);
+	if (!event)
 		return -ENOENT;
 
-	re->event->response = response;
-
+	event->response = response;
 	wake_up(&group->fanotify_data.access_waitq);
 
-	kmem_cache_free(fanotify_response_event_cache, re);
-
 	return 0;
 }
-
-static int prepare_for_access_response(struct fsnotify_group *group,
-				       struct fsnotify_event *event,
-				       __s32 fd)
-{
-	struct fanotify_response_event *re;
-
-	if (!(event->mask & FAN_ALL_PERM_EVENTS))
-		return 0;
-
-	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL);
-	if (!re)
-		return -ENOMEM;
-
-	re->event = event;
-	re->fd = fd;
-
-	mutex_lock(&group->fanotify_data.access_mutex);
-
-	if (atomic_read(&group->fanotify_data.bypass_perm)) {
-		mutex_unlock(&group->fanotify_data.access_mutex);
-		kmem_cache_free(fanotify_response_event_cache, re);
-		event->response = FAN_ALLOW;
-		return 0;
-	}
-
-	list_add_tail(&re->list, &group->fanotify_data.access_list);
-	mutex_unlock(&group->fanotify_data.access_mutex);
-
-	return 0;
-}
-
-static void remove_access_response(struct fsnotify_group *group,
-				   struct fsnotify_event *event,
-				   __s32 fd)
-{
-	struct fanotify_response_event *re;
-
-	if (!(event->mask & FAN_ALL_PERM_EVENTS))
-		return;
-
-	re = dequeue_re(group, fd);
-	if (!re)
-		return;
-
-	BUG_ON(re->event != event);
-
-	kmem_cache_free(fanotify_response_event_cache, re);
-
-	return;
-}
-#else
-static int prepare_for_access_response(struct fsnotify_group *group,
-				       struct fsnotify_event *event,
-				       __s32 fd)
-{
-	return 0;
-}
-
-static void remove_access_response(struct fsnotify_group *group,
-				   struct fsnotify_event *event,
-				   __s32 fd)
-{
-	return;
-}
 #endif
 
 static ssize_t copy_event_to_user(struct fsnotify_group *group,
@@ -264,38 +205,35 @@
 				  char __user *buf)
 {
 	struct fanotify_event_metadata fanotify_event_metadata;
+	struct file *f;
 	int fd, ret;
 
 	pr_debug("%s: group=%p event=%p\n", __func__, group, event);
 
-	ret = fill_event_metadata(group, &fanotify_event_metadata, event);
+	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f);
 	if (ret < 0)
-		goto out;
+		return ret;
 
 	fd = fanotify_event_metadata.fd;
-	ret = prepare_for_access_response(group, event, fd);
-	if (ret)
-		goto out_close_fd;
-
 	ret = -EFAULT;
 	if (copy_to_user(buf, &fanotify_event_metadata,
 			 fanotify_event_metadata.event_len))
-		goto out_kill_access_response;
+		goto out_close_fd;
+
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+	if (event->mask & FAN_ALL_PERM_EVENTS)
+		FANOTIFY_PE(event)->fd = fd;
+#endif
 
+	if (fd != FAN_NOFD)
+		fd_install(fd, f);
 	return fanotify_event_metadata.event_len;
 
-out_kill_access_response:
-	remove_access_response(group, event, fd);
 out_close_fd:
-	if (fd != FAN_NOFD)
-		sys_close(fd);
-out:
-#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-	if (event->mask & FAN_ALL_PERM_EVENTS) {
-		event->response = FAN_DENY;
-		wake_up(&group->fanotify_data.access_waitq);
+	if (fd != FAN_NOFD) {
+		put_unused_fd(fd);
+		fput(f);
 	}
-#endif
 	return ret;
 }
@@ -335,30 +273,50 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
 		kevent = get_one_event(group, count);
 		mutex_unlock(&group->notification_mutex);
 
-		if (kevent) {
+		if (IS_ERR(kevent)) {
 			ret = PTR_ERR(kevent);
-			if (IS_ERR(kevent))
+			break;
+		}
+
+		if (!kevent) {
+			ret = -EAGAIN;
+			if (file->f_flags & O_NONBLOCK)
 				break;
-			ret = copy_event_to_user(group, kevent, buf);
-			fsnotify_put_event(kevent);
-			if (ret < 0)
+
+			ret = -ERESTARTSYS;
+			if (signal_pending(current))
+				break;
+
+			if (start != buf)
 				break;
-			buf += ret;
-			count -= ret;
+
+			schedule();
 			continue;
 		}
 
-		ret = -EAGAIN;
-		if (file->f_flags & O_NONBLOCK)
-			break;
-		ret = -ERESTARTSYS;
-		if (signal_pending(current))
-			break;
-
-		if (start != buf)
-			break;
-
-		schedule();
+		ret = copy_event_to_user(group, kevent, buf);
+		/*
+		 * Permission events get queued to wait for response. Other
+		 * events can be destroyed now.
+		 */
+		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) {
+			fsnotify_destroy_event(group, kevent);
+			if (ret < 0)
+				break;
+		} else {
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+			if (ret < 0) {
+				FANOTIFY_PE(kevent)->response = FAN_DENY;
+				wake_up(&group->fanotify_data.access_waitq);
+				break;
+			}
+			spin_lock(&group->fanotify_data.access_lock);
+			list_add_tail(&kevent->list,
+				      &group->fanotify_data.access_list);
+			spin_unlock(&group->fanotify_data.access_lock);
+#endif
+		}
+		buf += ret;
+		count -= ret;
 	}
 
 	finish_wait(&group->notification_waitq, &wait);
@@ -399,27 +357,27 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 	struct fsnotify_group *group = file->private_data;
 
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-	struct fanotify_response_event *re, *lre;
+	struct fanotify_perm_event_info *event, *next;
 
-	mutex_lock(&group->fanotify_data.access_mutex);
+	spin_lock(&group->fanotify_data.access_lock);
 	atomic_inc(&group->fanotify_data.bypass_perm);
-	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) {
-		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group,
-			 re, re->event);
-
-		list_del_init(&re->list);
-		re->event->response = FAN_ALLOW;
+	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list,
+				 fae.fse.list) {
+		pr_debug("%s: found group=%p event=%p\n", __func__, group,
+			 event);
 
-		kmem_cache_free(fanotify_response_event_cache, re);
+		list_del_init(&event->fae.fse.list);
+		event->response = FAN_ALLOW;
 	}
-	mutex_unlock(&group->fanotify_data.access_mutex);
+	spin_unlock(&group->fanotify_data.access_lock);
 
 	wake_up(&group->fanotify_data.access_waitq);
 #endif
 
+	/* matches the fanotify_init->fsnotify_alloc_group */
-	fsnotify_put_group(group);
+	fsnotify_destroy_group(group);
 
 	return 0;
 }
@@ -427,7 +385,7 @@ static int fanotify_release(struct inode *ignored, struct file *file)
 static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 {
 	struct fsnotify_group *group;
-	struct fsnotify_event_holder *holder;
+	struct fsnotify_event *fsn_event;
 	void __user *p;
 	int ret = -ENOTTY;
 	size_t send_len = 0;
@@ -439,7 +397,7 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 	switch (cmd) {
 	case FIONREAD:
 		mutex_lock(&group->notification_mutex);
-		list_for_each_entry(holder, &group->notification_list, event_list)
+		list_for_each_entry(fsn_event, &group->notification_list, list)
 			send_len += FAN_EVENT_METADATA_LEN;
 		mutex_unlock(&group->notification_mutex);
 		ret = put_user(send_len, (int __user *) p);
@@ -450,6 +408,7 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar
 }
 
 static const struct file_operations fanotify_fops = {
+	.show_fdinfo	= fanotify_show_fdinfo,
 	.poll		= fanotify_poll,
 	.read		= fanotify_read,
 	.write		= fanotify_write,
@@ -474,24 +433,22 @@ static int fanotify_find_path(int dfd, const char __user *filename,
 		 dfd, filename, flags);
 
 	if (filename == NULL) {
-		struct file *file;
-		int fput_needed;
+		struct fd f = fdget(dfd);
 
 		ret = -EBADF;
-		file = fget_light(dfd, &fput_needed);
-		if (!file)
+		if (!f.file)
 			goto out;
 
 		ret = -ENOTDIR;
 		if ((flags & FAN_MARK_ONLYDIR) &&
-		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) {
-			fput_light(file, fput_needed);
+		    !(S_ISDIR(file_inode(f.file)->i_mode))) {
+			fdput(f);
 			goto out;
 		}
 
-		*path = file->f_path;
+		*path = f.file->f_path;
 		path_get(path);
-		fput_light(file, fput_needed);
+		fdput(f);
 	} else {
 		unsigned int lookup_flags = 0;
@@ -515,7 +472,8 @@ out:
 
 static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
 					    __u32 mask,
-					    unsigned int flags)
+					    unsigned int flags,
+					    int *destroy)
 {
 	__u32 oldmask;
 
@@ -529,8 +487,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,
 	}
 	spin_unlock(&fsn_mark->lock);
 
-	if (!(oldmask & ~mask))
-		fsnotify_destroy_mark(fsn_mark);
+	*destroy = !(oldmask & ~mask);
 
 	return mask & oldmask;
 }
@@ -541,12 +498,21 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,
 {
 	struct fsnotify_mark *fsn_mark = NULL;
 	__u32 removed;
+	int destroy_mark;
 
+	mutex_lock(&group->mark_mutex);
 	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
-	if (!fsn_mark)
+	if (!fsn_mark) {
+		mutex_unlock(&group->mark_mutex);
 		return -ENOENT;
+	}
+
+	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
+						 &destroy_mark);
+	if (destroy_mark)
+		fsnotify_destroy_mark_locked(fsn_mark, group);
+	mutex_unlock(&group->mark_mutex);
 
-	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
 	fsnotify_put_mark(fsn_mark);
 	if (removed & real_mount(mnt)->mnt_fsnotify_mask)
 		fsnotify_recalc_vfsmount_mask(mnt);
@@ -560,12 +526,21 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group,
 {
 	struct fsnotify_mark *fsn_mark = NULL;
 	__u32 removed;
+	int destroy_mark;
 
+	mutex_lock(&group->mark_mutex);
 	fsn_mark = fsnotify_find_inode_mark(group, inode);
-	if (!fsn_mark)
+	if (!fsn_mark) {
+		mutex_unlock(&group->mark_mutex);
 		return -ENOENT;
+	}
+
+	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,
+						 &destroy_mark);
+	if (destroy_mark)
+		fsnotify_destroy_mark_locked(fsn_mark, group);
+	mutex_unlock(&group->mark_mutex);
 
-	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);
 	/* matches the fsnotify_find_inode_mark() */
 	fsnotify_put_mark(fsn_mark);
 	if (removed & inode->i_fsnotify_mask)
@@ -601,35 +576,55 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,
 	return mask & ~oldmask;
 }
 
+static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group,
+						   struct inode *inode,
+						   struct vfsmount *mnt)
+{
+	struct fsnotify_mark *mark;
+	int ret;
+
+	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
+		return ERR_PTR(-ENOSPC);
+
+	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
+	if (!mark)
+		return ERR_PTR(-ENOMEM);
+
+	fsnotify_init_mark(mark, fanotify_free_mark);
+	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0);
+	if (ret) {
+		fsnotify_put_mark(mark);
+		return ERR_PTR(ret);
+	}
+
+	return mark;
+}
+
+
 static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,
 				      struct vfsmount *mnt, __u32 mask,
 				      unsigned int flags)
 {
 	struct fsnotify_mark *fsn_mark;
 	__u32 added;
-	int ret = 0;
 
+	mutex_lock(&group->mark_mutex);
 	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);
 	if (!fsn_mark) {
-		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
-			return -ENOSPC;
-
-		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
-		if (!fsn_mark)
-			return -ENOMEM;
-
-		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
-		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0);
-		if (ret)
-			goto err;
+		fsn_mark = fanotify_add_new_mark(group, NULL, mnt);
+		if (IS_ERR(fsn_mark)) {
+			mutex_unlock(&group->mark_mutex);
+			return PTR_ERR(fsn_mark);
+		}
 	}
 	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
+	mutex_unlock(&group->mark_mutex);
 	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)
 		fsnotify_recalc_vfsmount_mask(mnt);
-err:
+
 	fsnotify_put_mark(fsn_mark);
-	return ret;
+	return 0;
 }
 
 static int fanotify_add_inode_mark(struct fsnotify_group *group,
@@ -638,7 +633,6 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
 {
 	struct fsnotify_mark *fsn_mark;
 	__u32 added;
-	int ret = 0;
 
 	pr_debug("%s: group=%p inode=%p\n", __func__, group, inode);
 
@@ -652,27 +646,23 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,
 	    (atomic_read(&inode->i_writecount) > 0))
 		return 0;
 
+	mutex_lock(&group->mark_mutex);
 	fsn_mark = fsnotify_find_inode_mark(group, inode);
 	if (!fsn_mark) {
-		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks)
-			return -ENOSPC;
-
-		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL);
-		if (!fsn_mark)
-			return -ENOMEM;
-
-		fsnotify_init_mark(fsn_mark, fanotify_free_mark);
-		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0);
-		if (ret)
-			goto err;
+		fsn_mark = fanotify_add_new_mark(group, inode, NULL);
+		if (IS_ERR(fsn_mark)) {
+			mutex_unlock(&group->mark_mutex);
+			return PTR_ERR(fsn_mark);
+		}
 	}
 	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags);
+	mutex_unlock(&group->mark_mutex);
 	if (added & ~inode->i_fsnotify_mask)
 		fsnotify_recalc_inode_mask(inode);
-err:
+
 	fsnotify_put_mark(fsn_mark);
-	return ret;
+	return 0;
 }
 
 /* fanotify syscalls */
@@ -681,6 +671,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	struct fsnotify_group *group;
 	int f_flags, fd;
 	struct user_struct *user;
+	struct fanotify_event_info *oevent;
 
 	pr_debug("%s: flags=%d event_f_flags=%d\n",
 		 __func__, flags, event_f_flags);
@@ -691,6 +682,18 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	if (flags & ~FAN_ALL_INIT_FLAGS)
 		return -EINVAL;
 
+	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS)
+		return -EINVAL;
+
+	switch (event_f_flags & O_ACCMODE) {
+	case O_RDONLY:
+	case O_RDWR:
+	case O_WRONLY:
+		break;
+	default:
+		return -EINVAL;
+	}
+
 	user = get_current_user();
 	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {
 		free_uid(user);
@@ -713,9 +716,18 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	group->fanotify_data.user = user;
 	atomic_inc(&user->fanotify_listeners);
 
+	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL);
+	if (unlikely(!oevent)) {
+		fd = -ENOMEM;
+		goto out_destroy_group;
+	}
+	group->overflow_event = &oevent->fse;
+
+	if (force_o_largefile())
+		event_f_flags |= O_LARGEFILE;
 	group->fanotify_data.f_flags = event_f_flags;
 #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
-	mutex_init(&group->fanotify_data.access_mutex);
+	spin_lock_init(&group->fanotify_data.access_lock);
 	init_waitqueue_head(&group->fanotify_data.access_waitq);
 	INIT_LIST_HEAD(&group->fanotify_data.access_list);
 	atomic_set(&group->fanotify_data.bypass_perm, 0);
@@ -732,13 +744,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 		break;
 	default:
 		fd = -EINVAL;
-		goto out_put_group;
+		goto out_destroy_group;
 	}
 
 	if (flags & FAN_UNLIMITED_QUEUE) {
 		fd = -EPERM;
 		if (!capable(CAP_SYS_ADMIN))
-			goto out_put_group;
+			goto out_destroy_group;
 		group->max_events = UINT_MAX;
 	} else {
 		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS;
@@ -747,7 +759,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 	if (flags & FAN_UNLIMITED_MARKS) {
 		fd = -EPERM;
 		if (!capable(CAP_SYS_ADMIN))
-			goto out_put_group;
+			goto out_destroy_group;
 		group->fanotify_data.max_marks = UINT_MAX;
 	} else {
 		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS;
@@ -755,25 +767,25 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
 
 	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);
 	if (fd < 0)
-		goto out_put_group;
+		goto out_destroy_group;
 
 	return fd;
 
-out_put_group:
-	fsnotify_put_group(group);
+out_destroy_group:
+	fsnotify_destroy_group(group);
 	return fd;
 }
 
-SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
-			      __u64 mask, int dfd,
-			      const char __user * pathname)
+SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags,
+			      __u64, mask, int, dfd,
+			      const char  __user *, pathname)
 {
 	struct inode *inode = NULL;
 	struct vfsmount *mnt = NULL;
 	struct fsnotify_group *group;
-	struct file *filp;
+	struct fd f;
 	struct path path;
-	int ret, fput_needed;
+	int ret;
 
 	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",
 		 __func__, fanotify_fd, flags, dfd, pathname, mask);
@@ -789,7 +801,10 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
 	case FAN_MARK_REMOVE:
 		if (!mask)
 			return -EINVAL;
+		break;
 	case FAN_MARK_FLUSH:
+		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH))
+			return -EINVAL;
 		break;
 	default:
 		return -EINVAL;
@@ -807,15 +822,15 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
 #endif
 		return -EINVAL;
 
-	filp = fget_light(fanotify_fd, &fput_needed);
-	if (unlikely(!filp))
+	f = fdget(fanotify_fd);
+	if (unlikely(!f.file))
 		return -EBADF;
 
 	/* verify that this is indeed an fanotify instance */
 	ret = -EINVAL;
-	if (unlikely(filp->f_op != &fanotify_fops))
+	if (unlikely(f.file->f_op != &fanotify_fops))
 		goto fput_and_out;
-	group = filp->private_data;
+	group = f.file->private_data;
 
 	/*
 	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF. These are not
@@ -826,6 +841,15 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
 	    group->priority == FS_PRIO_0)
 		goto fput_and_out;
 
+	if (flags & FAN_MARK_FLUSH) {
+		ret = 0;
+		if (flags & FAN_MARK_MOUNT)
+			fsnotify_clear_vfsmount_marks_by_group(group);
+		else
+			fsnotify_clear_inode_marks_by_group(group);
+		goto fput_and_out;
+	}
+
 	ret = fanotify_find_path(dfd, pathname, &path, flags);
 	if (ret)
 		goto fput_and_out;
@@ -837,7 +861,7 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
 		mnt = path.mnt;
 
 	/* create/update an inode mark */
-	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) {
+	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {
 	case FAN_MARK_ADD:
 		if (flags & FAN_MARK_MOUNT)
 			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags);
@@ -850,31 +874,30 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,
 		else
 			ret = fanotify_remove_inode_mark(group, inode, mask, flags);
 		break;
-	case FAN_MARK_FLUSH:
-		if (flags & FAN_MARK_MOUNT)
-			fsnotify_clear_vfsmount_marks_by_group(group);
-		else
-			fsnotify_clear_inode_marks_by_group(group);
-		break;
 	default:
 		ret = -EINVAL;
 	}
 
 	path_put(&path);
 fput_and_out:
-	fput_light(filp, fput_needed);
+	fdput(f);
 	return ret;
 }
 
-#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
-asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask,
-				  long dfd, long pathname)
+#ifdef CONFIG_COMPAT
+COMPAT_SYSCALL_DEFINE6(fanotify_mark,
+				int, fanotify_fd, unsigned int, flags,
+				__u32, mask0, __u32, mask1, int, dfd,
+				const char  __user *, pathname)
 {
-	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags,
-				  mask, (int) dfd,
-				  (const char __user *) pathname);
+	return sys_fanotify_mark(fanotify_fd, flags,
+#ifdef __BIG_ENDIAN
+				((__u64)mask0 << 32) | mask1,
+#else
+				((__u64)mask1 << 32) | mask0,
+#endif
+				dfd, pathname);
 }
-SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
 #endif
 
 /*
@@ -885,8 +908,11 @@ SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);
 static int __init fanotify_user_setup(void)
 {
 	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC);
-	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event,
-						   SLAB_PANIC);
+	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC);
+#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS
+	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info,
+						SLAB_PANIC);
+#endif
 
 	return 0;
 }
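For context, the user-space side of the interface this patch reworks looks roughly as follows. This is an illustrative sketch only, not part of the patch: it uses the documented fanotify API (fanotify_init(2), fanotify_mark(2), struct fanotify_event_metadata, struct fanotify_response) to watch a mount with a FAN_CLASS_CONTENT group and answer FAN_OPEN_PERM events, which exercises the copy_event_to_user() and process_access_response() paths changed above. The watched path "/mnt" is an arbitrary placeholder, and the program needs CAP_SYS_ADMIN.

/* Illustrative fanotify permission listener (not part of the patch). */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/fanotify.h>

int main(void)
{
	/* FAN_CLASS_CONTENT is required to receive permission events. */
	int fd = fanotify_init(FAN_CLOEXEC | FAN_CLASS_CONTENT, O_RDONLY);
	if (fd < 0) {
		perror("fanotify_init");
		return 1;
	}

	/* Watch every object below the mount containing "/mnt" (placeholder path). */
	if (fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
			  FAN_OPEN_PERM, AT_FDCWD, "/mnt") < 0) {
		perror("fanotify_mark");
		return 1;
	}

	for (;;) {
		struct fanotify_event_metadata ev;
		/* copy_event_to_user() fills exactly one event into this buffer. */
		ssize_t len = read(fd, &ev, sizeof(ev));

		if (len < (ssize_t)sizeof(ev))
			continue;
		if (ev.mask & FAN_OPEN_PERM) {
			/* Writing the verdict reaches process_access_response(). */
			struct fanotify_response resp = {
				.fd = ev.fd,
				.response = FAN_ALLOW,
			};
			write(fd, &resp, sizeof(resp));
		}
		if (ev.fd >= 0)
			close(ev.fd);
	}
}

Note that with the FAN_MARK_FLUSH validation added above, a later call such as fanotify_mark(fd, FAN_MARK_FLUSH | FAN_MARK_ONLYDIR, 0, AT_FDCWD, NULL) now fails with EINVAL instead of being silently accepted.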

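The compat wrapper above receives the 64-bit event mask as two 32-bit arguments, mask0 and mask1, and which half carries the high word depends on byte order. A quick stand-alone check of the reassembly logic, mirroring the #else (little-endian) branch; the variable names are illustrative only:

/* Illustrative check of the 64-bit mask reassembly used by the compat wrapper. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint64_t mask = 0x123456789abcdef0ULL;
	uint32_t lo = (uint32_t)mask;          /* e.g. mask0 on little endian */
	uint32_t hi = (uint32_t)(mask >> 32);  /* e.g. mask1 on little endian */

	/* Mirrors ((__u64)mask1 << 32) | mask0 from the patch. */
	assert((((uint64_t)hi << 32) | lo) == mask);
	return 0;
}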