Diffstat (limited to 'fs/notify')
-rw-r--r--  fs/notify/Makefile                    |   2
-rw-r--r--  fs/notify/dnotify/dnotify.c           |  63
-rw-r--r--  fs/notify/fanotify/Kconfig            |   4
-rw-r--r--  fs/notify/fanotify/fanotify.c         | 264
-rw-r--r--  fs/notify/fanotify/fanotify.h         |  50
-rw-r--r--  fs/notify/fanotify/fanotify_user.c    | 531
-rw-r--r--  fs/notify/fdinfo.c                    | 179
-rw-r--r--  fs/notify/fdinfo.h                    |  27
-rw-r--r--  fs/notify/fsnotify.c                  |  65
-rw-r--r--  fs/notify/group.c                     |  56
-rw-r--r--  fs/notify/inode_mark.c                |  82
-rw-r--r--  fs/notify/inotify/inotify.h           |  21
-rw-r--r--  fs/notify/inotify/inotify_fsnotify.c  | 165
-rw-r--r--  fs/notify/inotify/inotify_user.c      | 269
-rw-r--r--  fs/notify/mark.c                      | 146
-rw-r--r--  fs/notify/notification.c              | 364
-rw-r--r--  fs/notify/vfsmount_mark.c             |  47
17 files changed, 1164 insertions, 1171 deletions
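
Most of the churn in this range is in fanotify: events get their own struct fanotify_event_info, permission events move to a dedicated slab cache and are excluded from merging, and the event fd is only installed once the metadata has been copied to userspace. For orientation, a minimal sketch of the userspace listener loop these interfaces serve follows. It is illustrative only: it relies on the long-standing fanotify_init(2)/fanotify_mark(2)/read(2) API, and the watched path ("/tmp"), buffer size, and event mask are arbitrary assumptions, not values taken from this diff.

/* minimal fanotify listener sketch; error handling kept terse */
#include <fcntl.h>		/* AT_FDCWD, O_RDONLY */
#include <stdio.h>
#include <sys/fanotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096] __attribute__((aligned(8)));
	ssize_t len;
	int fan = fanotify_init(FAN_CLASS_NOTIF, O_RDONLY);

	if (fan < 0)
		return 1;

	/* watch the whole mount containing /tmp for opens and closes */
	if (fanotify_mark(fan, FAN_MARK_ADD | FAN_MARK_MOUNT,
			  FAN_OPEN | FAN_CLOSE, AT_FDCWD, "/tmp") < 0)
		return 1;

	while ((len = read(fan, buf, sizeof(buf))) > 0) {
		struct fanotify_event_metadata *md;

		md = (struct fanotify_event_metadata *)buf;
		for (; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
			printf("mask=%llx pid=%d fd=%d\n",
			       (unsigned long long)md->mask,
			       (int)md->pid, md->fd);
			/* each event carries an open fd (FAN_NOFD on queue
			 * overflow); the listener must close it */
			if (md->fd >= 0)
				close(md->fd);
		}
	}
	return 0;
}

The fd printed (and closed) above is the one create_fd() opens and copy_event_to_user() installs in the patched fanotify_user.c; FAN_NOFD is what fill_event_metadata() now reports for queue-overflow events.
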
diff --git a/fs/notify/Makefile b/fs/notify/Makefile index ae5f33a6d86..96d3420d024 100644 --- a/fs/notify/Makefile +++ b/fs/notify/Makefile @@ -1,5 +1,5 @@  obj-$(CONFIG_FSNOTIFY)		+= fsnotify.o notification.o group.o inode_mark.o \ -				   mark.o vfsmount_mark.o +				   mark.o vfsmount_mark.o fdinfo.o  obj-y			+= dnotify/  obj-y			+= inotify/ diff --git a/fs/notify/dnotify/dnotify.c b/fs/notify/dnotify/dnotify.c index 3344bdd5506..abc8cbcfe90 100644 --- a/fs/notify/dnotify/dnotify.c +++ b/fs/notify/dnotify/dnotify.c @@ -31,7 +31,6 @@ int dir_notify_enable __read_mostly = 1;  static struct kmem_cache *dnotify_struct_cache __read_mostly;  static struct kmem_cache *dnotify_mark_cache __read_mostly;  static struct fsnotify_group *dnotify_group __read_mostly; -static DEFINE_MUTEX(dnotify_mark_mutex);  /*   * dnotify will attach one of these to each inode (i_fsnotify_marks) which @@ -83,20 +82,23 @@ static void dnotify_recalc_inode_mask(struct fsnotify_mark *fsn_mark)   * events.   */  static int dnotify_handle_event(struct fsnotify_group *group, +				struct inode *inode,  				struct fsnotify_mark *inode_mark,  				struct fsnotify_mark *vfsmount_mark, -				struct fsnotify_event *event) +				u32 mask, void *data, int data_type, +				const unsigned char *file_name, u32 cookie)  {  	struct dnotify_mark *dn_mark; -	struct inode *to_tell;  	struct dnotify_struct *dn;  	struct dnotify_struct **prev;  	struct fown_struct *fown; -	__u32 test_mask = event->mask & ~FS_EVENT_ON_CHILD; +	__u32 test_mask = mask & ~FS_EVENT_ON_CHILD; -	BUG_ON(vfsmount_mark); +	/* not a dir, dnotify doesn't care */ +	if (!S_ISDIR(inode->i_mode)) +		return 0; -	to_tell = event->to_tell; +	BUG_ON(vfsmount_mark);  	dn_mark = container_of(inode_mark, struct dnotify_mark, fsn_mark); @@ -123,23 +125,6 @@ static int dnotify_handle_event(struct fsnotify_group *group,  	return 0;  } -/* - * Given an inode and mask determine if dnotify would be interested in sending - * userspace notification for that pair. 
- */ -static bool dnotify_should_send_event(struct fsnotify_group *group, -				      struct inode *inode, -				      struct fsnotify_mark *inode_mark, -				      struct fsnotify_mark *vfsmount_mark, -				      __u32 mask, void *data, int data_type) -{ -	/* not a dir, dnotify doesn't care */ -	if (!S_ISDIR(inode->i_mode)) -		return false; - -	return true; -} -  static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)  {  	struct dnotify_mark *dn_mark = container_of(fsn_mark, @@ -153,10 +138,6 @@ static void dnotify_free_mark(struct fsnotify_mark *fsn_mark)  static struct fsnotify_ops dnotify_fsnotify_ops = {  	.handle_event = dnotify_handle_event, -	.should_send_event = dnotify_should_send_event, -	.free_group_priv = NULL, -	.freeing_mark = NULL, -	.free_event_priv = NULL,  };  /* @@ -174,7 +155,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)  	struct dnotify_struct **prev;  	struct inode *inode; -	inode = filp->f_path.dentry->d_inode; +	inode = file_inode(filp);  	if (!S_ISDIR(inode->i_mode))  		return; @@ -183,7 +164,7 @@ void dnotify_flush(struct file *filp, fl_owner_t id)  		return;  	dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark); -	mutex_lock(&dnotify_mark_mutex); +	mutex_lock(&dnotify_group->mark_mutex);  	spin_lock(&fsn_mark->lock);  	prev = &dn_mark->dn; @@ -199,11 +180,12 @@ void dnotify_flush(struct file *filp, fl_owner_t id)  	spin_unlock(&fsn_mark->lock); -	/* nothing else could have found us thanks to the dnotify_mark_mutex */ +	/* nothing else could have found us thanks to the dnotify_groups +	   mark_mutex */  	if (dn_mark->dn == NULL) -		fsnotify_destroy_mark(fsn_mark); +		fsnotify_destroy_mark_locked(fsn_mark, dnotify_group); -	mutex_unlock(&dnotify_mark_mutex); +	mutex_unlock(&dnotify_group->mark_mutex);  	fsnotify_put_mark(fsn_mark);  } @@ -296,7 +278,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)  	}  	/* dnotify only works on directories */ -	inode = filp->f_path.dentry->d_inode; +	inode = file_inode(filp);  	if (!S_ISDIR(inode->i_mode)) {  		error = -ENOTDIR;  		goto out_err; @@ -326,7 +308,7 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)  	new_dn_mark->dn = NULL;  	/* this is needed to prevent the fcntl/close race described below */ -	mutex_lock(&dnotify_mark_mutex); +	mutex_lock(&dnotify_group->mark_mutex);  	/* add the new_fsn_mark or find an old one. */  	fsn_mark = fsnotify_find_inode_mark(dnotify_group, inode); @@ -334,7 +316,8 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)  		dn_mark = container_of(fsn_mark, struct dnotify_mark, fsn_mark);  		spin_lock(&fsn_mark->lock);  	} else { -		fsnotify_add_mark(new_fsn_mark, dnotify_group, inode, NULL, 0); +		fsnotify_add_mark_locked(new_fsn_mark, dnotify_group, inode, +					 NULL, 0);  		spin_lock(&new_fsn_mark->lock);  		fsn_mark = new_fsn_mark;  		dn_mark = new_dn_mark; @@ -348,9 +331,9 @@ int fcntl_dirnotify(int fd, struct file *filp, unsigned long arg)  	/* if (f != filp) means that we lost a race and another task/thread  	 * actually closed the fd we are still playing with before we grabbed -	 * the dnotify_mark_mutex and fsn_mark->lock.  Since closing the fd is the -	 * only time we clean up the marks we need to get our mark off -	 * the list. */ +	 * the dnotify_groups mark_mutex and fsn_mark->lock.  Since closing the +	 * fd is the only time we clean up the marks we need to get our mark +	 * off the list. 
*/  	if (f != filp) {  		/* if we added ourselves, shoot ourselves, it's possible that  		 * the flush actually did shoot this fsn_mark.  That's fine too @@ -385,9 +368,9 @@ out:  	spin_unlock(&fsn_mark->lock);  	if (destroy) -		fsnotify_destroy_mark(fsn_mark); +		fsnotify_destroy_mark_locked(fsn_mark, dnotify_group); -	mutex_unlock(&dnotify_mark_mutex); +	mutex_unlock(&dnotify_group->mark_mutex);  	fsnotify_put_mark(fsn_mark);  out_err:  	if (new_fsn_mark) diff --git a/fs/notify/fanotify/Kconfig b/fs/notify/fanotify/Kconfig index 3ac36b7bf6b..e5f911bd80d 100644 --- a/fs/notify/fanotify/Kconfig +++ b/fs/notify/fanotify/Kconfig @@ -4,9 +4,9 @@ config FANOTIFY  	select ANON_INODES  	default n  	---help--- -	   Say Y here to enable fanotify suport.  fanotify is a file access +	   Say Y here to enable fanotify support.  fanotify is a file access  	   notification system which differs from inotify in that it sends -	   and open file descriptor to the userspace listener along with +	   an open file descriptor to the userspace listener along with  	   the event.  	   If unsure, say Y. diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index b04f88eed09..ee9cb3795c2 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -9,93 +9,71 @@  #include <linux/types.h>  #include <linux/wait.h> -static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new) +#include "fanotify.h" + +static bool should_merge(struct fsnotify_event *old_fsn, +			 struct fsnotify_event *new_fsn)  { -	pr_debug("%s: old=%p new=%p\n", __func__, old, new); - -	if (old->to_tell == new->to_tell && -	    old->data_type == new->data_type && -	    old->tgid == new->tgid) { -		switch (old->data_type) { -		case (FSNOTIFY_EVENT_PATH): -			if ((old->path.mnt == new->path.mnt) && -			    (old->path.dentry == new->path.dentry)) -				return true; -		case (FSNOTIFY_EVENT_NONE): -			return true; -		default: -			BUG(); -		}; -	} +	struct fanotify_event_info *old, *new; + +	pr_debug("%s: old=%p new=%p\n", __func__, old_fsn, new_fsn); +	old = FANOTIFY_E(old_fsn); +	new = FANOTIFY_E(new_fsn); + +	if (old_fsn->inode == new_fsn->inode && old->tgid == new->tgid && +	    old->path.mnt == new->path.mnt && +	    old->path.dentry == new->path.dentry) +		return true;  	return false;  }  /* and the list better be locked by something too! */ -static struct fsnotify_event *fanotify_merge(struct list_head *list, -					     struct fsnotify_event *event) +static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)  { -	struct fsnotify_event_holder *test_holder; -	struct fsnotify_event *test_event = NULL; -	struct fsnotify_event *new_event; +	struct fsnotify_event *test_event; +	bool do_merge = false;  	pr_debug("%s: list=%p event=%p\n", __func__, list, event); +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +	/* +	 * Don't merge a permission event with any other event so that we know +	 * the event structure we have created in fanotify_handle_event() is the +	 * one we should check for permission response. 
+	 */ +	if (event->mask & FAN_ALL_PERM_EVENTS) +		return 0; +#endif -	list_for_each_entry_reverse(test_holder, list, event_list) { -		if (should_merge(test_holder->event, event)) { -			test_event = test_holder->event; +	list_for_each_entry_reverse(test_event, list, list) { +		if (should_merge(test_event, event)) { +			do_merge = true;  			break;  		}  	} -	if (!test_event) -		return NULL; - -	fsnotify_get_event(test_event); - -	/* if they are exactly the same we are done */ -	if (test_event->mask == event->mask) -		return test_event; - -	/* -	 * if the refcnt == 2 this is the only queue -	 * for this event and so we can update the mask -	 * in place. -	 */ -	if (atomic_read(&test_event->refcnt) == 2) { -		test_event->mask |= event->mask; -		return test_event; -	} - -	new_event = fsnotify_clone_event(test_event); - -	/* done with test_event */ -	fsnotify_put_event(test_event); +	if (!do_merge) +		return 0; -	/* couldn't allocate memory, merge was not possible */ -	if (unlikely(!new_event)) -		return ERR_PTR(-ENOMEM); - -	/* build new event and replace it on the list */ -	new_event->mask = (test_event->mask | event->mask); -	fsnotify_replace_event(test_holder, new_event); - -	/* we hold a reference on new_event from clone_event */ -	return new_event; +	test_event->mask |= event->mask; +	return 1;  }  #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS -static int fanotify_get_response_from_access(struct fsnotify_group *group, -					     struct fsnotify_event *event) +static int fanotify_get_response(struct fsnotify_group *group, +				 struct fanotify_perm_event_info *event)  {  	int ret;  	pr_debug("%s: group=%p event=%p\n", __func__, group, event); -	wait_event(group->fanotify_data.access_waitq, event->response); +	wait_event(group->fanotify_data.access_waitq, event->response || +				atomic_read(&group->fanotify_data.bypass_perm)); + +	if (!event->response) /* bypass_perm set */ +		return 0;  	/* userspace responded, convert to something usable */ -	spin_lock(&event->lock);  	switch (event->response) {  	case FAN_ALLOW:  		ret = 0; @@ -105,7 +83,6 @@ static int fanotify_get_response_from_access(struct fsnotify_group *group,  		ret = -EPERM;  	}  	event->response = 0; -	spin_unlock(&event->lock);  	pr_debug("%s: group=%p event=%p about to return ret=%d\n", __func__,  		 group, event, ret); @@ -114,58 +91,17 @@ static int fanotify_get_response_from_access(struct fsnotify_group *group,  }  #endif -static int fanotify_handle_event(struct fsnotify_group *group, -				 struct fsnotify_mark *inode_mark, -				 struct fsnotify_mark *fanotify_mark, -				 struct fsnotify_event *event) -{ -	int ret = 0; -	struct fsnotify_event *notify_event = NULL; - -	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS); -	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY); -	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE); -	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE); -	BUILD_BUG_ON(FAN_OPEN != FS_OPEN); -	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD); -	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW); -	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM); -	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM); -	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR); - -	pr_debug("%s: group=%p event=%p\n", __func__, group, event); - -	notify_event = fsnotify_add_notify_event(group, event, NULL, fanotify_merge); -	if (IS_ERR(notify_event)) -		return PTR_ERR(notify_event); - -#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS -	if (event->mask & FAN_ALL_PERM_EVENTS) { -		/* if we merged we need to wait on the new event */ -		if (notify_event) -			event = notify_event; -		ret = 
fanotify_get_response_from_access(group, event); -	} -#endif - -	if (notify_event) -		fsnotify_put_event(notify_event); - -	return ret; -} - -static bool fanotify_should_send_event(struct fsnotify_group *group, -				       struct inode *to_tell, -				       struct fsnotify_mark *inode_mark, +static bool fanotify_should_send_event(struct fsnotify_mark *inode_mark,  				       struct fsnotify_mark *vfsmnt_mark, -				       __u32 event_mask, void *data, int data_type) +				       u32 event_mask, +				       void *data, int data_type)  {  	__u32 marks_mask, marks_ignored_mask;  	struct path *path = data; -	pr_debug("%s: group=%p to_tell=%p inode_mark=%p vfsmnt_mark=%p " -		 "mask=%x data=%p data_type=%d\n", __func__, group, to_tell, -		 inode_mark, vfsmnt_mark, event_mask, data, data_type); +	pr_debug("%s: inode_mark=%p vfsmnt_mark=%p mask=%x data=%p" +		 " data_type=%d\n", __func__, inode_mark, vfsmnt_mark, +		 event_mask, data, data_type);  	/* if we don't have enough info to send an event to userspace say no */  	if (data_type != FSNOTIFY_EVENT_PATH) @@ -206,6 +142,93 @@ static bool fanotify_should_send_event(struct fsnotify_group *group,  	return false;  } +struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask, +						 struct path *path) +{ +	struct fanotify_event_info *event; + +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +	if (mask & FAN_ALL_PERM_EVENTS) { +		struct fanotify_perm_event_info *pevent; + +		pevent = kmem_cache_alloc(fanotify_perm_event_cachep, +					  GFP_KERNEL); +		if (!pevent) +			return NULL; +		event = &pevent->fae; +		pevent->response = 0; +		goto init; +	} +#endif +	event = kmem_cache_alloc(fanotify_event_cachep, GFP_KERNEL); +	if (!event) +		return NULL; +init: __maybe_unused +	fsnotify_init_event(&event->fse, inode, mask); +	event->tgid = get_pid(task_tgid(current)); +	if (path) { +		event->path = *path; +		path_get(&event->path); +	} else { +		event->path.mnt = NULL; +		event->path.dentry = NULL; +	} +	return event; +} + +static int fanotify_handle_event(struct fsnotify_group *group, +				 struct inode *inode, +				 struct fsnotify_mark *inode_mark, +				 struct fsnotify_mark *fanotify_mark, +				 u32 mask, void *data, int data_type, +				 const unsigned char *file_name, u32 cookie) +{ +	int ret = 0; +	struct fanotify_event_info *event; +	struct fsnotify_event *fsn_event; + +	BUILD_BUG_ON(FAN_ACCESS != FS_ACCESS); +	BUILD_BUG_ON(FAN_MODIFY != FS_MODIFY); +	BUILD_BUG_ON(FAN_CLOSE_NOWRITE != FS_CLOSE_NOWRITE); +	BUILD_BUG_ON(FAN_CLOSE_WRITE != FS_CLOSE_WRITE); +	BUILD_BUG_ON(FAN_OPEN != FS_OPEN); +	BUILD_BUG_ON(FAN_EVENT_ON_CHILD != FS_EVENT_ON_CHILD); +	BUILD_BUG_ON(FAN_Q_OVERFLOW != FS_Q_OVERFLOW); +	BUILD_BUG_ON(FAN_OPEN_PERM != FS_OPEN_PERM); +	BUILD_BUG_ON(FAN_ACCESS_PERM != FS_ACCESS_PERM); +	BUILD_BUG_ON(FAN_ONDIR != FS_ISDIR); + +	if (!fanotify_should_send_event(inode_mark, fanotify_mark, mask, data, +					data_type)) +		return 0; + +	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, +		 mask); + +	event = fanotify_alloc_event(inode, mask, data); +	if (unlikely(!event)) +		return -ENOMEM; + +	fsn_event = &event->fse; +	ret = fsnotify_add_notify_event(group, fsn_event, fanotify_merge); +	if (ret) { +		/* Permission events shouldn't be merged */ +		BUG_ON(ret == 1 && mask & FAN_ALL_PERM_EVENTS); +		/* Our event wasn't used in the end. Free it. 
*/ +		fsnotify_destroy_event(group, fsn_event); + +		return 0; +	} + +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +	if (mask & FAN_ALL_PERM_EVENTS) { +		ret = fanotify_get_response(group, FANOTIFY_PE(fsn_event)); +		fsnotify_destroy_event(group, fsn_event); +	} +#endif +	return ret; +} +  static void fanotify_free_group_priv(struct fsnotify_group *group)  {  	struct user_struct *user; @@ -215,10 +238,25 @@ static void fanotify_free_group_priv(struct fsnotify_group *group)  	free_uid(user);  } +static void fanotify_free_event(struct fsnotify_event *fsn_event) +{ +	struct fanotify_event_info *event; + +	event = FANOTIFY_E(fsn_event); +	path_put(&event->path); +	put_pid(event->tgid); +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +	if (fsn_event->mask & FAN_ALL_PERM_EVENTS) { +		kmem_cache_free(fanotify_perm_event_cachep, +				FANOTIFY_PE(fsn_event)); +		return; +	} +#endif +	kmem_cache_free(fanotify_event_cachep, event); +} +  const struct fsnotify_ops fanotify_fsnotify_ops = {  	.handle_event = fanotify_handle_event, -	.should_send_event = fanotify_should_send_event,  	.free_group_priv = fanotify_free_group_priv, -	.free_event_priv = NULL, -	.freeing_mark = NULL, +	.free_event = fanotify_free_event,  }; diff --git a/fs/notify/fanotify/fanotify.h b/fs/notify/fanotify/fanotify.h new file mode 100644 index 00000000000..2a5fb14115d --- /dev/null +++ b/fs/notify/fanotify/fanotify.h @@ -0,0 +1,50 @@ +#include <linux/fsnotify_backend.h> +#include <linux/path.h> +#include <linux/slab.h> + +extern struct kmem_cache *fanotify_event_cachep; +extern struct kmem_cache *fanotify_perm_event_cachep; + +/* + * Structure for normal fanotify events. It gets allocated in + * fanotify_handle_event() and freed when the information is retrieved by + * userspace + */ +struct fanotify_event_info { +	struct fsnotify_event fse; +	/* +	 * We hold ref to this path so it may be dereferenced at any point +	 * during this object's lifetime +	 */ +	struct path path; +	struct pid *tgid; +}; + +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +/* + * Structure for permission fanotify events. It gets allocated and freed in + * fanotify_handle_event() since we wait there for user response. When the + * information is retrieved by userspace the structure is moved from + * group->notification_list to group->fanotify_data.access_list to wait for + * user response. 
+ */ +struct fanotify_perm_event_info { +	struct fanotify_event_info fae; +	int response;	/* userspace answer to question */ +	int fd;		/* fd we passed to userspace for this event */ +}; + +static inline struct fanotify_perm_event_info * +FANOTIFY_PE(struct fsnotify_event *fse) +{ +	return container_of(fse, struct fanotify_perm_event_info, fae.fse); +} +#endif + +static inline struct fanotify_event_info *FANOTIFY_E(struct fsnotify_event *fse) +{ +	return container_of(fse, struct fanotify_event_info, fse); +} + +struct fanotify_event_info *fanotify_alloc_event(struct inode *inode, u32 mask, +						 struct path *path); diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 063224812b7..3fdc8a3e113 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -13,23 +13,36 @@  #include <linux/slab.h>  #include <linux/types.h>  #include <linux/uaccess.h> +#include <linux/compat.h>  #include <asm/ioctls.h> +#include "../../mount.h" +#include "../fdinfo.h" +#include "fanotify.h" +  #define FANOTIFY_DEFAULT_MAX_EVENTS	16384  #define FANOTIFY_DEFAULT_MAX_MARKS	8192  #define FANOTIFY_DEFAULT_MAX_LISTENERS	128 +/* + * All flags that may be specified in parameter event_f_flags of fanotify_init. + * + * Internal and external open flags are stored together in field f_flags of + * struct file. Only external open flags shall be allowed in event_f_flags. + * Internal flags like FMODE_NONOTIFY, FMODE_EXEC, FMODE_NOCMTIME shall be + * excluded. + */ +#define	FANOTIFY_INIT_ALL_EVENT_F_BITS				( \ +		O_ACCMODE	| O_APPEND	| O_NONBLOCK	| \ +		__O_SYNC	| O_DSYNC	| O_CLOEXEC     | \ +		O_LARGEFILE	| O_NOATIME	) +  extern const struct fsnotify_ops fanotify_fsnotify_ops;  static struct kmem_cache *fanotify_mark_cache __read_mostly; -static struct kmem_cache *fanotify_response_event_cache __read_mostly; - -struct fanotify_response_event { -	struct list_head list; -	__s32 fd; -	struct fsnotify_event *event; -}; +struct kmem_cache *fanotify_event_cachep __read_mostly; +struct kmem_cache *fanotify_perm_event_cachep __read_mostly;  /*   * Get an fsnotify notification event if one exists and is small @@ -56,11 +69,11 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,  	return fsnotify_remove_notify_event(group);  } -static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event) +static int create_fd(struct fsnotify_group *group, +		     struct fanotify_event_info *event, +		     struct file **file)  {  	int client_fd; -	struct dentry *dentry; -	struct vfsmount *mnt;  	struct file *new_file;  	pr_debug("%s: group=%p event=%p\n", __func__, group, event); @@ -69,22 +82,14 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)  	if (client_fd < 0)  		return client_fd; -	if (event->data_type != FSNOTIFY_EVENT_PATH) { -		WARN_ON(1); -		put_unused_fd(client_fd); -		return -EINVAL; -	} -  	/*  	 * we need a new file handle for the userspace program so it can read even if it was  	 * originally opened O_WRONLY.  	 */ -	dentry = dget(event->path.dentry); -	mnt = mntget(event->path.mnt);  	/* it's possible this event was an overflow event.  
in that case dentry and mnt  	 * are NULL;  That's fine, just don't call dentry open */ -	if (dentry && mnt) -		new_file = dentry_open(dentry, mnt, +	if (event->path.dentry && event->path.mnt) +		new_file = dentry_open(&event->path,  				       group->fanotify_data.f_flags | FMODE_NONOTIFY,  				       current_cred());  	else @@ -100,62 +105,77 @@ static int create_fd(struct fsnotify_group *group, struct fsnotify_event *event)  		put_unused_fd(client_fd);  		client_fd = PTR_ERR(new_file);  	} else { -		fd_install(client_fd, new_file); +		*file = new_file;  	}  	return client_fd;  } -static ssize_t fill_event_metadata(struct fsnotify_group *group, -				   struct fanotify_event_metadata *metadata, -				   struct fsnotify_event *event) +static int fill_event_metadata(struct fsnotify_group *group, +			       struct fanotify_event_metadata *metadata, +			       struct fsnotify_event *fsn_event, +			       struct file **file)  { +	int ret = 0; +	struct fanotify_event_info *event; +  	pr_debug("%s: group=%p metadata=%p event=%p\n", __func__, -		 group, metadata, event); +		 group, metadata, fsn_event); +	*file = NULL; +	event = container_of(fsn_event, struct fanotify_event_info, fse);  	metadata->event_len = FAN_EVENT_METADATA_LEN; +	metadata->metadata_len = FAN_EVENT_METADATA_LEN;  	metadata->vers = FANOTIFY_METADATA_VERSION; -	metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; +	metadata->reserved = 0; +	metadata->mask = fsn_event->mask & FAN_ALL_OUTGOING_EVENTS;  	metadata->pid = pid_vnr(event->tgid); -	metadata->fd = create_fd(group, event); +	if (unlikely(fsn_event->mask & FAN_Q_OVERFLOW)) +		metadata->fd = FAN_NOFD; +	else { +		metadata->fd = create_fd(group, event, file); +		if (metadata->fd < 0) +			ret = metadata->fd; +	} -	return metadata->fd; +	return ret;  }  #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS -static struct fanotify_response_event *dequeue_re(struct fsnotify_group *group, -						  __s32 fd) +static struct fanotify_perm_event_info *dequeue_event( +				struct fsnotify_group *group, int fd)  { -	struct fanotify_response_event *re, *return_re = NULL; +	struct fanotify_perm_event_info *event, *return_e = NULL; -	mutex_lock(&group->fanotify_data.access_mutex); -	list_for_each_entry(re, &group->fanotify_data.access_list, list) { -		if (re->fd != fd) +	spin_lock(&group->fanotify_data.access_lock); +	list_for_each_entry(event, &group->fanotify_data.access_list, +			    fae.fse.list) { +		if (event->fd != fd)  			continue; -		list_del_init(&re->list); -		return_re = re; +		list_del_init(&event->fae.fse.list); +		return_e = event;  		break;  	} -	mutex_unlock(&group->fanotify_data.access_mutex); +	spin_unlock(&group->fanotify_data.access_lock); -	pr_debug("%s: found return_re=%p\n", __func__, return_re); +	pr_debug("%s: found return_re=%p\n", __func__, return_e); -	return return_re; +	return return_e;  }  static int process_access_response(struct fsnotify_group *group,  				   struct fanotify_response *response_struct)  { -	struct fanotify_response_event *re; -	__s32 fd = response_struct->fd; -	__u32 response = response_struct->response; +	struct fanotify_perm_event_info *event; +	int fd = response_struct->fd; +	int response = response_struct->response;  	pr_debug("%s: group=%p fd=%d response=%d\n", __func__, group,  		 fd, response);  	/*  	 * make sure the response is valid, if invalid we do nothing and either -	 * userspace can send a valid responce or we will clean it up after the +	 * userspace can send a valid response or we will clean it up after the  	 * timeout  	 */  	
switch (response) { @@ -169,83 +189,15 @@ static int process_access_response(struct fsnotify_group *group,  	if (fd < 0)  		return -EINVAL; -	re = dequeue_re(group, fd); -	if (!re) +	event = dequeue_event(group, fd); +	if (!event)  		return -ENOENT; -	re->event->response = response; - +	event->response = response;  	wake_up(&group->fanotify_data.access_waitq); -	kmem_cache_free(fanotify_response_event_cache, re); -  	return 0;  } - -static int prepare_for_access_response(struct fsnotify_group *group, -				       struct fsnotify_event *event, -				       __s32 fd) -{ -	struct fanotify_response_event *re; - -	if (!(event->mask & FAN_ALL_PERM_EVENTS)) -		return 0; - -	re = kmem_cache_alloc(fanotify_response_event_cache, GFP_KERNEL); -	if (!re) -		return -ENOMEM; - -	re->event = event; -	re->fd = fd; - -	mutex_lock(&group->fanotify_data.access_mutex); - -	if (group->fanotify_data.bypass_perm) { -		mutex_unlock(&group->fanotify_data.access_mutex); -		kmem_cache_free(fanotify_response_event_cache, re); -		event->response = FAN_ALLOW; -		return 0; -	} -		 -	list_add_tail(&re->list, &group->fanotify_data.access_list); -	mutex_unlock(&group->fanotify_data.access_mutex); - -	return 0; -} - -static void remove_access_response(struct fsnotify_group *group, -				   struct fsnotify_event *event, -				   __s32 fd) -{ -	struct fanotify_response_event *re; - -	if (!(event->mask & FAN_ALL_PERM_EVENTS)) -		return; - -	re = dequeue_re(group, fd); -	if (!re) -		return; - -	BUG_ON(re->event != event); - -	kmem_cache_free(fanotify_response_event_cache, re); - -	return; -} -#else -static int prepare_for_access_response(struct fsnotify_group *group, -				       struct fsnotify_event *event, -				       __s32 fd) -{ -	return 0; -} - -static void remove_access_response(struct fsnotify_group *group, -				   struct fsnotify_event *event, -				   __s32 fd) -{ -	return; -}  #endif  static ssize_t copy_event_to_user(struct fsnotify_group *group, @@ -253,28 +205,35 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,  				  char __user *buf)  {  	struct fanotify_event_metadata fanotify_event_metadata; +	struct file *f;  	int fd, ret;  	pr_debug("%s: group=%p event=%p\n", __func__, group, event); -	fd = fill_event_metadata(group, &fanotify_event_metadata, event); -	if (fd < 0) -		return fd; +	ret = fill_event_metadata(group, &fanotify_event_metadata, event, &f); +	if (ret < 0) +		return ret; -	ret = prepare_for_access_response(group, event, fd); -	if (ret) +	fd = fanotify_event_metadata.fd; +	ret = -EFAULT; +	if (copy_to_user(buf, &fanotify_event_metadata, +			 fanotify_event_metadata.event_len))  		goto out_close_fd; -	ret = -EFAULT; -	if (copy_to_user(buf, &fanotify_event_metadata, FAN_EVENT_METADATA_LEN)) -		goto out_kill_access_response; +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +	if (event->mask & FAN_ALL_PERM_EVENTS) +		FANOTIFY_PE(event)->fd = fd; +#endif -	return FAN_EVENT_METADATA_LEN; +	if (fd != FAN_NOFD) +		fd_install(fd, f); +	return fanotify_event_metadata.event_len; -out_kill_access_response: -	remove_access_response(group, event, fd);  out_close_fd: -	sys_close(fd); +	if (fd != FAN_NOFD) { +		put_unused_fd(fd); +		fput(f); +	}  	return ret;  } @@ -314,30 +273,50 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,  		kevent = get_one_event(group, count);  		mutex_unlock(&group->notification_mutex); -		if (kevent) { +		if (IS_ERR(kevent)) {  			ret = PTR_ERR(kevent); -			if (IS_ERR(kevent)) +			break; +		} + +		if (!kevent) { +			ret = -EAGAIN; +			if (file->f_flags & 
O_NONBLOCK)  				break; -			ret = copy_event_to_user(group, kevent, buf); -			fsnotify_put_event(kevent); -			if (ret < 0) + +			ret = -ERESTARTSYS; +			if (signal_pending(current)) +				break; + +			if (start != buf)  				break; -			buf += ret; -			count -= ret; +			schedule();  			continue;  		} -		ret = -EAGAIN; -		if (file->f_flags & O_NONBLOCK) -			break; -		ret = -ERESTARTSYS; -		if (signal_pending(current)) -			break; - -		if (start != buf) -			break; - -		schedule(); +		ret = copy_event_to_user(group, kevent, buf); +		/* +		 * Permission events get queued to wait for response.  Other +		 * events can be destroyed now. +		 */ +		if (!(kevent->mask & FAN_ALL_PERM_EVENTS)) { +			fsnotify_destroy_event(group, kevent); +			if (ret < 0) +				break; +		} else { +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +			if (ret < 0) { +				FANOTIFY_PE(kevent)->response = FAN_DENY; +				wake_up(&group->fanotify_data.access_waitq); +				break; +			} +			spin_lock(&group->fanotify_data.access_lock); +			list_add_tail(&kevent->list, +				      &group->fanotify_data.access_list); +			spin_unlock(&group->fanotify_data.access_lock); +#endif +		} +		buf += ret; +		count -= ret;  	}  	finish_wait(&group->notification_waitq, &wait); @@ -378,27 +357,27 @@ static int fanotify_release(struct inode *ignored, struct file *file)  	struct fsnotify_group *group = file->private_data;  #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS -	struct fanotify_response_event *re, *lre; +	struct fanotify_perm_event_info *event, *next; -	mutex_lock(&group->fanotify_data.access_mutex); +	spin_lock(&group->fanotify_data.access_lock); -	group->fanotify_data.bypass_perm = true; +	atomic_inc(&group->fanotify_data.bypass_perm); -	list_for_each_entry_safe(re, lre, &group->fanotify_data.access_list, list) { -		pr_debug("%s: found group=%p re=%p event=%p\n", __func__, group, -			 re, re->event); +	list_for_each_entry_safe(event, next, &group->fanotify_data.access_list, +				 fae.fse.list) { +		pr_debug("%s: found group=%p event=%p\n", __func__, group, +			 event); -		list_del_init(&re->list); -		re->event->response = FAN_ALLOW; - -		kmem_cache_free(fanotify_response_event_cache, re); +		list_del_init(&event->fae.fse.list); +		event->response = FAN_ALLOW;  	} -	mutex_unlock(&group->fanotify_data.access_mutex); +	spin_unlock(&group->fanotify_data.access_lock);  	wake_up(&group->fanotify_data.access_waitq);  #endif +  	/* matches the fanotify_init->fsnotify_alloc_group */ -	fsnotify_put_group(group); +	fsnotify_destroy_group(group);  	return 0;  } @@ -406,7 +385,7 @@ static int fanotify_release(struct inode *ignored, struct file *file)  static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long arg)  {  	struct fsnotify_group *group; -	struct fsnotify_event_holder *holder; +	struct fsnotify_event *fsn_event;  	void __user *p;  	int ret = -ENOTTY;  	size_t send_len = 0; @@ -418,7 +397,7 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar  	switch (cmd) {  	case FIONREAD:  		mutex_lock(&group->notification_mutex); -		list_for_each_entry(holder, &group->notification_list, event_list) +		list_for_each_entry(fsn_event, &group->notification_list, list)  			send_len += FAN_EVENT_METADATA_LEN;  		mutex_unlock(&group->notification_mutex);  		ret = put_user(send_len, (int __user *) p); @@ -429,6 +408,7 @@ static long fanotify_ioctl(struct file *file, unsigned int cmd, unsigned long ar  }  static const struct file_operations fanotify_fops = { +	.show_fdinfo	= fanotify_show_fdinfo,  	.poll		= fanotify_poll,  	
.read		= fanotify_read,  	.write		= fanotify_write, @@ -453,24 +433,22 @@ static int fanotify_find_path(int dfd, const char __user *filename,  		 dfd, filename, flags);  	if (filename == NULL) { -		struct file *file; -		int fput_needed; +		struct fd f = fdget(dfd);  		ret = -EBADF; -		file = fget_light(dfd, &fput_needed); -		if (!file) +		if (!f.file)  			goto out;  		ret = -ENOTDIR;  		if ((flags & FAN_MARK_ONLYDIR) && -		    !(S_ISDIR(file->f_path.dentry->d_inode->i_mode))) { -			fput_light(file, fput_needed); +		    !(S_ISDIR(file_inode(f.file)->i_mode))) { +			fdput(f);  			goto out;  		} -		*path = file->f_path; +		*path = f.file->f_path;  		path_get(path); -		fput_light(file, fput_needed); +		fdput(f);  	} else {  		unsigned int lookup_flags = 0; @@ -494,7 +472,8 @@ out:  static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,  					    __u32 mask, -					    unsigned int flags) +					    unsigned int flags, +					    int *destroy)  {  	__u32 oldmask; @@ -508,8 +487,7 @@ static __u32 fanotify_mark_remove_from_mask(struct fsnotify_mark *fsn_mark,  	}  	spin_unlock(&fsn_mark->lock); -	if (!(oldmask & ~mask)) -		fsnotify_destroy_mark(fsn_mark); +	*destroy = !(oldmask & ~mask);  	return mask & oldmask;  } @@ -520,14 +498,23 @@ static int fanotify_remove_vfsmount_mark(struct fsnotify_group *group,  {  	struct fsnotify_mark *fsn_mark = NULL;  	__u32 removed; +	int destroy_mark; +	mutex_lock(&group->mark_mutex);  	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt); -	if (!fsn_mark) +	if (!fsn_mark) { +		mutex_unlock(&group->mark_mutex);  		return -ENOENT; +	} + +	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, +						 &destroy_mark); +	if (destroy_mark) +		fsnotify_destroy_mark_locked(fsn_mark, group); +	mutex_unlock(&group->mark_mutex); -	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);  	fsnotify_put_mark(fsn_mark); -	if (removed & mnt->mnt_fsnotify_mask) +	if (removed & real_mount(mnt)->mnt_fsnotify_mask)  		fsnotify_recalc_vfsmount_mask(mnt);  	return 0; @@ -539,12 +526,21 @@ static int fanotify_remove_inode_mark(struct fsnotify_group *group,  {  	struct fsnotify_mark *fsn_mark = NULL;  	__u32 removed; +	int destroy_mark; +	mutex_lock(&group->mark_mutex);  	fsn_mark = fsnotify_find_inode_mark(group, inode); -	if (!fsn_mark) +	if (!fsn_mark) { +		mutex_unlock(&group->mark_mutex);  		return -ENOENT; +	} + +	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags, +						 &destroy_mark); +	if (destroy_mark) +		fsnotify_destroy_mark_locked(fsn_mark, group); +	mutex_unlock(&group->mark_mutex); -	removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags);  	/* matches the fsnotify_find_inode_mark() */  	fsnotify_put_mark(fsn_mark);  	if (removed & inode->i_fsnotify_mask) @@ -580,6 +576,31 @@ static __u32 fanotify_mark_add_to_mask(struct fsnotify_mark *fsn_mark,  	return mask & ~oldmask;  } +static struct fsnotify_mark *fanotify_add_new_mark(struct fsnotify_group *group, +						   struct inode *inode, +						   struct vfsmount *mnt) +{ +	struct fsnotify_mark *mark; +	int ret; + +	if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) +		return ERR_PTR(-ENOSPC); + +	mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); +	if (!mark) +		return ERR_PTR(-ENOMEM); + +	fsnotify_init_mark(mark, fanotify_free_mark); +	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, 0); +	if (ret) { +		fsnotify_put_mark(mark); +		return ERR_PTR(ret); +	} + +	return mark; +} + +  static int fanotify_add_vfsmount_mark(struct 
fsnotify_group *group,  				      struct vfsmount *mnt, __u32 mask,  				      unsigned int flags) @@ -587,29 +608,22 @@ static int fanotify_add_vfsmount_mark(struct fsnotify_group *group,  	struct fsnotify_mark *fsn_mark;  	__u32 added; +	mutex_lock(&group->mark_mutex);  	fsn_mark = fsnotify_find_vfsmount_mark(group, mnt);  	if (!fsn_mark) { -		int ret; - -		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) -			return -ENOSPC; - -		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); -		if (!fsn_mark) -			return -ENOMEM; - -		fsnotify_init_mark(fsn_mark, fanotify_free_mark); -		ret = fsnotify_add_mark(fsn_mark, group, NULL, mnt, 0); -		if (ret) { -			fanotify_free_mark(fsn_mark); -			return ret; +		fsn_mark = fanotify_add_new_mark(group, NULL, mnt); +		if (IS_ERR(fsn_mark)) { +			mutex_unlock(&group->mark_mutex); +			return PTR_ERR(fsn_mark);  		}  	}  	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); -	fsnotify_put_mark(fsn_mark); -	if (added & ~mnt->mnt_fsnotify_mask) +	mutex_unlock(&group->mark_mutex); + +	if (added & ~real_mount(mnt)->mnt_fsnotify_mask)  		fsnotify_recalc_vfsmount_mask(mnt); +	fsnotify_put_mark(fsn_mark);  	return 0;  } @@ -632,28 +646,22 @@ static int fanotify_add_inode_mark(struct fsnotify_group *group,  	    (atomic_read(&inode->i_writecount) > 0))  		return 0; +	mutex_lock(&group->mark_mutex);  	fsn_mark = fsnotify_find_inode_mark(group, inode);  	if (!fsn_mark) { -		int ret; - -		if (atomic_read(&group->num_marks) > group->fanotify_data.max_marks) -			return -ENOSPC; - -		fsn_mark = kmem_cache_alloc(fanotify_mark_cache, GFP_KERNEL); -		if (!fsn_mark) -			return -ENOMEM; - -		fsnotify_init_mark(fsn_mark, fanotify_free_mark); -		ret = fsnotify_add_mark(fsn_mark, group, inode, NULL, 0); -		if (ret) { -			fanotify_free_mark(fsn_mark); -			return ret; +		fsn_mark = fanotify_add_new_mark(group, inode, NULL); +		if (IS_ERR(fsn_mark)) { +			mutex_unlock(&group->mark_mutex); +			return PTR_ERR(fsn_mark);  		}  	}  	added = fanotify_mark_add_to_mask(fsn_mark, mask, flags); -	fsnotify_put_mark(fsn_mark); +	mutex_unlock(&group->mark_mutex); +  	if (added & ~inode->i_fsnotify_mask)  		fsnotify_recalc_inode_mask(inode); + +	fsnotify_put_mark(fsn_mark);  	return 0;  } @@ -663,6 +671,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)  	struct fsnotify_group *group;  	int f_flags, fd;  	struct user_struct *user; +	struct fanotify_event_info *oevent;  	pr_debug("%s: flags=%d event_f_flags=%d\n",  		__func__, flags, event_f_flags); @@ -673,6 +682,18 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)  	if (flags & ~FAN_ALL_INIT_FLAGS)  		return -EINVAL; +	if (event_f_flags & ~FANOTIFY_INIT_ALL_EVENT_F_BITS) +		return -EINVAL; + +	switch (event_f_flags & O_ACCMODE) { +	case O_RDONLY: +	case O_RDWR: +	case O_WRONLY: +		break; +	default: +		return -EINVAL; +	} +  	user = get_current_user();  	if (atomic_read(&user->fanotify_listeners) > FANOTIFY_DEFAULT_MAX_LISTENERS) {  		free_uid(user); @@ -687,17 +708,29 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)  	/* fsnotify_alloc_group takes a ref.  
Dropped in fanotify_release */  	group = fsnotify_alloc_group(&fanotify_fsnotify_ops); -	if (IS_ERR(group)) +	if (IS_ERR(group)) { +		free_uid(user);  		return PTR_ERR(group); +	}  	group->fanotify_data.user = user;  	atomic_inc(&user->fanotify_listeners); +	oevent = fanotify_alloc_event(NULL, FS_Q_OVERFLOW, NULL); +	if (unlikely(!oevent)) { +		fd = -ENOMEM; +		goto out_destroy_group; +	} +	group->overflow_event = &oevent->fse; + +	if (force_o_largefile()) +		event_f_flags |= O_LARGEFILE;  	group->fanotify_data.f_flags = event_f_flags;  #ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS -	mutex_init(&group->fanotify_data.access_mutex); +	spin_lock_init(&group->fanotify_data.access_lock);  	init_waitqueue_head(&group->fanotify_data.access_waitq);  	INIT_LIST_HEAD(&group->fanotify_data.access_list); +	atomic_set(&group->fanotify_data.bypass_perm, 0);  #endif  	switch (flags & FAN_ALL_CLASS_BITS) {  	case FAN_CLASS_NOTIF: @@ -711,13 +744,13 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)  		break;  	default:  		fd = -EINVAL; -		goto out_put_group; +		goto out_destroy_group;  	}  	if (flags & FAN_UNLIMITED_QUEUE) {  		fd = -EPERM;  		if (!capable(CAP_SYS_ADMIN)) -			goto out_put_group; +			goto out_destroy_group;  		group->max_events = UINT_MAX;  	} else {  		group->max_events = FANOTIFY_DEFAULT_MAX_EVENTS; @@ -726,7 +759,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)  	if (flags & FAN_UNLIMITED_MARKS) {  		fd = -EPERM;  		if (!capable(CAP_SYS_ADMIN)) -			goto out_put_group; +			goto out_destroy_group;  		group->fanotify_data.max_marks = UINT_MAX;  	} else {  		group->fanotify_data.max_marks = FANOTIFY_DEFAULT_MAX_MARKS; @@ -734,25 +767,25 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)  	fd = anon_inode_getfd("[fanotify]", &fanotify_fops, group, f_flags);  	if (fd < 0) -		goto out_put_group; +		goto out_destroy_group;  	return fd; -out_put_group: -	fsnotify_put_group(group); +out_destroy_group: +	fsnotify_destroy_group(group);  	return fd;  } -SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags, -			      __u64 mask, int dfd, -			      const char  __user * pathname) +SYSCALL_DEFINE5(fanotify_mark, int, fanotify_fd, unsigned int, flags, +			      __u64, mask, int, dfd, +			      const char  __user *, pathname)  {  	struct inode *inode = NULL;  	struct vfsmount *mnt = NULL;  	struct fsnotify_group *group; -	struct file *filp; +	struct fd f;  	struct path path; -	int ret, fput_needed; +	int ret;  	pr_debug("%s: fanotify_fd=%d flags=%x dfd=%d pathname=%p mask=%llx\n",  		 __func__, fanotify_fd, flags, dfd, pathname, mask); @@ -764,9 +797,14 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,  	if (flags & ~FAN_ALL_MARK_FLAGS)  		return -EINVAL;  	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { -	case FAN_MARK_ADD: +	case FAN_MARK_ADD:		/* fallthrough */  	case FAN_MARK_REMOVE: +		if (!mask) +			return -EINVAL; +		break;  	case FAN_MARK_FLUSH: +		if (flags & ~(FAN_MARK_MOUNT | FAN_MARK_FLUSH)) +			return -EINVAL;  		break;  	default:  		return -EINVAL; @@ -784,15 +822,15 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,  #endif  		return -EINVAL; -	filp = fget_light(fanotify_fd, &fput_needed); -	if (unlikely(!filp)) +	f = fdget(fanotify_fd); +	if (unlikely(!f.file))  		return -EBADF;  	/* verify that this is indeed an fanotify instance */  	ret = -EINVAL; -	if (unlikely(filp->f_op != &fanotify_fops)) +	if 
(unlikely(f.file->f_op != &fanotify_fops))  		goto fput_and_out; -	group = filp->private_data; +	group = f.file->private_data;  	/*  	 * group->priority == FS_PRIO_0 == FAN_CLASS_NOTIF.  These are not @@ -803,6 +841,15 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,  	    group->priority == FS_PRIO_0)  		goto fput_and_out; +	if (flags & FAN_MARK_FLUSH) { +		ret = 0; +		if (flags & FAN_MARK_MOUNT) +			fsnotify_clear_vfsmount_marks_by_group(group); +		else +			fsnotify_clear_inode_marks_by_group(group); +		goto fput_and_out; +	} +  	ret = fanotify_find_path(dfd, pathname, &path, flags);  	if (ret)  		goto fput_and_out; @@ -814,7 +861,7 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,  		mnt = path.mnt;  	/* create/update an inode mark */ -	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE | FAN_MARK_FLUSH)) { +	switch (flags & (FAN_MARK_ADD | FAN_MARK_REMOVE)) {  	case FAN_MARK_ADD:  		if (flags & FAN_MARK_MOUNT)  			ret = fanotify_add_vfsmount_mark(group, mnt, mask, flags); @@ -827,43 +874,45 @@ SYSCALL_DEFINE(fanotify_mark)(int fanotify_fd, unsigned int flags,  		else  			ret = fanotify_remove_inode_mark(group, inode, mask, flags);  		break; -	case FAN_MARK_FLUSH: -		if (flags & FAN_MARK_MOUNT) -			fsnotify_clear_vfsmount_marks_by_group(group); -		else -			fsnotify_clear_inode_marks_by_group(group); -		break;  	default:  		ret = -EINVAL;  	}  	path_put(&path);  fput_and_out: -	fput_light(filp, fput_needed); +	fdput(f);  	return ret;  } -#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS -asmlinkage long SyS_fanotify_mark(long fanotify_fd, long flags, __u64 mask, -				  long dfd, long pathname) +#ifdef CONFIG_COMPAT +COMPAT_SYSCALL_DEFINE6(fanotify_mark, +				int, fanotify_fd, unsigned int, flags, +				__u32, mask0, __u32, mask1, int, dfd, +				const char  __user *, pathname)  { -	return SYSC_fanotify_mark((int) fanotify_fd, (unsigned int) flags, -				  mask, (int) dfd, -				  (const char  __user *) pathname); +	return sys_fanotify_mark(fanotify_fd, flags, +#ifdef __BIG_ENDIAN +				((__u64)mask0 << 32) | mask1, +#else +				((__u64)mask1 << 32) | mask0, +#endif +				 dfd, pathname);  } -SYSCALL_ALIAS(sys_fanotify_mark, SyS_fanotify_mark);  #endif  /* - * fanotify_user_setup - Our initialization function.  Note that we cannnot return + * fanotify_user_setup - Our initialization function.  Note that we cannot return   * error because we have compiled-in VFS hooks.  So an (unlikely) failure here   * must result in panic().   
*/  static int __init fanotify_user_setup(void)  {  	fanotify_mark_cache = KMEM_CACHE(fsnotify_mark, SLAB_PANIC); -	fanotify_response_event_cache = KMEM_CACHE(fanotify_response_event, -						   SLAB_PANIC); +	fanotify_event_cachep = KMEM_CACHE(fanotify_event_info, SLAB_PANIC); +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS +	fanotify_perm_event_cachep = KMEM_CACHE(fanotify_perm_event_info, +						SLAB_PANIC); +#endif  	return 0;  } diff --git a/fs/notify/fdinfo.c b/fs/notify/fdinfo.c new file mode 100644 index 00000000000..238a5930cb3 --- /dev/null +++ b/fs/notify/fdinfo.c @@ -0,0 +1,179 @@ +#include <linux/file.h> +#include <linux/fs.h> +#include <linux/fsnotify_backend.h> +#include <linux/idr.h> +#include <linux/init.h> +#include <linux/inotify.h> +#include <linux/fanotify.h> +#include <linux/kernel.h> +#include <linux/namei.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/seq_file.h> +#include <linux/proc_fs.h> +#include <linux/exportfs.h> + +#include "inotify/inotify.h" +#include "../fs/mount.h" + +#if defined(CONFIG_PROC_FS) + +#if defined(CONFIG_INOTIFY_USER) || defined(CONFIG_FANOTIFY) + +static int show_fdinfo(struct seq_file *m, struct file *f, +		       int (*show)(struct seq_file *m, struct fsnotify_mark *mark)) +{ +	struct fsnotify_group *group = f->private_data; +	struct fsnotify_mark *mark; +	int ret = 0; + +	mutex_lock(&group->mark_mutex); +	list_for_each_entry(mark, &group->marks_list, g_list) { +		ret = show(m, mark); +		if (ret) +			break; +	} +	mutex_unlock(&group->mark_mutex); +	return ret; +} + +#if defined(CONFIG_EXPORTFS) +static int show_mark_fhandle(struct seq_file *m, struct inode *inode) +{ +	struct { +		struct file_handle handle; +		u8 pad[64]; +	} f; +	int size, ret, i; + +	f.handle.handle_bytes = sizeof(f.pad); +	size = f.handle.handle_bytes >> 2; + +	ret = exportfs_encode_inode_fh(inode, (struct fid *)f.handle.f_handle, &size, 0); +	if ((ret == 255) || (ret == -ENOSPC)) { +		WARN_ONCE(1, "Can't encode file handler for inotify: %d\n", ret); +		return 0; +	} + +	f.handle.handle_type = ret; +	f.handle.handle_bytes = size * sizeof(u32); + +	ret = seq_printf(m, "fhandle-bytes:%x fhandle-type:%x f_handle:", +			 f.handle.handle_bytes, f.handle.handle_type); + +	for (i = 0; i < f.handle.handle_bytes; i++) +		ret |= seq_printf(m, "%02x", (int)f.handle.f_handle[i]); + +	return ret; +} +#else +static int show_mark_fhandle(struct seq_file *m, struct inode *inode) +{ +	return 0; +} +#endif + +#ifdef CONFIG_INOTIFY_USER + +static int inotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark) +{ +	struct inotify_inode_mark *inode_mark; +	struct inode *inode; +	int ret = 0; + +	if (!(mark->flags & (FSNOTIFY_MARK_FLAG_ALIVE | FSNOTIFY_MARK_FLAG_INODE))) +		return 0; + +	inode_mark = container_of(mark, struct inotify_inode_mark, fsn_mark); +	inode = igrab(mark->i.inode); +	if (inode) { +		ret = seq_printf(m, "inotify wd:%x ino:%lx sdev:%x " +				 "mask:%x ignored_mask:%x ", +				 inode_mark->wd, inode->i_ino, +				 inode->i_sb->s_dev, +				 mark->mask, mark->ignored_mask); +		ret |= show_mark_fhandle(m, inode); +		ret |= seq_putc(m, '\n'); +		iput(inode); +	} + +	return ret; +} + +int inotify_show_fdinfo(struct seq_file *m, struct file *f) +{ +	return show_fdinfo(m, f, inotify_fdinfo); +} + +#endif /* CONFIG_INOTIFY_USER */ + +#ifdef CONFIG_FANOTIFY + +static int fanotify_fdinfo(struct seq_file *m, struct fsnotify_mark *mark) +{ +	unsigned int mflags = 0; +	struct inode *inode; +	int ret = 0; + +	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) +		return 
0; + +	if (mark->flags & FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY) +		mflags |= FAN_MARK_IGNORED_SURV_MODIFY; + +	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) { +		inode = igrab(mark->i.inode); +		if (!inode) +			goto out; +		ret = seq_printf(m, "fanotify ino:%lx sdev:%x " +				 "mflags:%x mask:%x ignored_mask:%x ", +				 inode->i_ino, inode->i_sb->s_dev, +				 mflags, mark->mask, mark->ignored_mask); +		ret |= show_mark_fhandle(m, inode); +		ret |= seq_putc(m, '\n'); +		iput(inode); +	} else if (mark->flags & FSNOTIFY_MARK_FLAG_VFSMOUNT) { +		struct mount *mnt = real_mount(mark->m.mnt); + +		ret = seq_printf(m, "fanotify mnt_id:%x mflags:%x mask:%x " +				 "ignored_mask:%x\n", mnt->mnt_id, mflags, +				 mark->mask, mark->ignored_mask); +	} +out: +	return ret; +} + +int fanotify_show_fdinfo(struct seq_file *m, struct file *f) +{ +	struct fsnotify_group *group = f->private_data; +	unsigned int flags = 0; + +	switch (group->priority) { +	case FS_PRIO_0: +		flags |= FAN_CLASS_NOTIF; +		break; +	case FS_PRIO_1: +		flags |= FAN_CLASS_CONTENT; +		break; +	case FS_PRIO_2: +		flags |= FAN_CLASS_PRE_CONTENT; +		break; +	} + +	if (group->max_events == UINT_MAX) +		flags |= FAN_UNLIMITED_QUEUE; + +	if (group->fanotify_data.max_marks == UINT_MAX) +		flags |= FAN_UNLIMITED_MARKS; + +	seq_printf(m, "fanotify flags:%x event-flags:%x\n", +		   flags, group->fanotify_data.f_flags); + +	return show_fdinfo(m, f, fanotify_fdinfo); +} + +#endif /* CONFIG_FANOTIFY */ + +#endif /* CONFIG_INOTIFY_USER || CONFIG_FANOTIFY */ + +#endif /* CONFIG_PROC_FS */ diff --git a/fs/notify/fdinfo.h b/fs/notify/fdinfo.h new file mode 100644 index 00000000000..556afda990e --- /dev/null +++ b/fs/notify/fdinfo.h @@ -0,0 +1,27 @@ +#ifndef __FSNOTIFY_FDINFO_H__ +#define __FSNOTIFY_FDINFO_H__ + +#include <linux/errno.h> +#include <linux/proc_fs.h> + +struct seq_file; +struct file; + +#ifdef CONFIG_PROC_FS + +#ifdef CONFIG_INOTIFY_USER +extern int inotify_show_fdinfo(struct seq_file *m, struct file *f); +#endif + +#ifdef CONFIG_FANOTIFY +extern int fanotify_show_fdinfo(struct seq_file *m, struct file *f); +#endif + +#else /* CONFIG_PROC_FS */ + +#define inotify_show_fdinfo	NULL +#define fanotify_show_fdinfo	NULL + +#endif /* CONFIG_PROC_FS */ + +#endif /* __FSNOTIFY_FDINFO_H__ */ diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c index 20dc218707c..9d3e9c50066 100644 --- a/fs/notify/fsnotify.c +++ b/fs/notify/fsnotify.c @@ -26,6 +26,7 @@  #include <linux/fsnotify_backend.h>  #include "fsnotify.h" +#include "../mount.h"  /*   * Clear all of the marks on an inode when it is being evicted from core @@ -59,28 +60,30 @@ void __fsnotify_update_child_dentry_flags(struct inode *inode)  	/* determine if the children should tell inode about their events */  	watched = fsnotify_inode_watches_children(inode); -	spin_lock(&dcache_lock); +	spin_lock(&inode->i_lock);  	/* run all of the dentries associated with this inode.  
Since this is a  	 * directory, there damn well better only be one item on this list */ -	list_for_each_entry(alias, &inode->i_dentry, d_alias) { +	hlist_for_each_entry(alias, &inode->i_dentry, d_alias) {  		struct dentry *child;  		/* run all of the children of the original inode and fix their  		 * d_flags to indicate parental interest (their parent is the  		 * original inode) */ +		spin_lock(&alias->d_lock);  		list_for_each_entry(child, &alias->d_subdirs, d_u.d_child) {  			if (!child->d_inode)  				continue; -			spin_lock(&child->d_lock); +			spin_lock_nested(&child->d_lock, DENTRY_D_LOCK_NESTED);  			if (watched)  				child->d_flags |= DCACHE_FSNOTIFY_PARENT_WATCHED;  			else  				child->d_flags &= ~DCACHE_FSNOTIFY_PARENT_WATCHED;  			spin_unlock(&child->d_lock);  		} +		spin_unlock(&alias->d_lock);  	} -	spin_unlock(&dcache_lock); +	spin_unlock(&inode->i_lock);  }  /* Notify this dentry's parent about a child's events. */ @@ -120,13 +123,12 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)  }  EXPORT_SYMBOL_GPL(__fsnotify_parent); -static int send_to_group(struct inode *to_tell, struct vfsmount *mnt, +static int send_to_group(struct inode *to_tell,  			 struct fsnotify_mark *inode_mark,  			 struct fsnotify_mark *vfsmount_mark,  			 __u32 mask, void *data,  			 int data_is, u32 cookie, -			 const unsigned char *file_name, -			 struct fsnotify_event **event) +			 const unsigned char *file_name)  {  	struct fsnotify_group *group = NULL;  	__u32 inode_test_mask = 0; @@ -165,29 +167,19 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,  			vfsmount_test_mask &= ~inode_mark->ignored_mask;  	} -	pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p" +	pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"  		 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" -		 " data=%p data_is=%d cookie=%d event=%p\n", -		 __func__, group, to_tell, mnt, mask, inode_mark, +		 " data=%p data_is=%d cookie=%d\n", +		 __func__, group, to_tell, mask, inode_mark,  		 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, -		 data_is, cookie, *event); +		 data_is, cookie);  	if (!inode_test_mask && !vfsmount_test_mask)  		return 0; -	if (group->ops->should_send_event(group, to_tell, inode_mark, -					  vfsmount_mark, mask, data, -					  data_is) == false) -		return 0; - -	if (!*event) { -		*event = fsnotify_create_event(to_tell, mask, data, -						data_is, file_name, -						cookie, GFP_KERNEL); -		if (!*event) -			return -ENOMEM; -	} -	return group->ops->handle_event(group, inode_mark, vfsmount_mark, *event); +	return group->ops->handle_event(group, to_tell, inode_mark, +					vfsmount_mark, mask, data, data_is, +					file_name, cookie);  }  /* @@ -202,14 +194,13 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,  	struct hlist_node *inode_node = NULL, *vfsmount_node = NULL;  	struct fsnotify_mark *inode_mark = NULL, *vfsmount_mark = NULL;  	struct fsnotify_group *inode_group, *vfsmount_group; -	struct fsnotify_event *event = NULL; -	struct vfsmount *mnt; +	struct mount *mnt;  	int idx, ret = 0;  	/* global tests shouldn't care about events on child only the specific event */  	__u32 test_mask = (mask & ~FS_EVENT_ON_CHILD);  	if (data_is == FSNOTIFY_EVENT_PATH) -		mnt = ((struct path *)data)->mnt; +		mnt = real_mount(((struct path *)data)->mnt);  	else  		mnt = NULL; @@ -255,18 +246,18 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,  		if (inode_group > vfsmount_group) {  			/* 
handle inode */ -			ret = send_to_group(to_tell, NULL, inode_mark, NULL, mask, data, -					    data_is, cookie, file_name, &event); +			ret = send_to_group(to_tell, inode_mark, NULL, mask, +					    data, data_is, cookie, file_name);  			/* we didn't use the vfsmount_mark */  			vfsmount_group = NULL;  		} else if (vfsmount_group > inode_group) { -			ret = send_to_group(to_tell, mnt, NULL, vfsmount_mark, mask, data, -					    data_is, cookie, file_name, &event); +			ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, +					    data, data_is, cookie, file_name);  			inode_group = NULL;  		} else { -			ret = send_to_group(to_tell, mnt, inode_mark, vfsmount_mark, -					    mask, data, data_is, cookie, file_name, -					    &event); +			ret = send_to_group(to_tell, inode_mark, vfsmount_mark, +					    mask, data, data_is, cookie, +					    file_name);  		}  		if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) @@ -282,12 +273,6 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,  	ret = 0;  out:  	srcu_read_unlock(&fsnotify_mark_srcu, idx); -	/* -	 * fsnotify_create_event() took a reference so the event can't be cleaned -	 * up while we are still trying to add it to lists, drop that one. -	 */ -	if (event) -		fsnotify_put_event(event);  	return ret;  } diff --git a/fs/notify/group.c b/fs/notify/group.c index d309f38449c..ad199598045 100644 --- a/fs/notify/group.c +++ b/fs/notify/group.c @@ -26,16 +26,13 @@  #include <linux/fsnotify_backend.h>  #include "fsnotify.h" -#include <asm/atomic.h> +#include <linux/atomic.h>  /*   * Final freeing of a group   */  void fsnotify_final_destroy_group(struct fsnotify_group *group)  { -	/* clear the notification queue of all events */ -	fsnotify_flush_notify(group); -  	if (group->ops->free_group_priv)  		group->ops->free_group_priv(group); @@ -43,23 +40,37 @@ void fsnotify_final_destroy_group(struct fsnotify_group *group)  }  /* - * Trying to get rid of a group.  We need to first get rid of any outstanding - * allocations and then free the group.  Remember that fsnotify_clear_marks_by_group - * could miss marks that are being freed by inode and those marks could still - * hold a reference to this group (via group->num_marks)  If we get into that - * situtation, the fsnotify_final_destroy_group will get called when that final - * mark is freed. + * Trying to get rid of a group. Remove all marks, flush all events and release + * the group reference. + * Note that another thread calling fsnotify_clear_marks_by_group() may still + * hold a ref to the group.   */ -static void fsnotify_destroy_group(struct fsnotify_group *group) +void fsnotify_destroy_group(struct fsnotify_group *group)  {  	/* clear all inode marks for this group */  	fsnotify_clear_marks_by_group(group);  	synchronize_srcu(&fsnotify_mark_srcu); -	/* past the point of no return, matches the initial value of 1 */ -	if (atomic_dec_and_test(&group->num_marks)) -		fsnotify_final_destroy_group(group); +	/* clear the notification queue of all events */ +	fsnotify_flush_notify(group); + +	/* +	 * Destroy overflow event (we cannot use fsnotify_destroy_event() as +	 * that deliberately ignores overflow events. +	 */ +	if (group->overflow_event) +		group->ops->free_event(group->overflow_event); + +	fsnotify_put_group(group); +} + +/* + * Get reference to a group. 
+ */ +void fsnotify_get_group(struct fsnotify_group *group) +{ +	atomic_inc(&group->refcnt);  }  /* @@ -68,7 +79,7 @@ static void fsnotify_destroy_group(struct fsnotify_group *group)  void fsnotify_put_group(struct fsnotify_group *group)  {  	if (atomic_dec_and_test(&group->refcnt)) -		fsnotify_destroy_group(group); +		fsnotify_final_destroy_group(group);  }  /* @@ -84,21 +95,24 @@ struct fsnotify_group *fsnotify_alloc_group(const struct fsnotify_ops *ops)  	/* set to 0 when there a no external references to this group */  	atomic_set(&group->refcnt, 1); -	/* -	 * hits 0 when there are no external references AND no marks for -	 * this group -	 */ -	atomic_set(&group->num_marks, 1); +	atomic_set(&group->num_marks, 0);  	mutex_init(&group->notification_mutex);  	INIT_LIST_HEAD(&group->notification_list);  	init_waitqueue_head(&group->notification_waitq);  	group->max_events = UINT_MAX; -	spin_lock_init(&group->mark_lock); +	mutex_init(&group->mark_mutex);  	INIT_LIST_HEAD(&group->marks_list);  	group->ops = ops;  	return group;  } + +int fsnotify_fasync(int fd, struct file *file, int on) +{ +	struct fsnotify_group *group = file->private_data; + +	return fasync_helper(fd, file, on, &group->fsn_fa) >= 0 ? 0 : -EIO; +} diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c index 4c29fcf557d..74825be65b7 100644 --- a/fs/notify/inode_mark.c +++ b/fs/notify/inode_mark.c @@ -22,25 +22,25 @@  #include <linux/module.h>  #include <linux/mutex.h>  #include <linux/spinlock.h> -#include <linux/writeback.h> /* for inode_lock */ -#include <asm/atomic.h> +#include <linux/atomic.h>  #include <linux/fsnotify_backend.h>  #include "fsnotify.h" +#include "../internal.h" +  /*   * Recalculate the mask of events relevant to a given inode locked.   */  static void fsnotify_recalc_inode_mask_locked(struct inode *inode)  {  	struct fsnotify_mark *mark; -	struct hlist_node *pos;  	__u32 new_mask = 0;  	assert_spin_locked(&inode->i_lock); -	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) +	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list)  		new_mask |= mark->mask;  	inode->i_fsnotify_mask = new_mask;  } @@ -62,8 +62,8 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)  {  	struct inode *inode = mark->i.inode; +	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));  	assert_spin_locked(&mark->lock); -	assert_spin_locked(&mark->group->mark_lock);  	spin_lock(&inode->i_lock); @@ -86,11 +86,11 @@ void fsnotify_destroy_inode_mark(struct fsnotify_mark *mark)  void fsnotify_clear_marks_by_inode(struct inode *inode)  {  	struct fsnotify_mark *mark, *lmark; -	struct hlist_node *pos, *n; +	struct hlist_node *n;  	LIST_HEAD(free_list);  	spin_lock(&inode->i_lock); -	hlist_for_each_entry_safe(mark, pos, n, &inode->i_fsnotify_marks, i.i_list) { +	hlist_for_each_entry_safe(mark, n, &inode->i_fsnotify_marks, i.i_list) {  		list_add(&mark->i.free_i_list, &free_list);  		hlist_del_init_rcu(&mark->i.i_list);  		fsnotify_get_mark(mark); @@ -98,8 +98,16 @@ void fsnotify_clear_marks_by_inode(struct inode *inode)  	spin_unlock(&inode->i_lock);  	list_for_each_entry_safe(mark, lmark, &free_list, i.free_i_list) { -		fsnotify_destroy_mark(mark); +		struct fsnotify_group *group; + +		spin_lock(&mark->lock); +		fsnotify_get_group(mark->group); +		group = mark->group; +		spin_unlock(&mark->lock); + +		fsnotify_destroy_mark(mark, group);  		fsnotify_put_mark(mark); +		fsnotify_put_group(group);  	}  } @@ -115,15 +123,15 @@ void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)   * 
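
fsnotify_fasync() above centralizes the SIGIO wiring that each backend previously carried (inotify's private copy is deleted later in this diff); a backend now just points its file_operations at the shared helper. A sketch with an illustrative example_fops, again assuming the matching fsnotify_backend.h export outside this hunk:

#include <linux/fs.h>
#include <linux/fsnotify_backend.h>

/* The shared handler registers the entry in group->fsn_fa, which
 * fsnotify_add_notify_event() kicks with kill_fasync() on each queued
 * event (see the notification.c changes below). */
static const struct file_operations example_fops = {
	.fasync = fsnotify_fasync,
};
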
given a group and inode, find the mark associated with that combination.   * if found take a reference to that mark and return it, else return NULL   */ -struct fsnotify_mark *fsnotify_find_inode_mark_locked(struct fsnotify_group *group, -						      struct inode *inode) +static struct fsnotify_mark *fsnotify_find_inode_mark_locked( +		struct fsnotify_group *group, +		struct inode *inode)  {  	struct fsnotify_mark *mark; -	struct hlist_node *pos;  	assert_spin_locked(&inode->i_lock); -	hlist_for_each_entry(mark, pos, &inode->i_fsnotify_marks, i.i_list) { +	hlist_for_each_entry(mark, &inode->i_fsnotify_marks, i.i_list) {  		if (mark->group == group) {  			fsnotify_get_mark(mark);  			return mark; @@ -184,14 +192,13 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,  			    struct fsnotify_group *group, struct inode *inode,  			    int allow_dups)  { -	struct fsnotify_mark *lmark; -	struct hlist_node *node, *last = NULL; +	struct fsnotify_mark *lmark, *last = NULL;  	int ret = 0;  	mark->flags |= FSNOTIFY_MARK_FLAG_INODE; +	BUG_ON(!mutex_is_locked(&group->mark_mutex));  	assert_spin_locked(&mark->lock); -	assert_spin_locked(&group->mark_lock);  	spin_lock(&inode->i_lock); @@ -204,8 +211,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,  	}  	/* should mark be in the middle of the current list? */ -	hlist_for_each_entry(lmark, node, &inode->i_fsnotify_marks, i.i_list) { -		last = node; +	hlist_for_each_entry(lmark, &inode->i_fsnotify_marks, i.i_list) { +		last = lmark;  		if ((lmark->group == group) && !allow_dups) {  			ret = -EEXIST; @@ -225,7 +232,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,  	BUG_ON(last == NULL);  	/* mark should be the last entry.  last is the current last entry */ -	hlist_add_after_rcu(last, &mark->i.i_list); +	hlist_add_after_rcu(&last->i.i_list, &mark->i.i_list);  out:  	fsnotify_recalc_inode_mask_locked(inode);  	spin_unlock(&inode->i_lock); @@ -237,15 +244,14 @@ out:   * fsnotify_unmount_inodes - an sb is unmounting.  handle any watched inodes.   * @list: list of inodes being unmounted (sb->s_inodes)   * - * Called with inode_lock held, protecting the unmounting super block's list - * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay. - * We temporarily drop inode_lock, however, and CAN block. + * Called during unmount with no locks held, so needs to be safe against + * concurrent modifiers. We temporarily drop inode_sb_list_lock and CAN block.   */  void fsnotify_unmount_inodes(struct list_head *list)  {  	struct inode *inode, *next_i, *need_iput = NULL; -	spin_lock(&inode_lock); +	spin_lock(&inode_sb_list_lock);  	list_for_each_entry_safe(inode, next_i, list, i_sb_list) {  		struct inode *need_iput_tmp; @@ -254,8 +260,11 @@ void fsnotify_unmount_inodes(struct list_head *list)  		 * I_WILL_FREE, or I_NEW which is fine because by that point  		 * the inode cannot have any associated watches.  		 */ -		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) +		spin_lock(&inode->i_lock); +		if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) { +			spin_unlock(&inode->i_lock);  			continue; +		}  		/*  		 * If i_count is zero, the inode cannot have any watches and @@ -263,8 +272,10 @@ void fsnotify_unmount_inodes(struct list_head *list)  		 * evict all inodes with zero i_count from icache which is  		 * unnecessarily violent and may in fact be illegal to do.  		 
*/ -		if (!atomic_read(&inode->i_count)) +		if (!atomic_read(&inode->i_count)) { +			spin_unlock(&inode->i_lock);  			continue; +		}  		need_iput_tmp = need_iput;  		need_iput = NULL; @@ -274,22 +285,25 @@ void fsnotify_unmount_inodes(struct list_head *list)  			__iget(inode);  		else  			need_iput_tmp = NULL; +		spin_unlock(&inode->i_lock);  		/* In case the dropping of a reference would nuke next_i. */  		if ((&next_i->i_sb_list != list) && -		    atomic_read(&next_i->i_count) && -		    !(next_i->i_state & (I_FREEING | I_WILL_FREE))) { -			__iget(next_i); -			need_iput = next_i; +		    atomic_read(&next_i->i_count)) { +			spin_lock(&next_i->i_lock); +			if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) { +				__iget(next_i); +				need_iput = next_i; +			} +			spin_unlock(&next_i->i_lock);  		}  		/* -		 * We can safely drop inode_lock here because we hold +		 * We can safely drop inode_sb_list_lock here because we hold  		 * references on both inode and next_i.  Also no new inodes -		 * will be added since the umount has begun.  Finally, -		 * iprune_mutex keeps shrink_icache_memory() away. +		 * will be added since the umount has begun.  		 */ -		spin_unlock(&inode_lock); +		spin_unlock(&inode_sb_list_lock);  		if (need_iput_tmp)  			iput(need_iput_tmp); @@ -301,7 +315,7 @@ void fsnotify_unmount_inodes(struct list_head *list)  		iput(inode); -		spin_lock(&inode_lock); +		spin_lock(&inode_sb_list_lock);  	} -	spin_unlock(&inode_lock); +	spin_unlock(&inode_sb_list_lock);  } diff --git a/fs/notify/inotify/inotify.h b/fs/notify/inotify/inotify.h index b6642e4de4b..ed855ef6f07 100644 --- a/fs/notify/inotify/inotify.h +++ b/fs/notify/inotify/inotify.h @@ -2,11 +2,12 @@  #include <linux/inotify.h>  #include <linux/slab.h> /* struct kmem_cache */ -extern struct kmem_cache *event_priv_cachep; - -struct inotify_event_private_data { -	struct fsnotify_event_private_data fsnotify_event_priv_data; +struct inotify_event_info { +	struct fsnotify_event fse;  	int wd; +	u32 sync_cookie; +	int name_len; +	char name[];  };  struct inotify_inode_mark { @@ -14,8 +15,18 @@ struct inotify_inode_mark {  	int wd;  }; +static inline struct inotify_event_info *INOTIFY_E(struct fsnotify_event *fse) +{ +	return container_of(fse, struct inotify_event_info, fse); +} +  extern void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,  					   struct fsnotify_group *group); -extern void inotify_free_event_priv(struct fsnotify_event_private_data *event_priv); +extern int inotify_handle_event(struct fsnotify_group *group, +				struct inode *inode, +				struct fsnotify_mark *inode_mark, +				struct fsnotify_mark *vfsmount_mark, +				u32 mask, void *data, int data_type, +				const unsigned char *file_name, u32 cookie);  extern const struct fsnotify_ops inotify_fsnotify_ops; diff --git a/fs/notify/inotify/inotify_fsnotify.c b/fs/notify/inotify/inotify_fsnotify.c index a91b69a6a29..43ab1e1a07a 100644 --- a/fs/notify/inotify/inotify_fsnotify.c +++ b/fs/notify/inotify/inotify_fsnotify.c @@ -34,106 +34,90 @@  #include "inotify.h"  /* - * Check if 2 events contain the same information.  We do not compare private data - * but at this moment that isn't a problem for any know fsnotify listeners. + * Check if 2 events contain the same information.   
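
The rewritten comparison below is only ever applied against the tail of the notification list, so exactly identical back-to-back events coalesce and nothing else does. A hedged userspace illustration of the visible effect (the /tmp watch and the double modification are arbitrary):

#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = inotify_init1(0);

	inotify_add_watch(fd, "/tmp", IN_MODIFY);
	/* two identical IN_MODIFY events for the same name, queued
	 * back to back with nothing in between, surface as a single
	 * entry here, because only the list tail is compared */
	read(fd, buf, sizeof(buf));
	return 0;
}
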
*/ -static bool event_compare(struct fsnotify_event *old, struct fsnotify_event *new) +static bool event_compare(struct fsnotify_event *old_fsn, +			  struct fsnotify_event *new_fsn)  { -	if ((old->mask == new->mask) && -	    (old->to_tell == new->to_tell) && -	    (old->data_type == new->data_type) && -	    (old->name_len == new->name_len)) { -		switch (old->data_type) { -		case (FSNOTIFY_EVENT_INODE): -			/* remember, after old was put on the wait_q we aren't -			 * allowed to look at the inode any more, only thing -			 * left to check was if the file_name is the same */ -			if (!old->name_len || -			    !strcmp(old->file_name, new->file_name)) -				return true; -			break; -		case (FSNOTIFY_EVENT_PATH): -			if ((old->path.mnt == new->path.mnt) && -			    (old->path.dentry == new->path.dentry)) -				return true; -			break; -		case (FSNOTIFY_EVENT_NONE): -			if (old->mask & FS_Q_OVERFLOW) -				return true; -			else if (old->mask & FS_IN_IGNORED) -				return false; -			return true; -		}; -	} +	struct inotify_event_info *old, *new; + +	if (old_fsn->mask & FS_IN_IGNORED) +		return false; +	old = INOTIFY_E(old_fsn); +	new = INOTIFY_E(new_fsn); +	if ((old_fsn->mask == new_fsn->mask) && +	    (old_fsn->inode == new_fsn->inode) && +	    (old->name_len == new->name_len) && +	    (!old->name_len || !strcmp(old->name, new->name))) +		return true;  	return false;  } -static struct fsnotify_event *inotify_merge(struct list_head *list, -					    struct fsnotify_event *event) +static int inotify_merge(struct list_head *list, +			  struct fsnotify_event *event)  { -	struct fsnotify_event_holder *last_holder;  	struct fsnotify_event *last_event; -	/* and the list better be locked by something too */ -	spin_lock(&event->lock); - -	last_holder = list_entry(list->prev, struct fsnotify_event_holder, event_list); -	last_event = last_holder->event; -	if (event_compare(last_event, event)) -		fsnotify_get_event(last_event); -	else -		last_event = NULL; - -	spin_unlock(&event->lock); - -	return last_event; +	last_event = list_entry(list->prev, struct fsnotify_event, list); +	return event_compare(last_event, event);  } -static int inotify_handle_event(struct fsnotify_group *group, -				struct fsnotify_mark *inode_mark, -				struct fsnotify_mark *vfsmount_mark, -				struct fsnotify_event *event) +int inotify_handle_event(struct fsnotify_group *group, +			 struct inode *inode, +			 struct fsnotify_mark *inode_mark, +			 struct fsnotify_mark *vfsmount_mark, +			 u32 mask, void *data, int data_type, +			 const unsigned char *file_name, u32 cookie)  {  	struct inotify_inode_mark *i_mark; -	struct inode *to_tell; -	struct inotify_event_private_data *event_priv; -	struct fsnotify_event_private_data *fsn_event_priv; -	struct fsnotify_event *added_event; -	int wd, ret = 0; +	struct inotify_event_info *event; +	struct fsnotify_event *fsn_event; +	int ret; +	int len = 0; +	int alloc_len = sizeof(struct inotify_event_info);  	BUG_ON(vfsmount_mark); -	pr_debug("%s: group=%p event=%p to_tell=%p mask=%x\n", __func__, group, -		 event, event->to_tell, event->mask); +	if ((inode_mark->mask & FS_EXCL_UNLINK) && +	    (data_type == FSNOTIFY_EVENT_PATH)) { +		struct path *path = data; + +		if (d_unlinked(path->dentry)) +			return 0; +	} +	if (file_name) { +		len = strlen(file_name); +		alloc_len += len + 1; +	} -	to_tell = event->to_tell; +	pr_debug("%s: group=%p inode=%p mask=%x\n", __func__, group, inode, +		 mask);  	i_mark = container_of(inode_mark, struct inotify_inode_mark,  			      fsn_mark); -	wd = i_mark->wd; -	event_priv = 
kmem_cache_alloc(event_priv_cachep, GFP_KERNEL); -	if (unlikely(!event_priv)) +	event = kmalloc(alloc_len, GFP_KERNEL); +	if (unlikely(!event))  		return -ENOMEM; -	fsn_event_priv = &event_priv->fsnotify_event_priv_data; - -	fsn_event_priv->group = group; -	event_priv->wd = wd; - -	added_event = fsnotify_add_notify_event(group, event, fsn_event_priv, inotify_merge); -	if (added_event) { -		inotify_free_event_priv(fsn_event_priv); -		if (!IS_ERR(added_event)) -			fsnotify_put_event(added_event); -		else -			ret = PTR_ERR(added_event); +	fsn_event = &event->fse; +	fsnotify_init_event(fsn_event, inode, mask); +	event->wd = i_mark->wd; +	event->sync_cookie = cookie; +	event->name_len = len; +	if (len) +		strcpy(event->name, file_name); + +	ret = fsnotify_add_notify_event(group, fsn_event, inotify_merge); +	if (ret) { +		/* Our event wasn't used in the end. Free it. */ +		fsnotify_destroy_event(group, fsn_event);  	}  	if (inode_mark->mask & IN_ONESHOT) -		fsnotify_destroy_mark(inode_mark); +		fsnotify_destroy_mark(inode_mark, group); -	return ret; +	return 0;  }  static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify_group *group) @@ -141,22 +125,6 @@ static void inotify_freeing_mark(struct fsnotify_mark *fsn_mark, struct fsnotify  	inotify_ignored_and_remove_idr(fsn_mark, group);  } -static bool inotify_should_send_event(struct fsnotify_group *group, struct inode *inode, -				      struct fsnotify_mark *inode_mark, -				      struct fsnotify_mark *vfsmount_mark, -				      __u32 mask, void *data, int data_type) -{ -	if ((inode_mark->mask & FS_EXCL_UNLINK) && -	    (data_type == FSNOTIFY_EVENT_PATH)) { -		struct path *path = data; - -		if (d_unlinked(path->dentry)) -			return false; -	} - -	return true; -} -  /*   * This is NEVER supposed to be called.  
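
The handler above makes one allocation sized for the event header plus the NUL-terminated name landing in inotify_event_info's flexible array. The same pattern reduced to a sketch (example_event and example_alloc are illustrative names, not part of the patch):

#include <linux/slab.h>
#include <linux/string.h>

struct example_event {
	int  wd;
	int  name_len;
	char name[];			/* flexible array member */
};

/* One kmalloc() carries both the fixed header and the variable tail,
 * mirroring the alloc_len computation in inotify_handle_event(). */
static struct example_event *example_alloc(const char *name, gfp_t gfp)
{
	size_t len = strlen(name);
	struct example_event *ev;

	ev = kmalloc(sizeof(*ev) + len + 1, gfp);
	if (!ev)
		return NULL;
	ev->wd = -1;
	ev->name_len = len;
	strcpy(ev->name, name);
	return ev;
}
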
Inotify marks should either have been   * removed from the idr when the watch was removed or in the @@ -194,28 +162,21 @@ static int idr_callback(int id, void *p, void *data)  static void inotify_free_group_priv(struct fsnotify_group *group)  { -	/* ideally the idr is empty and we won't hit the BUG in teh callback */ +	/* ideally the idr is empty and we won't hit the BUG in the callback */  	idr_for_each(&group->inotify_data.idr, idr_callback, group); -	idr_remove_all(&group->inotify_data.idr);  	idr_destroy(&group->inotify_data.idr); +	atomic_dec(&group->inotify_data.user->inotify_devs);  	free_uid(group->inotify_data.user);  } -void inotify_free_event_priv(struct fsnotify_event_private_data *fsn_event_priv) +static void inotify_free_event(struct fsnotify_event *fsn_event)  { -	struct inotify_event_private_data *event_priv; - - -	event_priv = container_of(fsn_event_priv, struct inotify_event_private_data, -				  fsnotify_event_priv_data); - -	kmem_cache_free(event_priv_cachep, event_priv); +	kfree(INOTIFY_E(fsn_event));  }  const struct fsnotify_ops inotify_fsnotify_ops = {  	.handle_event = inotify_handle_event, -	.should_send_event = inotify_should_send_event,  	.free_group_priv = inotify_free_group_priv, -	.free_event_priv = inotify_free_event_priv, +	.free_event = inotify_free_event,  	.freeing_mark = inotify_freeing_mark,  }; diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c index 444c305a468..cc423a30a0c 100644 --- a/fs/notify/inotify/inotify_user.c +++ b/fs/notify/inotify/inotify_user.c @@ -40,6 +40,7 @@  #include <linux/wait.h>  #include "inotify.h" +#include "../fdinfo.h"  #include <asm/ioctls.h> @@ -49,7 +50,6 @@ static int inotify_max_queued_events __read_mostly;  static int inotify_max_user_watches __read_mostly;  static struct kmem_cache *inotify_inode_mark_cachep __read_mostly; -struct kmem_cache *event_priv_cachep __read_mostly;  #ifdef CONFIG_SYSCTL @@ -57,7 +57,7 @@ struct kmem_cache *event_priv_cachep __read_mostly;  static int zero; -ctl_table inotify_table[] = { +struct ctl_table inotify_table[] = {  	{  		.procname	= "max_user_instances",  		.data		= &inotify_max_user_instances, @@ -123,6 +123,16 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)  	return ret;  } +static int round_event_name_len(struct fsnotify_event *fsn_event) +{ +	struct inotify_event_info *event; + +	event = INOTIFY_E(fsn_event); +	if (!event->name_len) +		return 0; +	return roundup(event->name_len + 1, sizeof(struct inotify_event)); +} +  /*   * Get an inotify_kernel_event if one exists and is small   * enough to fit in "count". Return an error pointer if @@ -143,9 +153,7 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,  	pr_debug("%s: group=%p event=%p\n", __func__, group, event); -	if (event->name_len) -		event_size += roundup(event->name_len + 1, event_size); - +	event_size += round_event_name_len(event);  	if (event_size > count)  		return ERR_PTR(-EINVAL); @@ -163,40 +171,27 @@ static struct fsnotify_event *get_one_event(struct fsnotify_group *group,   * buffer we had in "get_one_event()" above.   
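
round_event_name_len() above pads name_len plus its terminating NUL to a multiple of sizeof(struct inotify_event), i.e. 16 bytes, so a 5-byte name consumes 16 bytes of buffer and an 18-byte name 32. A small userspace sketch of the same arithmetic:

#include <stdio.h>
#include <sys/inotify.h>

/* Userspace restatement of roundup(name_len + 1, sizeof(struct
 * inotify_event)); returns 0 for an empty name, like the kernel helper. */
static size_t padded_name_len(size_t name_len)
{
	size_t unit = sizeof(struct inotify_event);	/* 16 bytes */

	return name_len ? (name_len + 1 + unit - 1) / unit * unit : 0;
}

int main(void)
{
	printf("%zu %zu\n", padded_name_len(5), padded_name_len(18));
	/* prints: 16 32 */
	return 0;
}
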
*/  static ssize_t copy_event_to_user(struct fsnotify_group *group, -				  struct fsnotify_event *event, +				  struct fsnotify_event *fsn_event,  				  char __user *buf)  {  	struct inotify_event inotify_event; -	struct fsnotify_event_private_data *fsn_priv; -	struct inotify_event_private_data *priv; +	struct inotify_event_info *event;  	size_t event_size = sizeof(struct inotify_event); -	size_t name_len = 0; +	size_t name_len; +	size_t pad_name_len; -	pr_debug("%s: group=%p event=%p\n", __func__, group, event); - -	/* we get the inotify watch descriptor from the event private data */ -	spin_lock(&event->lock); -	fsn_priv = fsnotify_remove_priv_from_event(group, event); -	spin_unlock(&event->lock); - -	if (!fsn_priv) -		inotify_event.wd = -1; -	else { -		priv = container_of(fsn_priv, struct inotify_event_private_data, -				    fsnotify_event_priv_data); -		inotify_event.wd = priv->wd; -		inotify_free_event_priv(fsn_priv); -	} +	pr_debug("%s: group=%p event=%p\n", __func__, group, fsn_event); +	event = INOTIFY_E(fsn_event); +	name_len = event->name_len;  	/* -	 * round up event->name_len so it is a multiple of event_size +	 * round up name length so it is a multiple of event_size  	 * plus an extra byte for the terminating '\0'.  	 */ -	if (event->name_len) -		name_len = roundup(event->name_len + 1, event_size); -	inotify_event.len = name_len; - -	inotify_event.mask = inotify_mask_to_arg(event->mask); +	pad_name_len = round_event_name_len(fsn_event); +	inotify_event.len = pad_name_len; +	inotify_event.mask = inotify_mask_to_arg(fsn_event->mask); +	inotify_event.wd = event->wd;  	inotify_event.cookie = event->sync_cookie;  	/* send the main event */ @@ -208,20 +203,18 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,  	/*  	 * fsnotify only stores the pathname, so here we have to send the pathname  	 * and then pad that pathname out to a multiple of sizeof(inotify_event) -	 * with zeros.  I get my zeros from the nul_inotify_event. +	 * with zeros.  	 */ -	if (name_len) { -		unsigned int len_to_zero = name_len - event->name_len; +	if (pad_name_len) {  		/* copy the path name */ -		if (copy_to_user(buf, event->file_name, event->name_len)) +		if (copy_to_user(buf, event->name, name_len))  			return -EFAULT; -		buf += event->name_len; +		buf += name_len;  		/* fill userspace with 0's */ -		if (clear_user(buf, len_to_zero)) +		if (clear_user(buf, pad_name_len - name_len))  			return -EFAULT; -		buf += len_to_zero; -		event_size += name_len; +		event_size += pad_name_len;  	}  	return event_size; @@ -253,7 +246,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,  			if (IS_ERR(kevent))  				break;  			ret = copy_event_to_user(group, kevent, buf); -			fsnotify_put_event(kevent); +			fsnotify_destroy_event(group, kevent);  			if (ret < 0)  				break;  			buf += ret; @@ -264,7 +257,7 @@ static ssize_t inotify_read(struct file *file, char __user *buf,  		ret = -EAGAIN;  		if (file->f_flags & O_NONBLOCK)  			break; -		ret = -EINTR; +		ret = -ERESTARTSYS;  		if (signal_pending(current))  			break; @@ -280,26 +273,14 @@ static ssize_t inotify_read(struct file *file, char __user *buf,  	return ret;  } -static int inotify_fasync(int fd, struct file *file, int on) -{ -	struct fsnotify_group *group = file->private_data; - -	return fasync_helper(fd, file, on, &group->inotify_data.fa) >= 0 ? 
0 : -EIO; -} -  static int inotify_release(struct inode *ignored, struct file *file)  {  	struct fsnotify_group *group = file->private_data; -	struct user_struct *user = group->inotify_data.user;  	pr_debug("%s: group=%p\n", __func__, group); -	fsnotify_clear_marks_by_group(group); -  	/* free this group, matching get was inotify_init->fsnotify_obtain_group */ -	fsnotify_put_group(group); - -	atomic_dec(&user->inotify_devs); +	fsnotify_destroy_group(group);  	return 0;  } @@ -308,8 +289,7 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,  			  unsigned long arg)  {  	struct fsnotify_group *group; -	struct fsnotify_event_holder *holder; -	struct fsnotify_event *event; +	struct fsnotify_event *fsn_event;  	void __user *p;  	int ret = -ENOTTY;  	size_t send_len = 0; @@ -322,12 +302,10 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,  	switch (cmd) {  	case FIONREAD:  		mutex_lock(&group->notification_mutex); -		list_for_each_entry(holder, &group->notification_list, event_list) { -			event = holder->event; +		list_for_each_entry(fsn_event, &group->notification_list, +				    list) {  			send_len += sizeof(struct inotify_event); -			if (event->name_len) -				send_len += roundup(event->name_len + 1, -						sizeof(struct inotify_event)); +			send_len += round_event_name_len(fsn_event);  		}  		mutex_unlock(&group->notification_mutex);  		ret = put_user(send_len, (int __user *) p); @@ -338,9 +316,10 @@ static long inotify_ioctl(struct file *file, unsigned int cmd,  }  static const struct file_operations inotify_fops = { +	.show_fdinfo	= inotify_show_fdinfo,  	.poll		= inotify_poll,  	.read		= inotify_read, -	.fasync		= inotify_fasync, +	.fasync		= fsnotify_fasync,  	.release	= inotify_release,  	.unlocked_ioctl	= inotify_ioctl,  	.compat_ioctl	= inotify_ioctl, @@ -366,27 +345,23 @@ static int inotify_find_inode(const char __user *dirname, struct path *path, uns  }  static int inotify_add_to_idr(struct idr *idr, spinlock_t *idr_lock, -			      int *last_wd,  			      struct inotify_inode_mark *i_mark)  {  	int ret; -	do { -		if (unlikely(!idr_pre_get(idr, GFP_KERNEL))) -			return -ENOMEM; +	idr_preload(GFP_KERNEL); +	spin_lock(idr_lock); -		spin_lock(idr_lock); -		ret = idr_get_new_above(idr, i_mark, *last_wd + 1, -					&i_mark->wd); +	ret = idr_alloc_cyclic(idr, i_mark, 1, 0, GFP_NOWAIT); +	if (ret >= 0) {  		/* we added the mark to the idr, take a reference */ -		if (!ret) { -			*last_wd = i_mark->wd; -			fsnotify_get_mark(&i_mark->fsn_mark); -		} -		spin_unlock(idr_lock); -	} while (ret == -EAGAIN); +		i_mark->wd = ret; +		fsnotify_get_mark(&i_mark->fsn_mark); +	} -	return ret; +	spin_unlock(idr_lock); +	idr_preload_end(); +	return ret < 0 ? 
ret : 0;  }  static struct inotify_inode_mark *inotify_idr_find_locked(struct fsnotify_group *group, @@ -517,42 +492,12 @@ void inotify_ignored_and_remove_idr(struct fsnotify_mark *fsn_mark,  				    struct fsnotify_group *group)  {  	struct inotify_inode_mark *i_mark; -	struct fsnotify_event *ignored_event, *notify_event; -	struct inotify_event_private_data *event_priv; -	struct fsnotify_event_private_data *fsn_event_priv; -	int ret; -	ignored_event = fsnotify_create_event(NULL, FS_IN_IGNORED, NULL, -					      FSNOTIFY_EVENT_NONE, NULL, 0, -					      GFP_NOFS); -	if (!ignored_event) -		return; +	/* Queue ignore event for the watch */ +	inotify_handle_event(group, NULL, fsn_mark, NULL, FS_IN_IGNORED, +			     NULL, FSNOTIFY_EVENT_NONE, NULL, 0);  	i_mark = container_of(fsn_mark, struct inotify_inode_mark, fsn_mark); - -	event_priv = kmem_cache_alloc(event_priv_cachep, GFP_NOFS); -	if (unlikely(!event_priv)) -		goto skip_send_ignore; - -	fsn_event_priv = &event_priv->fsnotify_event_priv_data; - -	fsn_event_priv->group = group; -	event_priv->wd = i_mark->wd; - -	notify_event = fsnotify_add_notify_event(group, ignored_event, fsn_event_priv, NULL); -	if (notify_event) { -		if (IS_ERR(notify_event)) -			ret = PTR_ERR(notify_event); -		else -			fsnotify_put_event(notify_event); -		inotify_free_event_priv(fsn_event_priv); -	} - -skip_send_ignore: - -	/* matches the reference taken when the event was created */ -	fsnotify_put_event(ignored_event); -  	/* remove this mark from the idr */  	inotify_remove_from_idr(group, i_mark); @@ -580,10 +525,7 @@ static int inotify_update_existing_watch(struct fsnotify_group *group,  	int add = (arg & IN_MASK_ADD);  	int ret; -	/* don't allow invalid bits: we don't want flags set */  	mask = inotify_arg_to_mask(arg); -	if (unlikely(!(mask & IN_ALL_EVENTS))) -		return -EINVAL;  	fsn_mark = fsnotify_find_inode_mark(group, inode);  	if (!fsn_mark) @@ -633,10 +575,7 @@ static int inotify_new_watch(struct fsnotify_group *group,  	struct idr *idr = &group->inotify_data.idr;  	spinlock_t *idr_lock = &group->inotify_data.idr_lock; -	/* don't allow invalid bits: we don't want flags set */  	mask = inotify_arg_to_mask(arg); -	if (unlikely(!(mask & IN_ALL_EVENTS))) -		return -EINVAL;  	tmp_i_mark = kmem_cache_alloc(inotify_inode_mark_cachep, GFP_KERNEL);  	if (unlikely(!tmp_i_mark)) @@ -650,13 +589,13 @@ static int inotify_new_watch(struct fsnotify_group *group,  	if (atomic_read(&group->inotify_data.user->inotify_watches) >= inotify_max_user_watches)  		goto out_err; -	ret = inotify_add_to_idr(idr, idr_lock, &group->inotify_data.last_wd, -				 tmp_i_mark); +	ret = inotify_add_to_idr(idr, idr_lock, tmp_i_mark);  	if (ret)  		goto out_err;  	/* we are on the idr, now get on the inode */ -	ret = fsnotify_add_mark(&tmp_i_mark->fsn_mark, group, inode, NULL, 0); +	ret = fsnotify_add_mark_locked(&tmp_i_mark->fsn_mark, group, inode, +				       NULL, 0);  	if (ret) {  		/* we failed to get on the inode, get off the idr */  		inotify_remove_from_idr(group, tmp_i_mark); @@ -680,38 +619,48 @@ static int inotify_update_watch(struct fsnotify_group *group, struct inode *inod  {  	int ret = 0; -retry: +	mutex_lock(&group->mark_mutex);  	/* try to update and existing watch with the new arg */  	ret = inotify_update_existing_watch(group, inode, arg);  	/* no mark present, try to add a new one */  	if (ret == -ENOENT)  		ret = inotify_new_watch(group, inode, arg); -	/* -	 * inotify_new_watch could race with another thread which did an -	 * inotify_new_watch between the update_existing 
and the add watch -	 * here, go back and try to update an existing mark again. -	 */ -	if (ret == -EEXIST) -		goto retry; +	mutex_unlock(&group->mark_mutex);  	return ret;  } -static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsigned int max_events) +static struct fsnotify_group *inotify_new_group(unsigned int max_events)  {  	struct fsnotify_group *group; +	struct inotify_event_info *oevent;  	group = fsnotify_alloc_group(&inotify_fsnotify_ops);  	if (IS_ERR(group))  		return group; +	oevent = kmalloc(sizeof(struct inotify_event_info), GFP_KERNEL); +	if (unlikely(!oevent)) { +		fsnotify_destroy_group(group); +		return ERR_PTR(-ENOMEM); +	} +	group->overflow_event = &oevent->fse; +	fsnotify_init_event(group->overflow_event, NULL, FS_Q_OVERFLOW); +	oevent->wd = -1; +	oevent->sync_cookie = 0; +	oevent->name_len = 0; +  	group->max_events = max_events;  	spin_lock_init(&group->inotify_data.idr_lock);  	idr_init(&group->inotify_data.idr); -	group->inotify_data.last_wd = 0; -	group->inotify_data.user = user; -	group->inotify_data.fa = NULL; +	group->inotify_data.user = get_current_user(); + +	if (atomic_inc_return(&group->inotify_data.user->inotify_devs) > +	    inotify_max_user_instances) { +		fsnotify_destroy_group(group); +		return ERR_PTR(-EMFILE); +	}  	return group;  } @@ -721,7 +670,6 @@ static struct fsnotify_group *inotify_new_group(struct user_struct *user, unsign  SYSCALL_DEFINE1(inotify_init1, int, flags)  {  	struct fsnotify_group *group; -	struct user_struct *user;  	int ret;  	/* Check the IN_* constants for consistency.  */ @@ -731,30 +679,16 @@ SYSCALL_DEFINE1(inotify_init1, int, flags)  	if (flags & ~(IN_CLOEXEC | IN_NONBLOCK))  		return -EINVAL; -	user = get_current_user(); -	if (unlikely(atomic_read(&user->inotify_devs) >= -			inotify_max_user_instances)) { -		ret = -EMFILE; -		goto out_free_uid; -	} -  	/* fsnotify_obtain_group took a reference to group, we put this when we kill the file in the end */ -	group = inotify_new_group(user, inotify_max_queued_events); -	if (IS_ERR(group)) { -		ret = PTR_ERR(group); -		goto out_free_uid; -	} - -	atomic_inc(&user->inotify_devs); +	group = inotify_new_group(inotify_max_queued_events); +	if (IS_ERR(group)) +		return PTR_ERR(group);  	ret = anon_inode_getfd("inotify", &inotify_fops, group,  				  O_RDONLY | flags); -	if (ret >= 0) -		return ret; +	if (ret < 0) +		fsnotify_destroy_group(group); -	atomic_dec(&user->inotify_devs); -out_free_uid: -	free_uid(user);  	return ret;  } @@ -769,16 +703,20 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,  	struct fsnotify_group *group;  	struct inode *inode;  	struct path path; -	struct file *filp; -	int ret, fput_needed; +	struct fd f; +	int ret;  	unsigned flags = 0; -	filp = fget_light(fd, &fput_needed); -	if (unlikely(!filp)) +	/* don't allow invalid bits: we don't want flags set */ +	if (unlikely(!(mask & ALL_INOTIFY_BITS))) +		return -EINVAL; + +	f = fdget(fd); +	if (unlikely(!f.file))  		return -EBADF;  	/* verify that this is indeed an inotify instance */ -	if (unlikely(filp->f_op != &inotify_fops)) { +	if (unlikely(f.file->f_op != &inotify_fops)) {  		ret = -EINVAL;  		goto fput_and_out;  	} @@ -794,13 +732,13 @@ SYSCALL_DEFINE3(inotify_add_watch, int, fd, const char __user *, pathname,  	/* inode held in place by reference to path; group by fget on fd */  	inode = path.dentry->d_inode; -	group = filp->private_data; +	group = f.file->private_data;  	/* create/update an inode mark */  	ret = inotify_update_watch(group, inode, mask);  	
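
With the "don't allow invalid bits" test hoisted to the top of inotify_add_watch() above, a request whose mask has no bits in ALL_INOTIFY_BITS now fails before any group, idr or inode work happens. A hedged userspace illustration:

#include <sys/inotify.h>
#include <assert.h>
#include <errno.h>

int main(void)
{
	int fd = inotify_init1(0);

	/* an empty event mask is rejected at the syscall boundary */
	assert(inotify_add_watch(fd, "/tmp", 0) == -1 && errno == EINVAL);
	return 0;
}
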
path_put(&path);  fput_and_out: -	fput_light(filp, fput_needed); +	fdput(f);  	return ret;  } @@ -808,19 +746,19 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)  {  	struct fsnotify_group *group;  	struct inotify_inode_mark *i_mark; -	struct file *filp; -	int ret = 0, fput_needed; +	struct fd f; +	int ret = 0; -	filp = fget_light(fd, &fput_needed); -	if (unlikely(!filp)) +	f = fdget(fd); +	if (unlikely(!f.file))  		return -EBADF;  	/* verify that this is indeed an inotify instance */  	ret = -EINVAL; -	if (unlikely(filp->f_op != &inotify_fops)) +	if (unlikely(f.file->f_op != &inotify_fops))  		goto out; -	group = filp->private_data; +	group = f.file->private_data;  	ret = -EINVAL;  	i_mark = inotify_idr_find(group, wd); @@ -829,18 +767,18 @@ SYSCALL_DEFINE2(inotify_rm_watch, int, fd, __s32, wd)  	ret = 0; -	fsnotify_destroy_mark(&i_mark->fsn_mark); +	fsnotify_destroy_mark(&i_mark->fsn_mark, group);  	/* match ref taken by inotify_idr_find */  	fsnotify_put_mark(&i_mark->fsn_mark);  out: -	fput_light(filp, fput_needed); +	fdput(f);  	return ret;  }  /* - * inotify_user_setup - Our initialization function.  Note that we cannnot return + * inotify_user_setup - Our initialization function.  Note that we cannot return   * error because we have compiled-in VFS hooks.  So an (unlikely) failure here   * must result in panic().   */ @@ -868,7 +806,6 @@ static int __init inotify_user_setup(void)  	BUG_ON(hweight32(ALL_INOTIFY_BITS) != 21);  	inotify_inode_mark_cachep = KMEM_CACHE(inotify_inode_mark, SLAB_PANIC); -	event_priv_cachep = KMEM_CACHE(inotify_event_private_data, SLAB_PANIC);  	inotify_max_queued_events = 16384;  	inotify_max_user_instances = 128; diff --git a/fs/notify/mark.c b/fs/notify/mark.c index 325185e514b..d90deaa08e7 100644 --- a/fs/notify/mark.c +++ b/fs/notify/mark.c @@ -20,28 +20,29 @@   * fsnotify inode mark locking/lifetime/and refcnting   *   * REFCNT: - * The mark->refcnt tells how many "things" in the kernel currently are - * referencing this object.  The object typically will live inside the kernel - * with a refcnt of 2, one for each list it is on (i_list, g_list).  Any task - * which can find this object holding the appropriete locks, can take a reference - * and the object itself is guarenteed to survive until the reference is dropped. + * The group->recnt and mark->refcnt tell how many "things" in the kernel + * currently are referencing the objects. Both kind of objects typically will + * live inside the kernel with a refcnt of 2, one for its creation and one for + * the reference a group and a mark hold to each other. + * If you are holding the appropriate locks, you can take a reference and the + * object itself is guaranteed to survive until the reference is dropped.   *   * LOCKING: - * There are 3 spinlocks involved with fsnotify inode marks and they MUST - * be taken in order as follows: + * There are 3 locks involved with fsnotify inode marks and they MUST be taken + * in order as follows:   * + * group->mark_mutex   * mark->lock - * group->mark_lock   * inode->i_lock   * - * mark->lock protects 2 things, mark->group and mark->inode.  You must hold - * that lock to dereference either of these things (they could be NULL even with - * the lock) - * - * group->mark_lock protects the marks_list anchored inside a given group - * and each mark is hooked via the g_list.  It also sorta protects the - * free_g_list, which when used is anchored by a private list on the stack of the - * task which held the group->mark_lock. 
+ * group->mark_mutex protects the marks_list anchored inside a given group and
+ * each mark is hooked via the g_list.  It also protects the group's private
+ * data (i.e. group limits).
+ *
+ * mark->lock protects the mark's attributes, such as its masks and flags.
+ * Furthermore it protects the access to a reference of the group that the mark
+ * is assigned to as well as the access to a reference of the inode/vfsmount
+ * that is being watched by the mark.
 *
 * inode->i_lock protects the i_fsnotify_marks list anchored inside a
 * given inode and each mark is hooked via the i_list. (and sorta the
@@ -64,18 +65,11 @@
 * inode.  We take i_lock and walk the i_fsnotify_marks safely.  For each
 * mark on the list we take a reference (so the mark can't disappear under us).
 * We remove that mark form the inode's list of marks and we add this mark to a
- * private list anchored on the stack using i_free_list;  At this point we no
- * longer fear anything finding the mark using the inode's list of marks.
- *
- * We can safely and locklessly run the private list on the stack of everything
- * we just unattached from the original inode.  For each mark on the private list
- * we grab the mark-> and can thus dereference mark->group and mark->inode.  If
- * we see the group and inode are not NULL we take those locks.  Now holding all
- * 3 locks we can completely remove the mark from other tasks finding it in the
- * future.  Remember, 10 things might already be referencing this mark, but they
- * better be holding a ref.  We drop our reference we took before we unhooked it
- * from the inode.  When the ref hits 0 we can free the mark.
- *
+ * private list anchored on the stack using i_free_list; we walk i_free_list
+ * and before we destroy the mark we make sure that we don't race with a
+ * concurrent destroy_group by getting a ref to the mark's group and taking the
+ * group's mutex.
+ *
 * Very similarly for freeing by group, except we use free_g_list.
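
The order spelled out above (group->mark_mutex, then mark->lock, then inode->i_lock) is what the new *_locked entry points rely on. A minimal sketch of an external caller honoring it (example_detach is hypothetical; the inner two locks are taken by the callee, whose header export is assumed):

#include <linux/mutex.h>
#include <linux/fsnotify_backend.h>

static void example_detach(struct fsnotify_mark *mark,
			   struct fsnotify_group *group)
{
	/* group->mark_mutex first; fsnotify_destroy_mark_locked() then
	 * takes mark->lock and inode->i_lock itself, in that order, and
	 * may drop and retake the mutex while queueing the mark for
	 * destruction */
	mutex_lock(&group->mark_mutex);
	fsnotify_destroy_mark_locked(mark, group);
	mutex_unlock(&group->mark_mutex);
}
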
*   * This has the very interesting property of being able to run concurrently with @@ -91,9 +85,8 @@  #include <linux/slab.h>  #include <linux/spinlock.h>  #include <linux/srcu.h> -#include <linux/writeback.h> /* for inode_lock */ -#include <asm/atomic.h> +#include <linux/atomic.h>  #include <linux/fsnotify_backend.h>  #include "fsnotify.h" @@ -110,8 +103,11 @@ void fsnotify_get_mark(struct fsnotify_mark *mark)  void fsnotify_put_mark(struct fsnotify_mark *mark)  { -	if (atomic_dec_and_test(&mark->refcnt)) +	if (atomic_dec_and_test(&mark->refcnt)) { +		if (mark->group) +			fsnotify_put_group(mark->group);  		mark->free_mark(mark); +	}  }  /* @@ -119,14 +115,14 @@ void fsnotify_put_mark(struct fsnotify_mark *mark)   * The caller had better be holding a reference to this mark so we don't actually   * do the final put under the mark->lock   */ -void fsnotify_destroy_mark(struct fsnotify_mark *mark) +void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark, +				  struct fsnotify_group *group)  { -	struct fsnotify_group *group;  	struct inode *inode = NULL; -	spin_lock(&mark->lock); +	BUG_ON(!mutex_is_locked(&group->mark_mutex)); -	group = mark->group; +	spin_lock(&mark->lock);  	/* something else already called this function on this mark */  	if (!(mark->flags & FSNOTIFY_MARK_FLAG_ALIVE)) { @@ -136,11 +132,6 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)  	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE; -	/* 1 from caller and 1 for being on i_list/g_list */ -	BUG_ON(atomic_read(&mark->refcnt) < 2); - -	spin_lock(&group->mark_lock); -  	if (mark->flags & FSNOTIFY_MARK_FLAG_INODE) {  		inode = mark->i.inode;  		fsnotify_destroy_inode_mark(mark); @@ -151,13 +142,22 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)  	list_del_init(&mark->g_list); -	spin_unlock(&group->mark_lock);  	spin_unlock(&mark->lock); +	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) +		iput(inode); +	/* release lock temporarily */ +	mutex_unlock(&group->mark_mutex); +  	spin_lock(&destroy_lock);  	list_add(&mark->destroy_list, &destroy_list);  	spin_unlock(&destroy_lock);  	wake_up(&destroy_waitq); +	/* +	 * We don't necessarily have a ref on mark from caller so the above destroy +	 * may have actually freed it, unless this group provides a 'freeing_mark' +	 * function which must be holding a reference. +	 */  	/*  	 * Some groups like to know that marks are being freed.  This is a @@ -179,16 +179,17 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark)  	 * is just a lazy update (and could be a perf win...)  	 */ -	if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) -		iput(inode); +	atomic_dec(&group->num_marks); -	/* -	 * it's possible that this group tried to destroy itself, but this -	 * this mark was simultaneously being freed by inode.  If that's the -	 * case, we finish freeing the group here. 
-	 */ -	if (unlikely(atomic_dec_and_test(&group->num_marks))) -		fsnotify_final_destroy_group(group); +	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); +} + +void fsnotify_destroy_mark(struct fsnotify_mark *mark, +			   struct fsnotify_group *group) +{ +	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); +	fsnotify_destroy_mark_locked(mark, group); +	mutex_unlock(&group->mark_mutex);  }  void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask) @@ -213,26 +214,26 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas   * These marks may be used for the fsnotify backend to determine which   * event types should be delivered to which group.   */ -int fsnotify_add_mark(struct fsnotify_mark *mark, -		      struct fsnotify_group *group, struct inode *inode, -		      struct vfsmount *mnt, int allow_dups) +int fsnotify_add_mark_locked(struct fsnotify_mark *mark, +			     struct fsnotify_group *group, struct inode *inode, +			     struct vfsmount *mnt, int allow_dups)  {  	int ret = 0;  	BUG_ON(inode && mnt);  	BUG_ON(!inode && !mnt); +	BUG_ON(!mutex_is_locked(&group->mark_mutex));  	/*  	 * LOCKING ORDER!!!! +	 * group->mark_mutex  	 * mark->lock -	 * group->mark_lock  	 * inode->i_lock  	 */  	spin_lock(&mark->lock); -	spin_lock(&group->mark_lock); -  	mark->flags |= FSNOTIFY_MARK_FLAG_ALIVE; +	fsnotify_get_group(group);  	mark->group = group;  	list_add(&mark->g_list, &group->marks_list);  	atomic_inc(&group->num_marks); @@ -250,11 +251,8 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,  		BUG();  	} -	spin_unlock(&group->mark_lock); -  	/* this will pin the object if appropriate */  	fsnotify_set_mark_mask_locked(mark, mark->mask); -  	spin_unlock(&mark->lock);  	if (inode) @@ -264,10 +262,10 @@ int fsnotify_add_mark(struct fsnotify_mark *mark,  err:  	mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;  	list_del_init(&mark->g_list); +	fsnotify_put_group(group);  	mark->group = NULL;  	atomic_dec(&group->num_marks); -	spin_unlock(&group->mark_lock);  	spin_unlock(&mark->lock);  	spin_lock(&destroy_lock); @@ -278,6 +276,16 @@ err:  	return ret;  } +int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group, +		      struct inode *inode, struct vfsmount *mnt, int allow_dups) +{ +	int ret; +	mutex_lock(&group->mark_mutex); +	ret = fsnotify_add_mark_locked(mark, group, inode, mnt, allow_dups); +	mutex_unlock(&group->mark_mutex); +	return ret; +} +  /*   * clear any marks in a group in which mark->flags & flags is true   */ @@ -285,22 +293,16 @@ void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,  					 unsigned int flags)  {  	struct fsnotify_mark *lmark, *mark; -	LIST_HEAD(free_list); -	spin_lock(&group->mark_lock); +	mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);  	list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {  		if (mark->flags & flags) { -			list_add(&mark->free_g_list, &free_list); -			list_del_init(&mark->g_list);  			fsnotify_get_mark(mark); +			fsnotify_destroy_mark_locked(mark, group); +			fsnotify_put_mark(mark);  		}  	} -	spin_unlock(&group->mark_lock); - -	list_for_each_entry_safe(mark, lmark, &free_list, free_g_list) { -		fsnotify_destroy_mark(mark); -		fsnotify_put_mark(mark); -	} +	mutex_unlock(&group->mark_mutex);  }  /* @@ -316,6 +318,8 @@ void fsnotify_duplicate_mark(struct fsnotify_mark *new, struct fsnotify_mark *ol  	assert_spin_locked(&old->lock);  	new->i.inode = old->i.inode;  	new->m.mnt = old->m.mnt; +	if (old->group) +		
fsnotify_get_group(old->group);  	new->group = old->group;  	new->mask = old->mask;  	new->free_mark = old->free_mark; @@ -336,7 +340,7 @@ void fsnotify_init_mark(struct fsnotify_mark *mark,  static int fsnotify_mark_destroy(void *ignored)  {  	struct fsnotify_mark *mark, *next; -	LIST_HEAD(private_destroy_list); +	struct list_head private_destroy_list;  	for (;;) {  		spin_lock(&destroy_lock); diff --git a/fs/notify/notification.c b/fs/notify/notification.c index f39260f8f86..1e58402171a 100644 --- a/fs/notify/notification.c +++ b/fs/notify/notification.c @@ -18,7 +18,7 @@  /*   * Basic idea behind the notification queue: An fsnotify group (like inotify) - * sends the userspace notification about events asyncronously some time after + * sends the userspace notification about events asynchronously some time after   * the event happened.  When inotify gets an event it will need to add that   * event to the group notify queue.  Since a single event might need to be on   * multiple group's notification queues we can't add the event directly to each @@ -43,20 +43,11 @@  #include <linux/slab.h>  #include <linux/spinlock.h> -#include <asm/atomic.h> +#include <linux/atomic.h>  #include <linux/fsnotify_backend.h>  #include "fsnotify.h" -static struct kmem_cache *fsnotify_event_cachep; -static struct kmem_cache *fsnotify_event_holder_cachep; -/* - * This is a magic event we send when the q is too full.  Since it doesn't - * hold real event information we just keep one system wide and use it any time - * it is needed.  It's refcnt is set 1 at kernel init time and will never - * get set to 0 so it will never get 'freed' - */ -static struct fsnotify_event *q_overflow_event;  static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);  /** @@ -76,185 +67,82 @@ bool fsnotify_notify_queue_is_empty(struct fsnotify_group *group)  	return list_empty(&group->notification_list) ? 
true : false;  } -void fsnotify_get_event(struct fsnotify_event *event) +void fsnotify_destroy_event(struct fsnotify_group *group, +			    struct fsnotify_event *event)  { -	atomic_inc(&event->refcnt); -} - -void fsnotify_put_event(struct fsnotify_event *event) -{ -	if (!event) +	/* Overflow events are per-group and we don't want to free them */ +	if (!event || event->mask == FS_Q_OVERFLOW)  		return; -	if (atomic_dec_and_test(&event->refcnt)) { -		pr_debug("%s: event=%p\n", __func__, event); - -		if (event->data_type == FSNOTIFY_EVENT_PATH) -			path_put(&event->path); - -		BUG_ON(!list_empty(&event->private_data_list)); - -		kfree(event->file_name); -		put_pid(event->tgid); -		kmem_cache_free(fsnotify_event_cachep, event); -	} -} - -struct fsnotify_event_holder *fsnotify_alloc_event_holder(void) -{ -	return kmem_cache_alloc(fsnotify_event_holder_cachep, GFP_KERNEL); -} - -void fsnotify_destroy_event_holder(struct fsnotify_event_holder *holder) -{ -	if (holder) -		kmem_cache_free(fsnotify_event_holder_cachep, holder); -} - -/* - * Find the private data that the group previously attached to this event when - * the group added the event to the notification queue (fsnotify_add_notify_event) - */ -struct fsnotify_event_private_data *fsnotify_remove_priv_from_event(struct fsnotify_group *group, struct fsnotify_event *event) -{ -	struct fsnotify_event_private_data *lpriv; -	struct fsnotify_event_private_data *priv = NULL; - -	assert_spin_locked(&event->lock); - -	list_for_each_entry(lpriv, &event->private_data_list, event_list) { -		if (lpriv->group == group) { -			priv = lpriv; -			list_del(&priv->event_list); -			break; -		} -	} -	return priv; +	group->ops->free_event(event);  }  /*   * Add an event to the group notification queue.  The group can later pull this - * event off the queue to deal with.  If the event is successfully added to the - * group's notification queue, a reference is taken on event. + * event off the queue to deal with.  The function returns 0 if the event was + * added to the queue, 1 if the event was merged with some other queued event, + * 2 if the queue of events has overflown.   */ -struct fsnotify_event *fsnotify_add_notify_event(struct fsnotify_group *group, struct fsnotify_event *event, -						 struct fsnotify_event_private_data *priv, -						 struct fsnotify_event *(*merge)(struct list_head *, -										 struct fsnotify_event *)) +int fsnotify_add_notify_event(struct fsnotify_group *group, +			      struct fsnotify_event *event, +			      int (*merge)(struct list_head *, +					   struct fsnotify_event *))  { -	struct fsnotify_event *return_event = NULL; -	struct fsnotify_event_holder *holder = NULL; +	int ret = 0;  	struct list_head *list = &group->notification_list; -	pr_debug("%s: group=%p event=%p priv=%p\n", __func__, group, event, priv); - -	/* -	 * There is one fsnotify_event_holder embedded inside each fsnotify_event. -	 * Check if we expect to be able to use that holder.  If not alloc a new -	 * holder. -	 * For the overflow event it's possible that something will use the in -	 * event holder before we get the lock so we may need to jump back and -	 * alloc a new holder, this can't happen for most events... 
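
fsnotify_add_notify_event() now reports what happened to the caller's event instead of returning a merged event pointer: 0 means queued, 1 merged into the tail, 2 queue overflow. The caller-side pattern (the same one inotify_handle_event() adopts earlier in this diff), reduced to a sketch:

#include <linux/fsnotify_backend.h>

/* With no merge callback, a non-zero return can only mean overflow:
 * the group's preallocated overflow event was queued instead, and the
 * caller must free its own, now-unused event. */
static void example_queue(struct fsnotify_group *group,
			  struct fsnotify_event *event)
{
	if (fsnotify_add_notify_event(group, event, NULL))
		fsnotify_destroy_event(group, event);
}
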
-	 */ -	if (!list_empty(&event->holder.event_list)) { -alloc_holder: -		holder = fsnotify_alloc_event_holder(); -		if (!holder) -			return ERR_PTR(-ENOMEM); -	} +	pr_debug("%s: group=%p event=%p\n", __func__, group, event);  	mutex_lock(&group->notification_mutex);  	if (group->q_len >= group->max_events) { -		event = q_overflow_event; - -		/* -		 * we need to return the overflow event -		 * which means we need a ref -		 */ -		fsnotify_get_event(event); -		return_event = event; - -		/* sorry, no private data on the overflow event */ -		priv = NULL; -	} - -	if (!list_empty(list) && merge) { -		struct fsnotify_event *tmp; - -		tmp = merge(list, event); -		if (tmp) { +		ret = 2; +		/* Queue overflow event only if it isn't already queued */ +		if (!list_empty(&group->overflow_event->list)) {  			mutex_unlock(&group->notification_mutex); - -			if (return_event) -				fsnotify_put_event(return_event); -			if (holder != &event->holder) -				fsnotify_destroy_event_holder(holder); -			return tmp; +			return ret;  		} +		event = group->overflow_event; +		goto queue;  	} -	spin_lock(&event->lock); - -	if (list_empty(&event->holder.event_list)) { -		if (unlikely(holder)) -			fsnotify_destroy_event_holder(holder); -		holder = &event->holder; -	} else if (unlikely(!holder)) { -		/* between the time we checked above and got the lock the in -		 * event holder was used, go back and get a new one */ -		spin_unlock(&event->lock); -		mutex_unlock(&group->notification_mutex); - -		if (return_event) { -			fsnotify_put_event(return_event); -			return_event = NULL; +	if (!list_empty(list) && merge) { +		ret = merge(list, event); +		if (ret) { +			mutex_unlock(&group->notification_mutex); +			return ret;  		} - -		goto alloc_holder;  	} +queue:  	group->q_len++; -	holder->event = event; - -	fsnotify_get_event(event); -	list_add_tail(&holder->event_list, list); -	if (priv) -		list_add_tail(&priv->event_list, &event->private_data_list); -	spin_unlock(&event->lock); +	list_add_tail(&event->list, list);  	mutex_unlock(&group->notification_mutex);  	wake_up(&group->notification_waitq); -	return return_event; +	kill_fasync(&group->fsn_fa, SIGIO, POLL_IN); +	return ret;  }  /* - * Remove and return the first event from the notification list.  There is a - * reference held on this event since it was on the list.  It is the responsibility - * of the caller to drop this reference. + * Remove and return the first event from the notification list.  
It is the
+ * responsibility of the caller to destroy the obtained event.
  */
 struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
-	struct fsnotify_event_holder *holder;
 
 	BUG_ON(!mutex_is_locked(&group->notification_mutex));
 
 	pr_debug("%s: group=%p\n", __func__, group);
 
-	holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
-
-	event = holder->event;
-
-	spin_lock(&event->lock);
-	holder->event = NULL;
-	list_del_init(&holder->event_list);
-	spin_unlock(&event->lock);
-
-	/* event == holder means we are referenced through the in event holder */
-	if (holder != &event->holder)
-		fsnotify_destroy_event_holder(holder);
-
+	event = list_first_entry(&group->notification_list,
+				 struct fsnotify_event, list);
+	/*
+	 * We need to init the list head for the overflow event case so that
+	 * the check in fsnotify_add_notify_event() works.
+	 */
+	list_del_init(&event->list);
 	group->q_len--;
 
 	return event;
@@ -265,15 +153,10 @@ struct fsnotify_event *fsnotify_remove_notify_event(struct fsnotify_group *group
  */
 struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
 {
-	struct fsnotify_event *event;
-	struct fsnotify_event_holder *holder;
-
 	BUG_ON(!mutex_is_locked(&group->notification_mutex));
 
-	holder = list_first_entry(&group->notification_list, struct fsnotify_event_holder, event_list);
-	event = holder->event;
-
-	return event;
+	return list_first_entry(&group->notification_list,
+				struct fsnotify_event, list);
 }
 
 /*
@@ -283,182 +166,31 @@ struct fsnotify_event *fsnotify_peek_notify_event(struct fsnotify_group *group)
 void fsnotify_flush_notify(struct fsnotify_group *group)
 {
 	struct fsnotify_event *event;
-	struct fsnotify_event_private_data *priv;
 
 	mutex_lock(&group->notification_mutex);
 	while (!fsnotify_notify_queue_is_empty(group)) {
 		event = fsnotify_remove_notify_event(group);
-		/* if they don't implement free_event_priv they better not have attached any */
-		if (group->ops->free_event_priv) {
-			spin_lock(&event->lock);
-			priv = fsnotify_remove_priv_from_event(group, event);
-			spin_unlock(&event->lock);
-			if (priv)
-				group->ops->free_event_priv(priv);
-		}
-		fsnotify_put_event(event); /* matches fsnotify_add_notify_event */
+		fsnotify_destroy_event(group, event);
 	}
 	mutex_unlock(&group->notification_mutex);
 }
 
-static void initialize_event(struct fsnotify_event *event)
-{
-	INIT_LIST_HEAD(&event->holder.event_list);
-	atomic_set(&event->refcnt, 1);
-
-	spin_lock_init(&event->lock);
-
-	INIT_LIST_HEAD(&event->private_data_list);
-}
-
-/*
- * Caller damn well better be holding whatever mutex is protecting the
- * old_holder->event_list and the new_event must be a clean event which
- * cannot be found anywhere else in the kernel.
- */
-int fsnotify_replace_event(struct fsnotify_event_holder *old_holder,
-			   struct fsnotify_event *new_event)
-{
-	struct fsnotify_event *old_event = old_holder->event;
-	struct fsnotify_event_holder *new_holder = &new_event->holder;
-
-	enum event_spinlock_class {
-		SPINLOCK_OLD,
-		SPINLOCK_NEW,
-	};
-
-	pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, new_event);
-
-	/*
-	 * if the new_event's embedded holder is in use someone
-	 * screwed up and didn't give us a clean new event.
-	 */ -	BUG_ON(!list_empty(&new_holder->event_list)); - -	spin_lock_nested(&old_event->lock, SPINLOCK_OLD); -	spin_lock_nested(&new_event->lock, SPINLOCK_NEW); - -	new_holder->event = new_event; -	list_replace_init(&old_holder->event_list, &new_holder->event_list); - -	spin_unlock(&new_event->lock); -	spin_unlock(&old_event->lock); - -	/* event == holder means we are referenced through the in event holder */ -	if (old_holder != &old_event->holder) -		fsnotify_destroy_event_holder(old_holder); - -	fsnotify_get_event(new_event); /* on the list take reference */ -	fsnotify_put_event(old_event); /* off the list, drop reference */ - -	return 0; -} - -struct fsnotify_event *fsnotify_clone_event(struct fsnotify_event *old_event) -{ -	struct fsnotify_event *event; - -	event = kmem_cache_alloc(fsnotify_event_cachep, GFP_KERNEL); -	if (!event) -		return NULL; - -	pr_debug("%s: old_event=%p new_event=%p\n", __func__, old_event, event); - -	memcpy(event, old_event, sizeof(*event)); -	initialize_event(event); - -	if (event->name_len) { -		event->file_name = kstrdup(old_event->file_name, GFP_KERNEL); -		if (!event->file_name) { -			kmem_cache_free(fsnotify_event_cachep, event); -			return NULL; -		} -	} -	event->tgid = get_pid(old_event->tgid); -	if (event->data_type == FSNOTIFY_EVENT_PATH) -		path_get(&event->path); - -	return event; -} -  /*   * fsnotify_create_event - Allocate a new event which will be sent to each   * group's handle_event function if the group was interested in this   * particular event.   * - * @to_tell the inode which is supposed to receive the event (sometimes a + * @inode the inode which is supposed to receive the event (sometimes a   *	parent of the inode to which the event happened.   * @mask what actually happened.   * @data pointer to the object which was actually affected   * @data_type flag indication if the data is a file, path, inode, nothing...   
* @name the filename, if available   */ -struct fsnotify_event *fsnotify_create_event(struct inode *to_tell, __u32 mask, void *data, -					     int data_type, const unsigned char *name, -					     u32 cookie, gfp_t gfp) +void fsnotify_init_event(struct fsnotify_event *event, struct inode *inode, +			 u32 mask)  { -	struct fsnotify_event *event; - -	event = kmem_cache_zalloc(fsnotify_event_cachep, gfp); -	if (!event) -		return NULL; - -	pr_debug("%s: event=%p to_tell=%p mask=%x data=%p data_type=%d\n", -		 __func__, event, to_tell, mask, data, data_type); - -	initialize_event(event); - -	if (name) { -		event->file_name = kstrdup(name, gfp); -		if (!event->file_name) { -			kmem_cache_free(fsnotify_event_cachep, event); -			return NULL; -		} -		event->name_len = strlen(event->file_name); -	} - -	event->tgid = get_pid(task_tgid(current)); -	event->sync_cookie = cookie; -	event->to_tell = to_tell; -	event->data_type = data_type; - -	switch (data_type) { -	case FSNOTIFY_EVENT_PATH: { -		struct path *path = data; -		event->path.dentry = path->dentry; -		event->path.mnt = path->mnt; -		path_get(&event->path); -		break; -	} -	case FSNOTIFY_EVENT_INODE: -		event->inode = data; -		break; -	case FSNOTIFY_EVENT_NONE: -		event->inode = NULL; -		event->path.dentry = NULL; -		event->path.mnt = NULL; -		break; -	default: -		BUG(); -	} - +	INIT_LIST_HEAD(&event->list); +	event->inode = inode;  	event->mask = mask; - -	return event;  } - -__init int fsnotify_notification_init(void) -{ -	fsnotify_event_cachep = KMEM_CACHE(fsnotify_event, SLAB_PANIC); -	fsnotify_event_holder_cachep = KMEM_CACHE(fsnotify_event_holder, SLAB_PANIC); - -	q_overflow_event = fsnotify_create_event(NULL, FS_Q_OVERFLOW, NULL, -						 FSNOTIFY_EVENT_NONE, NULL, 0, -						 GFP_KERNEL); -	if (!q_overflow_event) -		panic("unable to allocate fsnotify q_overflow_event\n"); - -	return 0; -} -subsys_initcall(fsnotify_notification_init); - diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c index 85eebff6d0d..68ca5a8704b 100644 --- a/fs/notify/vfsmount_mark.c +++ b/fs/notify/vfsmount_mark.c @@ -23,21 +23,22 @@  #include <linux/mount.h>  #include <linux/mutex.h>  #include <linux/spinlock.h> -#include <linux/writeback.h> /* for inode_lock */ -#include <asm/atomic.h> +#include <linux/atomic.h>  #include <linux/fsnotify_backend.h>  #include "fsnotify.h" +#include "../mount.h"  void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)  {  	struct fsnotify_mark *mark, *lmark; -	struct hlist_node *pos, *n; +	struct hlist_node *n; +	struct mount *m = real_mount(mnt);  	LIST_HEAD(free_list);  	spin_lock(&mnt->mnt_root->d_lock); -	hlist_for_each_entry_safe(mark, pos, n, &mnt->mnt_fsnotify_marks, m.m_list) { +	hlist_for_each_entry_safe(mark, n, &m->mnt_fsnotify_marks, m.m_list) {  		list_add(&mark->m.free_m_list, &free_list);  		hlist_del_init_rcu(&mark->m.m_list);  		fsnotify_get_mark(mark); @@ -45,8 +46,16 @@ void fsnotify_clear_marks_by_mount(struct vfsmount *mnt)  	spin_unlock(&mnt->mnt_root->d_lock);  	list_for_each_entry_safe(mark, lmark, &free_list, m.free_m_list) { -		fsnotify_destroy_mark(mark); +		struct fsnotify_group *group; + +		spin_lock(&mark->lock); +		fsnotify_get_group(mark->group); +		group = mark->group; +		spin_unlock(&mark->lock); + +		fsnotify_destroy_mark(mark, group);  		fsnotify_put_mark(mark); +		fsnotify_put_group(group);  	}  } @@ -60,15 +69,15 @@ void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)   */  static void fsnotify_recalc_vfsmount_mask_locked(struct vfsmount *mnt)  { +	struct mount 
*m = real_mount(mnt);  	struct fsnotify_mark *mark; -	struct hlist_node *pos;  	__u32 new_mask = 0;  	assert_spin_locked(&mnt->mnt_root->d_lock); -	hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) +	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list)  		new_mask |= mark->mask; -	mnt->mnt_fsnotify_mask = new_mask; +	m->mnt_fsnotify_mask = new_mask;  }  /* @@ -86,8 +95,8 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)  {  	struct vfsmount *mnt = mark->m.mnt; +	BUG_ON(!mutex_is_locked(&mark->group->mark_mutex));  	assert_spin_locked(&mark->lock); -	assert_spin_locked(&mark->group->mark_lock);  	spin_lock(&mnt->mnt_root->d_lock); @@ -102,12 +111,12 @@ void fsnotify_destroy_vfsmount_mark(struct fsnotify_mark *mark)  static struct fsnotify_mark *fsnotify_find_vfsmount_mark_locked(struct fsnotify_group *group,  								struct vfsmount *mnt)  { +	struct mount *m = real_mount(mnt);  	struct fsnotify_mark *mark; -	struct hlist_node *pos;  	assert_spin_locked(&mnt->mnt_root->d_lock); -	hlist_for_each_entry(mark, pos, &mnt->mnt_fsnotify_marks, m.m_list) { +	hlist_for_each_entry(mark, &m->mnt_fsnotify_marks, m.m_list) {  		if (mark->group == group) {  			fsnotify_get_mark(mark);  			return mark; @@ -141,28 +150,28 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,  			       struct fsnotify_group *group, struct vfsmount *mnt,  			       int allow_dups)  { -	struct fsnotify_mark *lmark; -	struct hlist_node *node, *last = NULL; +	struct mount *m = real_mount(mnt); +	struct fsnotify_mark *lmark, *last = NULL;  	int ret = 0;  	mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT; +	BUG_ON(!mutex_is_locked(&group->mark_mutex));  	assert_spin_locked(&mark->lock); -	assert_spin_locked(&group->mark_lock);  	spin_lock(&mnt->mnt_root->d_lock);  	mark->m.mnt = mnt;  	/* is mark the first mark? */ -	if (hlist_empty(&mnt->mnt_fsnotify_marks)) { -		hlist_add_head_rcu(&mark->m.m_list, &mnt->mnt_fsnotify_marks); +	if (hlist_empty(&m->mnt_fsnotify_marks)) { +		hlist_add_head_rcu(&mark->m.m_list, &m->mnt_fsnotify_marks);  		goto out;  	}  	/* should mark be in the middle of the current list? */ -	hlist_for_each_entry(lmark, node, &mnt->mnt_fsnotify_marks, m.m_list) { -		last = node; +	hlist_for_each_entry(lmark, &m->mnt_fsnotify_marks, m.m_list) { +		last = lmark;  		if ((lmark->group == group) && !allow_dups) {  			ret = -EEXIST; @@ -182,7 +191,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,  	BUG_ON(last == NULL);  	/* mark should be the last entry.  last is the current last entry */ -	hlist_add_after_rcu(last, &mark->m.m_list); +	hlist_add_after_rcu(&last->m.m_list, &mark->m.m_list);  out:  	fsnotify_recalc_vfsmount_mask_locked(mnt);  	spin_unlock(&mnt->mnt_root->d_lock);  | 
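
One mechanical conversion recurs in every file of this diff: hlist_for_each_entry() and hlist_for_each_entry_safe() lost their separate struct hlist_node cursor, and hlist_add_after_rcu() now takes the neighbor's embedded node rather than that cursor. Reduced to a self-contained sketch (example_mark and example_recalc are illustrative names):

#include <linux/list.h>
#include <linux/types.h>

struct example_mark {
	struct hlist_node list;
	__u32 mask;
};

/* Post-conversion style: the loop variable is the containing object
 * itself, as in fsnotify_recalc_inode_mask_locked() above; no separate
 * hlist_node cursor is declared. */
static __u32 example_recalc(struct hlist_head *head)
{
	struct example_mark *m;
	__u32 mask = 0;

	hlist_for_each_entry(m, head, list)
		mask |= m->mask;
	return mask;
}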