Diffstat (limited to 'fs/namespace.c')
-rw-r--r--   fs/namespace.c   581
1 file changed, 324 insertions, 257 deletions
diff --git a/fs/namespace.c b/fs/namespace.c index da5c4948343..182bc41cd88 100644 --- a/fs/namespace.c +++ b/fs/namespace.c @@ -23,23 +23,46 @@  #include <linux/uaccess.h>  #include <linux/proc_ns.h>  #include <linux/magic.h> +#include <linux/bootmem.h>  #include "pnode.h"  #include "internal.h" -#define HASH_SHIFT ilog2(PAGE_SIZE / sizeof(struct list_head)) -#define HASH_SIZE (1UL << HASH_SHIFT) +static unsigned int m_hash_mask __read_mostly; +static unsigned int m_hash_shift __read_mostly; +static unsigned int mp_hash_mask __read_mostly; +static unsigned int mp_hash_shift __read_mostly; -static int event; +static __initdata unsigned long mhash_entries; +static int __init set_mhash_entries(char *str) +{ +	if (!str) +		return 0; +	mhash_entries = simple_strtoul(str, &str, 0); +	return 1; +} +__setup("mhash_entries=", set_mhash_entries); + +static __initdata unsigned long mphash_entries; +static int __init set_mphash_entries(char *str) +{ +	if (!str) +		return 0; +	mphash_entries = simple_strtoul(str, &str, 0); +	return 1; +} +__setup("mphash_entries=", set_mphash_entries); + +static u64 event;  static DEFINE_IDA(mnt_id_ida);  static DEFINE_IDA(mnt_group_ida);  static DEFINE_SPINLOCK(mnt_id_lock);  static int mnt_id_start = 0;  static int mnt_group_start = 1; -static struct list_head *mount_hashtable __read_mostly; -static struct list_head *mountpoint_hashtable __read_mostly; +static struct hlist_head *mount_hashtable __read_mostly; +static struct hlist_head *mountpoint_hashtable __read_mostly;  static struct kmem_cache *mnt_cache __read_mostly; -static struct rw_semaphore namespace_sem; +static DECLARE_RWSEM(namespace_sem);  /* /sys/fs */  struct kobject *fs_kobj; @@ -53,17 +76,22 @@ EXPORT_SYMBOL_GPL(fs_kobj);   * It should be taken for write in all cases where the vfsmount   * tree or hash is modified or when a vfsmount structure is modified.   
*/ -DEFINE_BRLOCK(vfsmount_lock); +__cacheline_aligned_in_smp DEFINE_SEQLOCK(mount_lock); -static inline unsigned long hash(struct vfsmount *mnt, struct dentry *dentry) +static inline struct hlist_head *m_hash(struct vfsmount *mnt, struct dentry *dentry)  {  	unsigned long tmp = ((unsigned long)mnt / L1_CACHE_BYTES);  	tmp += ((unsigned long)dentry / L1_CACHE_BYTES); -	tmp = tmp + (tmp >> HASH_SHIFT); -	return tmp & (HASH_SIZE - 1); +	tmp = tmp + (tmp >> m_hash_shift); +	return &mount_hashtable[tmp & m_hash_mask];  } -#define MNT_WRITER_UNDERFLOW_LIMIT -(1<<16) +static inline struct hlist_head *mp_hash(struct dentry *dentry) +{ +	unsigned long tmp = ((unsigned long)dentry / L1_CACHE_BYTES); +	tmp = tmp + (tmp >> mp_hash_shift); +	return &mountpoint_hashtable[tmp & mp_hash_mask]; +}  /*   * allocation is serialized by namespace_sem, but we need the spinlock to @@ -189,7 +217,7 @@ static struct mount *alloc_vfsmnt(const char *name)  		mnt->mnt_writers = 0;  #endif -		INIT_LIST_HEAD(&mnt->mnt_hash); +		INIT_HLIST_NODE(&mnt->mnt_hash);  		INIT_LIST_HEAD(&mnt->mnt_child);  		INIT_LIST_HEAD(&mnt->mnt_mounts);  		INIT_LIST_HEAD(&mnt->mnt_list); @@ -386,9 +414,7 @@ EXPORT_SYMBOL_GPL(mnt_clone_write);   */  int __mnt_want_write_file(struct file *file)  { -	struct inode *inode = file_inode(file); - -	if (!(file->f_mode & FMODE_WRITE) || special_file(inode->i_mode)) +	if (!(file->f_mode & FMODE_WRITER))  		return __mnt_want_write(file->f_path.mnt);  	else  		return mnt_clone_write(file->f_path.mnt); @@ -458,7 +484,7 @@ static int mnt_make_readonly(struct mount *mnt)  {  	int ret = 0; -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;  	/*  	 * After storing MNT_WRITE_HOLD, we'll read the counters. This store @@ -492,15 +518,15 @@ static int mnt_make_readonly(struct mount *mnt)  	 */  	smp_wmb();  	mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	return ret;  }  static void __mnt_unmake_readonly(struct mount *mnt)  { -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	mnt->mnt.mnt_flags &= ~MNT_READONLY; -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  }  int sb_prepare_remount_readonly(struct super_block *sb) @@ -512,7 +538,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)  	if (atomic_long_read(&sb->s_remove_count))  		return -EBUSY; -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {  		if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {  			mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; @@ -534,7 +560,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)  		if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)  			mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;  	} -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	return err;  } @@ -542,37 +568,71 @@ int sb_prepare_remount_readonly(struct super_block *sb)  static void free_vfsmnt(struct mount *mnt)  {  	kfree(mnt->mnt_devname); -	mnt_free_id(mnt);  #ifdef CONFIG_SMP  	free_percpu(mnt->mnt_pcp);  #endif  	kmem_cache_free(mnt_cache, mnt);  } +static void delayed_free_vfsmnt(struct rcu_head *head) +{ +	free_vfsmnt(container_of(head, struct mount, mnt_rcu)); +} + +/* call under rcu_read_lock */ +bool legitimize_mnt(struct vfsmount *bastard, unsigned seq) +{ +	struct mount *mnt; +	if (read_seqretry(&mount_lock, seq)) +		return false; +	if (bastard == NULL) +		return true; +	mnt = real_mount(bastard); +	mnt_add_count(mnt, 1); +	if (likely(!read_seqretry(&mount_lock, seq))) +		return true; +	if 
(bastard->mnt_flags & MNT_SYNC_UMOUNT) { +		mnt_add_count(mnt, -1); +		return false; +	} +	rcu_read_unlock(); +	mntput(bastard); +	rcu_read_lock(); +	return false; +} +  /* - * find the first or last mount at @dentry on vfsmount @mnt depending on - * @dir. If @dir is set return the first mount else return the last mount. - * vfsmount_lock must be held for read or write. + * find the first mount at @dentry on vfsmount @mnt. + * call under rcu_read_lock()   */ -struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry, -			      int dir) +struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry)  { -	struct list_head *head = mount_hashtable + hash(mnt, dentry); -	struct list_head *tmp = head; -	struct mount *p, *found = NULL; +	struct hlist_head *head = m_hash(mnt, dentry); +	struct mount *p; -	for (;;) { -		tmp = dir ? tmp->next : tmp->prev; -		p = NULL; -		if (tmp == head) -			break; -		p = list_entry(tmp, struct mount, mnt_hash); -		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) { -			found = p; +	hlist_for_each_entry_rcu(p, head, mnt_hash) +		if (&p->mnt_parent->mnt == mnt && p->mnt_mountpoint == dentry) +			return p; +	return NULL; +} + +/* + * find the last mount at @dentry on vfsmount @mnt. + * mount_lock must be held. + */ +struct mount *__lookup_mnt_last(struct vfsmount *mnt, struct dentry *dentry) +{ +	struct mount *p, *res; +	res = p = __lookup_mnt(mnt, dentry); +	if (!p) +		goto out; +	hlist_for_each_entry_continue(p, mnt_hash) { +		if (&p->mnt_parent->mnt != mnt || p->mnt_mountpoint != dentry)  			break; -		} +		res = p;  	} -	return found; +out: +	return res;  }  /* @@ -594,26 +654,26 @@ struct mount *__lookup_mnt(struct vfsmount *mnt, struct dentry *dentry,  struct vfsmount *lookup_mnt(struct path *path)  {  	struct mount *child_mnt; +	struct vfsmount *m; +	unsigned seq; -	br_read_lock(&vfsmount_lock); -	child_mnt = __lookup_mnt(path->mnt, path->dentry, 1); -	if (child_mnt) { -		mnt_add_count(child_mnt, 1); -		br_read_unlock(&vfsmount_lock); -		return &child_mnt->mnt; -	} else { -		br_read_unlock(&vfsmount_lock); -		return NULL; -	} +	rcu_read_lock(); +	do { +		seq = read_seqbegin(&mount_lock); +		child_mnt = __lookup_mnt(path->mnt, path->dentry); +		m = child_mnt ? 
&child_mnt->mnt : NULL; +	} while (!legitimize_mnt(m, seq)); +	rcu_read_unlock(); +	return m;  }  static struct mountpoint *new_mountpoint(struct dentry *dentry)  { -	struct list_head *chain = mountpoint_hashtable + hash(NULL, dentry); +	struct hlist_head *chain = mp_hash(dentry);  	struct mountpoint *mp;  	int ret; -	list_for_each_entry(mp, chain, m_hash) { +	hlist_for_each_entry(mp, chain, m_hash) {  		if (mp->m_dentry == dentry) {  			/* might be worth a WARN_ON() */  			if (d_unlinked(dentry)) @@ -635,7 +695,7 @@ static struct mountpoint *new_mountpoint(struct dentry *dentry)  	mp->m_dentry = dentry;  	mp->m_count = 1; -	list_add(&mp->m_hash, chain); +	hlist_add_head(&mp->m_hash, chain);  	return mp;  } @@ -646,7 +706,7 @@ static void put_mountpoint(struct mountpoint *mp)  		spin_lock(&dentry->d_lock);  		dentry->d_flags &= ~DCACHE_MOUNTED;  		spin_unlock(&dentry->d_lock); -		list_del(&mp->m_hash); +		hlist_del(&mp->m_hash);  		kfree(mp);  	}  } @@ -688,7 +748,7 @@ static void detach_mnt(struct mount *mnt, struct path *old_path)  	mnt->mnt_parent = mnt;  	mnt->mnt_mountpoint = mnt->mnt.mnt_root;  	list_del_init(&mnt->mnt_child); -	list_del_init(&mnt->mnt_hash); +	hlist_del_init_rcu(&mnt->mnt_hash);  	put_mountpoint(mnt->mnt_mp);  	mnt->mnt_mp = NULL;  } @@ -715,15 +775,14 @@ static void attach_mnt(struct mount *mnt,  			struct mountpoint *mp)  {  	mnt_set_mountpoint(parent, mp, mnt); -	list_add_tail(&mnt->mnt_hash, mount_hashtable + -			hash(&parent->mnt, mp->m_dentry)); +	hlist_add_head_rcu(&mnt->mnt_hash, m_hash(&parent->mnt, mp->m_dentry));  	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);  }  /*   * vfsmount lock must be held for write   */ -static void commit_tree(struct mount *mnt) +static void commit_tree(struct mount *mnt, struct mount *shadows)  {  	struct mount *parent = mnt->mnt_parent;  	struct mount *m; @@ -738,8 +797,11 @@ static void commit_tree(struct mount *mnt)  	list_splice(&head, n->list.prev); -	list_add_tail(&mnt->mnt_hash, mount_hashtable + -				hash(&parent->mnt, mnt->mnt_mountpoint)); +	if (shadows) +		hlist_add_after_rcu(&shadows->mnt_hash, &mnt->mnt_hash); +	else +		hlist_add_head_rcu(&mnt->mnt_hash, +				m_hash(&parent->mnt, mnt->mnt_mountpoint));  	list_add_tail(&mnt->mnt_child, &parent->mnt_mounts);  	touch_mnt_namespace(n);  } @@ -788,6 +850,7 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void  	root = mount_fs(type, flags, name, data);  	if (IS_ERR(root)) { +		mnt_free_id(mnt);  		free_vfsmnt(mnt);  		return ERR_CAST(root);  	} @@ -796,9 +859,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void  	mnt->mnt.mnt_sb = root->d_sb;  	mnt->mnt_mountpoint = mnt->mnt.mnt_root;  	mnt->mnt_parent = mnt; -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts); -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	return &mnt->mnt;  }  EXPORT_SYMBOL_GPL(vfs_kern_mount); @@ -825,7 +888,7 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,  			goto out_free;  	} -	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~MNT_WRITE_HOLD; +	mnt->mnt.mnt_flags = old->mnt.mnt_flags & ~(MNT_WRITE_HOLD|MNT_MARKED);  	/* Don't allow unprivileged users to change mount flags */  	if ((flag & CL_UNPRIVILEGED) && (mnt->mnt.mnt_flags & MNT_READONLY))  		mnt->mnt.mnt_flags |= MNT_LOCK_READONLY; @@ -839,9 +902,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,  	mnt->mnt.mnt_root = dget(root);  	mnt->mnt_mountpoint = 
mnt->mnt.mnt_root;  	mnt->mnt_parent = mnt; -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	list_add_tail(&mnt->mnt_instance, &sb->s_mounts); -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	if ((flag & CL_SLAVE) ||  	    ((flag & CL_SHARED_TO_SLAVE) && IS_MNT_SHARED(old))) { @@ -868,68 +931,61 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,  	return mnt;   out_free: +	mnt_free_id(mnt);  	free_vfsmnt(mnt);  	return ERR_PTR(err);  } -static inline void mntfree(struct mount *mnt) -{ -	struct vfsmount *m = &mnt->mnt; -	struct super_block *sb = m->mnt_sb; - -	/* -	 * This probably indicates that somebody messed -	 * up a mnt_want/drop_write() pair.  If this -	 * happens, the filesystem was probably unable -	 * to make r/w->r/o transitions. -	 */ -	/* -	 * The locking used to deal with mnt_count decrement provides barriers, -	 * so mnt_get_writers() below is safe. -	 */ -	WARN_ON(mnt_get_writers(mnt)); -	fsnotify_vfsmount_delete(m); -	dput(m->mnt_root); -	free_vfsmnt(mnt); -	deactivate_super(sb); -} -  static void mntput_no_expire(struct mount *mnt)  {  put_again: -#ifdef CONFIG_SMP -	br_read_lock(&vfsmount_lock); -	if (likely(mnt->mnt_ns)) { -		/* shouldn't be the last one */ -		mnt_add_count(mnt, -1); -		br_read_unlock(&vfsmount_lock); +	rcu_read_lock(); +	mnt_add_count(mnt, -1); +	if (likely(mnt->mnt_ns)) { /* shouldn't be the last one */ +		rcu_read_unlock();  		return;  	} -	br_read_unlock(&vfsmount_lock); - -	br_write_lock(&vfsmount_lock); -	mnt_add_count(mnt, -1); +	lock_mount_hash();  	if (mnt_get_count(mnt)) { -		br_write_unlock(&vfsmount_lock); +		rcu_read_unlock(); +		unlock_mount_hash();  		return;  	} -#else -	mnt_add_count(mnt, -1); -	if (likely(mnt_get_count(mnt))) -		return; -	br_write_lock(&vfsmount_lock); -#endif  	if (unlikely(mnt->mnt_pinned)) {  		mnt_add_count(mnt, mnt->mnt_pinned + 1);  		mnt->mnt_pinned = 0; -		br_write_unlock(&vfsmount_lock); +		rcu_read_unlock(); +		unlock_mount_hash();  		acct_auto_close_mnt(&mnt->mnt);  		goto put_again;  	} +	if (unlikely(mnt->mnt.mnt_flags & MNT_DOOMED)) { +		rcu_read_unlock(); +		unlock_mount_hash(); +		return; +	} +	mnt->mnt.mnt_flags |= MNT_DOOMED; +	rcu_read_unlock();  	list_del(&mnt->mnt_instance); -	br_write_unlock(&vfsmount_lock); -	mntfree(mnt); +	unlock_mount_hash(); + +	/* +	 * This probably indicates that somebody messed +	 * up a mnt_want/drop_write() pair.  If this +	 * happens, the filesystem was probably unable +	 * to make r/w->r/o transitions. +	 */ +	/* +	 * The locking used to deal with mnt_count decrement provides barriers, +	 * so mnt_get_writers() below is safe. 
+	 */ +	WARN_ON(mnt_get_writers(mnt)); +	fsnotify_vfsmount_delete(&mnt->mnt); +	dput(mnt->mnt.mnt_root); +	deactivate_super(mnt->mnt.mnt_sb); +	mnt_free_id(mnt); +	call_rcu(&mnt->mnt_rcu, delayed_free_vfsmnt);  }  void mntput(struct vfsmount *mnt) @@ -954,21 +1010,21 @@ EXPORT_SYMBOL(mntget);  void mnt_pin(struct vfsmount *mnt)  { -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	real_mount(mnt)->mnt_pinned++; -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  }  EXPORT_SYMBOL(mnt_pin);  void mnt_unpin(struct vfsmount *m)  {  	struct mount *mnt = real_mount(m); -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	if (mnt->mnt_pinned) {  		mnt_add_count(mnt, 1);  		mnt->mnt_pinned--;  	} -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  }  EXPORT_SYMBOL(mnt_unpin); @@ -1038,14 +1094,29 @@ static void *m_start(struct seq_file *m, loff_t *pos)  	struct proc_mounts *p = proc_mounts(m);  	down_read(&namespace_sem); -	return seq_list_start(&p->ns->list, *pos); +	if (p->cached_event == p->ns->event) { +		void *v = p->cached_mount; +		if (*pos == p->cached_index) +			return v; +		if (*pos == p->cached_index + 1) { +			v = seq_list_next(v, &p->ns->list, &p->cached_index); +			return p->cached_mount = v; +		} +	} + +	p->cached_event = p->ns->event; +	p->cached_mount = seq_list_start(&p->ns->list, *pos); +	p->cached_index = *pos; +	return p->cached_mount;  }  static void *m_next(struct seq_file *m, void *v, loff_t *pos)  {  	struct proc_mounts *p = proc_mounts(m); -	return seq_list_next(v, &p->ns->list, pos); +	p->cached_mount = seq_list_next(v, &p->ns->list, pos); +	p->cached_index = *pos; +	return p->cached_mount;  }  static void m_stop(struct seq_file *m, void *v) @@ -1085,12 +1156,12 @@ int may_umount_tree(struct vfsmount *m)  	BUG_ON(!m);  	/* write lock needed for mnt_get_count */ -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	for (p = mnt; p; p = next_mnt(p, mnt)) {  		actual_refs += mnt_get_count(p);  		minimum_refs += 2;  	} -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	if (actual_refs > minimum_refs)  		return 0; @@ -1117,48 +1188,40 @@ int may_umount(struct vfsmount *mnt)  {  	int ret = 1;  	down_read(&namespace_sem); -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	if (propagate_mount_busy(real_mount(mnt), 2))  		ret = 0; -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	up_read(&namespace_sem);  	return ret;  }  EXPORT_SYMBOL(may_umount); -static LIST_HEAD(unmounted);	/* protected by namespace_sem */ +static HLIST_HEAD(unmounted);	/* protected by namespace_sem */  static void namespace_unlock(void)  {  	struct mount *mnt; -	LIST_HEAD(head); +	struct hlist_head head = unmounted; -	if (likely(list_empty(&unmounted))) { +	if (likely(hlist_empty(&head))) {  		up_write(&namespace_sem);  		return;  	} -	list_splice_init(&unmounted, &head); +	head.first->pprev = &head.first; +	INIT_HLIST_HEAD(&unmounted); +  	up_write(&namespace_sem); -	while (!list_empty(&head)) { -		mnt = list_first_entry(&head, struct mount, mnt_hash); -		list_del_init(&mnt->mnt_hash); -		if (mnt_has_parent(mnt)) { -			struct dentry *dentry; -			struct mount *m; - -			br_write_lock(&vfsmount_lock); -			dentry = mnt->mnt_mountpoint; -			m = mnt->mnt_parent; -			mnt->mnt_mountpoint = mnt->mnt.mnt_root; -			mnt->mnt_parent = mnt; -			m->mnt_ghosts--; -			br_write_unlock(&vfsmount_lock); -			dput(dentry); -			mntput(&m->mnt); -		} +	synchronize_rcu(); + +	while (!hlist_empty(&head)) { +		mnt = hlist_entry(head.first, struct mount, mnt_hash); +		
hlist_del_init(&mnt->mnt_hash); +		if (mnt->mnt_ex_mountpoint.mnt) +			path_put(&mnt->mnt_ex_mountpoint);  		mntput(&mnt->mnt);  	}  } @@ -1169,34 +1232,51 @@ static inline void namespace_lock(void)  }  /* - * vfsmount lock must be held for write + * mount_lock must be held   * namespace_sem must be held for write + * how = 0 => just this tree, don't propagate + * how = 1 => propagate; we know that nobody else has reference to any victims + * how = 2 => lazy umount   */ -void umount_tree(struct mount *mnt, int propagate) +void umount_tree(struct mount *mnt, int how)  { -	LIST_HEAD(tmp_list); +	HLIST_HEAD(tmp_list);  	struct mount *p; +	struct mount *last = NULL; -	for (p = mnt; p; p = next_mnt(p, mnt)) -		list_move(&p->mnt_hash, &tmp_list); +	for (p = mnt; p; p = next_mnt(p, mnt)) { +		hlist_del_init_rcu(&p->mnt_hash); +		hlist_add_head(&p->mnt_hash, &tmp_list); +	} -	if (propagate) +	if (how)  		propagate_umount(&tmp_list); -	list_for_each_entry(p, &tmp_list, mnt_hash) { +	hlist_for_each_entry(p, &tmp_list, mnt_hash) {  		list_del_init(&p->mnt_expire);  		list_del_init(&p->mnt_list);  		__touch_mnt_namespace(p->mnt_ns);  		p->mnt_ns = NULL; +		if (how < 2) +			p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;  		list_del_init(&p->mnt_child);  		if (mnt_has_parent(p)) { -			p->mnt_parent->mnt_ghosts++;  			put_mountpoint(p->mnt_mp); +			/* move the reference to mountpoint into ->mnt_ex_mountpoint */ +			p->mnt_ex_mountpoint.dentry = p->mnt_mountpoint; +			p->mnt_ex_mountpoint.mnt = &p->mnt_parent->mnt; +			p->mnt_mountpoint = p->mnt.mnt_root; +			p->mnt_parent = p;  			p->mnt_mp = NULL;  		}  		change_mnt_propagation(p, MS_PRIVATE); +		last = p; +	} +	if (last) { +		last->mnt_hash.next = unmounted.first; +		unmounted.first = tmp_list.first; +		unmounted.first->pprev = &unmounted.first;  	} -	list_splice(&tmp_list, &unmounted);  }  static void shrink_submounts(struct mount *mnt); @@ -1225,12 +1305,12 @@ static int do_umount(struct mount *mnt, int flags)  		 * probably don't strictly need the lock here if we examined  		 * all race cases, but it's a slowpath.  		 
*/ -		br_write_lock(&vfsmount_lock); +		lock_mount_hash();  		if (mnt_get_count(mnt) != 2) { -			br_write_unlock(&vfsmount_lock); +			unlock_mount_hash();  			return -EBUSY;  		} -		br_write_unlock(&vfsmount_lock); +		unlock_mount_hash();  		if (!xchg(&mnt->mnt_expiry_mark, 1))  			return -EAGAIN; @@ -1272,19 +1352,23 @@ static int do_umount(struct mount *mnt, int flags)  	}  	namespace_lock(); -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	event++; -	if (!(flags & MNT_DETACH)) -		shrink_submounts(mnt); - -	retval = -EBUSY; -	if (flags & MNT_DETACH || !propagate_mount_busy(mnt, 2)) { +	if (flags & MNT_DETACH) {  		if (!list_empty(&mnt->mnt_list)) -			umount_tree(mnt, 1); +			umount_tree(mnt, 2);  		retval = 0; +	} else { +		shrink_submounts(mnt); +		retval = -EBUSY; +		if (!propagate_mount_busy(mnt, 2)) { +			if (!list_empty(&mnt->mnt_list)) +				umount_tree(mnt, 1); +			retval = 0; +		}  	} -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	namespace_unlock();  	return retval;  } @@ -1427,18 +1511,18 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,  			q = clone_mnt(p, p->mnt.mnt_root, flag);  			if (IS_ERR(q))  				goto out; -			br_write_lock(&vfsmount_lock); +			lock_mount_hash();  			list_add_tail(&q->mnt_list, &res->mnt_list);  			attach_mnt(q, parent, p->mnt_mp); -			br_write_unlock(&vfsmount_lock); +			unlock_mount_hash();  		}  	}  	return res;  out:  	if (res) { -		br_write_lock(&vfsmount_lock); +		lock_mount_hash();  		umount_tree(res, 0); -		br_write_unlock(&vfsmount_lock); +		unlock_mount_hash();  	}  	return q;  } @@ -1460,9 +1544,9 @@ struct vfsmount *collect_mounts(struct path *path)  void drop_collected_mounts(struct vfsmount *mnt)  {  	namespace_lock(); -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	umount_tree(real_mount(mnt), 0); -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	namespace_unlock();  } @@ -1576,24 +1660,23 @@ static int attach_recursive_mnt(struct mount *source_mnt,  			struct mountpoint *dest_mp,  			struct path *parent_path)  { -	LIST_HEAD(tree_list); +	HLIST_HEAD(tree_list);  	struct mount *child, *p; +	struct hlist_node *n;  	int err;  	if (IS_MNT_SHARED(dest_mnt)) {  		err = invent_group_ids(source_mnt, true);  		if (err)  			goto out; -	} -	err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); -	if (err) -		goto out_cleanup_ids; - -	br_write_lock(&vfsmount_lock); - -	if (IS_MNT_SHARED(dest_mnt)) { +		err = propagate_mnt(dest_mnt, dest_mp, source_mnt, &tree_list); +		lock_mount_hash(); +		if (err) +			goto out_cleanup_ids;  		for (p = source_mnt; p; p = next_mnt(p, source_mnt))  			set_mnt_shared(p); +	} else { +		lock_mount_hash();  	}  	if (parent_path) {  		detach_mnt(source_mnt, parent_path); @@ -1601,20 +1684,27 @@ static int attach_recursive_mnt(struct mount *source_mnt,  		touch_mnt_namespace(source_mnt->mnt_ns);  	} else {  		mnt_set_mountpoint(dest_mnt, dest_mp, source_mnt); -		commit_tree(source_mnt); +		commit_tree(source_mnt, NULL);  	} -	list_for_each_entry_safe(child, p, &tree_list, mnt_hash) { -		list_del_init(&child->mnt_hash); -		commit_tree(child); +	hlist_for_each_entry_safe(child, n, &tree_list, mnt_hash) { +		struct mount *q; +		hlist_del_init(&child->mnt_hash); +		q = __lookup_mnt_last(&child->mnt_parent->mnt, +				      child->mnt_mountpoint); +		commit_tree(child, q);  	} -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	return 0;   out_cleanup_ids: -	if (IS_MNT_SHARED(dest_mnt)) -		cleanup_group_ids(source_mnt, NULL); +	while 
(!hlist_empty(&tree_list)) { +		child = hlist_entry(tree_list.first, struct mount, mnt_hash); +		umount_tree(child, 0); +	} +	unlock_mount_hash(); +	cleanup_group_ids(source_mnt, NULL);   out:  	return err;  } @@ -1710,10 +1800,10 @@ static int do_change_type(struct path *path, int flag)  			goto out_unlock;  	} -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))  		change_mnt_propagation(m, type); -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();   out_unlock:  	namespace_unlock(); @@ -1785,9 +1875,9 @@ static int do_loopback(struct path *path, const char *old_name,  	err = graft_tree(mnt, parent, mp);  	if (err) { -		br_write_lock(&vfsmount_lock); +		lock_mount_hash();  		umount_tree(mnt, 0); -		br_write_unlock(&vfsmount_lock); +		unlock_mount_hash();  	}  out2:  	unlock_mount(mp); @@ -1846,17 +1936,13 @@ static int do_remount(struct path *path, int flags, int mnt_flags,  	else  		err = do_remount_sb(sb, flags, data, 0);  	if (!err) { -		br_write_lock(&vfsmount_lock); +		lock_mount_hash();  		mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;  		mnt->mnt.mnt_flags = mnt_flags; -		br_write_unlock(&vfsmount_lock); -	} -	up_write(&sb->s_umount); -	if (!err) { -		br_write_lock(&vfsmount_lock);  		touch_mnt_namespace(mnt->mnt_ns); -		br_write_unlock(&vfsmount_lock); +		unlock_mount_hash();  	} +	up_write(&sb->s_umount);  	return err;  } @@ -1972,7 +2058,7 @@ static int do_add_mount(struct mount *newmnt, struct path *path, int mnt_flags)  	struct mount *parent;  	int err; -	mnt_flags &= ~(MNT_SHARED | MNT_WRITE_HOLD | MNT_INTERNAL); +	mnt_flags &= ~MNT_INTERNAL_FLAGS;  	mp = lock_mount(path);  	if (IS_ERR(mp)) @@ -2077,9 +2163,7 @@ fail:  	/* remove m from any expiration list it may be on */  	if (!list_empty(&mnt->mnt_expire)) {  		namespace_lock(); -		br_write_lock(&vfsmount_lock);  		list_del_init(&mnt->mnt_expire); -		br_write_unlock(&vfsmount_lock);  		namespace_unlock();  	}  	mntput(m); @@ -2095,11 +2179,9 @@ fail:  void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)  {  	namespace_lock(); -	br_write_lock(&vfsmount_lock);  	list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); -	br_write_unlock(&vfsmount_lock);  	namespace_unlock();  }  EXPORT_SYMBOL(mnt_set_expiry); @@ -2118,7 +2200,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)  		return;  	namespace_lock(); -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	/* extract from the expiration list every vfsmount that matches the  	 * following criteria: @@ -2137,7 +2219,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)  		touch_mnt_namespace(mnt->mnt_ns);  		umount_tree(mnt, 1);  	} -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	namespace_unlock();  } @@ -2193,7 +2275,7 @@ resume:   * process a list of expirable mountpoints with the intent of discarding any   * submounts of a specific parent mountpoint   * - * vfsmount_lock must be held for write + * mount_lock must be held for write   */  static void shrink_submounts(struct mount *mnt)  { @@ -2414,20 +2496,25 @@ static struct mnt_namespace *alloc_mnt_ns(struct user_namespace *user_ns)  	return new_ns;  } -/* - * Allocate a new namespace structure and populate it with contents - * copied from the namespace of the passed in task structure. 
- */ -static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns, -		struct user_namespace *user_ns, struct fs_struct *fs) +struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, +		struct user_namespace *user_ns, struct fs_struct *new_fs)  {  	struct mnt_namespace *new_ns;  	struct vfsmount *rootmnt = NULL, *pwdmnt = NULL;  	struct mount *p, *q; -	struct mount *old = mnt_ns->root; +	struct mount *old;  	struct mount *new;  	int copy_flags; +	BUG_ON(!ns); + +	if (likely(!(flags & CLONE_NEWNS))) { +		get_mnt_ns(ns); +		return ns; +	} + +	old = ns->root; +  	new_ns = alloc_mnt_ns(user_ns);  	if (IS_ERR(new_ns))  		return new_ns; @@ -2435,7 +2522,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,  	namespace_lock();  	/* First pass: copy the tree topology */  	copy_flags = CL_COPY_UNBINDABLE | CL_EXPIRE; -	if (user_ns != mnt_ns->user_ns) +	if (user_ns != ns->user_ns)  		copy_flags |= CL_SHARED_TO_SLAVE | CL_UNPRIVILEGED;  	new = copy_tree(old, old->mnt.mnt_root, copy_flags);  	if (IS_ERR(new)) { @@ -2444,9 +2531,7 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,  		return ERR_CAST(new);  	}  	new_ns->root = new; -	br_write_lock(&vfsmount_lock);  	list_add_tail(&new_ns->list, &new->mnt_list); -	br_write_unlock(&vfsmount_lock);  	/*  	 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts @@ -2457,13 +2542,13 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,  	q = new;  	while (p) {  		q->mnt_ns = new_ns; -		if (fs) { -			if (&p->mnt == fs->root.mnt) { -				fs->root.mnt = mntget(&q->mnt); +		if (new_fs) { +			if (&p->mnt == new_fs->root.mnt) { +				new_fs->root.mnt = mntget(&q->mnt);  				rootmnt = &p->mnt;  			} -			if (&p->mnt == fs->pwd.mnt) { -				fs->pwd.mnt = mntget(&q->mnt); +			if (&p->mnt == new_fs->pwd.mnt) { +				new_fs->pwd.mnt = mntget(&q->mnt);  				pwdmnt = &p->mnt;  			}  		} @@ -2484,23 +2569,6 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,  	return new_ns;  } -struct mnt_namespace *copy_mnt_ns(unsigned long flags, struct mnt_namespace *ns, -		struct user_namespace *user_ns, struct fs_struct *new_fs) -{ -	struct mnt_namespace *new_ns; - -	BUG_ON(!ns); -	get_mnt_ns(ns); - -	if (!(flags & CLONE_NEWNS)) -		return ns; - -	new_ns = dup_mnt_ns(ns, user_ns, new_fs); - -	put_mnt_ns(ns); -	return new_ns; -} -  /**   * create_mnt_ns - creates a private namespace and adds a root filesystem   * @mnt: pointer to the new root filesystem mountpoint @@ -2593,7 +2661,7 @@ out_type:  /*   * Return true if path is reachable from root   * - * namespace_sem or vfsmount_lock is held + * namespace_sem or mount_lock is held   */  bool is_path_reachable(struct mount *mnt, struct dentry *dentry,  			 const struct path *root) @@ -2608,9 +2676,9 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,  int path_is_under(struct path *path1, struct path *path2)  {  	int res; -	br_read_lock(&vfsmount_lock); +	read_seqlock_excl(&mount_lock);  	res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); -	br_read_unlock(&vfsmount_lock); +	read_sequnlock_excl(&mount_lock);  	return res;  }  EXPORT_SYMBOL(path_is_under); @@ -2701,7 +2769,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,  	if (!is_path_reachable(old_mnt, old.dentry, &new))  		goto out4;  	root_mp->m_count++; /* pin it so it won't go away */ -	br_write_lock(&vfsmount_lock); +	lock_mount_hash();  	detach_mnt(new_mnt, &parent_path);  	detach_mnt(root_mnt, 
&root_parent);  	if (root_mnt->mnt.mnt_flags & MNT_LOCKED) { @@ -2713,7 +2781,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,  	/* mount new_root on / */  	attach_mnt(new_mnt, real_mount(root_parent.mnt), root_mp);  	touch_mnt_namespace(current->nsproxy->mnt_ns); -	br_write_unlock(&vfsmount_lock); +	unlock_mount_hash();  	chroot_fs_refs(&root, &new);  	put_mountpoint(root_mp);  	error = 0; @@ -2767,25 +2835,29 @@ void __init mnt_init(void)  	unsigned u;  	int err; -	init_rwsem(&namespace_sem); -  	mnt_cache = kmem_cache_create("mnt_cache", sizeof(struct mount),  			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); -	mount_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); -	mountpoint_hashtable = (struct list_head *)__get_free_page(GFP_ATOMIC); +	mount_hashtable = alloc_large_system_hash("Mount-cache", +				sizeof(struct hlist_head), +				mhash_entries, 19, +				0, +				&m_hash_shift, &m_hash_mask, 0, 0); +	mountpoint_hashtable = alloc_large_system_hash("Mountpoint-cache", +				sizeof(struct hlist_head), +				mphash_entries, 19, +				0, +				&mp_hash_shift, &mp_hash_mask, 0, 0);  	if (!mount_hashtable || !mountpoint_hashtable)  		panic("Failed to allocate mount hash table\n"); -	printk(KERN_INFO "Mount-cache hash table entries: %lu\n", HASH_SIZE); - -	for (u = 0; u < HASH_SIZE; u++) -		INIT_LIST_HEAD(&mount_hashtable[u]); -	for (u = 0; u < HASH_SIZE; u++) -		INIT_LIST_HEAD(&mountpoint_hashtable[u]); +	for (u = 0; u <= m_hash_mask; u++) +		INIT_HLIST_HEAD(&mount_hashtable[u]); +	for (u = 0; u <= mp_hash_mask; u++) +		INIT_HLIST_HEAD(&mountpoint_hashtable[u]); -	br_lock_init(&vfsmount_lock); +	kernfs_init();  	err = sysfs_init();  	if (err) @@ -2802,11 +2874,7 @@ void put_mnt_ns(struct mnt_namespace *ns)  {  	if (!atomic_dec_and_test(&ns->count))  		return; -	namespace_lock(); -	br_write_lock(&vfsmount_lock); -	umount_tree(ns->root, 0); -	br_write_unlock(&vfsmount_lock); -	namespace_unlock(); +	drop_collected_mounts(&ns->root->mnt);  	free_mnt_ns(ns);  } @@ -2829,9 +2897,8 @@ void kern_unmount(struct vfsmount *mnt)  {  	/* release long term mount so mount point can be released */  	if (!IS_ERR_OR_NULL(mnt)) { -		br_write_lock(&vfsmount_lock);  		real_mount(mnt)->mnt_ns = NULL; -		br_write_unlock(&vfsmount_lock); +		synchronize_rcu();	/* yecchhh... */  		mntput(mnt);  	}  } @@ -2875,7 +2942,7 @@ bool fs_fully_visible(struct file_system_type *type)  	if (unlikely(!ns))  		return false; -	namespace_lock(); +	down_read(&namespace_sem);  	list_for_each_entry(mnt, &ns->list, mnt_list) {  		struct mount *child;  		if (mnt->mnt.mnt_sb->s_type != type) @@ -2888,7 +2955,7 @@ bool fs_fully_visible(struct file_system_type *type)  			struct inode *inode = child->mnt_mountpoint->d_inode;  			if (!S_ISDIR(inode->i_mode))  				goto next; -			if (inode->i_nlink != 2) +			if (inode->i_nlink > 2)  				goto next;  		}  		visible = true; @@ -2896,7 +2963,7 @@ bool fs_fully_visible(struct file_system_type *type)  	next:	;  	}  found: -	namespace_unlock(); +	up_read(&namespace_sem);  	return visible;  }  | 
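
The core of this change is the switch from the global vfsmount_lock brlock to mount_lock, a seqlock, so that lookup_mnt() can walk the mount hash under rcu_read_lock() without taking any lock: it samples the sequence count with read_seqbegin(), performs the hash lookup, takes a reference, and then legitimize_mnt() re-checks the count, backing out (or retrying) if a writer ran in between. The sketch below is a minimal userspace illustration of that sample/validate/retry pattern, assuming a hand-rolled seqcount built on C11 atomics; the names (seq_begin, seq_retry, table_lookup) and the simplified memory ordering are illustrative only, not the kernel API.

/*
 * Userspace sketch of the sample/validate/retry pattern used by
 * lookup_mnt()/legitimize_mnt() in the patch above.  Readers sample a
 * sequence counter, do a lockless lookup, then re-check the counter and
 * retry if a writer intervened.  Writers are assumed to be serialized
 * externally (the patch uses namespace_sem/lock_mount_hash for that),
 * and a real implementation must tolerate torn reads, which the retry
 * loop discards.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_uint seq;            /* even: stable, odd: write in progress */
static int table[16];              /* toy "hash table" guarded by seq      */

static unsigned seq_begin(void)
{
	unsigned s;
	do {
		s = atomic_load_explicit(&seq, memory_order_acquire);
	} while (s & 1);           /* spin while a writer is mid-update */
	return s;
}

static bool seq_retry(unsigned s)
{
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&seq, memory_order_relaxed) != s;
}

static void writer_update(int slot, int val)
{
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);  /* -> odd  */
	table[slot] = val;
	atomic_fetch_add_explicit(&seq, 1, memory_order_release);  /* -> even */
}

static int table_lookup(int slot)
{
	unsigned s;
	int val;

	do {
		s = seq_begin();   /* like read_seqbegin(&mount_lock)    */
		val = table[slot]; /* like __lookup_mnt() under RCU      */
	} while (seq_retry(s));    /* like the legitimize_mnt re-check   */
	return val;
}

int main(void)
{
	writer_update(3, 42);
	printf("slot 3 = %d\n", table_lookup(3));
	return 0;
}

The patch also replaces the fixed one-page hash tables with boot-time allocations via alloc_large_system_hash(), sized from the new mhash_entries= and mphash_entries= kernel command-line parameters when they are given.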
