Diffstat (limited to 'fs/inode.c')
-rw-r--r--   fs/inode.c   1443
1 file changed, 834 insertions, 609 deletions
diff --git a/fs/inode.c b/fs/inode.c index ae2727ab0c3..6eecb7ff0b9 100644 --- a/fs/inode.c +++ b/fs/inode.c @@ -1,151 +1,119 @@  /* - * linux/fs/inode.c - *   * (C) 1997 Linus Torvalds + * (C) 1999 Andrea Arcangeli <andrea@suse.de> (dynamic inode allocation)   */ - +#include <linux/export.h>  #include <linux/fs.h>  #include <linux/mm.h> -#include <linux/dcache.h> -#include <linux/init.h> -#include <linux/slab.h> -#include <linux/writeback.h> -#include <linux/module.h>  #include <linux/backing-dev.h> -#include <linux/wait.h> -#include <linux/rwsem.h>  #include <linux/hash.h>  #include <linux/swap.h>  #include <linux/security.h> -#include <linux/pagemap.h>  #include <linux/cdev.h>  #include <linux/bootmem.h>  #include <linux/fsnotify.h>  #include <linux/mount.h> -#include <linux/async.h>  #include <linux/posix_acl.h> -#include <linux/ima.h> +#include <linux/prefetch.h> +#include <linux/buffer_head.h> /* for inode_has_buffers */ +#include <linux/ratelimit.h> +#include <linux/list_lru.h> +#include "internal.h"  /* - * This is needed for the following functions: - *  - inode_has_buffers - *  - invalidate_bdev + * Inode locking rules:   * - * FIXME: remove all knowledge of the buffer layer from this file - */ -#include <linux/buffer_head.h> - -/* - * New inode.c implementation. + * inode->i_lock protects: + *   inode->i_state, inode->i_hash, __iget() + * Inode LRU list locks protect: + *   inode->i_sb->s_inode_lru, inode->i_lru + * inode_sb_list_lock protects: + *   sb->s_inodes, inode->i_sb_list + * bdi->wb.list_lock protects: + *   bdi->wb.b_{dirty,io,more_io}, inode->i_wb_list + * inode_hash_lock protects: + *   inode_hashtable, inode->i_hash   * - * This implementation has the basic premise of trying - * to be extremely low-overhead and SMP-safe, yet be - * simple enough to be "obviously correct". + * Lock ordering:   * - * Famous last words. - */ - -/* inode dynamic allocation 1999, Andrea Arcangeli <andrea@suse.de> */ - -/* #define INODE_PARANOIA 1 */ -/* #define INODE_DEBUG 1 */ - -/* - * Inode lookup is no longer as critical as it used to be: - * most of the lookups are going to be through the dcache. + * inode_sb_list_lock + *   inode->i_lock + *     Inode LRU list locks + * + * bdi->wb.list_lock + *   inode->i_lock + * + * inode_hash_lock + *   inode_sb_list_lock + *   inode->i_lock + * + * iunique_lock + *   inode_hash_lock   */ -#define I_HASHBITS	i_hash_shift -#define I_HASHMASK	i_hash_mask  static unsigned int i_hash_mask __read_mostly;  static unsigned int i_hash_shift __read_mostly; - -/* - * Each inode can be on two separate lists. One is - * the hash list of the inode, used for lookups. The - * other linked list is the "type" list: - *  "in_use" - valid inode, i_count > 0, i_nlink > 0 - *  "dirty"  - as "in_use" but also dirty - *  "unused" - valid inode, i_count = 0 - * - * A "dirty" list is maintained for each super block, - * allowing for low-overhead inode sync() operations. - */ - -static LIST_HEAD(inode_lru);  static struct hlist_head *inode_hashtable __read_mostly; +static __cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_hash_lock); -/* - * A simple spinlock to protect the list manipulations. - * - * NOTE! You also have to own the lock if you change - * the i_state of an inode while it is in use.. - */ -DEFINE_SPINLOCK(inode_lock); +__cacheline_aligned_in_smp DEFINE_SPINLOCK(inode_sb_list_lock);  /* - * iprune_sem provides exclusion between the kswapd or try_to_free_pages - * icache shrinking path, and the umount path.  
Without this exclusion, - * by the time prune_icache calls iput for the inode whose pages it has - * been invalidating, or by the time it calls clear_inode & destroy_inode - * from its final dispose_list, the struct super_block they refer to - * (for inode->i_sb->s_op) may already have been freed and reused. - * - * We make this an rwsem because the fastpath is icache shrinking. In - * some cases a filesystem may be doing a significant amount of work in - * its inode reclaim code, so this should improve parallelism. + * Empty aops. Can be used for the cases where the user does not + * define any of the address_space operations.   */ -static DECLARE_RWSEM(iprune_sem); +const struct address_space_operations empty_aops = { +}; +EXPORT_SYMBOL(empty_aops);  /*   * Statistics gathering..   */  struct inodes_stat_t inodes_stat; -static struct percpu_counter nr_inodes __cacheline_aligned_in_smp; -static struct percpu_counter nr_inodes_unused __cacheline_aligned_in_smp; +static DEFINE_PER_CPU(unsigned long, nr_inodes); +static DEFINE_PER_CPU(unsigned long, nr_unused);  static struct kmem_cache *inode_cachep __read_mostly; -static inline int get_nr_inodes(void) +static long get_nr_inodes(void)  { -	return percpu_counter_sum_positive(&nr_inodes); +	int i; +	long sum = 0; +	for_each_possible_cpu(i) +		sum += per_cpu(nr_inodes, i); +	return sum < 0 ? 0 : sum;  } -static inline int get_nr_inodes_unused(void) +static inline long get_nr_inodes_unused(void)  { -	return percpu_counter_sum_positive(&nr_inodes_unused); +	int i; +	long sum = 0; +	for_each_possible_cpu(i) +		sum += per_cpu(nr_unused, i); +	return sum < 0 ? 0 : sum;  } -int get_nr_dirty_inodes(void) +long get_nr_dirty_inodes(void)  { -	int nr_dirty = get_nr_inodes() - get_nr_inodes_unused(); +	/* not actually dirty inodes, but a wild approximation */ +	long nr_dirty = get_nr_inodes() - get_nr_inodes_unused();  	return nr_dirty > 0 ? 
nr_dirty : 0; -  }  /*   * Handle nr_inode sysctl   */  #ifdef CONFIG_SYSCTL -int proc_nr_inodes(ctl_table *table, int write, +int proc_nr_inodes(struct ctl_table *table, int write,  		   void __user *buffer, size_t *lenp, loff_t *ppos)  {  	inodes_stat.nr_inodes = get_nr_inodes();  	inodes_stat.nr_unused = get_nr_inodes_unused(); -	return proc_dointvec(table, write, buffer, lenp, ppos); +	return proc_doulongvec_minmax(table, write, buffer, lenp, ppos);  }  #endif -static void wake_up_inode(struct inode *inode) -{ -	/* -	 * Prevent speculative execution through spin_unlock(&inode_lock); -	 */ -	smp_mb(); -	wake_up_bit(&inode->i_state, __I_NEW); -} -  /**   * inode_init_always - perform inode structure intialisation   * @sb: superblock inode belongs to @@ -156,7 +124,6 @@ static void wake_up_inode(struct inode *inode)   */  int inode_init_always(struct super_block *sb, struct inode *inode)  { -	static const struct address_space_operations empty_aops;  	static const struct inode_operations empty_iops;  	static const struct file_operations empty_fops;  	struct address_space *const mapping = &inode->i_data; @@ -167,9 +134,10 @@ int inode_init_always(struct super_block *sb, struct inode *inode)  	atomic_set(&inode->i_count, 1);  	inode->i_op = &empty_iops;  	inode->i_fop = &empty_fops; -	inode->i_nlink = 1; -	inode->i_uid = 0; -	inode->i_gid = 0; +	inode->__i_nlink = 1; +	inode->i_opflags = 0; +	i_uid_write(inode, 0); +	i_gid_write(inode, 0);  	atomic_set(&inode->i_writecount, 0);  	inode->i_size = 0;  	inode->i_blocks = 0; @@ -192,14 +160,13 @@ int inode_init_always(struct super_block *sb, struct inode *inode)  	mutex_init(&inode->i_mutex);  	lockdep_set_class(&inode->i_mutex, &sb->s_type->i_mutex_key); -	init_rwsem(&inode->i_alloc_sem); -	lockdep_set_class(&inode->i_alloc_sem, &sb->s_type->i_alloc_sem_key); +	atomic_set(&inode->i_dio_count, 0);  	mapping->a_ops = &empty_aops;  	mapping->host = inode;  	mapping->flags = 0;  	mapping_set_gfp_mask(mapping, GFP_HIGHUSER_MOVABLE); -	mapping->assoc_mapping = NULL; +	mapping->private_data = NULL;  	mapping->backing_dev_info = &default_backing_dev_info;  	mapping->writeback_index = 0; @@ -216,6 +183,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)  	}  	inode->i_private = NULL;  	inode->i_mapping = mapping; +	INIT_HLIST_HEAD(&inode->i_dentry);	/* buggered by rcu freeing */  #ifdef CONFIG_FS_POSIX_ACL  	inode->i_acl = inode->i_default_acl = ACL_NOT_CACHED;  #endif @@ -224,7 +192,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)  	inode->i_fsnotify_mask = 0;  #endif -	percpu_counter_inc(&nr_inodes); +	this_cpu_inc(nr_inodes);  	return 0;  out: @@ -255,21 +223,38 @@ static struct inode *alloc_inode(struct super_block *sb)  	return inode;  } +void free_inode_nonrcu(struct inode *inode) +{ +	kmem_cache_free(inode_cachep, inode); +} +EXPORT_SYMBOL(free_inode_nonrcu); +  void __destroy_inode(struct inode *inode)  {  	BUG_ON(inode_has_buffers(inode));  	security_inode_free(inode);  	fsnotify_inode_delete(inode); +	if (!inode->i_nlink) { +		WARN_ON(atomic_long_read(&inode->i_sb->s_remove_count) == 0); +		atomic_long_dec(&inode->i_sb->s_remove_count); +	} +  #ifdef CONFIG_FS_POSIX_ACL  	if (inode->i_acl && inode->i_acl != ACL_NOT_CACHED)  		posix_acl_release(inode->i_acl);  	if (inode->i_default_acl && inode->i_default_acl != ACL_NOT_CACHED)  		posix_acl_release(inode->i_default_acl);  #endif -	percpu_counter_dec(&nr_inodes); +	this_cpu_dec(nr_inodes);  }  EXPORT_SYMBOL(__destroy_inode); +static void 
i_callback(struct rcu_head *head) +{ +	struct inode *inode = container_of(head, struct inode, i_rcu); +	kmem_cache_free(inode_cachep, inode); +} +  static void destroy_inode(struct inode *inode)  {  	BUG_ON(!list_empty(&inode->i_lru)); @@ -277,8 +262,99 @@ static void destroy_inode(struct inode *inode)  	if (inode->i_sb->s_op->destroy_inode)  		inode->i_sb->s_op->destroy_inode(inode);  	else -		kmem_cache_free(inode_cachep, (inode)); +		call_rcu(&inode->i_rcu, i_callback); +} + +/** + * drop_nlink - directly drop an inode's link count + * @inode: inode + * + * This is a low-level filesystem helper to replace any + * direct filesystem manipulation of i_nlink.  In cases + * where we are attempting to track writes to the + * filesystem, a decrement to zero means an imminent + * write when the file is truncated and actually unlinked + * on the filesystem. + */ +void drop_nlink(struct inode *inode) +{ +	WARN_ON(inode->i_nlink == 0); +	inode->__i_nlink--; +	if (!inode->i_nlink) +		atomic_long_inc(&inode->i_sb->s_remove_count); +} +EXPORT_SYMBOL(drop_nlink); + +/** + * clear_nlink - directly zero an inode's link count + * @inode: inode + * + * This is a low-level filesystem helper to replace any + * direct filesystem manipulation of i_nlink.  See + * drop_nlink() for why we care about i_nlink hitting zero. + */ +void clear_nlink(struct inode *inode) +{ +	if (inode->i_nlink) { +		inode->__i_nlink = 0; +		atomic_long_inc(&inode->i_sb->s_remove_count); +	} +} +EXPORT_SYMBOL(clear_nlink); + +/** + * set_nlink - directly set an inode's link count + * @inode: inode + * @nlink: new nlink (should be non-zero) + * + * This is a low-level filesystem helper to replace any + * direct filesystem manipulation of i_nlink. + */ +void set_nlink(struct inode *inode, unsigned int nlink) +{ +	if (!nlink) { +		clear_nlink(inode); +	} else { +		/* Yes, some filesystems do change nlink from zero to one */ +		if (inode->i_nlink == 0) +			atomic_long_dec(&inode->i_sb->s_remove_count); + +		inode->__i_nlink = nlink; +	}  } +EXPORT_SYMBOL(set_nlink); + +/** + * inc_nlink - directly increment an inode's link count + * @inode: inode + * + * This is a low-level filesystem helper to replace any + * direct filesystem manipulation of i_nlink.  Currently, + * it is only here for parity with dec_nlink(). 
+ */ +void inc_nlink(struct inode *inode) +{ +	if (unlikely(inode->i_nlink == 0)) { +		WARN_ON(!(inode->i_state & I_LINKABLE)); +		atomic_long_dec(&inode->i_sb->s_remove_count); +	} + +	inode->__i_nlink++; +} +EXPORT_SYMBOL(inc_nlink); + +void address_space_init_once(struct address_space *mapping) +{ +	memset(mapping, 0, sizeof(*mapping)); +	INIT_RADIX_TREE(&mapping->page_tree, GFP_ATOMIC); +	spin_lock_init(&mapping->tree_lock); +	mutex_init(&mapping->i_mmap_mutex); +	INIT_LIST_HEAD(&mapping->private_list); +	spin_lock_init(&mapping->private_lock); +	mapping->i_mmap = RB_ROOT; +	INIT_LIST_HEAD(&mapping->i_mmap_nonlinear); +} +EXPORT_SYMBOL(address_space_init_once);  /*   * These are initializations that only need to be done @@ -289,17 +365,10 @@ void inode_init_once(struct inode *inode)  {  	memset(inode, 0, sizeof(*inode));  	INIT_HLIST_NODE(&inode->i_hash); -	INIT_LIST_HEAD(&inode->i_dentry);  	INIT_LIST_HEAD(&inode->i_devices);  	INIT_LIST_HEAD(&inode->i_wb_list);  	INIT_LIST_HEAD(&inode->i_lru); -	INIT_RADIX_TREE(&inode->i_data.page_tree, GFP_ATOMIC); -	spin_lock_init(&inode->i_data.tree_lock); -	spin_lock_init(&inode->i_data.i_mmap_lock); -	INIT_LIST_HEAD(&inode->i_data.private_list); -	spin_lock_init(&inode->i_data.private_lock); -	INIT_RAW_PRIO_TREE_ROOT(&inode->i_data.i_mmap); -	INIT_LIST_HEAD(&inode->i_data.i_mmap_nonlinear); +	address_space_init_once(&inode->i_data);  	i_size_ordered_init(inode);  #ifdef CONFIG_FSNOTIFY  	INIT_HLIST_HEAD(&inode->i_fsnotify_marks); @@ -315,7 +384,7 @@ static void init_once(void *foo)  }  /* - * inode_lock must be held + * inode->i_lock must be held   */  void __iget(struct inode *inode)  { @@ -333,23 +402,28 @@ EXPORT_SYMBOL(ihold);  static void inode_lru_list_add(struct inode *inode)  { -	if (list_empty(&inode->i_lru)) { -		list_add(&inode->i_lru, &inode_lru); -		percpu_counter_inc(&nr_inodes_unused); -	} +	if (list_lru_add(&inode->i_sb->s_inode_lru, &inode->i_lru)) +		this_cpu_inc(nr_unused);  } -static void inode_lru_list_del(struct inode *inode) +/* + * Add inode to LRU if needed (inode is unused and clean). + * + * Needs inode->i_lock held. 
+ */ +void inode_add_lru(struct inode *inode)  { -	if (!list_empty(&inode->i_lru)) { -		list_del_init(&inode->i_lru); -		percpu_counter_dec(&nr_inodes_unused); -	} +	if (!(inode->i_state & (I_DIRTY | I_SYNC | I_FREEING | I_WILL_FREE)) && +	    !atomic_read(&inode->i_count) && inode->i_sb->s_flags & MS_ACTIVE) +		inode_lru_list_add(inode);  } -static inline void __inode_sb_list_add(struct inode *inode) + +static void inode_lru_list_del(struct inode *inode)  { -	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); + +	if (list_lru_del(&inode->i_sb->s_inode_lru, &inode->i_lru)) +		this_cpu_dec(nr_unused);  }  /** @@ -358,15 +432,19 @@ static inline void __inode_sb_list_add(struct inode *inode)   */  void inode_sb_list_add(struct inode *inode)  { -	spin_lock(&inode_lock); -	__inode_sb_list_add(inode); -	spin_unlock(&inode_lock); +	spin_lock(&inode_sb_list_lock); +	list_add(&inode->i_sb_list, &inode->i_sb->s_inodes); +	spin_unlock(&inode_sb_list_lock);  }  EXPORT_SYMBOL_GPL(inode_sb_list_add); -static inline void __inode_sb_list_del(struct inode *inode) +static inline void inode_sb_list_del(struct inode *inode)  { -	list_del_init(&inode->i_sb_list); +	if (!list_empty(&inode->i_sb_list)) { +		spin_lock(&inode_sb_list_lock); +		list_del_init(&inode->i_sb_list); +		spin_unlock(&inode_sb_list_lock); +	}  }  static unsigned long hash(struct super_block *sb, unsigned long hashval) @@ -375,8 +453,8 @@ static unsigned long hash(struct super_block *sb, unsigned long hashval)  	tmp = (hashval * (unsigned long)sb) ^ (GOLDEN_RATIO_PRIME + hashval) /  			L1_CACHE_BYTES; -	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> I_HASHBITS); -	return tmp & I_HASHMASK; +	tmp = tmp ^ ((tmp ^ GOLDEN_RATIO_PRIME) >> i_hash_shift); +	return tmp & i_hash_mask;  }  /** @@ -391,9 +469,11 @@ void __insert_inode_hash(struct inode *inode, unsigned long hashval)  {  	struct hlist_head *b = inode_hashtable + hash(inode->i_sb, hashval); -	spin_lock(&inode_lock); +	spin_lock(&inode_hash_lock); +	spin_lock(&inode->i_lock);  	hlist_add_head(&inode->i_hash, b); -	spin_unlock(&inode_lock); +	spin_unlock(&inode->i_lock); +	spin_unlock(&inode_hash_lock);  }  EXPORT_SYMBOL(__insert_inode_hash); @@ -403,52 +483,88 @@ EXPORT_SYMBOL(__insert_inode_hash);   *   *	Remove an inode from the superblock.   */ -static void __remove_inode_hash(struct inode *inode) +void __remove_inode_hash(struct inode *inode)  { +	spin_lock(&inode_hash_lock); +	spin_lock(&inode->i_lock);  	hlist_del_init(&inode->i_hash); +	spin_unlock(&inode->i_lock); +	spin_unlock(&inode_hash_lock);  } +EXPORT_SYMBOL(__remove_inode_hash); -/** - *	remove_inode_hash - remove an inode from the hash - *	@inode: inode to unhash - * - *	Remove an inode from the superblock. - */ -void remove_inode_hash(struct inode *inode) -{ -	spin_lock(&inode_lock); -	hlist_del_init(&inode->i_hash); -	spin_unlock(&inode_lock); -} -EXPORT_SYMBOL(remove_inode_hash); - -void end_writeback(struct inode *inode) +void clear_inode(struct inode *inode)  {  	might_sleep(); +	/* +	 * We have to cycle tree_lock here because reclaim can be still in the +	 * process of removing the last page (in __delete_from_page_cache()) +	 * and we must not free mapping under it. 
+	 */ +	spin_lock_irq(&inode->i_data.tree_lock);  	BUG_ON(inode->i_data.nrpages); +	BUG_ON(inode->i_data.nrshadows); +	spin_unlock_irq(&inode->i_data.tree_lock);  	BUG_ON(!list_empty(&inode->i_data.private_list));  	BUG_ON(!(inode->i_state & I_FREEING));  	BUG_ON(inode->i_state & I_CLEAR); -	inode_sync_wait(inode); +	/* don't need i_lock here, no concurrent mods to i_state */  	inode->i_state = I_FREEING | I_CLEAR;  } -EXPORT_SYMBOL(end_writeback); +EXPORT_SYMBOL(clear_inode); +/* + * Free the inode passed in, removing it from the lists it is still connected + * to. We remove any pages still attached to the inode and wait for any IO that + * is still in progress before finally destroying the inode. + * + * An inode must already be marked I_FREEING so that we avoid the inode being + * moved back onto lists if we race with other code that manipulates the lists + * (e.g. writeback_single_inode). The caller is responsible for setting this. + * + * An inode must already be removed from the LRU list before being evicted from + * the cache. This should occur atomically with setting the I_FREEING state + * flag, so no inodes here should ever be on the LRU when being evicted. + */  static void evict(struct inode *inode)  {  	const struct super_operations *op = inode->i_sb->s_op; +	BUG_ON(!(inode->i_state & I_FREEING)); +	BUG_ON(!list_empty(&inode->i_lru)); + +	if (!list_empty(&inode->i_wb_list)) +		inode_wb_list_del(inode); + +	inode_sb_list_del(inode); + +	/* +	 * Wait for flusher thread to be done with the inode so that filesystem +	 * does not start destroying it while writeback is still running. Since +	 * the inode has I_FREEING set, flusher thread won't start new work on +	 * the inode.  We just have to wait for running writeback to finish. +	 */ +	inode_wait_for_writeback(inode); +  	if (op->evict_inode) {  		op->evict_inode(inode);  	} else { -		if (inode->i_data.nrpages) -			truncate_inode_pages(&inode->i_data, 0); -		end_writeback(inode); +		truncate_inode_pages_final(&inode->i_data); +		clear_inode(inode);  	}  	if (S_ISBLK(inode->i_mode) && inode->i_bdev)  		bd_forget(inode);  	if (S_ISCHR(inode->i_mode) && inode->i_cdev)  		cd_forget(inode); + +	remove_inode_hash(inode); + +	spin_lock(&inode->i_lock); +	wake_up_bit(&inode->i_state, __I_NEW); +	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); +	spin_unlock(&inode->i_lock); + +	destroy_inode(inode);  }  /* @@ -467,14 +583,6 @@ static void dispose_list(struct list_head *head)  		list_del_init(&inode->i_lru);  		evict(inode); - -		spin_lock(&inode_lock); -		__remove_inode_hash(inode); -		__inode_sb_list_del(inode); -		spin_unlock(&inode_lock); - -		wake_up_inode(inode); -		destroy_inode(inode);  	}  } @@ -492,94 +600,75 @@ void evict_inodes(struct super_block *sb)  	struct inode *inode, *next;  	LIST_HEAD(dispose); -	down_write(&iprune_sem); - -	spin_lock(&inode_lock); +	spin_lock(&inode_sb_list_lock);  	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {  		if (atomic_read(&inode->i_count))  			continue; +		spin_lock(&inode->i_lock);  		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { -			WARN_ON(1); +			spin_unlock(&inode->i_lock);  			continue;  		}  		inode->i_state |= I_FREEING; - -		/* -		 * Move the inode off the IO lists and LRU once I_FREEING is -		 * set so that it won't get moved back on there if it is dirty. 
-		 */ -		list_move(&inode->i_lru, &dispose); -		list_del_init(&inode->i_wb_list); -		if (!(inode->i_state & (I_DIRTY | I_SYNC))) -			percpu_counter_dec(&nr_inodes_unused); +		inode_lru_list_del(inode); +		spin_unlock(&inode->i_lock); +		list_add(&inode->i_lru, &dispose);  	} -	spin_unlock(&inode_lock); +	spin_unlock(&inode_sb_list_lock);  	dispose_list(&dispose); -	up_write(&iprune_sem);  }  /**   * invalidate_inodes	- attempt to free all inodes on a superblock   * @sb:		superblock to operate on + * @kill_dirty: flag to guide handling of dirty inodes   *   * Attempts to free all inodes for a given superblock.  If there were any   * busy inodes return a non-zero value, else zero. + * If @kill_dirty is set, discard dirty inodes too, otherwise treat + * them as busy.   */ -int invalidate_inodes(struct super_block *sb) +int invalidate_inodes(struct super_block *sb, bool kill_dirty)  {  	int busy = 0;  	struct inode *inode, *next;  	LIST_HEAD(dispose); -	down_write(&iprune_sem); - -	spin_lock(&inode_lock); +	spin_lock(&inode_sb_list_lock);  	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) { -		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) +		spin_lock(&inode->i_lock); +		if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) { +			spin_unlock(&inode->i_lock); +			continue; +		} +		if (inode->i_state & I_DIRTY && !kill_dirty) { +			spin_unlock(&inode->i_lock); +			busy = 1;  			continue; +		}  		if (atomic_read(&inode->i_count)) { +			spin_unlock(&inode->i_lock);  			busy = 1;  			continue;  		}  		inode->i_state |= I_FREEING; - -		/* -		 * Move the inode off the IO lists and LRU once I_FREEING is -		 * set so that it won't get moved back on there if it is dirty. -		 */ -		list_move(&inode->i_lru, &dispose); -		list_del_init(&inode->i_wb_list); -		if (!(inode->i_state & (I_DIRTY | I_SYNC))) -			percpu_counter_dec(&nr_inodes_unused); +		inode_lru_list_del(inode); +		spin_unlock(&inode->i_lock); +		list_add(&inode->i_lru, &dispose);  	} -	spin_unlock(&inode_lock); +	spin_unlock(&inode_sb_list_lock);  	dispose_list(&dispose); -	up_write(&iprune_sem);  	return busy;  } -static int can_unuse(struct inode *inode) -{ -	if (inode->i_state & ~I_REFERENCED) -		return 0; -	if (inode_has_buffers(inode)) -		return 0; -	if (atomic_read(&inode->i_count)) -		return 0; -	if (inode->i_data.nrpages) -		return 0; -	return 1; -} -  /* - * Scan `goal' inodes on the unused list for freeable ones. They are moved to a - * temporary list and then are freed outside inode_lock by dispose_list(). + * Isolate the inode from the LRU in preparation for freeing it.   *   * Any inodes which are pinned purely because of attached pagecache have their   * pagecache removed.  If the inode has metadata buffers attached to @@ -593,103 +682,83 @@ static int can_unuse(struct inode *inode)   * LRU does not have strict ordering. Hence we don't want to reclaim inodes   * with this flag set because they are the inodes that are out of order.   */ -static void prune_icache(int nr_to_scan) +static enum lru_status +inode_lru_isolate(struct list_head *item, spinlock_t *lru_lock, void *arg)  { -	LIST_HEAD(freeable); -	int nr_scanned; -	unsigned long reap = 0; - -	down_read(&iprune_sem); -	spin_lock(&inode_lock); -	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) { -		struct inode *inode; +	struct list_head *freeable = arg; +	struct inode	*inode = container_of(item, struct inode, i_lru); -		if (list_empty(&inode_lru)) -			break; +	/* +	 * we are inverting the lru lock/inode->i_lock here, so use a trylock. 
+	 * If we fail to get the lock, just skip it. +	 */ +	if (!spin_trylock(&inode->i_lock)) +		return LRU_SKIP; -		inode = list_entry(inode_lru.prev, struct inode, i_lru); +	/* +	 * Referenced or dirty inodes are still in use. Give them another pass +	 * through the LRU as we canot reclaim them now. +	 */ +	if (atomic_read(&inode->i_count) || +	    (inode->i_state & ~I_REFERENCED)) { +		list_del_init(&inode->i_lru); +		spin_unlock(&inode->i_lock); +		this_cpu_dec(nr_unused); +		return LRU_REMOVED; +	} -		/* -		 * Referenced or dirty inodes are still in use. Give them -		 * another pass through the LRU as we canot reclaim them now. -		 */ -		if (atomic_read(&inode->i_count) || -		    (inode->i_state & ~I_REFERENCED)) { -			list_del_init(&inode->i_lru); -			percpu_counter_dec(&nr_inodes_unused); -			continue; -		} +	/* recently referenced inodes get one more pass */ +	if (inode->i_state & I_REFERENCED) { +		inode->i_state &= ~I_REFERENCED; +		spin_unlock(&inode->i_lock); +		return LRU_ROTATE; +	} -		/* recently referenced inodes get one more pass */ -		if (inode->i_state & I_REFERENCED) { -			list_move(&inode->i_lru, &inode_lru); -			inode->i_state &= ~I_REFERENCED; -			continue; -		} -		if (inode_has_buffers(inode) || inode->i_data.nrpages) { -			__iget(inode); -			spin_unlock(&inode_lock); -			if (remove_inode_buffers(inode)) -				reap += invalidate_mapping_pages(&inode->i_data, -								0, -1); -			iput(inode); -			spin_lock(&inode_lock); - -			if (inode != list_entry(inode_lru.next, -						struct inode, i_lru)) -				continue;	/* wrong inode or list_empty */ -			if (!can_unuse(inode)) -				continue; +	if (inode_has_buffers(inode) || inode->i_data.nrpages) { +		__iget(inode); +		spin_unlock(&inode->i_lock); +		spin_unlock(lru_lock); +		if (remove_inode_buffers(inode)) { +			unsigned long reap; +			reap = invalidate_mapping_pages(&inode->i_data, 0, -1); +			if (current_is_kswapd()) +				__count_vm_events(KSWAPD_INODESTEAL, reap); +			else +				__count_vm_events(PGINODESTEAL, reap); +			if (current->reclaim_state) +				current->reclaim_state->reclaimed_slab += reap;  		} -		WARN_ON(inode->i_state & I_NEW); -		inode->i_state |= I_FREEING; - -		/* -		 * Move the inode off the IO lists and LRU once I_FREEING is -		 * set so that it won't get moved back on there if it is dirty. -		 */ -		list_move(&inode->i_lru, &freeable); -		list_del_init(&inode->i_wb_list); -		percpu_counter_dec(&nr_inodes_unused); +		iput(inode); +		spin_lock(lru_lock); +		return LRU_RETRY;  	} -	if (current_is_kswapd()) -		__count_vm_events(KSWAPD_INODESTEAL, reap); -	else -		__count_vm_events(PGINODESTEAL, reap); -	spin_unlock(&inode_lock); -	dispose_list(&freeable); -	up_read(&iprune_sem); +	WARN_ON(inode->i_state & I_NEW); +	inode->i_state |= I_FREEING; +	list_move(&inode->i_lru, freeable); +	spin_unlock(&inode->i_lock); + +	this_cpu_dec(nr_unused); +	return LRU_REMOVED;  }  /* - * shrink_icache_memory() will attempt to reclaim some unused inodes.  Here, - * "unused" means that no dentries are referring to the inodes: the files are - * not open and the dcache references to those inodes have already been - * reclaimed. - * - * This function is passed the number of inodes to scan, and it returns the - * total number of remaining possibly-reclaimable inodes. + * Walk the superblock inode LRU for freeable inodes and attempt to free them. + * This is called from the superblock shrinker function with a number of inodes + * to trim from the LRU. 
Inodes to be freed are moved to a temporary list and + * then are freed outside inode_lock by dispose_list().   */ -static int shrink_icache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask) +long prune_icache_sb(struct super_block *sb, unsigned long nr_to_scan, +		     int nid)  { -	if (nr) { -		/* -		 * Nasty deadlock avoidance.  We may hold various FS locks, -		 * and we don't want to recurse into the FS that called us -		 * in clear_inode() and friends.. -		 */ -		if (!(gfp_mask & __GFP_FS)) -			return -1; -		prune_icache(nr); -	} -	return (get_nr_inodes_unused() / 100) * sysctl_vfs_cache_pressure; -} +	LIST_HEAD(freeable); +	long freed; -static struct shrinker icache_shrinker = { -	.shrink = shrink_icache_memory, -	.seeks = DEFAULT_SEEKS, -}; +	freed = list_lru_walk_node(&sb->s_inode_lru, nid, inode_lru_isolate, +				       &freeable, &nr_to_scan); +	dispose_list(&freeable); +	return freed; +}  static void __wait_on_freeing_inode(struct inode *inode);  /* @@ -700,20 +769,21 @@ static struct inode *find_inode(struct super_block *sb,  				int (*test)(struct inode *, void *),  				void *data)  { -	struct hlist_node *node;  	struct inode *inode = NULL;  repeat: -	hlist_for_each_entry(inode, node, head, i_hash) { +	hlist_for_each_entry(inode, head, i_hash) {  		if (inode->i_sb != sb)  			continue;  		if (!test(inode, data))  			continue; +		spin_lock(&inode->i_lock);  		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {  			__wait_on_freeing_inode(inode);  			goto repeat;  		}  		__iget(inode); +		spin_unlock(&inode->i_lock);  		return inode;  	}  	return NULL; @@ -726,20 +796,21 @@ repeat:  static struct inode *find_inode_fast(struct super_block *sb,  				struct hlist_head *head, unsigned long ino)  { -	struct hlist_node *node;  	struct inode *inode = NULL;  repeat: -	hlist_for_each_entry(inode, node, head, i_hash) { +	hlist_for_each_entry(inode, head, i_hash) {  		if (inode->i_ino != ino)  			continue;  		if (inode->i_sb != sb)  			continue; +		spin_lock(&inode->i_lock);  		if (inode->i_state & (I_FREEING|I_WILL_FREE)) {  			__wait_on_freeing_inode(inode);  			goto repeat;  		}  		__iget(inode); +		spin_unlock(&inode->i_lock);  		return inode;  	}  	return NULL; @@ -784,6 +855,29 @@ unsigned int get_next_ino(void)  EXPORT_SYMBOL(get_next_ino);  /** + *	new_inode_pseudo 	- obtain an inode + *	@sb: superblock + * + *	Allocates a new inode for given superblock. 
+ *	Inode wont be chained in superblock s_inodes list + *	This means : + *	- fs can't be unmount + *	- quotas, fsnotify, writeback can't work + */ +struct inode *new_inode_pseudo(struct super_block *sb) +{ +	struct inode *inode = alloc_inode(sb); + +	if (inode) { +		spin_lock(&inode->i_lock); +		inode->i_state = 0; +		spin_unlock(&inode->i_lock); +		INIT_LIST_HEAD(&inode->i_sb_list); +	} +	return inode; +} + +/**   *	new_inode 	- obtain an inode   *	@sb: superblock   * @@ -799,28 +893,23 @@ struct inode *new_inode(struct super_block *sb)  {  	struct inode *inode; -	spin_lock_prefetch(&inode_lock); +	spin_lock_prefetch(&inode_sb_list_lock); -	inode = alloc_inode(sb); -	if (inode) { -		spin_lock(&inode_lock); -		__inode_sb_list_add(inode); -		inode->i_state = 0; -		spin_unlock(&inode_lock); -	} +	inode = new_inode_pseudo(sb); +	if (inode) +		inode_sb_list_add(inode);  	return inode;  }  EXPORT_SYMBOL(new_inode); -void unlock_new_inode(struct inode *inode) -{  #ifdef CONFIG_DEBUG_LOCK_ALLOC +void lockdep_annotate_inode_mutex_key(struct inode *inode) +{  	if (S_ISDIR(inode->i_mode)) {  		struct file_system_type *type = inode->i_sb->s_type;  		/* Set new key only if filesystem hasn't already changed it */ -		if (!lockdep_match_class(&inode->i_mutex, -		    &type->i_mutex_key)) { +		if (lockdep_match_class(&inode->i_mutex, &type->i_mutex_key)) {  			/*  			 * ensure nobody is actually holding i_mutex  			 */ @@ -830,52 +919,117 @@ void unlock_new_inode(struct inode *inode)  					  &type->i_mutex_dir_key);  		}  	} +} +EXPORT_SYMBOL(lockdep_annotate_inode_mutex_key);  #endif -	/* -	 * This is special!  We do not need the spinlock when clearing I_NEW, -	 * because we're guaranteed that nobody else tries to do anything about -	 * the state of the inode when it is locked, as we just created it (so -	 * there can be no old holders that haven't tested I_NEW). -	 * However we must emit the memory barrier so that other CPUs reliably -	 * see the clearing of I_NEW after the other inode initialisation has -	 * completed. -	 */ -	smp_mb(); + +/** + * unlock_new_inode - clear the I_NEW state and wake up any waiters + * @inode:	new inode to unlock + * + * Called when the inode is fully initialised to clear the new state of the + * inode and wake up anyone waiting for the inode to finish initialisation. + */ +void unlock_new_inode(struct inode *inode) +{ +	lockdep_annotate_inode_mutex_key(inode); +	spin_lock(&inode->i_lock);  	WARN_ON(!(inode->i_state & I_NEW));  	inode->i_state &= ~I_NEW; -	wake_up_inode(inode); +	smp_mb(); +	wake_up_bit(&inode->i_state, __I_NEW); +	spin_unlock(&inode->i_lock);  }  EXPORT_SYMBOL(unlock_new_inode); -/* - * This is called without the inode lock held.. Be careful. +/** + * lock_two_nondirectories - take two i_mutexes on non-directory objects   * - * We no longer cache the sb_flags in i_flags - see fs.h - *	-- rmk@arm.uk.linux.org + * Lock any non-NULL argument that is not a directory. + * Zero, one or two objects may be locked by this function. 
+ * + * @inode1: first inode to lock + * @inode2: second inode to lock   */ -static struct inode *get_new_inode(struct super_block *sb, -				struct hlist_head *head, -				int (*test)(struct inode *, void *), -				int (*set)(struct inode *, void *), -				void *data) +void lock_two_nondirectories(struct inode *inode1, struct inode *inode2) +{ +	if (inode1 > inode2) +		swap(inode1, inode2); + +	if (inode1 && !S_ISDIR(inode1->i_mode)) +		mutex_lock(&inode1->i_mutex); +	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1) +		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_NONDIR2); +} +EXPORT_SYMBOL(lock_two_nondirectories); + +/** + * unlock_two_nondirectories - release locks from lock_two_nondirectories() + * @inode1: first inode to unlock + * @inode2: second inode to unlock + */ +void unlock_two_nondirectories(struct inode *inode1, struct inode *inode2) +{ +	if (inode1 && !S_ISDIR(inode1->i_mode)) +		mutex_unlock(&inode1->i_mutex); +	if (inode2 && !S_ISDIR(inode2->i_mode) && inode2 != inode1) +		mutex_unlock(&inode2->i_mutex); +} +EXPORT_SYMBOL(unlock_two_nondirectories); + +/** + * iget5_locked - obtain an inode from a mounted file system + * @sb:		super block of file system + * @hashval:	hash value (usually inode number) to get + * @test:	callback used for comparisons between inodes + * @set:	callback used to initialize a new struct inode + * @data:	opaque data pointer to pass to @test and @set + * + * Search for the inode specified by @hashval and @data in the inode cache, + * and if present it is return it with an increased reference count. This is + * a generalized version of iget_locked() for file systems where the inode + * number is not sufficient for unique identification of an inode. + * + * If the inode is not in cache, allocate a new inode and return it locked, + * hashed, and with the I_NEW flag set. The file system gets to fill it in + * before unlocking it via unlock_new_inode(). + * + * Note both @test and @set are called with the inode_hash_lock held, so can't + * sleep. + */ +struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, +		int (*test)(struct inode *, void *), +		int (*set)(struct inode *, void *), void *data)  { +	struct hlist_head *head = inode_hashtable + hash(sb, hashval);  	struct inode *inode; +	spin_lock(&inode_hash_lock); +	inode = find_inode(sb, head, test, data); +	spin_unlock(&inode_hash_lock); + +	if (inode) { +		wait_on_inode(inode); +		return inode; +	} +  	inode = alloc_inode(sb);  	if (inode) {  		struct inode *old; -		spin_lock(&inode_lock); +		spin_lock(&inode_hash_lock);  		/* We released the lock, so.. */  		old = find_inode(sb, head, test, data);  		if (!old) {  			if (set(inode, data))  				goto set_failed; -			hlist_add_head(&inode->i_hash, head); -			__inode_sb_list_add(inode); +			spin_lock(&inode->i_lock);  			inode->i_state = I_NEW; -			spin_unlock(&inode_lock); +			hlist_add_head(&inode->i_hash, head); +			spin_unlock(&inode->i_lock); +			inode_sb_list_add(inode); +			spin_unlock(&inode_hash_lock);  			/* Return the locked inode with I_NEW set, the  			 * caller is responsible for filling in the contents @@ -888,7 +1042,7 @@ static struct inode *get_new_inode(struct super_block *sb,  		 * us. Use the old inode instead of the one we just  		 * allocated.  		 
*/ -		spin_unlock(&inode_lock); +		spin_unlock(&inode_hash_lock);  		destroy_inode(inode);  		inode = old;  		wait_on_inode(inode); @@ -896,33 +1050,53 @@ static struct inode *get_new_inode(struct super_block *sb,  	return inode;  set_failed: -	spin_unlock(&inode_lock); +	spin_unlock(&inode_hash_lock);  	destroy_inode(inode);  	return NULL;  } +EXPORT_SYMBOL(iget5_locked); -/* - * get_new_inode_fast is the fast path version of get_new_inode, see the - * comment at iget_locked for details. +/** + * iget_locked - obtain an inode from a mounted file system + * @sb:		super block of file system + * @ino:	inode number to get + * + * Search for the inode specified by @ino in the inode cache and if present + * return it with an increased reference count. This is for file systems + * where the inode number is sufficient for unique identification of an inode. + * + * If the inode is not in cache, allocate a new inode and return it locked, + * hashed, and with the I_NEW flag set.  The file system gets to fill it in + * before unlocking it via unlock_new_inode().   */ -static struct inode *get_new_inode_fast(struct super_block *sb, -				struct hlist_head *head, unsigned long ino) +struct inode *iget_locked(struct super_block *sb, unsigned long ino)  { +	struct hlist_head *head = inode_hashtable + hash(sb, ino);  	struct inode *inode; +	spin_lock(&inode_hash_lock); +	inode = find_inode_fast(sb, head, ino); +	spin_unlock(&inode_hash_lock); +	if (inode) { +		wait_on_inode(inode); +		return inode; +	} +  	inode = alloc_inode(sb);  	if (inode) {  		struct inode *old; -		spin_lock(&inode_lock); +		spin_lock(&inode_hash_lock);  		/* We released the lock, so.. */  		old = find_inode_fast(sb, head, ino);  		if (!old) {  			inode->i_ino = ino; -			hlist_add_head(&inode->i_hash, head); -			__inode_sb_list_add(inode); +			spin_lock(&inode->i_lock);  			inode->i_state = I_NEW; -			spin_unlock(&inode_lock); +			hlist_add_head(&inode->i_hash, head); +			spin_unlock(&inode->i_lock); +			inode_sb_list_add(inode); +			spin_unlock(&inode_hash_lock);  			/* Return the locked inode with I_NEW set, the  			 * caller is responsible for filling in the contents @@ -935,13 +1109,14 @@ static struct inode *get_new_inode_fast(struct super_block *sb,  		 * us. Use the old inode instead of the one we just  		 * allocated.  		 */ -		spin_unlock(&inode_lock); +		spin_unlock(&inode_hash_lock);  		destroy_inode(inode);  		inode = old;  		wait_on_inode(inode);  	}  	return inode;  } +EXPORT_SYMBOL(iget_locked);  /*   * search the inode cache for a matching inode number. 
@@ -953,13 +1128,16 @@ static struct inode *get_new_inode_fast(struct super_block *sb,  static int test_inode_iunique(struct super_block *sb, unsigned long ino)  {  	struct hlist_head *b = inode_hashtable + hash(sb, ino); -	struct hlist_node *node;  	struct inode *inode; -	hlist_for_each_entry(inode, node, b, i_hash) { -		if (inode->i_ino == ino && inode->i_sb == sb) +	spin_lock(&inode_hash_lock); +	hlist_for_each_entry(inode, b, i_hash) { +		if (inode->i_ino == ino && inode->i_sb == sb) { +			spin_unlock(&inode_hash_lock);  			return 0; +		}  	} +	spin_unlock(&inode_hash_lock);  	return 1;  } @@ -989,7 +1167,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)  	static unsigned int counter;  	ino_t res; -	spin_lock(&inode_lock);  	spin_lock(&iunique_lock);  	do {  		if (counter <= max_reserved) @@ -997,7 +1174,6 @@ ino_t iunique(struct super_block *sb, ino_t max_reserved)  		res = counter++;  	} while (!test_inode_iunique(sb, res));  	spin_unlock(&iunique_lock); -	spin_unlock(&inode_lock);  	return res;  } @@ -1005,116 +1181,50 @@ EXPORT_SYMBOL(iunique);  struct inode *igrab(struct inode *inode)  { -	spin_lock(&inode_lock); -	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) +	spin_lock(&inode->i_lock); +	if (!(inode->i_state & (I_FREEING|I_WILL_FREE))) {  		__iget(inode); -	else +		spin_unlock(&inode->i_lock); +	} else { +		spin_unlock(&inode->i_lock);  		/*  		 * Handle the case where s_op->clear_inode is not been  		 * called yet, and somebody is calling igrab  		 * while the inode is getting freed.  		 */  		inode = NULL; -	spin_unlock(&inode_lock); +	}  	return inode;  }  EXPORT_SYMBOL(igrab);  /** - * ifind - internal function, you want ilookup5() or iget5(). - * @sb:		super block of file system to search - * @head:       the head of the list to search - * @test:	callback used for comparisons between inodes - * @data:	opaque data pointer to pass to @test - * @wait:	if true wait for the inode to be unlocked, if false do not - * - * ifind() searches for the inode specified by @data in the inode - * cache. This is a generalized version of ifind_fast() for file systems where - * the inode number is not sufficient for unique identification of an inode. - * - * If the inode is in the cache, the inode is returned with an incremented - * reference count. - * - * Otherwise NULL is returned. - * - * Note, @test is called with the inode_lock held, so can't sleep. - */ -static struct inode *ifind(struct super_block *sb, -		struct hlist_head *head, int (*test)(struct inode *, void *), -		void *data, const int wait) -{ -	struct inode *inode; - -	spin_lock(&inode_lock); -	inode = find_inode(sb, head, test, data); -	if (inode) { -		spin_unlock(&inode_lock); -		if (likely(wait)) -			wait_on_inode(inode); -		return inode; -	} -	spin_unlock(&inode_lock); -	return NULL; -} - -/** - * ifind_fast - internal function, you want ilookup() or iget(). - * @sb:		super block of file system to search - * @head:       head of the list to search - * @ino:	inode number to search for - * - * ifind_fast() searches for the inode @ino in the inode cache. This is for - * file systems where the inode number is sufficient for unique identification - * of an inode. - * - * If the inode is in the cache, the inode is returned with an incremented - * reference count. - * - * Otherwise NULL is returned. 
- */ -static struct inode *ifind_fast(struct super_block *sb, -		struct hlist_head *head, unsigned long ino) -{ -	struct inode *inode; - -	spin_lock(&inode_lock); -	inode = find_inode_fast(sb, head, ino); -	if (inode) { -		spin_unlock(&inode_lock); -		wait_on_inode(inode); -		return inode; -	} -	spin_unlock(&inode_lock); -	return NULL; -} - -/**   * ilookup5_nowait - search for an inode in the inode cache   * @sb:		super block of file system to search   * @hashval:	hash value (usually inode number) to search for   * @test:	callback used for comparisons between inodes   * @data:	opaque data pointer to pass to @test   * - * ilookup5() uses ifind() to search for the inode specified by @hashval and - * @data in the inode cache. This is a generalized version of ilookup() for - * file systems where the inode number is not sufficient for unique - * identification of an inode. - * + * Search for the inode specified by @hashval and @data in the inode cache.   * If the inode is in the cache, the inode is returned with an incremented - * reference count.  Note, the inode lock is not waited upon so you have to be - * very careful what you do with the returned inode.  You probably should be - * using ilookup5() instead. + * reference count.   * - * Otherwise NULL is returned. + * Note: I_NEW is not waited upon so you have to be very careful what you do + * with the returned inode.  You probably should be using ilookup5() instead.   * - * Note, @test is called with the inode_lock held, so can't sleep. + * Note2: @test is called with the inode_hash_lock held, so can't sleep.   */  struct inode *ilookup5_nowait(struct super_block *sb, unsigned long hashval,  		int (*test)(struct inode *, void *), void *data)  {  	struct hlist_head *head = inode_hashtable + hash(sb, hashval); +	struct inode *inode; + +	spin_lock(&inode_hash_lock); +	inode = find_inode(sb, head, test, data); +	spin_unlock(&inode_hash_lock); -	return ifind(sb, head, test, data, 0); +	return inode;  }  EXPORT_SYMBOL(ilookup5_nowait); @@ -1125,24 +1235,24 @@ EXPORT_SYMBOL(ilookup5_nowait);   * @test:	callback used for comparisons between inodes   * @data:	opaque data pointer to pass to @test   * - * ilookup5() uses ifind() to search for the inode specified by @hashval and - * @data in the inode cache. This is a generalized version of ilookup() for - * file systems where the inode number is not sufficient for unique - * identification of an inode. - * - * If the inode is in the cache, the inode lock is waited upon and the inode is + * Search for the inode specified by @hashval and @data in the inode cache, + * and if the inode is in the cache, return the inode with an incremented + * reference count.  Waits on I_NEW before returning the inode.   * returned with an incremented reference count.   * - * Otherwise NULL is returned. + * This is a generalized version of ilookup() for file systems where the + * inode number is not sufficient for unique identification of an inode.   * - * Note, @test is called with the inode_lock held, so can't sleep. + * Note: @test is called with the inode_hash_lock held, so can't sleep.   
*/  struct inode *ilookup5(struct super_block *sb, unsigned long hashval,  		int (*test)(struct inode *, void *), void *data)  { -	struct hlist_head *head = inode_hashtable + hash(sb, hashval); +	struct inode *inode = ilookup5_nowait(sb, hashval, test, data); -	return ifind(sb, head, test, data, 1); +	if (inode) +		wait_on_inode(inode); +	return inode;  }  EXPORT_SYMBOL(ilookup5); @@ -1151,91 +1261,23 @@ EXPORT_SYMBOL(ilookup5);   * @sb:		super block of file system to search   * @ino:	inode number to search for   * - * ilookup() uses ifind_fast() to search for the inode @ino in the inode cache. - * This is for file systems where the inode number is sufficient for unique - * identification of an inode. - * - * If the inode is in the cache, the inode is returned with an incremented - * reference count. - * - * Otherwise NULL is returned. + * Search for the inode @ino in the inode cache, and if the inode is in the + * cache, the inode is returned with an incremented reference count.   */  struct inode *ilookup(struct super_block *sb, unsigned long ino)  {  	struct hlist_head *head = inode_hashtable + hash(sb, ino); - -	return ifind_fast(sb, head, ino); -} -EXPORT_SYMBOL(ilookup); - -/** - * iget5_locked - obtain an inode from a mounted file system - * @sb:		super block of file system - * @hashval:	hash value (usually inode number) to get - * @test:	callback used for comparisons between inodes - * @set:	callback used to initialize a new struct inode - * @data:	opaque data pointer to pass to @test and @set - * - * iget5_locked() uses ifind() to search for the inode specified by @hashval - * and @data in the inode cache and if present it is returned with an increased - * reference count. This is a generalized version of iget_locked() for file - * systems where the inode number is not sufficient for unique identification - * of an inode. - * - * If the inode is not in cache, get_new_inode() is called to allocate a new - * inode and this is returned locked, hashed, and with the I_NEW flag set. The - * file system gets to fill it in before unlocking it via unlock_new_inode(). - * - * Note both @test and @set are called with the inode_lock held, so can't sleep. - */ -struct inode *iget5_locked(struct super_block *sb, unsigned long hashval, -		int (*test)(struct inode *, void *), -		int (*set)(struct inode *, void *), void *data) -{ -	struct hlist_head *head = inode_hashtable + hash(sb, hashval);  	struct inode *inode; -	inode = ifind(sb, head, test, data, 1); -	if (inode) -		return inode; -	/* -	 * get_new_inode() will do the right thing, re-trying the search -	 * in case it had to block at any point. -	 */ -	return get_new_inode(sb, head, test, set, data); -} -EXPORT_SYMBOL(iget5_locked); - -/** - * iget_locked - obtain an inode from a mounted file system - * @sb:		super block of file system - * @ino:	inode number to get - * - * iget_locked() uses ifind_fast() to search for the inode specified by @ino in - * the inode cache and if present it is returned with an increased reference - * count. This is for file systems where the inode number is sufficient for - * unique identification of an inode. - * - * If the inode is not in cache, get_new_inode_fast() is called to allocate a - * new inode and this is returned locked, hashed, and with the I_NEW flag set. - * The file system gets to fill it in before unlocking it via - * unlock_new_inode(). 
- */ -struct inode *iget_locked(struct super_block *sb, unsigned long ino) -{ -	struct hlist_head *head = inode_hashtable + hash(sb, ino); -	struct inode *inode; +	spin_lock(&inode_hash_lock); +	inode = find_inode_fast(sb, head, ino); +	spin_unlock(&inode_hash_lock); -	inode = ifind_fast(sb, head, ino);  	if (inode) -		return inode; -	/* -	 * get_new_inode_fast() will do the right thing, re-trying the search -	 * in case it had to block at any point. -	 */ -	return get_new_inode_fast(sb, head, ino); +		wait_on_inode(inode); +	return inode;  } -EXPORT_SYMBOL(iget_locked); +EXPORT_SYMBOL(ilookup);  int insert_inode_locked(struct inode *inode)  { @@ -1243,27 +1285,32 @@ int insert_inode_locked(struct inode *inode)  	ino_t ino = inode->i_ino;  	struct hlist_head *head = inode_hashtable + hash(sb, ino); -	inode->i_state |= I_NEW;  	while (1) { -		struct hlist_node *node;  		struct inode *old = NULL; -		spin_lock(&inode_lock); -		hlist_for_each_entry(old, node, head, i_hash) { +		spin_lock(&inode_hash_lock); +		hlist_for_each_entry(old, head, i_hash) {  			if (old->i_ino != ino)  				continue;  			if (old->i_sb != sb)  				continue; -			if (old->i_state & (I_FREEING|I_WILL_FREE)) +			spin_lock(&old->i_lock); +			if (old->i_state & (I_FREEING|I_WILL_FREE)) { +				spin_unlock(&old->i_lock);  				continue; +			}  			break;  		} -		if (likely(!node)) { +		if (likely(!old)) { +			spin_lock(&inode->i_lock); +			inode->i_state |= I_NEW;  			hlist_add_head(&inode->i_hash, head); -			spin_unlock(&inode_lock); +			spin_unlock(&inode->i_lock); +			spin_unlock(&inode_hash_lock);  			return 0;  		}  		__iget(old); -		spin_unlock(&inode_lock); +		spin_unlock(&old->i_lock); +		spin_unlock(&inode_hash_lock);  		wait_on_inode(old);  		if (unlikely(!inode_unhashed(old))) {  			iput(old); @@ -1280,29 +1327,33 @@ int insert_inode_locked4(struct inode *inode, unsigned long hashval,  	struct super_block *sb = inode->i_sb;  	struct hlist_head *head = inode_hashtable + hash(sb, hashval); -	inode->i_state |= I_NEW; -  	while (1) { -		struct hlist_node *node;  		struct inode *old = NULL; -		spin_lock(&inode_lock); -		hlist_for_each_entry(old, node, head, i_hash) { +		spin_lock(&inode_hash_lock); +		hlist_for_each_entry(old, head, i_hash) {  			if (old->i_sb != sb)  				continue;  			if (!test(old, data))  				continue; -			if (old->i_state & (I_FREEING|I_WILL_FREE)) +			spin_lock(&old->i_lock); +			if (old->i_state & (I_FREEING|I_WILL_FREE)) { +				spin_unlock(&old->i_lock);  				continue; +			}  			break;  		} -		if (likely(!node)) { +		if (likely(!old)) { +			spin_lock(&inode->i_lock); +			inode->i_state |= I_NEW;  			hlist_add_head(&inode->i_hash, head); -			spin_unlock(&inode_lock); +			spin_unlock(&inode->i_lock); +			spin_unlock(&inode_hash_lock);  			return 0;  		}  		__iget(old); -		spin_unlock(&inode_lock); +		spin_unlock(&old->i_lock); +		spin_unlock(&inode_hash_lock);  		wait_on_inode(old);  		if (unlikely(!inode_unhashed(old))) {  			iput(old); @@ -1321,17 +1372,6 @@ int generic_delete_inode(struct inode *inode)  EXPORT_SYMBOL(generic_delete_inode);  /* - * Normal UNIX filesystem behaviour: delete the - * inode when the usage count drops to zero, and - * i_nlink is zero. - */ -int generic_drop_inode(struct inode *inode) -{ -	return !inode->i_nlink || inode_unhashed(inode); -} -EXPORT_SYMBOL_GPL(generic_drop_inode); - -/*   * Called when we're dropping the last reference   * to an inode.   
* @@ -1347,47 +1387,35 @@ static void iput_final(struct inode *inode)  	const struct super_operations *op = inode->i_sb->s_op;  	int drop; -	if (op && op->drop_inode) +	WARN_ON(inode->i_state & I_NEW); + +	if (op->drop_inode)  		drop = op->drop_inode(inode);  	else  		drop = generic_drop_inode(inode); +	if (!drop && (sb->s_flags & MS_ACTIVE)) { +		inode->i_state |= I_REFERENCED; +		inode_add_lru(inode); +		spin_unlock(&inode->i_lock); +		return; +	} +  	if (!drop) { -		if (sb->s_flags & MS_ACTIVE) { -			inode->i_state |= I_REFERENCED; -			if (!(inode->i_state & (I_DIRTY|I_SYNC))) { -				inode_lru_list_add(inode); -			} -			spin_unlock(&inode_lock); -			return; -		} -		WARN_ON(inode->i_state & I_NEW);  		inode->i_state |= I_WILL_FREE; -		spin_unlock(&inode_lock); +		spin_unlock(&inode->i_lock);  		write_inode_now(inode, 1); -		spin_lock(&inode_lock); +		spin_lock(&inode->i_lock);  		WARN_ON(inode->i_state & I_NEW);  		inode->i_state &= ~I_WILL_FREE; -		__remove_inode_hash(inode);  	} -	WARN_ON(inode->i_state & I_NEW);  	inode->i_state |= I_FREEING; +	if (!list_empty(&inode->i_lru)) +		inode_lru_list_del(inode); +	spin_unlock(&inode->i_lock); -	/* -	 * Move the inode off the IO lists and LRU once I_FREEING is -	 * set so that it won't get moved back on there if it is dirty. -	 */ -	inode_lru_list_del(inode); -	list_del_init(&inode->i_wb_list); - -	__inode_sb_list_del(inode); -	spin_unlock(&inode_lock);  	evict(inode); -	remove_inode_hash(inode); -	wake_up_inode(inode); -	BUG_ON(inode->i_state != (I_FREEING | I_CLEAR)); -	destroy_inode(inode);  }  /** @@ -1404,7 +1432,7 @@ void iput(struct inode *inode)  	if (inode) {  		BUG_ON(inode->i_state & I_CLEAR); -		if (atomic_dec_and_lock(&inode->i_count, &inode_lock)) +		if (atomic_dec_and_lock(&inode->i_count, &inode->i_lock))  			iput_final(inode);  	}  } @@ -1464,18 +1492,39 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,  	return 0;  } +/* + * This does the actual work of updating an inodes time or version.  Must have + * had called mnt_want_write() before calling this. + */ +static int update_time(struct inode *inode, struct timespec *time, int flags) +{ +	if (inode->i_op->update_time) +		return inode->i_op->update_time(inode, time, flags); + +	if (flags & S_ATIME) +		inode->i_atime = *time; +	if (flags & S_VERSION) +		inode_inc_iversion(inode); +	if (flags & S_CTIME) +		inode->i_ctime = *time; +	if (flags & S_MTIME) +		inode->i_mtime = *time; +	mark_inode_dirty_sync(inode); +	return 0; +} +  /**   *	touch_atime	-	update the access time - *	@mnt: mount the inode is accessed on - *	@dentry: dentry accessed + *	@path: the &struct path to update   *   *	Update the accessed time on an inode and mark it for writeback.   *	This function automatically handles read only file systems and media,   *	as well as the "noatime" flag and inode specific "noatime" markers.   
*/ -void touch_atime(struct vfsmount *mnt, struct dentry *dentry) +void touch_atime(const struct path *path)  { -	struct inode *inode = dentry->d_inode; +	struct vfsmount *mnt = path->mnt; +	struct inode *inode = path->dentry->d_inode;  	struct timespec now;  	if (inode->i_flags & S_NOATIME) @@ -1498,15 +1547,96 @@ void touch_atime(struct vfsmount *mnt, struct dentry *dentry)  	if (timespec_equal(&inode->i_atime, &now))  		return; -	if (mnt_want_write(mnt)) +	if (!sb_start_write_trylock(inode->i_sb))  		return; -	inode->i_atime = now; -	mark_inode_dirty_sync(inode); -	mnt_drop_write(mnt); +	if (__mnt_want_write(mnt)) +		goto skip_update; +	/* +	 * File systems can error out when updating inodes if they need to +	 * allocate new space to modify an inode (such is the case for +	 * Btrfs), but since we touch atime while walking down the path we +	 * really don't care if we failed to update the atime of the file, +	 * so just ignore the return value. +	 * We may also fail on filesystems that have the ability to make parts +	 * of the fs read only, e.g. subvolumes in Btrfs. +	 */ +	update_time(inode, &now, S_ATIME); +	__mnt_drop_write(mnt); +skip_update: +	sb_end_write(inode->i_sb);  }  EXPORT_SYMBOL(touch_atime); +/* + * The logic we want is + * + *	if suid or (sgid and xgrp) + *		remove privs + */ +int should_remove_suid(struct dentry *dentry) +{ +	umode_t mode = dentry->d_inode->i_mode; +	int kill = 0; + +	/* suid always must be killed */ +	if (unlikely(mode & S_ISUID)) +		kill = ATTR_KILL_SUID; + +	/* +	 * sgid without any exec bits is just a mandatory locking mark; leave +	 * it alone.  If some exec bits are set, it's a real sgid; kill it. +	 */ +	if (unlikely((mode & S_ISGID) && (mode & S_IXGRP))) +		kill |= ATTR_KILL_SGID; + +	if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode))) +		return kill; + +	return 0; +} +EXPORT_SYMBOL(should_remove_suid); + +static int __remove_suid(struct dentry *dentry, int kill) +{ +	struct iattr newattrs; + +	newattrs.ia_valid = ATTR_FORCE | kill; +	/* +	 * Note we call this on write, so notify_change will not +	 * encounter any conflicting delegations: +	 */ +	return notify_change(dentry, &newattrs, NULL); +} + +int file_remove_suid(struct file *file) +{ +	struct dentry *dentry = file->f_path.dentry; +	struct inode *inode = dentry->d_inode; +	int killsuid; +	int killpriv; +	int error = 0; + +	/* Fast path for nothing security related */ +	if (IS_NOSEC(inode)) +		return 0; + +	killsuid = should_remove_suid(dentry); +	killpriv = security_inode_need_killpriv(dentry); + +	if (killpriv < 0) +		return killpriv; +	if (killpriv) +		error = security_inode_killpriv(dentry); +	if (!error && killsuid) +		error = __remove_suid(dentry, killsuid); +	if (!error && (inode->i_sb->s_flags & MS_NOSEC)) +		inode->i_flags |= S_NOSEC; + +	return error; +} +EXPORT_SYMBOL(file_remove_suid); +  /**   *	file_update_time	-	update mtime and ctime time   *	@file: file accessed @@ -1516,18 +1646,20 @@ EXPORT_SYMBOL(touch_atime);   *	usage in the file write path of filesystems, and filesystems may   *	choose to explicitly ignore update via this function with the   *	S_NOCMTIME inode flag, e.g. for network filesystem where these - *	timestamps are handled by the server. + *	timestamps are handled by the server.  This can return an error for + *	file systems who need to allocate space in order to update an inode.   
 */
-void file_update_time(struct file *file)
+int file_update_time(struct file *file)
 {
-	struct inode *inode = file->f_path.dentry->d_inode;
+	struct inode *inode = file_inode(file);
 	struct timespec now;
-	enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
+	int sync_it = 0;
+	int ret;

 	/* First try to exhaust all avenues to not sync */
 	if (IS_NOCMTIME(inode))
-		return;
+		return 0;

 	now = current_fs_time(inode->i_sb);
 	if (!timespec_equal(&inode->i_mtime, &now))
@@ -1540,21 +1672,16 @@ void file_update_time(struct file *file)
 		sync_it |= S_VERSION;

 	if (!sync_it)
-		return;
+		return 0;

 	/* Finally allowed to write? Takes lock. */
-	if (mnt_want_write_file(file))
-		return;
+	if (__mnt_want_write_file(file))
+		return 0;

-	/* Only change inode inside the lock region */
-	if (sync_it & S_VERSION)
-		inode_inc_iversion(inode);
-	if (sync_it & S_CTIME)
-		inode->i_ctime = now;
-	if (sync_it & S_MTIME)
-		inode->i_mtime = now;
-	mark_inode_dirty_sync(inode);
-	mnt_drop_write(file->f_path.mnt);
+	ret = update_time(inode, &now, sync_it);
+	__mnt_drop_write_file(file);
+
+	return ret;
 }
 EXPORT_SYMBOL(file_update_time);

@@ -1583,9 +1710,8 @@ EXPORT_SYMBOL(inode_wait);
 * to recheck inode state.
 *
 * It doesn't matter if I_NEW is not set initially, a call to
- * wake_up_inode() after removing from the hash list will DTRT.
- *
- * This is called with inode_lock held.
+ * wake_up_bit(&inode->i_state, __I_NEW) after removing from the hash list
+ * will DTRT.
 */
 static void __wait_on_freeing_inode(struct inode *inode)
 {
@@ -1593,10 +1719,11 @@ static void __wait_on_freeing_inode(struct inode *inode)
 	DEFINE_WAIT_BIT(wait, &inode->i_state, __I_NEW);
 	wq = bit_waitqueue(&inode->i_state, __I_NEW);
 	prepare_to_wait(wq, &wait.wait, TASK_UNINTERRUPTIBLE);
-	spin_unlock(&inode_lock);
+	spin_unlock(&inode->i_lock);
+	spin_unlock(&inode_hash_lock);
 	schedule();
 	finish_wait(wq, &wait.wait);
-	spin_lock(&inode_lock);
+	spin_lock(&inode_hash_lock);
 }

 static __initdata unsigned long ihash_entries;
@@ -1614,7 +1741,7 @@ __setup("ihash_entries=", set_ihash_entries);
 */
 void __init inode_init_early(void)
 {
-	int loop;
+	unsigned int loop;

 	/* If hashes are distributed across NUMA nodes, defer
 	 * hash allocation until vmalloc space is available.
@@ -1630,15 +1757,16 @@ void __init inode_init_early(void)
 					HASH_EARLY,
 					&i_hash_shift,
 					&i_hash_mask,
+					0,
 					0);

-	for (loop = 0; loop < (1 << i_hash_shift); loop++)
+	for (loop = 0; loop < (1U << i_hash_shift); loop++)
 		INIT_HLIST_HEAD(&inode_hashtable[loop]);
 }

 void __init inode_init(void)
 {
-	int loop;
+	unsigned int loop;

 	/* inode slab cache */
 	inode_cachep = kmem_cache_create("inode_cache",
@@ -1647,9 +1775,6 @@ void __init inode_init(void)
 					 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
 					 SLAB_MEM_SPREAD),
 					 init_once);
-	register_shrinker(&icache_shrinker);
-	percpu_counter_init(&nr_inodes, 0);
-	percpu_counter_init(&nr_inodes_unused, 0);

 	/* Hash may have been set up in inode_init_early */
 	if (!hashdist)
@@ -1663,9 +1788,10 @@ void __init inode_init(void)
 					0,
 					&i_hash_shift,
 					&i_hash_mask,
+					0,
 					0);

-	for (loop = 0; loop < (1 << i_hash_shift); loop++)
+	for (loop = 0; loop < (1U << i_hash_shift); loop++)
 		INIT_HLIST_HEAD(&inode_hashtable[loop]);
 }

@@ -1679,7 +1805,7 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
 		inode->i_fop = &def_blk_fops;
 		inode->i_rdev = rdev;
 	} else if (S_ISFIFO(mode))
-		inode->i_fop = &def_fifo_fops;
+		inode->i_fop = &pipefifo_fops;
 	else if (S_ISSOCK(mode))
 		inode->i_fop = &bad_sock_fops;
 	else
@@ -1690,13 +1816,13 @@ void init_special_inode(struct inode *inode, umode_t mode, dev_t rdev)
 EXPORT_SYMBOL(init_special_inode);

 /**
- * Init uid,gid,mode for new inode according to posix standards
+ * inode_init_owner - Init uid,gid,mode for new inode according to posix standards
 * @inode: New inode
 * @dir: Directory inode
 * @mode: mode of the new inode
 */
 void inode_init_owner(struct inode *inode, const struct inode *dir,
-			mode_t mode)
+			umode_t mode)
 {
 	inode->i_uid = current_fsuid();
 	if (dir && dir->i_mode & S_ISGID) {
@@ -1708,3 +1834,102 @@ void inode_init_owner(struct inode *inode, const struct inode *dir,
 	inode->i_mode = mode;
 }
 EXPORT_SYMBOL(inode_init_owner);
+
+/**
+ * inode_owner_or_capable - check current task permissions to inode
+ * @inode: inode being checked
+ *
+ * Return true if current either has CAP_FOWNER in a namespace with the
+ * inode owner uid mapped, or owns the file.
+ */
+bool inode_owner_or_capable(const struct inode *inode)
+{
+	struct user_namespace *ns;
+
+	if (uid_eq(current_fsuid(), inode->i_uid))
+		return true;
+
+	ns = current_user_ns();
+	if (ns_capable(ns, CAP_FOWNER) && kuid_has_mapping(ns, inode->i_uid))
+		return true;
+	return false;
+}
+EXPORT_SYMBOL(inode_owner_or_capable);
+
+/*
+ * Direct i/o helper functions
+ */
+static void __inode_dio_wait(struct inode *inode)
+{
+	wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
+	DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
+
+	do {
+		prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
+		if (atomic_read(&inode->i_dio_count))
+			schedule();
+	} while (atomic_read(&inode->i_dio_count));
+	finish_wait(wq, &q.wait);
+}
+
+/**
+ * inode_dio_wait - wait for outstanding DIO requests to finish
+ * @inode: inode to wait for
+ *
+ * Waits for all pending direct I/O requests to finish so that we can
+ * proceed with a truncate or equivalent operation.
+ *
+ * Must be called under a lock that serializes taking new references
+ * to i_dio_count, usually by inode->i_mutex.
+ */
+void inode_dio_wait(struct inode *inode)
+{
+	if (atomic_read(&inode->i_dio_count))
+		__inode_dio_wait(inode);
+}
+EXPORT_SYMBOL(inode_dio_wait);
+
+/*
+ * inode_dio_done - signal finish of a direct I/O requests
+ * @inode: inode the direct I/O happens on
+ *
+ * This is called once we've finished processing a direct I/O request,
+ * and is used to wake up callers waiting for direct I/O to be quiesced.
+ */
+void inode_dio_done(struct inode *inode)
+{
+	if (atomic_dec_and_test(&inode->i_dio_count))
+		wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
+}
+EXPORT_SYMBOL(inode_dio_done);
+
+/*
+ * inode_set_flags - atomically set some inode flags
+ *
+ * Note: the caller should be holding i_mutex, or else be sure that
+ * they have exclusive access to the inode structure (i.e., while the
+ * inode is being instantiated).  The reason for the cmpxchg() loop
+ * --- which wouldn't be necessary if all code paths which modify
+ * i_flags actually followed this rule, is that there is at least one
+ * code path which doesn't today --- for example,
+ * __generic_file_aio_write() calls file_remove_suid() without holding
+ * i_mutex --- so we use cmpxchg() out of an abundance of caution.
+ *
+ * In the long run, i_mutex is overkill, and we should probably look
+ * at using the i_lock spinlock to protect i_flags, and then make sure
+ * it is so documented in include/linux/fs.h and that all code follows
+ * the locking convention!!
+ */
+void inode_set_flags(struct inode *inode, unsigned int flags,
+		     unsigned int mask)
+{
+	unsigned int old_flags, new_flags;
+
+	WARN_ON_ONCE(flags & ~mask);
+	do {
+		old_flags = ACCESS_ONCE(inode->i_flags);
+		new_flags = (old_flags & ~mask) | flags;
+	} while (unlikely(cmpxchg(&inode->i_flags, old_flags,
+				  new_flags) != old_flags));
+}
+EXPORT_SYMBOL(inode_set_flags);
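
The iput_final() hunk above decides between caching and evicting based on what ->drop_inode returns (or generic_drop_inode() when the operation is not set). A minimal sketch of how a filesystem opts out of caching unreferenced inodes entirely; generic_delete_inode() and simple_statfs() are real helpers, while the "examplefs" names are hypothetical:

/*
 * Sketch only: returning non-zero from ->drop_inode makes iput_final()
 * take the eviction path instead of parking the unreferenced inode on
 * the per-sb LRU with I_REFERENCED set.
 */
static const struct super_operations examplefs_sops = {
	.statfs		= simple_statfs,
	.drop_inode	= generic_delete_inode,	/* never cache unreferenced inodes */
};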
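
file_remove_suid() and the now int-returning file_update_time() are intended for the write path, called in that order as __generic_file_aio_write() does. A minimal sketch under the assumption of a hypothetical examplefs_file_write(); only the i_mutex part of the locking is shown, freeze protection is left out:

static ssize_t examplefs_file_write(struct file *file, const char __user *buf,
				    size_t len, loff_t *ppos)
{
	struct inode *inode = file_inode(file);
	ssize_t err;

	mutex_lock(&inode->i_mutex);
	/* Strip suid/sgid before any data goes out... */
	err = file_remove_suid(file);
	if (!err)
		/* ...then update mtime/ctime/i_version, checking the new int return. */
		err = file_update_time(file);
	if (err)
		goto out;

	/* ... copy data from 'buf' and dirty the pages here ... */
	err = len;
out:
	mutex_unlock(&inode->i_mutex);
	return err;
}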
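
inode_init_owner() centralises the POSIX uid/gid/setgid-directory rules that filesystems used to open-code in their create paths. A sketch of a hypothetical ->mknod using it together with init_special_inode() from the hunks above; the examplefs_mknod name and the dget() pinning convention (borrowed from ramfs) are assumptions:

static int examplefs_mknod(struct inode *dir, struct dentry *dentry,
			   umode_t mode, dev_t rdev)
{
	struct inode *inode = new_inode(dir->i_sb);

	if (!inode)
		return -ENOMEM;

	inode->i_ino = get_next_ino();
	inode_init_owner(inode, dir, mode);	/* i_uid, i_gid, i_mode incl. setgid dirs */
	init_special_inode(inode, inode->i_mode, rdev);
	inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	d_instantiate(dentry, inode);
	dget(dentry);				/* extra pin, as ramfs does */
	return 0;
}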
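
inode_dio_wait() is typically called on the truncate side, under i_mutex, while the direct I/O code increments i_dio_count at submission and calls inode_dio_done() on completion. A sketch of a hypothetical ->setattr doing the wait before shrinking i_size; the examplefs_setattr name is an assumption, the helpers called are real:

static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) &&
	    attr->ia_size != i_size_read(inode)) {
		inode_dio_wait(inode);		/* wait for i_dio_count to reach zero */
		truncate_setsize(inode, attr->ia_size);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}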
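
The cmpxchg() loop in inode_set_flags() exists so that concurrent updates of unrelated bits, such as S_NOSEC set by file_remove_suid(), are not lost to a plain read-modify-write. A sketch of the intended caller pattern, mirroring what ext4_set_inode_flags() does; the EXAMPLEFS_*_FL on-disk flag names are made up:

static void examplefs_set_inode_flags(struct inode *inode, unsigned int diskflags)
{
	unsigned int new_fl = 0;

	if (diskflags & EXAMPLEFS_IMMUTABLE_FL)
		new_fl |= S_IMMUTABLE;
	if (diskflags & EXAMPLEFS_NOATIME_FL)
		new_fl |= S_NOATIME;

	/* Only the masked bits change; every other bit in i_flags survives. */
	inode_set_flags(inode, new_fl, S_IMMUTABLE | S_NOATIME);
}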
