Diffstat (limited to 'fs/dcache.c')
-rw-r--r-- | fs/dcache.c | 277
1 files changed, 144 insertions, 133 deletions
diff --git a/fs/dcache.c b/fs/dcache.c
index 83293be4814..23702a9d4e6 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -67,33 +67,43 @@ struct dentry_stat_t dentry_stat = {
 	.age_limit = 45,
 };
 
-static void __d_free(struct dentry *dentry)
+static struct percpu_counter nr_dentry __cacheline_aligned_in_smp;
+static struct percpu_counter nr_dentry_unused __cacheline_aligned_in_smp;
+
+#if defined(CONFIG_SYSCTL) && defined(CONFIG_PROC_FS)
+int proc_nr_dentry(ctl_table *table, int write, void __user *buffer,
+		   size_t *lenp, loff_t *ppos)
+{
+	dentry_stat.nr_dentry = percpu_counter_sum_positive(&nr_dentry);
+	dentry_stat.nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+	return proc_dointvec(table, write, buffer, lenp, ppos);
+}
+#endif
+
+static void __d_free(struct rcu_head *head)
 {
+	struct dentry *dentry = container_of(head, struct dentry, d_u.d_rcu);
+
 	WARN_ON(!list_empty(&dentry->d_alias));
 	if (dname_external(dentry))
 		kfree(dentry->d_name.name);
 	kmem_cache_free(dentry_cache, dentry);
 }
 
-static void d_callback(struct rcu_head *head)
-{
-	struct dentry * dentry = container_of(head, struct dentry, d_u.d_rcu);
-	__d_free(dentry);
-}
-
 /*
- * no dcache_lock, please. The caller must decrement dentry_stat.nr_dentry
- * inside dcache_lock.
+ * no dcache_lock, please.
  */
 static void d_free(struct dentry *dentry)
 {
+	percpu_counter_dec(&nr_dentry);
 	if (dentry->d_op && dentry->d_op->d_release)
 		dentry->d_op->d_release(dentry);
+
 	/* if dentry was never inserted into hash, immediate free is OK */
 	if (hlist_unhashed(&dentry->d_hash))
-		__d_free(dentry);
+		__d_free(&dentry->d_u.d_rcu);
 	else
-		call_rcu(&dentry->d_u.d_rcu, d_callback);
+		call_rcu(&dentry->d_u.d_rcu, __d_free);
 }
 
@@ -123,37 +133,34 @@ static void dentry_iput(struct dentry * dentry)
 }
 
 /*
- * dentry_lru_(add|add_tail|del|del_init) must be called with dcache_lock held.
+ * dentry_lru_(add|del|move_tail) must be called with dcache_lock held.
  */
 static void dentry_lru_add(struct dentry *dentry)
 {
-	list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-	dentry->d_sb->s_nr_dentry_unused++;
-	dentry_stat.nr_unused++;
-}
-
-static void dentry_lru_add_tail(struct dentry *dentry)
-{
-	list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
-	dentry->d_sb->s_nr_dentry_unused++;
-	dentry_stat.nr_unused++;
+	if (list_empty(&dentry->d_lru)) {
+		list_add(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+		dentry->d_sb->s_nr_dentry_unused++;
+		percpu_counter_inc(&nr_dentry_unused);
+	}
 }
 
 static void dentry_lru_del(struct dentry *dentry)
 {
 	if (!list_empty(&dentry->d_lru)) {
-		list_del(&dentry->d_lru);
+		list_del_init(&dentry->d_lru);
 		dentry->d_sb->s_nr_dentry_unused--;
-		dentry_stat.nr_unused--;
+		percpu_counter_dec(&nr_dentry_unused);
 	}
 }
 
-static void dentry_lru_del_init(struct dentry *dentry)
+static void dentry_lru_move_tail(struct dentry *dentry)
 {
-	if (likely(!list_empty(&dentry->d_lru))) {
-		list_del_init(&dentry->d_lru);
-		dentry->d_sb->s_nr_dentry_unused--;
-		dentry_stat.nr_unused--;
+	if (list_empty(&dentry->d_lru)) {
+		list_add_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
+		dentry->d_sb->s_nr_dentry_unused++;
+		percpu_counter_inc(&nr_dentry_unused);
+	} else {
+		list_move_tail(&dentry->d_lru, &dentry->d_sb->s_dentry_lru);
 	}
 }
 
@@ -172,7 +179,6 @@ static struct dentry *d_kill(struct dentry *dentry)
 	struct dentry *parent;
 
 	list_del(&dentry->d_u.d_child);
-	dentry_stat.nr_dentry--;	/* For d_free, below */
 	/*drops the locks, at that point nobody can reach this dentry */
 	dentry_iput(dentry);
 	if (IS_ROOT(dentry))
@@ -237,13 +243,15 @@ repeat:
 		if (dentry->d_op->d_delete(dentry))
 			goto unhash_it;
 	}
+
 	/* Unreachable? Get rid of it */
 	if (d_unhashed(dentry))
 		goto kill_it;
-	if (list_empty(&dentry->d_lru)) {
-		dentry->d_flags |= DCACHE_REFERENCED;
-		dentry_lru_add(dentry);
-	}
+
+	/* Otherwise leave it cached and ensure it's on the LRU */
+	dentry->d_flags |= DCACHE_REFERENCED;
+	dentry_lru_add(dentry);
+
 	spin_unlock(&dentry->d_lock);
 	spin_unlock(&dcache_lock);
 	return;
@@ -318,11 +326,10 @@ int d_invalidate(struct dentry * dentry)
 EXPORT_SYMBOL(d_invalidate);
 
 /* This should be called _only_ with dcache_lock held */
-
 static inline struct dentry * __dget_locked(struct dentry *dentry)
 {
 	atomic_inc(&dentry->d_count);
-	dentry_lru_del_init(dentry);
+	dentry_lru_del(dentry);
 	return dentry;
 }
 
@@ -441,73 +448,27 @@ static void prune_one_dentry(struct dentry * dentry)
 		if (dentry->d_op && dentry->d_op->d_delete)
 			dentry->d_op->d_delete(dentry);
-		dentry_lru_del_init(dentry);
+		dentry_lru_del(dentry);
 		__d_drop(dentry);
 		dentry = d_kill(dentry);
 		spin_lock(&dcache_lock);
 	}
 }
 
-/*
- * Shrink the dentry LRU on a given superblock.
- * @sb : superblock to shrink dentry LRU.
- * @count: If count is NULL, we prune all dentries on superblock.
- * @flags: If flags is non-zero, we need to do special processing based on
- *	which flags are set. This means we don't need to maintain multiple
- *	similar copies of this loop.
- */
-static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+static void shrink_dentry_list(struct list_head *list)
 {
-	LIST_HEAD(referenced);
-	LIST_HEAD(tmp);
 	struct dentry *dentry;
-	int cnt = 0;
 
-	BUG_ON(!sb);
-	BUG_ON((flags & DCACHE_REFERENCED) && count == NULL);
-	spin_lock(&dcache_lock);
-	if (count != NULL)
-		/* called from prune_dcache() and shrink_dcache_parent() */
-		cnt = *count;
-restart:
-	if (count == NULL)
-		list_splice_init(&sb->s_dentry_lru, &tmp);
-	else {
-		while (!list_empty(&sb->s_dentry_lru)) {
-			dentry = list_entry(sb->s_dentry_lru.prev,
-					struct dentry, d_lru);
-			BUG_ON(dentry->d_sb != sb);
+	while (!list_empty(list)) {
+		dentry = list_entry(list->prev, struct dentry, d_lru);
+		dentry_lru_del(dentry);
 
-			spin_lock(&dentry->d_lock);
-			/*
-			 * If we are honouring the DCACHE_REFERENCED flag and
-			 * the dentry has this flag set, don't free it. Clear
-			 * the flag and put it back on the LRU.
-			 */
-			if ((flags & DCACHE_REFERENCED)
-				&& (dentry->d_flags & DCACHE_REFERENCED)) {
-				dentry->d_flags &= ~DCACHE_REFERENCED;
-				list_move(&dentry->d_lru, &referenced);
-				spin_unlock(&dentry->d_lock);
-			} else {
-				list_move_tail(&dentry->d_lru, &tmp);
-				spin_unlock(&dentry->d_lock);
-				cnt--;
-				if (!cnt)
-					break;
-			}
-			cond_resched_lock(&dcache_lock);
-		}
-	}
-	while (!list_empty(&tmp)) {
-		dentry = list_entry(tmp.prev, struct dentry, d_lru);
-		dentry_lru_del_init(dentry);
-		spin_lock(&dentry->d_lock);
 		/*
 		 * We found an inuse dentry which was not removed from
 		 * the LRU because of laziness during lookup. Do not free
 		 * it - just keep it off the LRU list.
 		 */
+		spin_lock(&dentry->d_lock);
 		if (atomic_read(&dentry->d_count)) {
 			spin_unlock(&dentry->d_lock);
 			continue;
@@ -516,13 +477,60 @@ restart:
 		/* dentry->d_lock was dropped in prune_one_dentry() */
 		cond_resched_lock(&dcache_lock);
 	}
-	if (count == NULL && !list_empty(&sb->s_dentry_lru))
-		goto restart;
-	if (count != NULL)
-		*count = cnt;
+}
+
+/**
+ * __shrink_dcache_sb - shrink the dentry LRU on a given superblock
+ * @sb: superblock to shrink dentry LRU.
+ * @count: number of entries to prune
+ * @flags: flags to control the dentry processing
+ *
+ * If flags contains DCACHE_REFERENCED reference dentries will not be pruned.
+ */
+static void __shrink_dcache_sb(struct super_block *sb, int *count, int flags)
+{
+	/* called from prune_dcache() and shrink_dcache_parent() */
+	struct dentry *dentry;
+	LIST_HEAD(referenced);
+	LIST_HEAD(tmp);
+	int cnt = *count;
+
+	spin_lock(&dcache_lock);
+	while (!list_empty(&sb->s_dentry_lru)) {
+		dentry = list_entry(sb->s_dentry_lru.prev,
+				struct dentry, d_lru);
+		BUG_ON(dentry->d_sb != sb);
+
+		/*
+		 * If we are honouring the DCACHE_REFERENCED flag and the
+		 * dentry has this flag set, don't free it. Clear the flag
+		 * and put it back on the LRU.
+		 */
+		if (flags & DCACHE_REFERENCED) {
+			spin_lock(&dentry->d_lock);
+			if (dentry->d_flags & DCACHE_REFERENCED) {
+				dentry->d_flags &= ~DCACHE_REFERENCED;
+				list_move(&dentry->d_lru, &referenced);
+				spin_unlock(&dentry->d_lock);
+				cond_resched_lock(&dcache_lock);
+				continue;
+			}
+			spin_unlock(&dentry->d_lock);
+		}
+
+		list_move_tail(&dentry->d_lru, &tmp);
+		if (!--cnt)
+			break;
+		cond_resched_lock(&dcache_lock);
+	}
+
+	*count = cnt;
+	shrink_dentry_list(&tmp);
+
 	if (!list_empty(&referenced))
 		list_splice(&referenced, &sb->s_dentry_lru);
 	spin_unlock(&dcache_lock);
+
 }
 
 /**
@@ -538,7 +546,7 @@ static void prune_dcache(int count)
 {
 	struct super_block *sb, *p = NULL;
 	int w_count;
-	int unused = dentry_stat.nr_unused;
+	int unused = percpu_counter_sum_positive(&nr_dentry_unused);
 	int prune_ratio;
 	int pruned;
 
@@ -608,13 +616,19 @@ static void prune_dcache(int count)
  * shrink_dcache_sb - shrink dcache for a superblock
  * @sb: superblock
  *
- * Shrink the dcache for the specified super block. This
- * is used to free the dcache before unmounting a file
- * system
+ * Shrink the dcache for the specified super block. This is used to free
+ * the dcache before unmounting a file system.
  */
-void shrink_dcache_sb(struct super_block * sb)
+void shrink_dcache_sb(struct super_block *sb)
 {
-	__shrink_dcache_sb(sb, NULL, 0);
+	LIST_HEAD(tmp);
+
+	spin_lock(&dcache_lock);
+	while (!list_empty(&sb->s_dentry_lru)) {
+		list_splice_init(&sb->s_dentry_lru, &tmp);
+		shrink_dentry_list(&tmp);
+	}
+	spin_unlock(&dcache_lock);
 }
 EXPORT_SYMBOL(shrink_dcache_sb);
 
@@ -632,7 +646,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 
 	/* detach this root from the system */
 	spin_lock(&dcache_lock);
-	dentry_lru_del_init(dentry);
+	dentry_lru_del(dentry);
 	__d_drop(dentry);
 	spin_unlock(&dcache_lock);
 
@@ -646,7 +660,7 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 		spin_lock(&dcache_lock);
 		list_for_each_entry(loop, &dentry->d_subdirs,
 				    d_u.d_child) {
-			dentry_lru_del_init(loop);
+			dentry_lru_del(loop);
 			__d_drop(loop);
 			cond_resched_lock(&dcache_lock);
 		}
@@ -703,20 +717,13 @@ static void shrink_dcache_for_umount_subtree(struct dentry *dentry)
 			 * otherwise we ascend to the parent and move to the
 			 * next sibling if there is one */
 			if (!parent)
-				goto out;
-
+				return;
 			dentry = parent;
-
 		} while (list_empty(&dentry->d_subdirs));
 
 		dentry = list_entry(dentry->d_subdirs.next,
 				    struct dentry, d_u.d_child);
 	}
-out:
-	/* several dentries were freed, need to correct nr_dentry */
-	spin_lock(&dcache_lock);
-	dentry_stat.nr_dentry -= detached;
-	spin_unlock(&dcache_lock);
 }
 
 /*
@@ -830,14 +837,15 @@ resume:
 		struct dentry *dentry = list_entry(tmp, struct dentry, d_u.d_child);
 		next = tmp->next;
 
-		dentry_lru_del_init(dentry);
 		/*
 		 * move only zero ref count dentries to the end
 		 * of the unused list for prune_dcache
 		 */
 		if (!atomic_read(&dentry->d_count)) {
-			dentry_lru_add_tail(dentry);
+			dentry_lru_move_tail(dentry);
 			found++;
+		} else {
+			dentry_lru_del(dentry);
 		}
 
 		/*
@@ -900,12 +908,16 @@ EXPORT_SYMBOL(shrink_dcache_parent);
  */
 static int shrink_dcache_memory(struct shrinker *shrink, int nr, gfp_t gfp_mask)
 {
+	int nr_unused;
+
 	if (nr) {
 		if (!(gfp_mask & __GFP_FS))
 			return -1;
 		prune_dcache(nr);
 	}
-	return (dentry_stat.nr_unused / 100) * sysctl_vfs_cache_pressure;
+
+	nr_unused = percpu_counter_sum_positive(&nr_dentry_unused);
+	return (nr_unused / 100) * sysctl_vfs_cache_pressure;
 }
 
 static struct shrinker dcache_shrinker = {
@@ -972,9 +984,10 @@ struct dentry *d_alloc(struct dentry * parent, const struct qstr *name)
 	spin_lock(&dcache_lock);
 	if (parent)
 		list_add(&dentry->d_u.d_child, &parent->d_subdirs);
-	dentry_stat.nr_dentry++;
 	spin_unlock(&dcache_lock);
 
+	percpu_counter_inc(&nr_dentry);
+
 	return dentry;
 }
 EXPORT_SYMBOL(d_alloc);
@@ -1478,33 +1491,26 @@ out:
  * This is used by ncpfs in its readdir implementation.
  * Zero is returned in the dentry is invalid.
  */
-
-int d_validate(struct dentry *dentry, struct dentry *dparent)
+int d_validate(struct dentry *dentry, struct dentry *parent)
 {
-	struct hlist_head *base;
-	struct hlist_node *lhp;
+	struct hlist_head *head = d_hash(parent, dentry->d_name.hash);
+	struct hlist_node *node;
+	struct dentry *d;
 
 	/* Check whether the ptr might be valid at all.. */
 	if (!kmem_ptr_validate(dentry_cache, dentry))
-		goto out;
-
-	if (dentry->d_parent != dparent)
-		goto out;
+		return 0;
+	if (dentry->d_parent != parent)
+		return 0;
 
-	spin_lock(&dcache_lock);
-	base = d_hash(dparent, dentry->d_name.hash);
-	hlist_for_each(lhp,base) {
-		/* hlist_for_each_entry_rcu() not required for d_hash list
-		 * as it is parsed under dcache_lock
-		 */
-		if (dentry == hlist_entry(lhp, struct dentry, d_hash)) {
-			__dget_locked(dentry);
-			spin_unlock(&dcache_lock);
+	rcu_read_lock();
+	hlist_for_each_entry_rcu(d, node, head, d_hash) {
+		if (d == dentry) {
+			dget(dentry);
 			return 1;
 		}
 	}
-	spin_unlock(&dcache_lock);
-out:
+	rcu_read_unlock();
 	return 0;
 }
 EXPORT_SYMBOL(d_validate);
@@ -1994,7 +2000,7 @@ global_root:
  * Returns a pointer into the buffer or an error code if the
  * path was too long.
  *
- * "buflen" should be positive. Caller holds the dcache_lock.
+ * "buflen" should be positive.
  *
 * If path is not reachable from the supplied root, then the value of
 * root is changed (without modifying refcounts).
@@ -2006,10 +2012,12 @@ char *__d_path(const struct path *path, struct path *root,
 	int error;
 
 	prepend(&res, &buflen, "\0", 1);
+	spin_lock(&dcache_lock);
 	error = prepend_path(path, root, &res, &buflen);
+	spin_unlock(&dcache_lock);
+
 	if (error)
 		return ERR_PTR(error);
-
 	return res;
 }
 
@@ -2419,6 +2427,9 @@ static void __init dcache_init(void)
 {
 	int loop;
 
+	percpu_counter_init(&nr_dentry, 0);
+	percpu_counter_init(&nr_dentry_unused, 0);
+
 	/*
 	 * A constructor could be added for stable state like the lists,
 	 * but it is probably not worth it because of the cache nature
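Note (editorial, not part of the patch): the core of the change is that nr_dentry and nr_dentry_unused become percpu_counters. The hot paths (d_alloc, d_free and the LRU helpers) only touch a per-CPU slot, and the totals are folded together in proc_nr_dentry() only when the statistics are actually read through the sysctl handler, so dentry_stat no longer has to be maintained under dcache_lock. The user-space sketch below illustrates the same pattern in miniature; the names pcpu_counter, counter_inc/counter_dec/counter_sum_positive and NR_THREADS are made up for illustration, and it assumes 64-byte cache lines, GCC atomic builtins and POSIX threads.

/*
 * User-space sketch, NOT kernel code: each thread ("CPU") updates only its
 * own cache-line-sized slot on the fast path; a reader folds the slots
 * together only when the statistic is requested and clamps a transiently
 * negative total to zero, as percpu_counter_sum_positive() does.
 */
#include <pthread.h>
#include <stdio.h>

#define NR_THREADS	4

struct pcpu_counter {
	long count;
	char pad[64 - sizeof(long)];	/* keep each slot on its own cache line */
};

static struct pcpu_counter counters[NR_THREADS];

static void counter_inc(int cpu)	/* fast path, like percpu_counter_inc() */
{
	__atomic_fetch_add(&counters[cpu].count, 1, __ATOMIC_RELAXED);
}

static void counter_dec(int cpu)	/* fast path, like percpu_counter_dec() */
{
	__atomic_fetch_sub(&counters[cpu].count, 1, __ATOMIC_RELAXED);
}

/* slow path, like percpu_counter_sum_positive(): sum every slot on read */
static long counter_sum_positive(void)
{
	long sum = 0;
	int i;

	for (i = 0; i < NR_THREADS; i++)
		sum += __atomic_load_n(&counters[i].count, __ATOMIC_RELAXED);
	return sum > 0 ? sum : 0;
}

static void *worker(void *arg)
{
	int cpu = (int)(long)arg;
	int i;

	for (i = 0; i < 1000; i++)	/* "allocate" 1000 dentries */
		counter_inc(cpu);
	for (i = 0; i < 400; i++)	/* "free" 400 of them */
		counter_dec(cpu);
	return NULL;
}

int main(void)
{
	pthread_t tid[NR_THREADS];
	long i;

	for (i = 0; i < NR_THREADS; i++)
		pthread_create(&tid[i], NULL, worker, (void *)i);
	for (i = 0; i < NR_THREADS; i++)
		pthread_join(tid[i], NULL);

	/* like proc_nr_dentry(): sum only when someone reads the statistic */
	printf("nr_dentry = %ld\n", counter_sum_positive());
	return 0;
}

Compiled with gcc -pthread, the sketch prints nr_dentry = 2400: each worker nets +600 without ever sharing a counter cache line, and the O(number of slots) summation happens once, at read time. That is the same trade-off the patch makes when it drops the dentry_stat updates from the allocation, free and LRU paths.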