author     Al Viro <viro@zeniv.linux.org.uk>  2013-09-12 17:54:48 -0400
committer  Al Viro <viro@zeniv.linux.org.uk>  2013-09-12 17:54:48 -0400
commit     bf2ba3bc185269eca274b458aac46ba1ad7c1121 (patch)
tree       2cdaf7568df3ab762f42e7849fc9211dcb60d1bc
parent     bcceeeba9b3ca99c29523bb7af16727f8a837db4 (diff)
parent     f5e1dd34561e0fb06400b378d595198918833021 (diff)
Merge branch 'for-next' into for-linus
-rw-r--r--  arch/x86/kvm/mmu.c | 25
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c | 4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c | 82
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 44
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 51
-rw-r--r--  drivers/md/bcache/btree.c | 43
-rw-r--r--  drivers/md/bcache/sysfs.c | 2
-rw-r--r--  drivers/md/dm-bufio.c | 64
-rw-r--r--  drivers/staging/android/ashmem.c | 44
-rw-r--r--  drivers/staging/android/lowmemorykiller.c | 43
-rw-r--r--  drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h | 38
-rw-r--r--  drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 148
-rw-r--r--  drivers/staging/lustre/lustre/obdclass/lu_object.c | 98
-rw-r--r--  drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c | 76
-rw-r--r--  fs/dcache.c | 276
-rw-r--r--  fs/drop_caches.c | 1
-rw-r--r--  fs/ext4/extents_status.c | 33
-rw-r--r--  fs/gfs2/glock.c | 30
-rw-r--r--  fs/gfs2/main.c | 3
-rw-r--r--  fs/gfs2/quota.c | 18
-rw-r--r--  fs/gfs2/quota.h | 6
-rw-r--r--  fs/inode.c | 193
-rw-r--r--  fs/internal.h | 6
-rw-r--r--  fs/mbcache.c | 49
-rw-r--r--  fs/nfs/dir.c | 16
-rw-r--r--  fs/nfs/internal.h | 6
-rw-r--r--  fs/nfs/super.c | 3
-rw-r--r--  fs/nfsd/nfscache.c | 32
-rw-r--r--  fs/quota/dquot.c | 34
-rw-r--r--  fs/super.c | 111
-rw-r--r--  fs/ubifs/shrinker.c | 29
-rw-r--r--  fs/ubifs/super.c | 3
-rw-r--r--  fs/ubifs/ubifs.h | 5
-rw-r--r--  fs/xfs/xfs_buf.c | 253
-rw-r--r--  fs/xfs/xfs_buf.h | 17
-rw-r--r--  fs/xfs/xfs_dquot.c | 7
-rw-r--r--  fs/xfs/xfs_icache.c | 4
-rw-r--r--  fs/xfs/xfs_icache.h | 2
-rw-r--r--  fs/xfs/xfs_qm.c | 287
-rw-r--r--  fs/xfs/xfs_qm.h | 4
-rw-r--r--  fs/xfs/xfs_super.c | 12
-rw-r--r--  include/linux/dcache.h | 14
-rw-r--r--  include/linux/fs.h | 25
-rw-r--r--  include/linux/list_lru.h | 131
-rw-r--r--  include/linux/shrinker.h | 54
-rw-r--r--  include/trace/events/vmscan.h | 4
-rw-r--r--  include/uapi/linux/fs.h | 6
-rw-r--r--  kernel/sysctl.c | 6
-rw-r--r--  mm/Makefile | 2
-rw-r--r--  mm/huge_memory.c | 17
-rw-r--r--  mm/list_lru.c | 139
-rw-r--r--  mm/memory-failure.c | 2
-rw-r--r--  mm/vmscan.c | 241
-rw-r--r--  net/sunrpc/auth.c | 41
54 files changed, 1755 insertions, 1129 deletions
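
The common thread in the diffstat above is the shrinker API rework carried by this merge: include/linux/shrinker.h replaces the single ->shrink() callback with a ->count_objects()/->scan_objects() pair, both returning unsigned long, and each file below is converted to that pattern. As a minimal sketch of the converted shape (all names here are illustrative, not taken from the patch):

static unsigned long example_count(struct shrinker *shrink,
				   struct shrink_control *sc)
{
	/* report how many objects could be freed right now */
	return example_nr_cached_objects();	/* hypothetical helper */
}

static unsigned long example_scan(struct shrinker *shrink,
				  struct shrink_control *sc)
{
	unsigned long freed;

	if (!example_trylock())			/* hypothetical helper */
		return SHRINK_STOP;		/* cannot make progress now */

	freed = example_free_objects(sc->nr_to_scan);
	example_unlock();
	return freed;				/* number actually freed */
}

static struct shrinker example_shrinker = {
	.count_objects	= example_count,
	.scan_objects	= example_scan,
	.seeks		= DEFAULT_SEEKS,
};

The shrinker is then registered with register_shrinker(&example_shrinker), exactly as the converted drivers below do.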
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6e2d2c8f230..dce0df8150d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4421,13 +4421,12 @@ void kvm_mmu_invalidate_mmio_sptes(struct kvm *kvm)
}
}
-static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long
+mmu_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
struct kvm *kvm;
int nr_to_scan = sc->nr_to_scan;
-
- if (nr_to_scan == 0)
- goto out;
+ unsigned long freed = 0;
raw_spin_lock(&kvm_lock);
@@ -4462,25 +4461,37 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
goto unlock;
}
- prepare_zap_oldest_mmu_page(kvm, &invalid_list);
+ if (prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+ freed++;
kvm_mmu_commit_zap_page(kvm, &invalid_list);
unlock:
spin_unlock(&kvm->mmu_lock);
srcu_read_unlock(&kvm->srcu, idx);
+ /*
+ * unfair on small ones
+ * per-vm shrinkers cry out
+ * sadness comes quickly
+ */
list_move_tail(&kvm->vm_list, &vm_list);
break;
}
raw_spin_unlock(&kvm_lock);
+ return freed;
-out:
+}
+
+static unsigned long
+mmu_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}
static struct shrinker mmu_shrinker = {
- .shrink = mmu_shrink,
+ .count_objects = mmu_shrink_count,
+ .scan_objects = mmu_shrink_scan,
.seeks = DEFAULT_SEEKS * 10,
};
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index fdaa0915ce5..d5c784d4867 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1667,7 +1667,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
if (dev->pdev->msi_enabled)
@@ -1706,7 +1706,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_teardown_sysfs(dev);
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 2d1cb10d846..a7ff3db4f60 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
- struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+ struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+ struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1736,16 +1738,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return __i915_gem_shrink(dev_priv, target, true);
}
-static void
+static long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
+ long freed = 0;
i915_gem_evict_everything(dev_priv->dev);
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
- global_list)
+ global_list) {
+ if (obj->pages_pin_count == 0)
+ freed += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put_pages(obj);
+ }
+ return freed;
}
static int
@@ -4526,7 +4533,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
+ dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
@@ -4749,8 +4757,8 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
-static int
-i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
@@ -4758,45 +4766,35 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
- int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
- int cnt;
+ unsigned long count;
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
+ return SHRINK_STOP;
if (dev_priv->mm.shrinker_no_lock_stealing)
- return 0;
+ return SHRINK_STOP;
unlock = false;
}
- if (nr_to_scan) {
- nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
- if (nr_to_scan > 0)
- nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
- false);
- if (nr_to_scan > 0)
- i915_gem_shrink_all(dev_priv);
- }
-
- cnt = 0;
+ count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
}
if (unlock)
mutex_unlock(&dev->struct_mutex);
- return cnt;
+ return count;
}
/* All the new VM stuff */
@@ -4860,6 +4858,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
return 0;
}
+static unsigned long
+i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ int nr_to_scan = sc->nr_to_scan;
+ unsigned long freed;
+ bool unlock = true;
+
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return 0;
+
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return 0;
+
+ unlock = false;
+ }
+
+ freed = i915_gem_purge(dev_priv, nr_to_scan);
+ if (freed < nr_to_scan)
+ freed += __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (freed < nr_to_scan)
+ freed += i915_gem_shrink_all(dev_priv);
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+ return freed;
+}
+
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index bd2a3b40cd1..863bef9f923 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ out:
return nr_free;
}
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
- unsigned i;
- int total = 0;
- for (i = 0; i < NUM_POOLS; ++i)
- total += _manager->pools[i].npages;
-
- return total;
-}
-
/**
* Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
+ * this can deadlock when called with a sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
*/
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
+ unsigned long freed = 0;
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
+ freed += nr_free - shrink_pages;
}
- /* return estimated number of unused pages in pool */
- return ttm_pool_get_num_unused_pages();
+ return freed;
+}
+
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned i;
+ unsigned long count = 0;
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ count += _manager->pools[i].npages;
+
+ return count;
}
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
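
The deadlock warning added above points out that ttm_page_pool_free() allocates with GFP_KERNEL from inside the scan callback. A guard of the kind the comment implies would check sc->gfp_mask before attempting such an allocation; this is only a sketch, not part of the patch, and pool_free_pages() is a hypothetical helper:

static unsigned long guarded_pool_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	/*
	 * A GFP_KERNEL allocation is only safe if the reclaim context
	 * itself allows blocking, I/O and filesystem recursion.
	 */
	if ((sc->gfp_mask & GFP_KERNEL) != GFP_KERNEL)
		return SHRINK_STOP;

	return pool_free_pages(sc->nr_to_scan);
}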
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index b8b394319b4..7957beeeaf7 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
-/* Get good estimation how many pages are free in pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
- struct device_pools *p;
- unsigned total = 0;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools)
- total += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return total;
-}
-
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/**
* Callback for mm to request pool to reduce number of page held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+ * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+ * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be really
+ * bad.
+ *
+ * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
+ * shrinkers.
*/
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
+ unsigned long freed = 0;
if (list_empty(&_manager->pools))
- return 0;
+ return SHRINK_STOP;
mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ freed += nr_free - shrink_pages;
+
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
- /* return estimated number of unused pages in pool */
- return ttm_dma_pool_get_num_unused_pages();
+ return freed;
+}
+
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct device_pools *p;
+ unsigned long count = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ count += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return count;
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
+ manager->mm_shrink.scan_objects = &ttm_dma_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
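
Both XXX comments above ask for a shrinker per pool rather than one global one. A hypothetical sketch of that shape, not part of this patch, embeds the struct shrinker in each pool and recovers the pool with container_of(), the same pattern the i915, bcache and dm-bufio conversions in this series use:

struct example_pool {				/* hypothetical structure */
	unsigned long	npages_free;
	struct shrinker	shrink;
};

static unsigned long example_pool_count(struct shrinker *shrink,
					struct shrink_control *sc)
{
	struct example_pool *pool =
		container_of(shrink, struct example_pool, shrink);

	return pool->npages_free;
}

static unsigned long example_pool_scan(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	struct example_pool *pool =
		container_of(shrink, struct example_pool, shrink);

	/* hypothetical helper that frees up to nr_to_scan pages */
	return example_pool_free(pool, sc->nr_to_scan);
}

static void example_pool_init(struct example_pool *pool)
{
	pool->shrink.count_objects = example_pool_count;
	pool->shrink.scan_objects  = example_pool_scan;
	pool->shrink.seeks = 1;
	register_shrinker(&pool->shrink);
}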
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index ee372884c40..f9764e61978 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -597,24 +597,19 @@ static int mca_reap(struct btree *b, struct closure *cl, unsigned min_order)
return 0;
}
-static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
+static unsigned long bch_mca_scan(struct shrinker *shrink,
+ struct shrink_control *sc)
{
struct cache_set *c = container_of(shrink, struct cache_set, shrink);
struct btree *b, *t;
unsigned long i, nr = sc->nr_to_scan;
+ unsigned long freed = 0;
if (c->shrinker_disabled)
- return 0;
+ return SHRINK_STOP;
if (c->try_harder)
- return 0;
-
- /*
- * If nr == 0, we're supposed to return the number of items we have
- * cached. Not allowed to return -1.
- */
- if (!nr)
- return mca_can_free(c) * c->btree_pages;
+ return SHRINK_STOP;
/* Return -1 if we can't do anything right now */
if (sc->gfp_mask & __GFP_WAIT)
@@ -634,14 +629,14 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
i = 0;
list_for_each_entry_safe(b, t, &c->btree_cache_freeable, list) {
- if (!nr)
+ if (freed >= nr)
break;
if (++i > 3 &&
!mca_reap(b, NULL, 0)) {
mca_data_free(b);
rw_unlock(true, b);
- --nr;
+ freed++;
}
}
@@ -652,7 +647,7 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
if (list_empty(&c->btree_cache))
goto out;
- for (i = 0; nr && i < c->bucket_cache_used; i++) {
+ for (i = 0; (nr--) && i < c->bucket_cache_used; i++) {
b = list_first_entry(&c->btree_cache, struct btree, list);
list_rotate_left(&c->btree_cache);
@@ -661,14 +656,27 @@ static int bch_mca_shrink(struct shrinker *shrink, struct shrink_control *sc)
mca_bucket_free(b);
mca_data_free(b);
rw_unlock(true, b);
- --nr;
+ freed++;
} else
b->accessed = 0;
}
out:
- nr = mca_can_free(c) * c->btree_pages;
mutex_unlock(&c->bucket_lock);
- return nr;
+ return freed;
+}
+
+static unsigned long bch_mca_count(struct shrinker *shrink,
+ struct shrink_control *sc)
+{
+ struct cache_set *c = container_of(shrink, struct cache_set, shrink);
+
+ if (c->shrinker_disabled)
+ return 0;
+
+ if (c->try_harder)
+ return 0;
+
+ return mca_can_free(c) * c->btree_pages;
}
void bch_btree_cache_free(struct cache_set *c)
@@ -737,7 +745,8 @@ int bch_btree_cache_alloc(struct cache_set *c)
c->verify_data = NULL;
#endif
- c->shrink.shrink = bch_mca_shrink;
+ c->shrink.count_objects = bch_mca_count;
+ c->shrink.scan_objects = bch_mca_scan;
c->shrink.seeks = 4;
c->shrink.batch = c->btree_pages * 2;
register_shrinker(&c->shrink);
diff --git a/drivers/md/bcache/sysfs.c b/drivers/md/bcache/sysfs.c
index 12a2c2846f9..4fe6ab2fbe2 100644
--- a/drivers/md/bcache/sysfs.c
+++ b/drivers/md/bcache/sysfs.c
@@ -556,7 +556,7 @@ STORE(__bch_cache_set)
struct shrink_control sc;
sc.gfp_mask = GFP_KERNEL;
sc.nr_to_scan = strtoul_or_return(buf);
- c->shrink.shrink(&c->shrink, &sc);
+ c->shrink.scan_objects(&c->shrink, &sc);
}
sysfs_strtoul(congested_read_threshold_us,
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 5227e079a6e..173cbb20d10 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1425,62 +1425,75 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
unsigned long max_jiffies)
{
if (jiffies - b->last_accessed < max_jiffies)
- return 1;
+ return 0;
if (!(gfp & __GFP_IO)) {
if (test_bit(B_READING, &b->state) ||
test_bit(B_WRITING, &b->state) ||
test_bit(B_DIRTY, &b->state))
- return 1;
+ return 0;
}
if (b->hold_count)
- return 1;
+ return 0;
__make_buffer_clean(b);
__unlink_buffer(b);
__free_buffer_wake(b);
- return 0;
+ return 1;
}
-static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
- struct shrink_control *sc)
+static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
+ gfp_t gfp_mask)
{
int l;
struct dm_buffer *b, *tmp;
+ long freed = 0;
for (l = 0; l < LIST_SIZE; l++) {
- list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
- if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
- !--nr_to_scan)
- return;
+ list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
+ freed += __cleanup_old_buffer(b, gfp_mask, 0);
+ if (!--nr_to_scan)
+ break;
+ }
dm_bufio_cond_resched();
}
+ return freed;
}
-static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct dm_bufio_client *c =
- container_of(shrinker, struct dm_bufio_client, shrinker);
- unsigned long r;
- unsigned long nr_to_scan = sc->nr_to_scan;
+ struct dm_bufio_client *c;
+ unsigned long freed;
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
if (sc->gfp_mask & __GFP_IO)
dm_bufio_lock(c);
else if (!dm_bufio_trylock(c))
- return !nr_to_scan ? 0 : -1;
+ return SHRINK_STOP;
- if (nr_to_scan)
- __scan(c, nr_to_scan, sc);
+ freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
+ dm_bufio_unlock(c);
+ return freed;
+}
- r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
- if (r > INT_MAX)
- r = INT_MAX;
+static unsigned long
+dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct dm_bufio_client *c;
+ unsigned long count;
- dm_bufio_unlock(c);
+ c = container_of(shrink, struct dm_bufio_client, shrinker);
+ if (sc->gfp_mask & __GFP_IO)
+ dm_bufio_lock(c);
+ else if (!dm_bufio_trylock(c))
+ return 0;
- return r;
+ count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
+ dm_bufio_unlock(c);
+ return count;
}
/*
@@ -1582,7 +1595,8 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
__cache_size_refresh();
mutex_unlock(&dm_bufio_clients_lock);
- c->shrinker.shrink = shrink;
+ c->shrinker.count_objects = dm_bufio_shrink_count;
+ c->shrinker.scan_objects = dm_bufio_shrink_scan;
c->shrinker.seeks = 1;
c->shrinker.batch = 0;
register_shrinker(&c->shrinker);
@@ -1669,7 +1683,7 @@ static void cleanup_old_buffers(void)
struct dm_buffer *b;
b = list_entry(c->lru[LIST_CLEAN].prev,
struct dm_buffer, lru_list);
- if (__cleanup_old_buffer(b, 0, max_age * HZ))
+ if (!__cleanup_old_buffer(b, 0, max_age * HZ))
break;
dm_bufio_cond_resched();
}
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 21a3f725053..8e76ddca099 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -341,27 +341,26 @@ out:
/*
* ashmem_shrink - our cache shrinker, called from mm/vmscan.c :: shrink_slab
*
- * 'nr_to_scan' is the number of objects (pages) to prune, or 0 to query how
- * many objects (pages) we have in total.
+ * 'nr_to_scan' is the number of objects to scan for freeing.
*
* 'gfp_mask' is the mask of the allocation that got us into this mess.
*
- * Return value is the number of objects (pages) remaining, or -1 if we cannot
+ * Return value is the number of objects freed or -1 if we cannot
* proceed without risk of deadlock (due to gfp_mask).
*
* We approximate LRU via least-recently-unpinned, jettisoning unpinned partial
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
-static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
+static unsigned long
+ashmem_shrink_scan(struct shrinker *shrink