Diffstat (limited to 'drivers/gpu')
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c           |  4
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c           | 82
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c      | 44
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c  | 51
4 files changed, 119 insertions(+), 62 deletions(-)
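
Every hunk in this patch is the same mechanical conversion: the old single ->shrink callback is split into ->count_objects (a cheap estimate of how many objects are reclaimable) and ->scan_objects (the pass that actually frees up to sc->nr_to_scan of them). A minimal sketch of the new shape, with hypothetical demo_* helpers standing in for driver-specific logic:

#include <linux/shrinker.h>

/* Cheap estimate of reclaimable objects; must not reclaim anything. */
static unsigned long demo_shrink_count(struct shrinker *shrink,
				       struct shrink_control *sc)
{
	return demo_nr_cached();	/* hypothetical helper */
}

/*
 * Free up to sc->nr_to_scan objects and return how many were freed,
 * or SHRINK_STOP if no progress can be made (e.g. locks contended).
 */
static unsigned long demo_shrink_scan(struct shrinker *shrink,
				      struct shrink_control *sc)
{
	return demo_free_cached(sc->nr_to_scan);	/* hypothetical helper */
}

static struct shrinker demo_shrinker = {
	.count_objects = demo_shrink_count,
	.scan_objects  = demo_shrink_scan,
	.seeks         = DEFAULT_SEEKS,
};
/* register_shrinker(&demo_shrinker) at init,
 * unregister_shrinker(&demo_shrinker) at teardown. */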
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 9b265a4c6a3..c27a21034a5 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1676,7 +1676,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
out_gem_unload:
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
if (dev->pdev->msi_enabled)
@@ -1715,7 +1715,7 @@ int i915_driver_unload(struct drm_device *dev)
i915_teardown_sysfs(dev);
- if (dev_priv->mm.inactive_shrinker.shrink)
+ if (dev_priv->mm.inactive_shrinker.scan_objects)
unregister_shrinker(&dev_priv->mm.inactive_shrinker);
mutex_lock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index d9e337feef1..8507c6d1e64 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -57,10 +57,12 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
struct drm_i915_fence_reg *fence,
bool enable);
-static int i915_gem_inactive_shrink(struct shrinker *shrinker,
- struct shrink_control *sc);
+static unsigned long i915_gem_inactive_count(struct shrinker *shrinker,
+ struct shrink_control *sc);
+static unsigned long i915_gem_inactive_scan(struct shrinker *shrinker,
+ struct shrink_control *sc);
static long i915_gem_purge(struct drm_i915_private *dev_priv, long target);
-static void i915_gem_shrink_all(struct drm_i915_private *dev_priv);
+static long i915_gem_shrink_all(struct drm_i915_private *dev_priv);
static void i915_gem_object_truncate(struct drm_i915_gem_object *obj);
static bool cpu_cache_is_coherent(struct drm_device *dev,
@@ -1769,16 +1771,21 @@ i915_gem_purge(struct drm_i915_private *dev_priv, long target)
return __i915_gem_shrink(dev_priv, target, true);
}
-static void
+static long
i915_gem_shrink_all(struct drm_i915_private *dev_priv)
{
struct drm_i915_gem_object *obj, *next;
+ long freed = 0;
i915_gem_evict_everything(dev_priv->dev);
list_for_each_entry_safe(obj, next, &dev_priv->mm.unbound_list,
- global_list)
+ global_list) {
+ if (obj->pages_pin_count == 0)
+ freed += obj->base.size >> PAGE_SHIFT;
i915_gem_object_put_pages(obj);
+ }
+ return freed;
}
static int
@@ -4558,7 +4565,8 @@ i915_gem_load(struct drm_device *dev)
dev_priv->mm.interruptible = true;
- dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.scan_objects = i915_gem_inactive_scan;
+ dev_priv->mm.inactive_shrinker.count_objects = i915_gem_inactive_count;
dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
register_shrinker(&dev_priv->mm.inactive_shrinker);
}
@@ -4781,8 +4789,8 @@ static bool mutex_is_locked_by(struct mutex *mutex, struct task_struct *task)
#endif
}
-static int
-i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
+static unsigned long
+i915_gem_inactive_count(struct shrinker *shrinker, struct shrink_control *sc)
{
struct drm_i915_private *dev_priv =
container_of(shrinker,
@@ -4790,45 +4798,35 @@ i915_gem_inactive_shrink(struct shrinker *shrinker, struct shrink_control *sc)
mm.inactive_shrinker);
struct drm_device *dev = dev_priv->dev;
struct drm_i915_gem_object *obj;
- int nr_to_scan = sc->nr_to_scan;
bool unlock = true;
- int cnt;
+ unsigned long count;
if (!mutex_trylock(&dev->struct_mutex)) {
if (!mutex_is_locked_by(&dev->struct_mutex, current))
- return 0;
+ return SHRINK_STOP;
if (dev_priv->mm.shrinker_no_lock_stealing)
- return 0;
+ return SHRINK_STOP;
unlock = false;
}
- if (nr_to_scan) {
- nr_to_scan -= i915_gem_purge(dev_priv, nr_to_scan);
- if (nr_to_scan > 0)
- nr_to_scan -= __i915_gem_shrink(dev_priv, nr_to_scan,
- false);
- if (nr_to_scan > 0)
- i915_gem_shrink_all(dev_priv);
- }
-
- cnt = 0;
+ count = 0;
list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list)
if (obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
if (obj->active)
continue;
if (obj->pin_count == 0 && obj->pages_pin_count == 0)
- cnt += obj->base.size >> PAGE_SHIFT;
+ count += obj->base.size >> PAGE_SHIFT;
}
if (unlock)
mutex_unlock(&dev->struct_mutex);
- return cnt;
+ return count;
}
/* All the new VM stuff */
@@ -4892,6 +4890,40 @@ unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
return 0;
}
+static unsigned long
+i915_gem_inactive_scan(struct shrinker *shrinker, struct shrink_control *sc)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ int nr_to_scan = sc->nr_to_scan;
+ unsigned long freed;
+ bool unlock = true;
+
+ if (!mutex_trylock(&dev->struct_mutex)) {
+ if (!mutex_is_locked_by(&dev->struct_mutex, current))
+ return 0;
+
+ if (dev_priv->mm.shrinker_no_lock_stealing)
+ return 0;
+
+ unlock = false;
+ }
+
+ freed = i915_gem_purge(dev_priv, nr_to_scan);
+ if (freed < nr_to_scan)
+ freed += __i915_gem_shrink(dev_priv, nr_to_scan,
+ false);
+ if (freed < nr_to_scan)
+ freed += i915_gem_shrink_all(dev_priv);
+
+ if (unlock)
+ mutex_unlock(&dev->struct_mutex);
+ return freed;
+}
+
struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
struct i915_address_space *vm)
{
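
Both i915 callbacks above share the trylock-or-steal guard around struct_mutex: take the lock if it is free, run without unlocking if the current task already holds it (reclaim recursing from inside the driver), and back off otherwise. A distilled sketch of that guard, with holds_lock() as an illustrative stand-in for the driver's mutex_is_locked_by():

/* Illustrative sketch, not the driver's exact code. */
static bool shrinker_trylock(struct mutex *lock, bool *unlock)
{
	if (mutex_trylock(lock)) {
		*unlock = true;		/* we took it, so we release it */
		return true;
	}
	if (!holds_lock(lock, current))	/* stand-in for mutex_is_locked_by() */
		return false;		/* owned elsewhere: cannot proceed */
	*unlock = false;		/* reentered under our own lock */
	return true;
}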
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index bd2a3b40cd1..863bef9f923 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -377,28 +377,26 @@ out:
return nr_free;
}
-/* Get good estimation how many pages are free in pools */
-static int ttm_pool_get_num_unused_pages(void)
-{
- unsigned i;
- int total = 0;
- for (i = 0; i < NUM_POOLS; ++i)
- total += _manager->pools[i].npages;
-
- return total;
-}
-
/**
* Callback for mm to request pool to reduce number of pages held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_page_pool_free() does memory allocation using GFP_KERNEL. That means
+ * this can deadlock when called with an sc->gfp_mask that is not equal to
+ * GFP_KERNEL.
+ *
+ * This code is crying out for a shrinker per pool....
*/
-static int ttm_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned i;
unsigned pool_offset = atomic_add_return(1, &start_pool);
struct ttm_page_pool *pool;
int shrink_pages = sc->nr_to_scan;
+ unsigned long freed = 0;
pool_offset = pool_offset % NUM_POOLS;
/* select start pool in round robin fashion */
@@ -408,14 +406,28 @@ static int ttm_pool_mm_shrink(struct shrinker *shrink,
break;
pool = &_manager->pools[(i + pool_offset)%NUM_POOLS];
shrink_pages = ttm_page_pool_free(pool, nr_free);
+ freed += nr_free - shrink_pages;
}
- /* return estimated number of unused pages in pool */
- return ttm_pool_get_num_unused_pages();
+ return freed;
+}
+
+
+static unsigned long
+ttm_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ unsigned i;
+ unsigned long count = 0;
+
+ for (i = 0; i < NUM_POOLS; ++i)
+ count += _manager->pools[i].npages;
+
+ return count;
}
static void ttm_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
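
The deadlock warning above exists because ttm_page_pool_free() allocates with GFP_KERNEL even though the shrinker may be invoked from a reclaim context that cannot tolerate such an allocation. A common defensive idiom in other shrinkers (not part of this patch) is to refuse the scan when sc->gfp_mask does not allow blocking filesystem-capable allocations:

/* Hypothetical guard, shown only to illustrate the warning above. */
static unsigned long
guarded_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	/* bail out rather than risk a GFP_KERNEL allocation in reclaim */
	if (!(sc->gfp_mask & __GFP_FS))
		return SHRINK_STOP;

	return do_pool_free(sc->nr_to_scan);	/* hypothetical helper */
}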
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index b8b394319b4..7957beeeaf7 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -918,19 +918,6 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev)
}
EXPORT_SYMBOL_GPL(ttm_dma_populate);
-/* Get good estimation how many pages are free in pools */
-static int ttm_dma_pool_get_num_unused_pages(void)
-{
- struct device_pools *p;
- unsigned total = 0;
-
- mutex_lock(&_manager->lock);
- list_for_each_entry(p, &_manager->pools, pools)
- total += p->pool->npages_free;
- mutex_unlock(&_manager->lock);
- return total;
-}
-
/* Put all pages in pages list to correct pool to wait for reuse */
void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev)
{
@@ -1002,18 +989,29 @@ EXPORT_SYMBOL_GPL(ttm_dma_unpopulate);
/**
* Callback for mm to request pool to reduce number of pages held.
+ *
+ * XXX: (dchinner) Deadlock warning!
+ *
+ * ttm_dma_page_pool_free() does GFP_KERNEL memory allocation, and so attention
+ * needs to be paid to sc->gfp_mask to determine if this can be done or not.
+ * GFP_KERNEL memory allocation in a GFP_ATOMIC reclaim context would be really
+ * bad.
+ *
+ * I'm getting sadder as I hear more pathetic whimpers about needing per-pool
+ * shrinkers.
*/
-static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
- struct shrink_control *sc)
+static unsigned long
+ttm_dma_pool_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
static atomic_t start_pool = ATOMIC_INIT(0);
unsigned idx = 0;
unsigned pool_offset = atomic_add_return(1, &start_pool);
unsigned shrink_pages = sc->nr_to_scan;
struct device_pools *p;
+ unsigned long freed = 0;
if (list_empty(&_manager->pools))
- return 0;
+ return SHRINK_STOP;
mutex_lock(&_manager->lock);
pool_offset = pool_offset % _manager->npools;
@@ -1029,18 +1027,33 @@ static int ttm_dma_pool_mm_shrink(struct shrinker *shrink,
continue;
nr_free = shrink_pages;
shrink_pages = ttm_dma_page_pool_free(p->pool, nr_free);
+ freed += nr_free - shrink_pages;
+
pr_debug("%s: (%s:%d) Asked to shrink %d, have %d more to go\n",
p->pool->dev_name, p->pool->name, current->pid,
nr_free, shrink_pages);
}
mutex_unlock(&_manager->lock);
- /* return estimated number of unused pages in pool */
- return ttm_dma_pool_get_num_unused_pages();
+ return freed;
+}
+
+static unsigned long
+ttm_dma_pool_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
+{
+ struct device_pools *p;
+ unsigned long count = 0;
+
+ mutex_lock(&_manager->lock);
+ list_for_each_entry(p, &_manager->pools, pools)
+ count += p->pool->npages_free;
+ mutex_unlock(&_manager->lock);
+ return count;
}
static void ttm_dma_pool_mm_shrink_init(struct ttm_pool_manager *manager)
{
- manager->mm_shrink.shrink = &ttm_dma_pool_mm_shrink;
+ manager->mm_shrink.count_objects = ttm_dma_pool_shrink_count;
+ manager->mm_shrink.scan_objects = ttm_dma_pool_shrink_scan;
manager->mm_shrink.seeks = 1;
register_shrinker(&manager->mm_shrink);
}
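
Both TTM scan callbacks use the same round-robin walk: a static atomic counter advances the starting pool on every invocation so reclaim pressure is spread across pools, and the freed total is accumulated from what each pool actually released (the pool-free helpers return the count they could not free). Distilled into a generic sketch:

/* Generic sketch of the round-robin walk; pool_free() stands in for
 * ttm_page_pool_free()/ttm_dma_page_pool_free() and returns the number
 * of requested pages it could NOT free. */
static unsigned long pools_scan(struct shrink_control *sc)
{
	static atomic_t start_pool = ATOMIC_INIT(0);
	unsigned offset = atomic_add_return(1, &start_pool) % NUM_POOLS;
	unsigned long freed = 0;
	unsigned left = sc->nr_to_scan;
	unsigned i;

	for (i = 0; i < NUM_POOLS && left; ++i) {
		unsigned nr_free = left;

		/* start at a different pool each time we are invoked */
		left = pool_free(&pools[(i + offset) % NUM_POOLS], nr_free);
		freed += nr_free - left;
	}
	return freed;
}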