-rw-r--r--  drivers/gpu/drm/Kconfig                |    8
-rw-r--r--  drivers/gpu/drm/Makefile               |    2
-rw-r--r--  drivers/gpu/drm/ttm/Makefile           |    8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_agp_backend.c  |  150
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c           | 1698
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c      |  561
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c        |  454
-rw-r--r--  drivers/gpu/drm/ttm/ttm_global.c       |  114
-rw-r--r--  drivers/gpu/drm/ttm/ttm_memory.c       |  234
-rw-r--r--  drivers/gpu/drm/ttm/ttm_module.c       |   50
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c           |  635
-rw-r--r--  include/drm/ttm/ttm_bo_api.h           |  618
-rw-r--r--  include/drm/ttm/ttm_bo_driver.h        |  867
-rw-r--r--  include/drm/ttm/ttm_memory.h           |  153
-rw-r--r--  include/drm/ttm/ttm_module.h           |   58
-rw-r--r--  include/drm/ttm/ttm_placement.h        |   92
16 files changed, 5701 insertions(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index f5d46e7199d..64a57adc0a2 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -18,6 +18,14 @@ menuconfig DRM
details. You should also select and configure AGP
(/dev/agpgart) support.
+config DRM_TTM
+ tristate "TTM memory manager"
+ depends on DRM
+ help
+ GPU memory management subsystem for devices with multiple
+ GPU memory types. Will be enabled automatically if a device driver
+ uses it.
+
config DRM_TDFX
tristate "3dfx Banshee/Voodoo3+"
depends on DRM && PCI
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 4ec5061fa58..4e89ab08b7b 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -26,4 +26,4 @@ obj-$(CONFIG_DRM_I915) += i915/
obj-$(CONFIG_DRM_SIS) += sis/
obj-$(CONFIG_DRM_SAVAGE)+= savage/
obj-$(CONFIG_DRM_VIA) +=via/
-
+obj-$(CONFIG_DRM_TTM) += ttm/
diff --git a/drivers/gpu/drm/ttm/Makefile b/drivers/gpu/drm/ttm/Makefile
new file mode 100644
index 00000000000..b0a9de7a57c
--- /dev/null
+++ b/drivers/gpu/drm/ttm/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the TTM memory manager, part of the drm device driver.
+
+ccflags-y := -Iinclude/drm
+ttm-y := ttm_agp_backend.o ttm_memory.o ttm_tt.o ttm_bo.o \
+ ttm_bo_util.o ttm_bo_vm.o ttm_module.o ttm_global.o
+
+obj-$(CONFIG_DRM_TTM) += ttm.o
diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c
new file mode 100644
index 00000000000..e8f6d2229d8
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ * Keith Packard.
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#ifdef TTM_HAS_AGP
+#include "ttm/ttm_placement.h"
+#include <linux/agp_backend.h>
+#include <linux/module.h>
+#include <linux/io.h>
+#include <asm/agp.h>
+
+struct ttm_agp_backend {
+ struct ttm_backend backend;
+ struct agp_memory *mem;
+ struct agp_bridge_data *bridge;
+};
+
+static int ttm_agp_populate(struct ttm_backend *backend,
+ unsigned long num_pages, struct page **pages,
+ struct page *dummy_read_page)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct page **cur_page, **last_page = pages + num_pages;
+ struct agp_memory *mem;
+
+ mem = agp_allocate_memory(agp_be->bridge, num_pages, AGP_USER_MEMORY);
+ if (unlikely(mem == NULL))
+ return -ENOMEM;
+
+ mem->page_count = 0;
+ for (cur_page = pages; cur_page < last_page; ++cur_page) {
+ struct page *page = *cur_page;
+ if (!page)
+ page = dummy_read_page;
+
+ mem->memory[mem->page_count++] =
+ phys_to_gart(page_to_phys(page));
+ }
+ agp_be->mem = mem;
+ return 0;
+}
+
+static int ttm_agp_bind(struct ttm_backend *backend, struct ttm_mem_reg *bo_mem)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct agp_memory *mem = agp_be->mem;
+ int cached = (bo_mem->placement & TTM_PL_FLAG_CACHED);
+ int ret;
+
+ mem->is_flushed = 1;
+ mem->type = (cached) ? AGP_USER_CACHED_MEMORY : AGP_USER_MEMORY;
+
+ ret = agp_bind_memory(mem, bo_mem->mm_node->start);
+ if (ret)
+ printk(KERN_ERR TTM_PFX "AGP Bind memory failed.\n");
+
+ return ret;
+}
+
+static int ttm_agp_unbind(struct ttm_backend *backend)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+
+ if (agp_be->mem->is_bound)
+ return agp_unbind_memory(agp_be->mem);
+ else
+ return 0;
+}
+
+static void ttm_agp_clear(struct ttm_backend *backend)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+ struct agp_memory *mem = agp_be->mem;
+
+ if (mem) {
+ ttm_agp_unbind(backend);
+ agp_free_memory(mem);
+ }
+ agp_be->mem = NULL;
+}
+
+static void ttm_agp_destroy(struct ttm_backend *backend)
+{
+ struct ttm_agp_backend *agp_be =
+ container_of(backend, struct ttm_agp_backend, backend);
+
+ if (agp_be->mem)
+ ttm_agp_clear(backend);
+ kfree(agp_be);
+}
+
+static struct ttm_backend_func ttm_agp_func = {
+ .populate = ttm_agp_populate,
+ .clear = ttm_agp_clear,
+ .bind = ttm_agp_bind,
+ .unbind = ttm_agp_unbind,
+ .destroy = ttm_agp_destroy,
+};
+
+struct ttm_backend *ttm_agp_backend_init(struct ttm_bo_device *bdev,
+ struct agp_bridge_data *bridge)
+{
+ struct ttm_agp_backend *agp_be;
+
+ agp_be = kmalloc(sizeof(*agp_be), GFP_KERNEL);
+ if (!agp_be)
+ return NULL;
+
+ agp_be->mem = NULL;
+ agp_be->bridge = bridge;
+ agp_be->backend.func = &ttm_agp_func;
+ agp_be->backend.bdev = bdev;
+ return &agp_be->backend;
+}
+EXPORT_SYMBOL(ttm_agp_backend_init);
+
+#endif
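A driver that manages an AGP aperture hands this backend to TTM through its ttm_bo_driver hooks. A minimal, hedged sketch of that driver-side glue (the create_ttm_backend_entry hook name and the foo_device layout are assumptions; the relevant header, include/drm/ttm/ttm_bo_driver.h, is added by this patch but not shown above):

	static struct ttm_backend *
	foo_create_ttm_backend_entry(struct ttm_bo_device *bdev)
	{
		struct foo_device *dev_priv =
			container_of(bdev, struct foo_device, bdev);

		/* The bridge comes from the AGP head set up at driver load. */
		return ttm_agp_backend_init(bdev, dev_priv->ddev->agp->bridge);
	}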
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
new file mode 100644
index 00000000000..1587aeca7be
--- /dev/null
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -0,0 +1,1698 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ */
+
+#include "ttm/ttm_module.h"
+#include "ttm/ttm_bo_driver.h"
+#include "ttm/ttm_placement.h"
+#include <linux/jiffies.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/file.h>
+#include <linux/module.h>
+
+#define TTM_ASSERT_LOCKED(param)
+#define TTM_DEBUG(fmt, arg...)
+#define TTM_BO_HASH_ORDER 13
+
+static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
+static void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo);
+static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
+
+static inline uint32_t ttm_bo_type_flags(unsigned type)
+{
+ return 1 << (type);
+}
+
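+/*
+ * Final release: called when the last bo->list_kref reference goes away.
+ * bo->kref counts references held by users of the object; bo->list_kref
+ * is taken in addition for every LRU, swap and delayed-destroy list
+ * membership, so list processing never races with destruction.
+ */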
+static void ttm_bo_release_list(struct kref *list_kref)
+{
+ struct ttm_buffer_object *bo =
+ container_of(list_kref, struct ttm_buffer_object, list_kref);
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ BUG_ON(atomic_read(&bo->list_kref.refcount));
+ BUG_ON(atomic_read(&bo->kref.refcount));
+ BUG_ON(atomic_read(&bo->cpu_writers));
+ BUG_ON(bo->sync_obj != NULL);
+ BUG_ON(bo->mem.mm_node != NULL);
+ BUG_ON(!list_empty(&bo->lru));
+ BUG_ON(!list_empty(&bo->ddestroy));
+
+ if (bo->ttm)
+ ttm_tt_destroy(bo->ttm);
+ if (bo->destroy)
+ bo->destroy(bo);
+ else {
+ ttm_mem_global_free(bdev->mem_glob, bo->acc_size, false);
+ kfree(bo);
+ }
+}
+
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
+{
+
+ if (interruptible) {
+ int ret = 0;
+
+ ret = wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->reserved) == 0);
+ if (unlikely(ret != 0))
+ return -ERESTART;
+ } else {
+ wait_event(bo->event_queue, atomic_read(&bo->reserved) == 0);
+ }
+ return 0;
+}
+
+static void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+
+ if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
+
+ BUG_ON(!list_empty(&bo->lru));
+
+ man = &bdev->man[bo->mem.mem_type];
+ list_add_tail(&bo->lru, &man->lru);
+ kref_get(&bo->list_kref);
+
+ if (bo->ttm != NULL) {
+ list_add_tail(&bo->swap, &bdev->swap_lru);
+ kref_get(&bo->list_kref);
+ }
+ }
+}
+
+/**
+ * Call with the lru_lock held.
+ */
+
+static int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
+{
+ int put_count = 0;
+
+ if (!list_empty(&bo->swap)) {
+ list_del_init(&bo->swap);
+ ++put_count;
+ }
+ if (!list_empty(&bo->lru)) {
+ list_del_init(&bo->lru);
+ ++put_count;
+ }
+
+ /*
+ * TODO: Add a driver hook to delete from
+ * driver-specific LRU's here.
+ */
+
+ return put_count;
+}
+
+int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret;
+
+ while (unlikely(atomic_cmpxchg(&bo->reserved, 0, 1) != 0)) {
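+ /*
+ * Deadlock avoidance for multi-buffer reservations: if this bo is
+ * already reserved as part of a sequence that is not newer than
+ * ours, back off with -EAGAIN instead of waiting; the unsigned
+ * subtraction keeps the comparison wrap-safe.
+ */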
+ if (use_sequence && bo->seq_valid &&
+ (sequence - bo->val_seq < (1 << 31))) {
+ return -EAGAIN;
+ }
+
+ if (no_wait)
+ return -EBUSY;
+
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_wait_unreserved(bo, interruptible);
+ spin_lock(&bdev->lru_lock);
+
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (use_sequence) {
+ bo->val_seq = sequence;
+ bo->seq_valid = true;
+ } else {
+ bo->seq_valid = false;
+ }
+
+ return 0;
+}
+
+static void ttm_bo_ref_bug(struct kref *list_kref)
+{
+ BUG();
+}
+
+int ttm_bo_reserve(struct ttm_buffer_object *bo,
+ bool interruptible,
+ bool no_wait, bool use_sequence, uint32_t sequence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int put_count = 0;
+ int ret;
+
+ spin_lock(&bdev->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
+ sequence);
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&bdev->lru_lock);
+
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_reserve);
+
+void ttm_bo_unreserve(struct ttm_buffer_object *bo)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ spin_lock(&bdev->lru_lock);
+ ttm_bo_add_to_lru(bo);
+ atomic_set(&bo->reserved, 0);
+ wake_up_all(&bo->event_queue);
+ spin_unlock(&bdev->lru_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unreserve);
+
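/*
 * Hedged usage sketch, not part of the patch itself: the calling pattern
 * the reserve/unreserve pair above is designed for.  The helper name and
 * its error handling are illustrative only.
 */
static int foo_inspect_bo(struct ttm_buffer_object *bo)
{
	/* Interruptible, waiting (no_wait = false), no sequence deadlock avoidance. */
	int ret = ttm_bo_reserve(bo, true, false, false, 0);

	if (unlikely(ret != 0))
		return ret;	/* -ERESTART, -EBUSY or -EAGAIN */

	/* bo->mem and the list state are now stable; validate or map here. */

	ttm_bo_unreserve(bo);	/* back on the LRU lists, waiters woken */
	return 0;
}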
+/*
+ * Call bo->mutex locked.
+ */
+
+static int ttm_bo_add_ttm(struct ttm_buffer_object *bo, bool zero_alloc)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+ uint32_t page_flags = 0;
+
+ TTM_ASSERT_LOCKED(&bo->mutex);
+ bo->ttm = NULL;
+
+ switch (bo->type) {
+ case ttm_bo_type_device:
+ if (zero_alloc)
+ page_flags |= TTM_PAGE_FLAG_ZERO_ALLOC;
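+ /* fall through: device-type buffers also get a ttm, created below */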
+ case ttm_bo_type_kernel:
+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags, bdev->dummy_read_page);
+ if (unlikely(bo->ttm == NULL))
+ ret = -ENOMEM;
+ break;
+ case ttm_bo_type_user:
+ bo->ttm = ttm_tt_create(bdev, bo->num_pages << PAGE_SHIFT,
+ page_flags | TTM_PAGE_FLAG_USER,
+ bdev->dummy_read_page);
+ if (unlikely(bo->ttm == NULL)) {
+ ret = -ENOMEM;
+ break;
+ }
+
+ ret = ttm_tt_set_user(bo->ttm, current,
+ bo->buffer_start, bo->num_pages);
+ if (unlikely(ret != 0))
+ ttm_tt_destroy(bo->ttm);
+ break;
+ default:
+ printk(KERN_ERR TTM_PFX "Illegal buffer object type\n");
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+
+static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem,
+ bool evict, bool interruptible, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
+ bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
+ struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
+ struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
+ int ret = 0;
+
+ if (old_is_pci || new_is_pci ||
+ ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0))
+ ttm_bo_unmap_virtual(bo);
+
+ /*
+ * Create and bind a ttm if required.
+ */
+
+ if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
+ ret = ttm_bo_add_ttm(bo, false);
+ if (ret)
+ goto out_err;
+
+ ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
+ if (ret)
+ return ret;
+
+ if (mem->mem_type != TTM_PL_SYSTEM) {
+ ret = ttm_tt_bind(bo->ttm, mem);
+ if (ret)
+ goto out_err;
+ }
+
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+
+ struct ttm_mem_reg *old_mem = &bo->mem;
+ uint32_t save_flags = old_mem->placement;
+
+ *old_mem = *mem;
+ mem->mm_node = NULL;
+ ttm_flag_masked(&save_flags, mem->placement,
+ TTM_PL_MASK_MEMTYPE);
+ goto moved;
+ }
+
+ }
+
+ if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
+ !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
+ ret = ttm_bo_move_ttm(bo, evict, no_wait, mem);
+ else if (bdev->driver->move)
+ ret = bdev->driver->move(bo, evict, interruptible,
+ no_wait, mem);
+ else
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait, mem);
+
+ if (ret)
+ goto out_err;
+
+moved:
+ if (bo->evicted) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
+ bo->evicted = false;
+ }
+
+ if (bo->mem.mm_node) {
+ spin_lock(&bo->lock);
+ bo->offset = (bo->mem.mm_node->start << PAGE_SHIFT) +
+ bdev->man[bo->mem.mem_type].gpu_offset;
+ bo->cur_placement = bo->mem.placement;
+ spin_unlock(&bo->lock);
+ }
+
+ return 0;
+
+out_err:
+ new_man = &bdev->man[bo->mem.mem_type];
+ if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
+ ttm_tt_unbind(bo->ttm);
+ ttm_tt_destroy(bo->ttm);
+ bo->ttm = NULL;
+ }
+
+ return ret;
+}
+
+/**
+ * If bo idle, remove from delayed- and lru lists, and unref.
+ * If not idle, and already on delayed list, do nothing.
+ * If not idle, and not on delayed list, put on delayed list,
+ * up the list_kref and schedule a delayed list check.
+ */
+
+static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ int ret;
+
+ spin_lock(&bo->lock);
+ (void) ttm_bo_wait(bo, false, false, !remove_all);
+
+ if (!bo->sync_obj) {
+ int put_count;
+
+ spin_unlock(&bo->lock);
+
+ spin_lock(&bdev->lru_lock);
+ ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
+ BUG_ON(ret);
+ if (bo->ttm)
+ ttm_tt_unbind(bo->ttm);
+
+ if (!list_empty(&bo->ddestroy)) {
+ list_del_init(&bo->ddestroy);
+ kref_put(&bo->list_kref, ttm_bo_ref_bug);
+ }
+ if (bo->mem.mm_node) {
+ drm_mm_put_block(bo->mem.mm_node);
+ bo->mem.mm_node = NULL;
+ }
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&bdev->lru_lock);
+
+ atomic_set(&bo->reserved, 0);
+
+ while (put_count--)
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+
+ return 0;
+ }
+
+ spin_lock(&bdev->lru_lock);
+ if (list_empty(&bo->ddestroy)) {
+ void *sync_obj = bo->sync_obj;
+ void *sync_obj_arg = bo->sync_obj_arg;
+
+ kref_get(&bo->list_kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&bdev->lru_lock);
+ spin_unlock(&bo->lock);
+
+ if (sync_obj)
+ driver->sync_obj_flush(sync_obj, sync_obj_arg);
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ ret = 0;
+
+ } else {
+ spin_unlock(&bdev->lru_lock);
+ spin_unlock(&bo->lock);
+ ret = -EBUSY;
+ }
+
+ return ret;
+}
+
+/**
+ * Traverse the delayed list, and call ttm_bo_cleanup_refs on all
+ * encountered buffers.
+ */
+
+static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
+{
+ struct ttm_buffer_object *entry, *nentry;
+ struct list_head *list, *next;
+ int ret;
+
+ spin_lock(&bdev->lru_lock);
+ list_for_each_safe(list, next, &bdev->ddestroy) {
+ entry = list_entry(list, struct ttm_buffer_object, ddestroy);
+ nentry = NULL;
+
+ /*
+ * Protect the next list entry from destruction while we
+ * unlock the lru_lock.
+ */
+
+ if (next != &bdev->ddestroy) {
+ nentry = list_entry(next, struct ttm_buffer_object,
+ ddestroy);
+ kref_get(&nentry->list_kref);
+ }
+ kref_get(&entry->list_kref);
+
+ spin_unlock(&bdev->lru_lock);
+ ret = ttm_bo_cleanup_refs(entry, remove_all);
+ kref_put(&entry->list_kref, ttm_bo_release_list);
+
+ spin_lock(&bdev->lru_lock);
+ if (nentry) {
+ bool next_onlist = !list_empty(next);
+ spin_unlock(&bdev->lru_lock);
+ kref_put(&nentry->list_kref, ttm_bo_release_list);
+ spin_lock(&bdev->lru_lock);
+ /*
+ * Someone might have raced us and removed the
+ * next entry from the list. We don't bother restarting
+ * list traversal.
+ */
+
+ if (!next_onlist)
+ break;
+ }
+ if (ret)
+ break;
+ }
+ ret = !list_empty(&bdev->ddestroy);
+ spin_unlock(&bdev->lru_lock);
+
+ return ret;
+}
+
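+/*
+ * Work item behind bdev->wq: retries delayed destruction and reschedules
+ * itself (roughly every 10 ms) while destroyed-but-still-busy buffers
+ * remain on the ddestroy list.
+ */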
+static void ttm_bo_delayed_workqueue(struct work_struct *work)
+{
+ struct ttm_bo_device *bdev =
+ container_of(work, struct ttm_bo_device, wq.work);
+
+ if (ttm_bo_delayed_delete(bdev, false)) {
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ }
+}
+
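+/*
+ * kref release function for bo->kref, called from ttm_bo_unref() with
+ * bdev->vm_lock held for write.  The lock is dropped around the cleanup
+ * call, which takes bo->lock and the lru_lock, and re-acquired so the
+ * caller can release it.
+ */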
+static void ttm_bo_release(struct kref *kref)
+{
+ struct ttm_buffer_object *bo =
+ container_of(kref, struct ttm_buffer_object, kref);
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ if (likely(bo->vm_node != NULL)) {
+ rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
+ drm_mm_put_block(bo->vm_node);
+ bo->vm_node = NULL;
+ }
+ write_unlock(&bdev->vm_lock);
+ ttm_bo_cleanup_refs(bo, false);
+ kref_put(&bo->list_kref, ttm_bo_release_list);
+ write_lock(&bdev->vm_lock);
+}
+
+void ttm_bo_unref(struct ttm_buffer_object **p_bo)
+{
+ struct ttm_buffer_object *bo = *p_bo;
+ struct ttm_bo_device *bdev = bo->bdev;
+
+ *p_bo = NULL;
+ write_lock(&bdev->vm_lock);
+ kref_put(&bo->kref, ttm_bo_release);
+ write_unlock(&bdev->vm_lock);
+}
+EXPORT_SYMBOL(ttm_bo_unref);
+
+static int ttm_bo_evict(struct ttm_buffer_object *bo, unsigned mem_type,
+ bool interruptible, bool no_wait)
+{
+ int ret = 0;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_reg evict_mem;
+ uint32_t proposed_placement;
+
+ if (bo->mem.mem_type != mem_type)
+ goto out;
+
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ spin_unlock(&bo->lock);
+
+ if (ret && ret != -ERESTART) {
+ printk(KERN_ERR TTM_PFX "Failed to expire sync object before "
+ "buffer eviction.\n");
+ goto out;
+ }
+
+ BUG_ON(!atomic_read(&bo->reserved));
+
+ evict_mem = bo->mem;
+ evict_mem.mm_node = NULL;
+
+ proposed_placement = bdev->driver->evict_flags(bo);
+
+ ret = ttm_bo_mem_space(bo, proposed_placement,
+ &evict_mem, interruptible, no_wait);
+ if (unlikely(ret != 0 && ret != -ERESTART))
+ ret = ttm_bo_mem_space(bo, TTM_PL_FLAG_SYSTEM,
+ &evict_mem, interruptible, no_wait);
+
+ if (ret) {
+ if (ret != -ERESTART)
+ printk(KERN_ERR TTM_PFX
+ "Failed to find memory space for "
+ "buffer 0x%p eviction.\n", bo);
+ goto out;
+ }
+
+ ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
+ no_wait);
+ if (ret) {
+ if (ret != -ERESTART)
+ printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
+ goto out;
+ }
+
+ spin_lock(&bdev->lru_lock);
+ if (evict_mem.mm_node) {
+ drm_mm_put_block(evict_mem.mm_node);
+ evict_mem.mm_node = NULL;
+ }
+ spin_unlock(&bdev->lru_lock);
+ bo->evicted = true;
+out:
+ return ret;
+}
+
+/**
+ * Repeatedly evict memory from the LRU for @mem_type until we create enough
+ * space, or we've evicted everything and there isn't enough space.
+ */
+static int ttm_bo_mem_force_space(struct ttm_bo_device *bdev,
+ struct ttm_mem_reg *mem,
+ uint32_t mem_type,
+ bool interruptible, bool no_wait)
+{
+ struct drm_mm_node *node;
+ struct ttm_buffer_object *entry;
+ struct ttm_mem_type_manager *man = &bdev->man[mem_type];
+ struct list_head *lru;
+ unsigned long num_pages = mem->num_pages;
+ int put_count = 0;
+ int ret;
+
+retry_pre_get:
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret != 0))
+ return ret;
+
+ spin_lock(&bdev->lru_lock);
+ do {
+ node = drm_mm_search_free(&man->manager, num_pages,
+ mem->page_alignment, 1);
+ if (node)
+ break;
+
+ lru = &man->lru;
+ if (list_empty(lru))
+ break;
+
+ entry = list_first_entry(lru, struct ttm_buffer_object, lru);
+ kref_get(&entry->list_kref);
+
+ ret =
+ ttm_bo_reserve_locked(entry, interruptible, no_wait,
+ false, 0);
+
+ if (likely(ret == 0))
+ put_count = ttm_bo_del_from_lru(entry);
+
+ spin_unlock(&bdev->lru_lock);
+
+ if (unlikely(ret != 0))
+ return ret;
+
+ while (put_count--)
+ kref_put(&entry->list_kref, ttm_bo_ref_bug);
+
+ ret = ttm_bo_evict(entry, mem_type, interruptible, no_wait);
+
+ ttm_bo_unreserve(entry);
+
+ kref_put(&entry->list_kref, ttm_bo_release_list);
+ if (ret)
+ return ret;
+
+ spin_lock(&bdev->lru_lock);
+ } while (1);
+
+ if (!node) {
+ spin_unlock(&bdev->lru_lock);
+ return -ENOMEM;
+ }
+
+ node = drm_mm_get_block_atomic(node, num_pages, mem->page_alignment);
+ if (unlikely(!node)) {
+ spin_unlock(&bdev->lru_lock);
+ goto retry_pre_get;
+ }
+
+ spin_unlock(&bdev->lru_lock);
+ mem->mm_node = node;
+ mem->mem_type = mem_type;
+ return 0;
+}
+
+static bool ttm_bo_mt_compatible(struct ttm_mem_type_manager *man,
+ bool disallow_fixed,
+ uint32_t mem_type,
+ uint32_t mask, uint32_t *res_mask)
+{
+ uint32_t cur_flags = ttm_bo_type_flags(mem_type);
+
+ if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && disallow_fixed)
+ return false;
+
+ if ((cur_flags & mask & TTM_PL_MASK_MEM) == 0)
+ return false;
+
+ if ((mask & man->available_caching) == 0)
+ return false;
+ if (mask & man->default_caching)
+ cur_flags |= man->default_caching;
+ else if (mask & TTM_PL_FLAG_CACHED)
+ cur_flags |= TTM_PL_FLAG_CACHED;
+ else if (mask & TTM_PL_FLAG_WC)
+ cur_flags |= TTM_PL_FLAG_WC;
+ else
+ cur_flags |= TTM_PL_FLAG_UNCACHED;
+
+ *res_mask = cur_flags;
+ return true;
+}
+
+/**
+ * Creates space for memory region @mem according to its type.
+ *
+ * This function first searches for free space in compatible memory types in
+ * the priority order defined by the driver. If free space isn't found, then
+ * ttm_bo_mem_force_space is attempted in priority order to evict and find
+ * space.
+ */
+int ttm_bo_mem_space(struct ttm_buffer_object *bo,
+ uint32_t proposed_placement,
+ struct ttm_mem_reg *mem,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_mem_type_manager *man;
+
+ uint32_t num_prios = bdev->driver->num_mem_type_prio;
+ const uint32_t *prios = bdev->driver->mem_type_prio;
+ uint32_t i;
+ uint32_t mem_type = TTM_PL_SYSTEM;
+ uint32_t cur_flags = 0;
+ bool type_found = false;
+ bool type_ok = false;
+ bool has_eagain = false;
+ struct drm_mm_node *node = NULL;
+ int ret;
+
+ mem->mm_node = NULL;
+ for (i = 0; i < num_prios; ++i) {
+ mem_type = prios[i];
+ man = &bdev->man[mem_type];
+
+ type_ok = ttm_bo_mt_compatible(man,
+ bo->type == ttm_bo_type_user,
+ mem_type, proposed_placement,
+ &cur_flags);
+
+ if (!type_ok)
+ continue;
+
+ if (mem_type == TTM_PL_SYSTEM)
+ break;
+
+ if (man->has_type && man->use_type) {
+ type_found = true;
+ do {
+ ret = drm_mm_pre_get(&man->manager);
+ if (unlikely(ret))
+ return ret;
+
+ spin_lock(&bdev->lru_lock);
+ node = drm_mm_search_free(&man->manager,
+ mem->num_pages,
+ mem->page_alignment,
+ 1);
+ if (unlikely(!node)) {
+ spin_unlock(&bdev->lru_lock);
+ break;
+ }
+ node = drm_mm_get_block_atomic(node,
+ mem->num_pages,
+ mem->page_alignment);
+ spin_unlock(&bdev->lru_lock);
+ } while (!node);
+ }
+ if (node)
+ break;
+ }
+
+ if ((type_ok && (mem_type == TTM_PL_SYSTEM)) || node) {
+ mem->mm_node = node;
+ mem->mem_type = mem_type;
+ mem->placement = cur_flags;
+ return 0;
+ }
+
+ if (!type_found)
+ return -EINVAL;
+
+ num_prios = bdev->driver->num_mem_busy_prio;
+ prios = bdev->driver->mem_busy_prio;
+
+ for (i = 0; i < num_prios; ++i) {
+ mem_type = prios[i];
+ man = &bdev->man[mem_type];
+
+ if (!man->has_type)
+ continue;
+
+ if (!ttm_bo_mt_compatible(man,
+ bo->type == ttm_bo_type_user,
+ mem_type,
+ proposed_placement, &cur_flags))
+ continue;
+
+ ret = ttm_bo_mem_force_space(bdev, mem, mem_type,
+ interruptible, no_wait);
+
+ if (ret == 0 && mem->mm_node) {
+ mem->placement = cur_flags;
+ return 0;
+ }
+
+ if (ret == -ERESTART)
+ has_eagain = true;
+ }
+
+ ret = (has_eagain) ? -ERESTART : -ENOMEM;
+ return ret;
+}
+EXPORT_SYMBOL(ttm_bo_mem_space);
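/*
 * Hedged sketch, not part of the patch itself: the two priority arrays
 * walked above are supplied by the driver.  TTM_PL_VRAM and TTM_PL_TT are
 * assumed from ttm_placement.h, which this patch adds but is not shown
 * above; the remaining hooks are elided.
 */
static const uint32_t foo_mem_prios[]  = { TTM_PL_VRAM, TTM_PL_TT, TTM_PL_SYSTEM };
static const uint32_t foo_busy_prios[] = { TTM_PL_TT, TTM_PL_SYSTEM };

static struct ttm_bo_driver foo_bo_driver = {
	.mem_type_prio     = foo_mem_prios,
	.num_mem_type_prio = ARRAY_SIZE(foo_mem_prios),
	.mem_busy_prio     = foo_busy_prios,
	.num_mem_busy_prio = ARRAY_SIZE(foo_busy_prios),
	/* .evict_flags, .move, .invalidate_caches, ... */
};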
+
+int ttm_bo_wait_cpu(struct ttm_buffer_object *bo, bool no_wait)
+{
+ int ret = 0;
+
+ if ((atomic_read(&bo->cpu_writers) > 0) && no_wait)
+ return -EBUSY;
+
+ ret = wait_event_interruptible(bo->event_queue,
+ atomic_read(&bo->cpu_writers) == 0);
+
+ if (ret == -ERESTARTSYS)
+ ret = -ERESTART;
+
+ return ret;
+}
+
+int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+ uint32_t proposed_placement,
+ bool interruptible, bool no_wait)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ int ret = 0;
+ struct ttm_mem_reg mem;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+
+ /*
+ * FIXME: It's possible to pipeline buffer moves.
+ * Have the driver move function wait for idle when necessary,
+ * instead of doing it here.
+ */
+
+ spin_lock(&bo->lock);
+ ret = ttm_bo_wait(bo, false, interruptible, no_wait);
+ spin_unlock(&bo->lock);
+
+ if (ret)
+ return ret;
+
+ mem.num_pages = bo->num_pages;
+ mem.size = mem.num_pages << PAGE_SHIFT;
+ mem.page_alignment = bo->mem.page_alignment;
+
+ /*
+ * Determine where to move the buffer.
+ */
+
+ ret = ttm_bo_mem_space(bo, proposed_placement, &mem,
+ interruptible, no_wait);
+ if (ret)
+ goto out_unlock;
+
+ ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait);
+
+out_unlock:
+ if (ret && mem.mm_node) {
+ spin_lock(&bdev->lru_lock);
+ drm_mm_put_block(mem.mm_node);
+ spin_unlock(&bdev->lru_lock);
+ }
+ return ret;
+}
+
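+/*
+ * Returns nonzero when the buffer's current placement already satisfies
+ * the proposed placement, both in memory type and caching attributes.
+ */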
+static int ttm_bo_mem_compat(uint32_t proposed_placement,
+ struct ttm_mem_reg *mem)
+{
+ if ((proposed_placement & mem->placement & TTM_PL_MASK_MEM) == 0)
+ return 0;
+ if ((proposed_placement & mem->placement & TTM_PL_MASK_CACHING) == 0)
+ return 0;
+
+ return 1;
+}
+
+int ttm_buffer_object_validate(struct ttm_buffer_object *bo,
+ uint32_t proposed_placement,
+ bool interruptible, bool no_wait)
+{
+ int ret;
+
+ BUG_ON(!atomic_read(&bo->reserved));
+ bo->proposed_placement = proposed_placement;
+
+ TTM_DEBUG("Proposed placement 0x%08lx, Old flags 0x%08lx\n",
+ (unsigned long)proposed_placement,
+ (unsigned long)bo->mem.placement)