Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c | 384 ++++++++-----------------
 1 file changed, 124 insertions(+), 260 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 52b20b12c83..4ab9f7171c4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,7 +45,6 @@
#define TTM_DEBUG(fmt, arg...)
#define TTM_BO_HASH_ORDER 13
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
static void ttm_bo_global_kobj_release(struct kobject *kobj);
@@ -150,6 +149,9 @@ static void ttm_bo_release_list(struct kref *list_kref)
if (bo->ttm)
ttm_tt_destroy(bo->ttm);
atomic_dec(&bo->glob->bo_count);
+ if (bo->resv == &bo->ttm_resv)
+ reservation_object_fini(&bo->ttm_resv);
+ mutex_destroy(&bo->wu_mutex);
if (bo->destroy)
bo->destroy(bo);
else {
@@ -158,24 +160,12 @@ static void ttm_bo_release_list(struct kref *list_kref)
ttm_mem_global_free(bdev->glob->mem_glob, acc_size);
}
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo, bool interruptible)
-{
- if (interruptible) {
- return wait_event_interruptible(bo->event_queue,
- !ttm_bo_is_reserved(bo));
- } else {
- wait_event(bo->event_queue, !ttm_bo_is_reserved(bo));
- return 0;
- }
-}
-EXPORT_SYMBOL(ttm_bo_wait_unreserved);
-
void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- BUG_ON(!ttm_bo_is_reserved(bo));
+ lockdep_assert_held(&bo->resv->lock.base);
if (!(bo->mem.placement & TTM_PL_FLAG_NO_EVICT)) {
@@ -191,6 +181,7 @@ void ttm_bo_add_to_lru(struct ttm_buffer_object *bo)
}
}
}
+EXPORT_SYMBOL(ttm_bo_add_to_lru);
int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
@@ -213,62 +204,6 @@ int ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
return put_count;
}
-int ttm_bo_reserve_locked(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_sequence, uint32_t sequence)
-{
- struct ttm_bo_global *glob = bo->glob;
- int ret;
-
- while (unlikely(atomic_read(&bo->reserved) != 0)) {
- /**
- * Deadlock avoidance for multi-bo reserving.
- */
- if (use_sequence && bo->seq_valid) {
- /**
- * We've already reserved this one.
- */
- if (unlikely(sequence == bo->val_seq))
- return -EDEADLK;
- /**
- * Already reserved by a thread that will not back
- * off for us. We need to back off.
- */
- if (unlikely(sequence - bo->val_seq < (1 << 31)))
- return -EAGAIN;
- }
-
- if (no_wait)
- return -EBUSY;
-
- spin_unlock(&glob->lru_lock);
- ret = ttm_bo_wait_unreserved(bo, interruptible);
- spin_lock(&glob->lru_lock);
-
- if (unlikely(ret))
- return ret;
- }
-
- atomic_set(&bo->reserved, 1);
- if (use_sequence) {
- /**
- * Wake up waiters that may need to recheck for deadlock,
- * if we decreased the sequence number.
- */
- if (unlikely((bo->val_seq - sequence < (1 << 31))
- || !bo->seq_valid))
- wake_up_all(&bo->event_queue);
-
- bo->val_seq = sequence;
- bo->seq_valid = true;
- } else {
- bo->seq_valid = false;
- }
-
- return 0;
-}
-EXPORT_SYMBOL(ttm_bo_reserve);
-
static void ttm_bo_ref_bug(struct kref *list_kref)
{
BUG();
@@ -281,42 +216,16 @@ void ttm_bo_list_ref_sub(struct ttm_buffer_object *bo, int count,
(never_free) ? ttm_bo_ref_bug : ttm_bo_release_list);
}
-int ttm_bo_reserve(struct ttm_buffer_object *bo,
- bool interruptible,
- bool no_wait, bool use_sequence, uint32_t sequence)
+void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo)
{
- struct ttm_bo_global *glob = bo->glob;
- int put_count = 0;
- int ret;
-
- spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, interruptible, no_wait, use_sequence,
- sequence);
- if (likely(ret == 0))
- put_count = ttm_bo_del_from_lru(bo);
- spin_unlock(&glob->lru_lock);
+ int put_count;
+ spin_lock(&bo->glob->lru_lock);
+ put_count = ttm_bo_del_from_lru(bo);
+ spin_unlock(&bo->glob->lru_lock);
ttm_bo_list_ref_sub(bo, put_count, true);
-
- return ret;
-}
-
-void ttm_bo_unreserve_locked(struct ttm_buffer_object *bo)
-{
- ttm_bo_add_to_lru(bo);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
-}
-
-void ttm_bo_unreserve(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_global *glob = bo->glob;
-
- spin_lock(&glob->lru_lock);
- ttm_bo_unreserve_locked(bo);
- spin_unlock(&glob->lru_lock);
}
-EXPORT_SYMBOL(ttm_bo_unreserve);
+EXPORT_SYMBOL(ttm_bo_del_sub_from_lru);
/*
* Call bo->mutex locked.
@@ -442,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
moved:
if (bo->evicted) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
+ if (bdev->driver->invalidate_caches) {
+ ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+ if (ret)
+ pr_err("Can not flush read caches\n");
+ }
bo->evicted = false;
}
@@ -488,17 +399,7 @@ static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo)
}
ttm_bo_mem_put(bo, &bo->mem);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
-
- /*
- * Since the final reference to this bo may not be dropped by
- * the current task we have to put a memory barrier here to make
- * sure the changes done in this function are always visible.
- *
- * This function only needs protection against the final kref_put.
- */
- smp_mb__before_atomic_dec();
+ ww_mutex_unlock(&bo->resv->lock);
}
static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
@@ -511,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
int ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, 0);
spin_lock(&bdev->fence_lock);
(void) ttm_bo_wait(bo, false, false, true);
@@ -531,8 +432,18 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
spin_unlock(&bdev->fence_lock);
if (!ret) {
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+
+ /*
+ * Make NO_EVICT bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ */
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+ bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+ ttm_bo_add_to_lru(bo);
+ }
+
+ __ttm_bo_unreserve(bo);
}
kref_get(&bo->list_kref);
@@ -583,8 +494,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
sync_obj = driver->sync_obj_ref(bo->sync_obj);
spin_unlock(&bdev->fence_lock);
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ __ttm_bo_unreserve(bo);
spin_unlock(&glob->lru_lock);
ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -604,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
return ret;
spin_lock(&glob->lru_lock);
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, 0);
/*
* We raced, and lost, someone else holds the reservation now,
@@ -622,8 +532,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
spin_unlock(&bdev->fence_lock);
if (ret || unlikely(list_empty(&bo->ddestroy))) {
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ __ttm_bo_unreserve(bo);
spin_unlock(&glob->lru_lock);
return ret;
}
@@ -668,7 +577,14 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
kref_get(&nentry->list_kref);
}
- ret = ttm_bo_reserve_locked(entry, false, !remove_all, false, 0);
+ ret = __ttm_bo_reserve(entry, false, true, false, 0);
+ if (remove_all && ret) {
+ spin_unlock(&glob->lru_lock);
+ ret = __ttm_bo_reserve(entry, false, false,
+ false, 0);
+ spin_lock(&glob->lru_lock);
+ }
+
if (!ret)
ret = ttm_bo_cleanup_refs_and_unlock(entry, false,
!remove_all);
@@ -712,13 +628,7 @@ static void ttm_bo_release(struct kref *kref)
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
- write_lock(&bdev->vm_lock);
- if (likely(bo->vm_node != NULL)) {
- rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
- drm_mm_put_block(bo->vm_node);
- bo->vm_node = NULL;
- }
- write_unlock(&bdev->vm_lock);
+ drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
ttm_mem_io_lock(man, false);
ttm_mem_io_free_vm(bo);
ttm_mem_io_unlock(man);
@@ -768,7 +678,7 @@ static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
goto out;
}
- BUG_ON(!ttm_bo_is_reserved(bo));
+ lockdep_assert_held(&bo->resv->lock.base);
evict_mem = bo->mem;
evict_mem.mm_node = NULL;
@@ -816,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &man->lru, lru) {
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, 0);
if (!ret)
break;
}
@@ -1049,7 +959,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
}
EXPORT_SYMBOL(ttm_bo_mem_space);
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_placement *placement,
bool interruptible,
bool no_wait_gpu)
@@ -1058,7 +968,7 @@ int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg mem;
struct ttm_bo_device *bdev = bo->bdev;
- BUG_ON(!ttm_bo_is_reserved(bo));
+ lockdep_assert_held(&bo->resv->lock.base);
/*
* FIXME: It's possible to pipeline buffer moves.
@@ -1090,24 +1000,32 @@ out_unlock:
return ret;
}
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
- struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+ struct ttm_mem_reg *mem,
+ uint32_t *new_flags)
{
int i;
if (mem->mm_node && placement->lpfn != 0 &&
(mem->start < placement->fpfn ||
mem->start + mem->num_pages > placement->lpfn))
- return -1;
+ return false;
for (i = 0; i < placement->num_placement; i++) {
- if ((placement->placement[i] & mem->placement &
- TTM_PL_MASK_CACHING) &&
- (placement->placement[i] & mem->placement &
- TTM_PL_MASK_MEM))
- return i;
+ *new_flags = placement->placement[i];
+ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ return true;
+ }
+
+ for (i = 0; i < placement->num_busy_placement; i++) {
+ *new_flags = placement->busy_placement[i];
+ if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+ (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+ return true;
}
- return -1;
+
+ return false;
}
int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1116,8 +1034,9 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
bool no_wait_gpu)
{
int ret;
+ uint32_t new_flags;
- BUG_ON(!ttm_bo_is_reserved(bo));
+ lockdep_assert_held(&bo->resv->lock.base);
/* Check that range is valid */
if (placement->lpfn || placement->fpfn)
if (placement->fpfn > placement->lpfn ||
@@ -1126,8 +1045,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
/*
* Check whether we need to move buffer.
*/
- ret = ttm_bo_mem_compat(placement, &bo->mem);
- if (ret < 0) {
+ if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
ret = ttm_bo_move_buffer(bo, placement, interruptible,
no_wait_gpu);
if (ret)
@@ -1137,7 +1055,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
* Use the access and other non-mapping-related flag bits from
* the compatible memory placement flags to the active flags
*/
- ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+ ttm_flag_masked(&bo->mem.placement, new_flags,
~TTM_PL_MASK_MEMTYPE);
}
/*
@@ -1176,6 +1094,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
int ret = 0;
unsigned long num_pages;
struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
+ bool locked;
ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
if (ret) {
@@ -1202,12 +1121,11 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
kref_init(&bo->kref);
kref_init(&bo->list_kref);
atomic_set(&bo->cpu_writers, 0);
- atomic_set(&bo->reserved, 1);
- init_waitqueue_head(&bo->event_queue);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
INIT_LIST_HEAD(&bo->io_reserve_lru);
+ mutex_init(&bo->wu_mutex);
bo->bdev = bdev;
bo->glob = bdev->glob;
bo->type = type;
@@ -1221,37 +1139,36 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
bo->mem.bus.io_reserved_count = 0;
bo->priv_flags = 0;
bo->mem.placement = (TTM_PL_FLAG_SYSTEM | TTM_PL_FLAG_CACHED);
- bo->seq_valid = false;
bo->persistent_swap_storage = persistent_swap_storage;
bo->acc_size = acc_size;
bo->sg = sg;
+ bo->resv = &bo->ttm_resv;
+ reservation_object_init(bo->resv);
atomic_inc(&bo->glob->bo_count);
+ drm_vma_node_reset(&bo->vma_node);
ret = ttm_bo_check_placement(bo, placement);
- if (unlikely(ret != 0))
- goto out_err;
/*
* For ttm_bo_type_device buffers, allocate
* address space from the device.
*/
- if (bo->type == ttm_bo_type_device ||
- bo->type == ttm_bo_type_sg) {
- ret = ttm_bo_setup_vm(bo);
- if (ret)
- goto out_err;
- }
+ if (likely(!ret) &&
+ (bo->type == ttm_bo_type_device ||
+ bo->type == ttm_bo_type_sg))
+ ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+ bo->mem.num_pages);
- ret = ttm_bo_validate(bo, placement, interruptible, false);
- if (ret)
- goto out_err;
+ locked = ww_mutex_trylock(&bo->resv->lock);
+ WARN_ON(!locked);
- ttm_bo_unreserve(bo);
- return 0;
+ if (likely(!ret))
+ ret = ttm_bo_validate(bo, placement, interruptible, false);
-out_err:
ttm_bo_unreserve(bo);
- ttm_bo_unref(&bo);
+
+ if (unlikely(ret))
+ ttm_bo_unref(&bo);
return ret;
}
@@ -1525,10 +1442,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
TTM_DEBUG("Swap list was clean\n");
spin_unlock(&glob->lru_lock);
- BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
- write_lock(&bdev->vm_lock);
- drm_mm_takedown(&bdev->addr_space_mm);
- write_unlock(&bdev->vm_lock);
+ drm_vma_offset_manager_destroy(&bdev->vma_manager);
return ret;
}
@@ -1537,12 +1451,12 @@ EXPORT_SYMBOL(ttm_bo_device_release);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_global *glob,
struct ttm_bo_driver *driver,
+ struct address_space *mapping,
uint64_t file_page_offset,
bool need_dma32)
{
int ret = -EINVAL;
- rwlock_init(&bdev->vm_lock);
bdev->driver = driver;
memset(bdev->man, 0, sizeof(bdev->man));
@@ -1555,14 +1469,11 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
if (unlikely(ret != 0))
goto out_no_sys;
- bdev->addr_space_rb = RB_ROOT;
- ret = drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
- if (unlikely(ret != 0))
- goto out_no_addr_mm;
-
+ drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+ 0x10000000);
INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
INIT_LIST_HEAD(&bdev->ddestroy);
- bdev->dev_mapping = NULL;
+ bdev->dev_mapping = mapping;
bdev->glob = glob;
bdev->need_dma32 = need_dma32;
bdev->val_seq = 0;
@@ -1572,8 +1483,6 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
mutex_unlock(&glob->device_list_mutex);
return 0;
-out_no_addr_mm:
- ttm_bo_clean_mm(bdev, 0);
out_no_sys:
return ret;
}
@@ -1603,12 +1512,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
- loff_t offset = (loff_t) bo->addr_space_offset;
- loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
- if (!bdev->dev_mapping)
- return;
- unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+ drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
ttm_mem_io_free_vm(bo);
}
@@ -1625,78 +1530,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
EXPORT_SYMBOL(ttm_bo_unmap_virtual);
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- struct rb_node **cur = &bdev->addr_space_rb.rb_node;
- struct rb_node *parent = NULL;
- struct ttm_buffer_object *cur_bo;
- unsigned long offset = bo->vm_node->start;
- unsigned long cur_offset;
-
- while (*cur) {
- parent = *cur;
- cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
- cur_offset = cur_bo->vm_node->start;
- if (offset < cur_offset)
- cur = &parent->rb_left;
- else if (offset > cur_offset)
- cur = &parent->rb_right;
- else
- BUG();
- }
-
- rb_link_node(&bo->vm_rb, parent, cur);
- rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret;
-
-retry_pre_get:
- ret = drm_mm_pre_get(&bdev->addr_space_mm);
- if (unlikely(ret != 0))
- return ret;
-
- write_lock(&bdev->vm_lock);
- bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
- bo->mem.num_pages, 0, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- ret = -ENOMEM;
- goto out_unlock;
- }
-
- bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
- bo->mem.num_pages, 0);
-
- if (unlikely(bo->vm_node == NULL)) {
- write_unlock(&bdev->vm_lock);
- goto retry_pre_get;
- }
-
- ttm_bo_vm_insert_rb(bo);
- write_unlock(&bdev->vm_lock);
- bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
- return 0;
-out_unlock:
- write_unlock(&bdev->vm_lock);
- return ret;
-}
int ttm_bo_wait(struct ttm_buffer_object *bo,
bool lazy, bool interruptible, bool no_wait)
@@ -1797,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
spin_lock(&glob->lru_lock);
list_for_each_entry(bo, &glob->swap_lru, swap) {
- ret = ttm_bo_reserve_locked(bo, false, true, false, 0);
+ ret = __ttm_bo_reserve(bo, false, true, false, 0);
if (!ret)
break;
}
@@ -1864,8 +1697,7 @@ out:
* already swapped buffer.
*/
- atomic_set(&bo->reserved, 0);
- wake_up_all(&bo->event_queue);
+ __ttm_bo_unreserve(bo);
kref_put(&bo->list_kref, ttm_bo_release_list);
return ret;
}
@@ -1876,3 +1708,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
;
}
EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+ int ret;
+
+ /*
+ * In the absence of a wait_unlocked API,
+ * use the bo::wu_mutex to avoid triggering livelocks due to
+ * concurrent use of this function. Note that this use of
+ * bo::wu_mutex can go away if we change locking order to
+ * mmap_sem -> bo::reserve.
+ */
+ ret = mutex_lock_interruptible(&bo->wu_mutex);
+ if (unlikely(ret != 0))
+ return -ERESTARTSYS;
+ if (!ww_mutex_is_locked(&bo->resv->lock))
+ goto out_unlock;
+ ret = __ttm_bo_reserve(bo, true, false, false, NULL);
+ if (unlikely(ret != 0))
+ goto out_unlock;
+ __ttm_bo_unreserve(bo);
+
+out_unlock:
+ mutex_unlock(&bo->wu_mutex);
+ return ret;
+}
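
As a rough sketch of how the reworked reservation helpers are meant to be consumed, the fragment below shows a fault-handler-style caller that backs off to ttm_bo_wait_unreserved() when a non-blocking reserve fails, matching the lock-order comment in the new helper. It is illustrative only: the example_fault() wrapper, the header choices, and the retry plumbing are assumptions for the sake of the example, not code contained in this patch.

#include <linux/mm.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>

/*
 * Hypothetical caller: try a non-blocking reserve while mmap_sem is
 * held; if the bo is already reserved, drop mmap_sem, wait for it to
 * become unreserved via the new helper, and ask the fault path to
 * retry.  A real handler would also take a bo reference before
 * dropping mmap_sem so the object cannot vanish during the wait.
 */
static int example_fault(struct ttm_buffer_object *bo,
			 struct vm_area_struct *vma,
			 struct vm_fault *vmf)
{
	int ret;

	/* Same non-blocking reserve pattern the patch uses internally. */
	ret = __ttm_bo_reserve(bo, false, true, false, NULL);
	if (unlikely(ret != 0)) {
		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			up_read(&vma->vm_mm->mmap_sem);
			(void) ttm_bo_wait_unreserved(bo);
			return VM_FAULT_RETRY;
		}
		return VM_FAULT_NOPAGE;
	}

	/* ... normal fault handling with the reservation held ... */

	__ttm_bo_unreserve(bo);
	return VM_FAULT_NOPAGE;
}

Pairing __ttm_bo_reserve() with __ttm_bo_unreserve() mirrors the usage throughout this patch; a caller that also wants LRU maintenance would use ttm_bo_unreserve() instead.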