Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_execbuf_util.c	126
1 file changed, 64 insertions, 62 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 1937069432c..e8dac875852 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -41,14 +41,12 @@ static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 		if (!entry->reserved)
 			continue;
 
+		entry->reserved = false;
 		if (entry->removed) {
 			ttm_bo_add_to_lru(bo);
 			entry->removed = false;
-		}
-		entry->reserved = false;
-		atomic_set(&bo->reserved, 0);
-		wake_up_all(&bo->event_queue);
+		}
+		__ttm_bo_unreserve(bo);
 	}
 }
 
@@ -82,23 +80,8 @@ static void ttm_eu_list_ref_sub(struct list_head *list)
 	}
 }
 
-static int ttm_eu_wait_unreserved_locked(struct list_head *list,
-					 struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_global *glob = bo->glob;
-	int ret;
-
-	ttm_eu_del_from_lru_locked(list);
-	spin_unlock(&glob->lru_lock);
-	ret = ttm_bo_wait_unreserved(bo, true);
-	spin_lock(&glob->lru_lock);
-	if (unlikely(ret != 0))
-		ttm_eu_backoff_reservation_locked(list);
-	return ret;
-}
-
-
-void ttm_eu_backoff_reservation(struct list_head *list)
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+				struct list_head *list)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_bo_global *glob;
@@ -110,6 +93,8 @@ void ttm_eu_backoff_reservation(struct list_head *list)
 	glob = entry->bo->glob;
 	spin_lock(&glob->lru_lock);
 	ttm_eu_backoff_reservation_locked(list);
+	if (ticket)
+		ww_acquire_fini(ticket);
 	spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -126,12 +111,12 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
  * buffers in different orders.
  */
 
-int ttm_eu_reserve_buffers(struct list_head *list)
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+			   struct list_head *list)
 {
 	struct ttm_bo_global *glob;
 	struct ttm_validate_buffer *entry;
 	int ret;
-	uint32_t val_seq;
 
 	if (list_empty(list))
 		return 0;
@@ -145,62 +130,77 @@ int ttm_eu_reserve_buffers(struct list_head *list)
 	entry = list_first_entry(list, struct ttm_validate_buffer, head);
 	glob = entry->bo->glob;
 
+	if (ticket)
+		ww_acquire_init(ticket, &reservation_ww_class);
 retry:
-	spin_lock(&glob->lru_lock);
-	val_seq = entry->bo->bdev->val_seq++;
-
 	list_for_each_entry(entry, list, head) {
 		struct ttm_buffer_object *bo = entry->bo;
 
-retry_this_bo:
-		ret = ttm_bo_reserve_locked(bo, true, true, true, val_seq);
-		switch (ret) {
-		case 0:
-			break;
-		case -EBUSY:
-			ret = ttm_eu_wait_unreserved_locked(list, bo);
-			if (unlikely(ret != 0)) {
-				spin_unlock(&glob->lru_lock);
-				ttm_eu_list_ref_sub(list);
-				return ret;
-			}
-			goto retry_this_bo;
-		case -EAGAIN:
+		/* already slowpath reserved? */
+		if (entry->reserved)
+			continue;
+
+		ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+				       ticket);
+
+		if (ret == -EDEADLK) {
+			/* uh oh, we lost out, drop every reservation and try
+			 * to only reserve this buffer, then start over if
+			 * this succeeds.
+			 */
+			BUG_ON(ticket == NULL);
+			spin_lock(&glob->lru_lock);
 			ttm_eu_backoff_reservation_locked(list);
 			spin_unlock(&glob->lru_lock);
 			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_unreserved(bo, true);
-			if (unlikely(ret != 0))
-				return ret;
+			ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+							       ticket);
+			if (unlikely(ret != 0)) {
+				if (ret == -EINTR)
+					ret = -ERESTARTSYS;
+				goto err_fini;
+			}
+
+			entry->reserved = true;
+			if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+				ret = -EBUSY;
+				goto err;
+			}
 			goto retry;
-		default:
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			return ret;
-		}
+		} else if (ret)
+			goto err;
 
 		entry->reserved = true;
 		if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
-			ttm_eu_backoff_reservation_locked(list);
-			spin_unlock(&glob->lru_lock);
-			ttm_eu_list_ref_sub(list);
-			ret = ttm_bo_wait_cpu(bo, false);
-			if (ret)
-				return ret;
-			goto retry;
+			ret = -EBUSY;
+			goto err;
 		}
 	}
 
+	if (ticket)
+		ww_acquire_done(ticket);
+	spin_lock(&glob->lru_lock);
 	ttm_eu_del_from_lru_locked(list);
 	spin_unlock(&glob->lru_lock);
 	ttm_eu_list_ref_sub(list);
-
 	return 0;
+
+err:
+	spin_lock(&glob->lru_lock);
+	ttm_eu_backoff_reservation_locked(list);
+	spin_unlock(&glob->lru_lock);
+	ttm_eu_list_ref_sub(list);
+err_fini:
+	if (ticket) {
+		ww_acquire_done(ticket);
+		ww_acquire_fini(ticket);
+	}
+	return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
 
-void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+				 struct list_head *list, void *sync_obj)
 {
 	struct ttm_validate_buffer *entry;
 	struct ttm_buffer_object *bo;
@@ -216,19 +216,21 @@ void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
 	driver = bdev->driver;
 	glob = bo->glob;
 
-	spin_lock(&bdev->fence_lock);
 	spin_lock(&glob->lru_lock);
+	spin_lock(&bdev->fence_lock);
 
 	list_for_each_entry(entry, list, head) {
 		bo = entry->bo;
 		entry->old_sync_obj = bo->sync_obj;
 		bo->sync_obj = driver->sync_obj_ref(sync_obj);
-		bo->sync_obj_arg = entry->new_sync_obj_arg;
-		ttm_bo_unreserve_locked(bo);
+		ttm_bo_add_to_lru(bo);
+		__ttm_bo_unreserve(bo);
 		entry->reserved = false;
 	}
-	spin_unlock(&glob->lru_lock);
 	spin_unlock(&bdev->fence_lock);
+	spin_unlock(&glob->lru_lock);
+	if (ticket)
+		ww_acquire_fini(ticket);
 
 	list_for_each_entry(entry, list, head) {
 		if (entry->old_sync_obj)

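For context, the patched helpers thread a ww_acquire_ctx ticket through reserve, fence and backoff. Below is a minimal caller-side sketch against the new signatures shown in the hunks above; the function name, the buffer list setup and the driver_fence pointer are hypothetical driver-side names, not part of this patch.

#include <linux/ww_mutex.h>
#include <drm/ttm/ttm_execbuf_util.h>

/* Hypothetical driver validation path using the ticketed API from this
 * patch; only the ttm_eu_* calls and their error contract come from the
 * diff above. */
static int driver_validate_buffers(struct list_head *val_list,
				   void *driver_fence)
{
	struct ww_acquire_ctx ticket;
	int ret;

	/* Reserve every buffer on val_list under one acquire ticket; the
	 * -EDEADLK backoff/retry now happens inside the helper, and the
	 * ticket is already finalized for us on failure (err_fini path). */
	ret = ttm_eu_reserve_buffers(&ticket, val_list);
	if (ret)
		return ret;

	/* ... validate placement / submit work for the reserved BOs;
	 * a mid-way failure would instead call
	 * ttm_eu_backoff_reservation(&ticket, val_list) ... */

	/* Attach the fence and unreserve; this ends the ticket via
	 * ww_acquire_fini(), as seen in the last hunk. */
	ttm_eu_fence_buffer_objects(&ticket, val_list, driver_fence);
	return 0;
}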