Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_execbuf_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_execbuf_util.c  193
1 file changed, 158 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index c285c2902d1..e8dac875852 100644
--- a/drivers/gpu/drm/ttm/ttm_execbuf_util.c
+++ b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
@@ -25,14 +25,14 @@
*
**************************************************************************/
-#include "ttm/ttm_execbuf_util.h"
-#include "ttm/ttm_bo_driver.h"
-#include "ttm/ttm_placement.h"
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <linux/module.h>
-void ttm_eu_backoff_reservation(struct list_head *list)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
{
struct ttm_validate_buffer *entry;
@@ -42,9 +42,61 @@ void ttm_eu_backoff_reservation(struct list_head *list)
continue;
entry->reserved = false;
- ttm_bo_unreserve(bo);
+ if (entry->removed) {
+ ttm_bo_add_to_lru(bo);
+ entry->removed = false;
+ }
+ __ttm_bo_unreserve(bo);
+ }
+}
+
+static void ttm_eu_del_from_lru_locked(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+ if (!entry->reserved)
+ continue;
+
+ if (!entry->removed) {
+ entry->put_count = ttm_bo_del_from_lru(bo);
+ entry->removed = true;
+ }
}
}
+
+static void ttm_eu_list_ref_sub(struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+
+ list_for_each_entry(entry, list, head) {
+ struct ttm_buffer_object *bo = entry->bo;
+
+ if (entry->put_count) {
+ ttm_bo_list_ref_sub(bo, entry->put_count, true);
+ entry->put_count = 0;
+ }
+ }
+}
+
+void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
+ struct list_head *list)
+{
+ struct ttm_validate_buffer *entry;
+ struct ttm_bo_global *glob;
+
+ if (list_empty(list))
+ return;
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+ spin_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list);
+ if (ticket)
+ ww_acquire_fini(ticket);
+ spin_unlock(&glob->lru_lock);
+}
EXPORT_SYMBOL(ttm_eu_backoff_reservation);
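
The backoff path is now ticket-aware: any buffer that was pulled off the LRU
during reservation is put back before __ttm_bo_unreserve(), all under
glob->lru_lock, and the ww_acquire ticket is retired with ww_acquire_fini().
A minimal driver-side sketch of how the reworked reserve/backoff pair is meant
to be used on an error path (bo_a, bo_b, demo_submit() and demo_validate() are
invented for illustration and are not part of this patch):

    #include <drm/ttm/ttm_execbuf_util.h>

    /* Hypothetical driver submission path; bo_a and bo_b are assumed to be
     * already-initialized ttm_buffer_objects. */
    static int demo_submit(struct ttm_buffer_object *bo_a,
                           struct ttm_buffer_object *bo_b)
    {
            struct ttm_validate_buffer val[2] = { { .bo = bo_a }, { .bo = bo_b } };
            struct ww_acquire_ctx ticket;
            LIST_HEAD(val_list);
            int ret;

            list_add_tail(&val[0].head, &val_list);
            list_add_tail(&val[1].head, &val_list);

            ret = ttm_eu_reserve_buffers(&ticket, &val_list);
            if (ret)
                    return ret;     /* on failure the ticket is already finished */

            ret = demo_validate(&val_list);         /* hypothetical helper */
            if (ret) {
                    /* Re-adds the BOs to the LRU, unreserves them and
                     * retires the ticket in one call. */
                    ttm_eu_backoff_reservation(&ticket, &val_list);
                    return ret;
            }
            /* ... on success, fence the buffers (see the last hunk) ... */
            return 0;
    }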
/*
@@ -59,59 +111,130 @@ EXPORT_SYMBOL(ttm_eu_backoff_reservation);
* buffers in different orders.
*/
-int ttm_eu_reserve_buffers(struct list_head *list, uint32_t val_seq)
+int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
+ struct list_head *list)
{
+ struct ttm_bo_global *glob;
struct ttm_validate_buffer *entry;
int ret;
+ if (list_empty(list))
+ return 0;
+
+ list_for_each_entry(entry, list, head) {
+ entry->reserved = false;
+ entry->put_count = 0;
+ entry->removed = false;
+ }
+
+ entry = list_first_entry(list, struct ttm_validate_buffer, head);
+ glob = entry->bo->glob;
+
+ if (ticket)
+ ww_acquire_init(ticket, &reservation_ww_class);
retry:
list_for_each_entry(entry, list, head) {
struct ttm_buffer_object *bo = entry->bo;
- entry->reserved = false;
- ret = ttm_bo_reserve(bo, true, false, true, val_seq);
- if (ret != 0) {
- ttm_eu_backoff_reservation(list);
- if (ret == -EAGAIN) {
- ret = ttm_bo_wait_unreserved(bo, true);
- if (unlikely(ret != 0))
- return ret;
- goto retry;
- } else
- return ret;
- }
+ /* already slowpath reserved? */
+ if (entry->reserved)
+ continue;
+
+ ret = __ttm_bo_reserve(bo, true, (ticket == NULL), true,
+ ticket);
+
+ if (ret == -EDEADLK) {
+ /* uh oh, we lost out, drop every reservation and try
+ * to only reserve this buffer, then start over if
+ * this succeeds.
+ */
+ BUG_ON(ticket == NULL);
+ spin_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+ ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
+ ticket);
+ if (unlikely(ret != 0)) {
+ if (ret == -EINTR)
+ ret = -ERESTARTSYS;
+ goto err_fini;
+ }
+
+ entry->reserved = true;
+ if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
+ ret = -EBUSY;
+ goto err;
+ }
+ goto retry;
+ } else if (ret)
+ goto err;
entry->reserved = true;
if (unlikely(atomic_read(&bo->cpu_writers) > 0)) {
- ttm_eu_backoff_reservation(list);
- ret = ttm_bo_wait_cpu(bo, false);
- if (ret)
- return ret;
- goto retry;
+ ret = -EBUSY;
+ goto err;
}
}
+
+ if (ticket)
+ ww_acquire_done(ticket);
+ spin_lock(&glob->lru_lock);
+ ttm_eu_del_from_lru_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
return 0;
+
+err:
+ spin_lock(&glob->lru_lock);
+ ttm_eu_backoff_reservation_locked(list);
+ spin_unlock(&glob->lru_lock);
+ ttm_eu_list_ref_sub(list);
+err_fini:
+ if (ticket) {
+ ww_acquire_done(ticket);
+ ww_acquire_fini(ticket);
+ }
+ return ret;
}
EXPORT_SYMBOL(ttm_eu_reserve_buffers);
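
The -EDEADLK branch above is TTM's instance of the standard wait/wound
backoff: on a lock-order conflict, drop every reservation, acquire only the
contended lock with the sleeping slowpath, then restart with that lock already
held (which is why the retry loop skips entries whose entry->reserved is still
set). The same pattern in bare ww_mutex terms, roughly as the kernel's
ww-mutex documentation presents it (the obj/obj_entry types and demo_ww_class
are illustrative, not TTM's):

    #include <linux/list.h>
    #include <linux/ww_mutex.h>

    static DEFINE_WW_CLASS(demo_ww_class);  /* stand-in for reservation_ww_class */

    struct obj {
            struct ww_mutex lock;
    };

    struct obj_entry {
            struct list_head head;
            struct obj *obj;
    };

    static int lock_objs(struct list_head *list, struct ww_acquire_ctx *ctx)
    {
            struct obj *res_obj = NULL;     /* lock already taken via lock_slow */
            struct obj_entry *contended = NULL;
            struct obj_entry *entry;
            int ret;

            ww_acquire_init(ctx, &demo_ww_class);
    retry:
            list_for_each_entry(entry, list, head) {
                    if (entry->obj == res_obj) {    /* slowpath holds this one */
                            res_obj = NULL;
                            continue;
                    }
                    ret = ww_mutex_lock(&entry->obj->lock, ctx);
                    if (ret < 0) {
                            contended = entry;
                            goto err;
                    }
            }
            ww_acquire_done(ctx);
            return 0;

    err:
            list_for_each_entry_continue_reverse(entry, list, head)
                    ww_mutex_unlock(&entry->obj->lock);
            if (res_obj)
                    ww_mutex_unlock(&res_obj->lock);
            if (ret == -EDEADLK) {
                    /* lost a stamp race: sleep on the contended lock, restart */
                    ww_mutex_lock_slow(&contended->obj->lock, ctx);
                    res_obj = contended->obj;
                    goto retry;
            }
            ww_acquire_fini(ctx);
            return ret;
    }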
-void ttm_eu_fence_buffer_objects(struct list_head *list, void *sync_obj)
+void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
+ struct list_head *list, void *sync_obj)
{
struct ttm_validate_buffer *entry;
+ struct ttm_buffer_object *bo;
+ struct ttm_bo_global *glob;
+ struct ttm_bo_device *bdev;
+ struct ttm_bo_driver *driver;
- list_for_each_entry(entry, list, head) {
- struct ttm_buffer_object *bo = entry->bo;
- struct ttm_bo_driver *driver = bo->bdev->driver;
- void *old_sync_obj;
+ if (list_empty(list))
+ return;
- spin_lock(&bo->lock);
- old_sync_obj = bo->sync_obj;
+ bo = list_first_entry(list, struct ttm_validate_buffer, head)->bo;
+ bdev = bo->bdev;
+ driver = bdev->driver;
+ glob = bo->glob;
+
+ spin_lock(&glob->lru_lock);
+ spin_lock(&bdev->fence_lock);
+
+ list_for_each_entry(entry, list, head) {
+ bo = entry->bo;
+ entry->old_sync_obj = bo->sync_obj;
bo->sync_obj = driver->sync_obj_ref(sync_obj);
- bo->sync_obj_arg = entry->new_sync_obj_arg;
- spin_unlock(&bo->lock);
- ttm_bo_unreserve(bo);
+ ttm_bo_add_to_lru(bo);
+ __ttm_bo_unreserve(bo);
entry->reserved = false;
- if (old_sync_obj)
- driver->sync_obj_unref(&old_sync_obj);
+ }
+ spin_unlock(&bdev->fence_lock);
+ spin_unlock(&glob->lru_lock);
+ if (ticket)
+ ww_acquire_fini(ticket);
+
+ list_for_each_entry(entry, list, head) {
+ if (entry->old_sync_obj)
+ driver->sync_obj_unref(&entry->old_sync_obj);
}
}
EXPORT_SYMBOL(ttm_eu_fence_buffer_objects);
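
Note the ordering in the fence path: the new sync_obj is published and each
buffer is returned to the LRU and unreserved while both glob->lru_lock and
bdev->fence_lock are held, but the old sync_obj references are dropped only
after both spinlocks are released, since sync_obj_unref may sleep. Continuing
the hypothetical demo_submit() sketch from above, the happy path would end
like this (demo_emit() and my_fence are invented; sync_obj is a void * in
this era of TTM):

    void *my_fence;

    my_fence = demo_emit(bo_a->bdev);       /* hypothetical: queue the job */
    if (IS_ERR(my_fence)) {
            ttm_eu_backoff_reservation(&ticket, &val_list);
            return PTR_ERR(my_fence);
    }

    /* Installs my_fence as each BO's sync_obj under fence_lock, re-adds
     * the BOs to the LRU, unreserves them, retires the ticket, and only
     * then unrefs the old sync objects. */
    ttm_eu_fence_buffer_objects(&ticket, &val_list, my_fence);
    return 0;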