Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_util.c | 51
1 file changed, 34 insertions(+), 17 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index af894584dd9..1df856f7856 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -30,6 +30,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
#include <linux/io.h>
#include <linux/highmem.h>
#include <linux/wait.h>
@@ -186,7 +187,7 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
}
}
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void **virtual)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
@@ -218,7 +219,7 @@ int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
return 0;
}
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
void *virtual)
{
struct ttm_mem_type_manager *man;
@@ -342,19 +343,29 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
if (ret)
goto out;
+ /*
+ * Single TTM move. NOP.
+ */
if (old_iomap == NULL && new_iomap == NULL)
goto out2;
- if (old_iomap == NULL && ttm == NULL)
+
+ /*
+ * Don't move nonexistent data. Clear destination instead.
+ */
+ if (old_iomap == NULL &&
+ (ttm == NULL || (ttm->state == tt_unpopulated &&
+ !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
+ memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
goto out2;
+ }
- if (ttm->state == tt_unpopulated) {
+ /*
+ * TTM might be null for moves within the same region.
+ */
+ if (ttm && ttm->state == tt_unpopulated) {
ret = ttm->bdev->driver->ttm_tt_populate(ttm);
- if (ret) {
- /* if we fail here don't nuke the mm node
- * as the bo still owns it */
- old_copy.mm_node = NULL;
+ if (ret)
goto out1;
- }
}
add = 0;
@@ -380,11 +391,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
prot);
} else
ret = ttm_copy_io_page(new_iomap, old_iomap, page);
- if (ret) {
- /* failing here, means keep old copy as-is */
- old_copy.mm_node = NULL;
+ if (ret)
goto out1;
- }
}
mb();
out2:
@@ -402,7 +410,12 @@ out1:
ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
out:
ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
- ttm_bo_mem_put(bo, &old_copy);
+
+ /*
+ * On error, keep the mm node!
+ */
+ if (!ret)
+ ttm_bo_mem_put(bo, &old_copy);
return ret;
}
EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -433,6 +446,7 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
struct ttm_buffer_object *fbo;
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_bo_driver *driver = bdev->driver;
+ int ret;
fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
if (!fbo)
@@ -445,12 +459,11 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
* TODO: Explicit member copy would probably be better here.
*/
- init_waitqueue_head(&fbo->event_queue);
INIT_LIST_HEAD(&fbo->ddestroy);
INIT_LIST_HEAD(&fbo->lru);
INIT_LIST_HEAD(&fbo->swap);
INIT_LIST_HEAD(&fbo->io_reserve_lru);
- fbo->vm_node = NULL;
+ drm_vma_node_reset(&fbo->vma_node);
atomic_set(&fbo->cpu_writers, 0);
spin_lock(&bdev->fence_lock);
@@ -463,6 +476,10 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
kref_init(&fbo->kref);
fbo->destroy = &ttm_transfered_destroy;
fbo->acc_size = 0;
+ fbo->resv = &fbo->ttm_resv;
+ reservation_object_init(fbo->resv);
+ ret = ww_mutex_trylock(&fbo->resv->lock);
+ WARN_ON(!ret);
*new_obj = fbo;
return 0;
@@ -577,7 +594,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
if (start_page > bo->num_pages)
return -EINVAL;
#if 0
- if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+ if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
return -EPERM;
#endif
(void) ttm_mem_io_lock(man, false);
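
Below is a minimal, self-contained sketch (plain C, not the TTM API; move_memcpy(), copy_pages() and node_put() are hypothetical stand-ins) of the error-handling pattern this patch applies to ttm_bo_move_memcpy(): the old memory node is released only after the copy has fully succeeded, so a failed move leaves the buffer object still owning its original backing store.

#include <stdio.h>

struct node { int valid; };

/* Stand-in for the page-copy loop; returns 0 on success, negative on error. */
static int copy_pages(int fail)
{
	return fail ? -1 : 0;
}

/* Stand-in for ttm_bo_mem_put(): releases the old memory node. */
static void node_put(struct node *n)
{
	n->valid = 0;
}

static int move_memcpy(struct node *old_node, int fail)
{
	int ret = copy_pages(fail);

	/* Keep the old node on error; release it only once the copy succeeded. */
	if (!ret)
		node_put(old_node);
	return ret;
}

int main(void)
{
	struct node old = { .valid = 1 };

	if (move_memcpy(&old, 1))
		printf("move failed, old node still owned: %d\n", old.valid);
	return 0;
}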