Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo.c')
| -rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo.c | 218 |
1 file changed, 94 insertions, 124 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index cb9dd674670..4ab9f7171c4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -45,7 +45,6 @@
 #define TTM_DEBUG(fmt, arg...)
 #define TTM_BO_HASH_ORDER 13
 
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo);
 static int ttm_bo_swapout(struct ttm_mem_shrink *shrink);
 static void ttm_bo_global_kobj_release(struct kobject *kobj);
 
@@ -152,7 +151,7 @@ static void ttm_bo_release_list(struct kref *list_kref)
 	atomic_dec(&bo->glob->bo_count);
 	if (bo->resv == &bo->ttm_resv)
 		reservation_object_fini(&bo->ttm_resv);
-
+	mutex_destroy(&bo->wu_mutex);
 	if (bo->destroy)
 		bo->destroy(bo);
 	else {
@@ -352,9 +351,11 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
 
 moved:
 	if (bo->evicted) {
-		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
-		if (ret)
-			pr_err("Can not flush read caches\n");
+		if (bdev->driver->invalidate_caches) {
+			ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
+			if (ret)
+				pr_err("Can not flush read caches\n");
+		}
 		bo->evicted = false;
 	}
 
@@ -411,7 +412,7 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 	int ret;
 
 	spin_lock(&glob->lru_lock);
-	ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+	ret = __ttm_bo_reserve(bo, false, true, false, 0);
 
 	spin_lock(&bdev->fence_lock);
 	(void) ttm_bo_wait(bo, false, false, true);
@@ -430,8 +431,20 @@ static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
-		if (!ret)
-			ww_mutex_unlock(&bo->resv->lock);
+		if (!ret) {
+
+			/*
+			 * Make NO_EVICT bos immediately available to
+			 * shrinkers, now that they are queued for
+			 * destruction.
+			 */
+			if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+				bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+				ttm_bo_add_to_lru(bo);
+			}
+
+			__ttm_bo_unreserve(bo);
+		}
 
 		kref_get(&bo->list_kref);
 		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
@@ -481,7 +494,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 		sync_obj = driver->sync_obj_ref(bo->sync_obj);
 		spin_unlock(&bdev->fence_lock);
 
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);
 
 		ret = driver->sync_obj_wait(sync_obj, false, interruptible);
@@ -501,7 +514,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 			return ret;
 
 		spin_lock(&glob->lru_lock);
-		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, 0);
 
 		/*
 		 * We raced, and lost, someone else holds the reservation now,
@@ -519,7 +532,7 @@ static int ttm_bo_cleanup_refs_and_unlock(struct ttm_buffer_object *bo,
 	spin_unlock(&bdev->fence_lock);
 
 	if (ret || unlikely(list_empty(&bo->ddestroy))) {
-		ww_mutex_unlock(&bo->resv->lock);
+		__ttm_bo_unreserve(bo);
 		spin_unlock(&glob->lru_lock);
 		return ret;
 	}
@@ -564,11 +577,11 @@ static int ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
 			kref_get(&nentry->list_kref);
 		}
 
-		ret = ttm_bo_reserve_nolru(entry, false, true, false, 0);
+		ret = __ttm_bo_reserve(entry, false, true, false, 0);
 		if (remove_all && ret) {
 			spin_unlock(&glob->lru_lock);
-			ret = ttm_bo_reserve_nolru(entry, false, false,
-						   false, 0);
+			ret = __ttm_bo_reserve(entry, false, false,
					       false, 0);
 			spin_lock(&glob->lru_lock);
 		}
 
@@ -615,13 +628,7 @@ static void ttm_bo_release(struct kref *kref)
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
 
-	write_lock(&bdev->vm_lock);
-	if (likely(bo->vm_node != NULL)) {
-		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
-		drm_mm_put_block(bo->vm_node);
-		bo->vm_node = NULL;
-	}
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_remove(&bdev->vma_manager, &bo->vma_node);
 	ttm_mem_io_lock(man, false);
 	ttm_mem_io_free_vm(bo);
 	ttm_mem_io_unlock(man);
@@ -719,7 +726,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &man->lru, lru) {
-		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -952,7 +959,7 @@ int ttm_bo_mem_space(struct ttm_buffer_object *bo,
 }
 EXPORT_SYMBOL(ttm_bo_mem_space);
 
-int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
+static int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
 			struct ttm_placement *placement,
 			bool interruptible,
 			bool no_wait_gpu)
@@ -993,24 +1000,32 @@ out_unlock:
 	return ret;
 }
 
-static int ttm_bo_mem_compat(struct ttm_placement *placement,
-			     struct ttm_mem_reg *mem)
+static bool ttm_bo_mem_compat(struct ttm_placement *placement,
+			      struct ttm_mem_reg *mem,
+			      uint32_t *new_flags)
 {
 	int i;
 
 	if (mem->mm_node && placement->lpfn != 0 &&
 	    (mem->start < placement->fpfn ||
 	     mem->start + mem->num_pages > placement->lpfn))
-		return -1;
+		return false;
 
 	for (i = 0; i < placement->num_placement; i++) {
-		if ((placement->placement[i] & mem->placement &
-			TTM_PL_MASK_CACHING) &&
-			(placement->placement[i] & mem->placement &
-			TTM_PL_MASK_MEM))
-			return i;
+		*new_flags = placement->placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
+	}
+
+	for (i = 0; i < placement->num_busy_placement; i++) {
+		*new_flags = placement->busy_placement[i];
+		if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) &&
+		    (*new_flags & mem->placement & TTM_PL_MASK_MEM))
+			return true;
 	}
-	return -1;
+
+	return false;
 }
 
 int ttm_bo_validate(struct ttm_buffer_object *bo,
@@ -1019,6 +1034,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 			bool no_wait_gpu)
 {
 	int ret;
+	uint32_t new_flags;
 
 	lockdep_assert_held(&bo->resv->lock.base);
 	/* Check that range is valid */
@@ -1029,8 +1045,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 	/*
 	 * Check whether we need to move buffer.
 	 */
-	ret = ttm_bo_mem_compat(placement, &bo->mem);
-	if (ret < 0) {
+	if (!ttm_bo_mem_compat(placement, &bo->mem, &new_flags)) {
 		ret = ttm_bo_move_buffer(bo, placement, interruptible,
 					 no_wait_gpu);
 		if (ret)
@@ -1040,7 +1055,7 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
 		 * Use the access and other non-mapping-related flag bits from
 		 * the compatible memory placement flags to the active flags
 		 */
-		ttm_flag_masked(&bo->mem.placement, placement->placement[ret],
+		ttm_flag_masked(&bo->mem.placement, new_flags,
 				~TTM_PL_MASK_MEMTYPE);
 	}
 	/*
@@ -1110,6 +1125,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	INIT_LIST_HEAD(&bo->ddestroy);
 	INIT_LIST_HEAD(&bo->swap);
 	INIT_LIST_HEAD(&bo->io_reserve_lru);
+	mutex_init(&bo->wu_mutex);
 	bo->bdev = bdev;
 	bo->glob = bdev->glob;
 	bo->type = type;
@@ -1129,6 +1145,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	bo->resv = &bo->ttm_resv;
 	reservation_object_init(bo->resv);
 	atomic_inc(&bo->glob->bo_count);
+	drm_vma_node_reset(&bo->vma_node);
 
 	ret = ttm_bo_check_placement(bo, placement);
 
@@ -1139,7 +1156,8 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
 	if (likely(!ret) &&
 	    (bo->type == ttm_bo_type_device ||
 	     bo->type == ttm_bo_type_sg))
-		ret = ttm_bo_setup_vm(bo);
+		ret = drm_vma_offset_add(&bdev->vma_manager, &bo->vma_node,
+					 bo->mem.num_pages);
 
 	locked = ww_mutex_trylock(&bo->resv->lock);
 	WARN_ON(!locked);
@@ -1424,10 +1442,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev)
 		TTM_DEBUG("Swap list was clean\n");
 	spin_unlock(&glob->lru_lock);
 
-	BUG_ON(!drm_mm_clean(&bdev->addr_space_mm));
-	write_lock(&bdev->vm_lock);
-	drm_mm_takedown(&bdev->addr_space_mm);
-	write_unlock(&bdev->vm_lock);
+	drm_vma_offset_manager_destroy(&bdev->vma_manager);
 
 	return ret;
 }
@@ -1436,12 +1451,12 @@ EXPORT_SYMBOL(ttm_bo_device_release);
 int ttm_bo_device_init(struct ttm_bo_device *bdev,
 		       struct ttm_bo_global *glob,
 		       struct ttm_bo_driver *driver,
+		       struct address_space *mapping,
 		       uint64_t file_page_offset,
 		       bool need_dma32)
 {
 	int ret = -EINVAL;
 
-	rwlock_init(&bdev->vm_lock);
 	bdev->driver = driver;
 
 	memset(bdev->man, 0, sizeof(bdev->man));
@@ -1454,12 +1469,11 @@ int ttm_bo_device_init(struct ttm_bo_device *bdev,
 	if (unlikely(ret != 0))
 		goto out_no_sys;
 
-	bdev->addr_space_rb = RB_ROOT;
-	drm_mm_init(&bdev->addr_space_mm, file_page_offset, 0x10000000);
-
+	drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
+				    0x10000000);
 	INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
 	INIT_LIST_HEAD(&bdev->ddestroy);
-	bdev->dev_mapping = NULL;
+	bdev->dev_mapping = mapping;
 	bdev->glob = glob;
 	bdev->need_dma32 = need_dma32;
 	bdev->val_seq = 0;
@@ -1498,12 +1512,8 @@ bool ttm_mem_reg_is_pci(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
 void ttm_bo_unmap_virtual_locked(struct ttm_buffer_object *bo)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
-	loff_t offset = (loff_t) bo->addr_space_offset;
-	loff_t holelen = ((loff_t) bo->mem.num_pages) << PAGE_SHIFT;
 
-	if (!bdev->dev_mapping)
-		return;
-	unmap_mapping_range(bdev->dev_mapping, offset, holelen, 1);
+	drm_vma_node_unmap(&bo->vma_node, bdev->dev_mapping);
 	ttm_mem_io_free_vm(bo);
 }
 
@@ -1520,78 +1530,6 @@ void ttm_bo_unmap_virtual(struct ttm_buffer_object *bo)
 
 EXPORT_SYMBOL(ttm_bo_unmap_virtual);
 
-static void ttm_bo_vm_insert_rb(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	struct rb_node **cur = &bdev->addr_space_rb.rb_node;
-	struct rb_node *parent = NULL;
-	struct ttm_buffer_object *cur_bo;
-	unsigned long offset = bo->vm_node->start;
-	unsigned long cur_offset;
-
-	while (*cur) {
-		parent = *cur;
-		cur_bo = rb_entry(parent, struct ttm_buffer_object, vm_rb);
-		cur_offset = cur_bo->vm_node->start;
-		if (offset < cur_offset)
-			cur = &parent->rb_left;
-		else if (offset > cur_offset)
-			cur = &parent->rb_right;
-		else
-			BUG();
-	}
-
-	rb_link_node(&bo->vm_rb, parent, cur);
-	rb_insert_color(&bo->vm_rb, &bdev->addr_space_rb);
-}
-
-/**
- * ttm_bo_setup_vm:
- *
- * @bo: the buffer to allocate address space for
- *
- * Allocate address space in the drm device so that applications
- * can mmap the buffer and access the contents. This only
- * applies to ttm_bo_type_device objects as others are not
- * placed in the drm device address space.
- */
-
-static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
-{
-	struct ttm_bo_device *bdev = bo->bdev;
-	int ret;
-
-retry_pre_get:
-	ret = drm_mm_pre_get(&bdev->addr_space_mm);
-	if (unlikely(ret != 0))
-		return ret;
-
-	write_lock(&bdev->vm_lock);
-	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
-					 bo->mem.num_pages, 0, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		ret = -ENOMEM;
-		goto out_unlock;
-	}
-
-	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
-					      bo->mem.num_pages, 0);
-
-	if (unlikely(bo->vm_node == NULL)) {
-		write_unlock(&bdev->vm_lock);
-		goto retry_pre_get;
-	}
-
-	ttm_bo_vm_insert_rb(bo);
-	write_unlock(&bdev->vm_lock);
-	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;
-
-	return 0;
-out_unlock:
-	write_unlock(&bdev->vm_lock);
-	return ret;
-}
 
 int ttm_bo_wait(struct ttm_buffer_object *bo,
 		bool lazy, bool interruptible, bool no_wait)
@@ -1692,7 +1630,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
 
 	spin_lock(&glob->lru_lock);
 	list_for_each_entry(bo, &glob->swap_lru, swap) {
-		ret = ttm_bo_reserve_nolru(bo, false, true, false, 0);
+		ret = __ttm_bo_reserve(bo, false, true, false, 0);
 		if (!ret)
 			break;
 	}
@@ -1759,7 +1697,7 @@ out:
 	 * already swapped buffer.
 	 */
 
-	ww_mutex_unlock(&bo->resv->lock);
+	__ttm_bo_unreserve(bo);
 	kref_put(&bo->list_kref, ttm_bo_release_list);
 	return ret;
 }
@@ -1770,3 +1708,35 @@ void ttm_bo_swapout_all(struct ttm_bo_device *bdev)
 		;
 }
 EXPORT_SYMBOL(ttm_bo_swapout_all);
+
+/**
+ * ttm_bo_wait_unreserved - interruptible wait for a buffer object to become
+ * unreserved
+ *
+ * @bo: Pointer to buffer
+ */
+int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo)
+{
+	int ret;
+
+	/*
+	 * In the absence of a wait_unlocked API,
+	 * use the bo::wu_mutex to avoid triggering livelocks due to
+	 * concurrent use of this function. Note that this use of
+	 * bo::wu_mutex can go away if we change locking order to
+	 * mmap_sem -> bo::reserve.
+	 */
+	ret = mutex_lock_interruptible(&bo->wu_mutex);
+	if (unlikely(ret != 0))
+		return -ERESTARTSYS;
+	if (!ww_mutex_is_locked(&bo->resv->lock))
+		goto out_unlock;
+	ret = __ttm_bo_reserve(bo, true, false, false, NULL);
+	if (unlikely(ret != 0))
+		goto out_unlock;
+	__ttm_bo_unreserve(bo);
+
+out_unlock:
+	mutex_unlock(&bo->wu_mutex);
+	return ret;
+}
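
A note on the driver-facing changes above: ttm_bo_device_init() now takes the device's struct address_space up front instead of leaving bdev->dev_mapping NULL for the driver to fill in later, and the old vm_lock/addr_space_rb/addr_space_mm bookkeeping is replaced by a struct drm_vma_offset_manager, so a buffer's mmap offset comes from its embedded vma_node. A minimal sketch of an adapted driver init path follows; priv, my_bo_driver and MY_FILE_PAGE_OFFSET are hypothetical names for illustration, not part of this patch:

	/* Hypothetical call site after this patch: the mapping argument is
	 * new; the rest matches the old ttm_bo_device_init() signature.
	 * The address_space here is assumed to come from the drm_device's
	 * anonymous inode; drivers may use whatever backs their mmap. */
	ret = ttm_bo_device_init(&priv->bdev,
				 priv->bo_global_ref.ref.object,
				 &my_bo_driver,
				 priv->drm_dev->anon_inode->i_mapping,
				 MY_FILE_PAGE_OFFSET,
				 priv->need_dma32);

	/* The user-space mmap offset (in bytes) is now derived from the
	 * vma_node managed by the drm_vma_offset_manager, replacing the
	 * removed bo->addr_space_offset field: */
	offset = drm_vma_node_offset_addr(&bo->vma_node);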
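
The new ttm_bo_wait_unreserved() at the end of the diff is intended for page-fault paths that hold mmap_sem and therefore must not sleep on the buffer reservation while holding it. A sketch of the intended caller pattern in a retry-capable fault handler follows; the surrounding handler is illustrative, not part of this file:

	/* Illustrative fault-handler fragment: if the bo's reservation is
	 * contended, drop mmap_sem, wait for the bo to become unreserved
	 * outside the lock, and let the core retry the fault rather than
	 * livelocking against the reservation holder. */
	ret = ttm_bo_reserve(bo, true, true, false, NULL);
	if (unlikely(ret != 0)) {
		if (ret != -EBUSY)
			return VM_FAULT_NOPAGE;

		if (vmf->flags & FAULT_FLAG_ALLOW_RETRY) {
			if (!(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
				up_read(&vma->vm_mm->mmap_sem);
				(void) ttm_bo_wait_unreserved(bo);
			}
			return VM_FAULT_RETRY;
		}
		return VM_FAULT_NOPAGE;
	}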
