Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--   drivers/gpu/drm/vmwgfx/vmwgfx_resource.c   426
1 file changed, 350 insertions, 76 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 0e67cf41065..01d68f0a69d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -32,8 +32,10 @@
 #include <drm/drmP.h>
 #include "vmwgfx_resource_priv.h"
 
+#define VMW_RES_EVICT_ERR_COUNT 10
+
 struct vmw_user_dma_buffer {
-	struct ttm_base_object base;
+	struct ttm_prime_object prime;
 	struct vmw_dma_buffer dma;
 };
 
@@ -86,6 +88,11 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
 	return res;
 }
 
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+	return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
 
 /**
  * vmw_resource_release_id - release a resource id to the id manager.
@@ -134,8 +141,12 @@ static void vmw_resource_release(struct kref *kref)
 		vmw_dmabuf_unreference(&res->backup);
 	}
 
-	if (likely(res->hw_destroy != NULL))
+	if (likely(res->hw_destroy != NULL)) {
 		res->hw_destroy(res);
+		mutex_lock(&dev_priv->binding_mutex);
+		vmw_context_binding_res_list_kill(&res->binding_head);
+		mutex_unlock(&dev_priv->binding_mutex);
+	}
 
 	id = res->id;
 	if (res->res_free != NULL)
@@ -213,6 +224,7 @@ int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
 	res->func = func;
 	INIT_LIST_HEAD(&res->lru_head);
 	INIT_LIST_HEAD(&res->mob_head);
+	INIT_LIST_HEAD(&res->binding_head);
 	res->id = -1;
 	res->backup = NULL;
 	res->backup_offset = 0;
@@ -295,7 +307,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
 	if (unlikely(base == NULL))
 		return -EINVAL;
 
-	if (unlikely(base->object_type != converter->object_type))
+	if (unlikely(ttm_base_object_type(base) != converter->object_type))
 		goto out_bad_resource;
 
 	res = converter->base_obj_to_res(base);
@@ -350,6 +362,38 @@ int vmw_user_lookup_handle(struct vmw_private *dev_priv,
 /**
  * Buffer management.
  */
+
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+				  bool user)
+{
+	static size_t struct_size, user_struct_size;
+	size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+	size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
+
+	if (unlikely(struct_size == 0)) {
+		size_t backend_size = ttm_round_pot(vmw_tt_size);
+
+		struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_dma_buffer));
+		user_struct_size = backend_size +
+			ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+	}
+
+	if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+		page_array_size +=
+			ttm_round_pot(num_pages * sizeof(dma_addr_t));
+
+	return ((user) ? user_struct_size : struct_size) +
+		page_array_size;
+}
+
 void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 {
 	struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
@@ -357,6 +401,13 @@ void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
 	kfree(vmw_bo);
 }
 
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+{
+	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
+
+	ttm_prime_object_kfree(vmw_user_bo, prime);
+}
+
 int vmw_dmabuf_init(struct vmw_private *dev_priv,
 		    struct vmw_dma_buffer *vmw_bo,
 		    size_t size, struct ttm_placement *placement,
@@ -366,10 +417,11 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	struct ttm_bo_device *bdev = &dev_priv->bdev;
 	size_t acc_size;
 	int ret;
+	bool user = (bo_free == &vmw_user_dmabuf_destroy);
 
-	BUG_ON(!bo_free);
+	BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
 
-	acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct vmw_dma_buffer));
+	acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
 	memset(vmw_bo, 0, sizeof(*vmw_bo));
 
 	INIT_LIST_HEAD(&vmw_bo->res_list);
@@ -381,13 +433,6 @@ int vmw_dmabuf_init(struct vmw_private *dev_priv,
 	return ret;
 }
 
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
-{
-	struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
-
-	ttm_base_object_kfree(vmw_user_bo, base);
-}
-
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 {
 	struct vmw_user_dma_buffer *vmw_user_bo;
@@ -399,11 +444,27 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
 	if (unlikely(base == NULL))
 		return;
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	bo = &vmw_user_bo->dma.base;
 	ttm_bo_unref(&bo);
 }
 
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+					    enum ttm_ref_type ref_type)
+{
+	struct vmw_user_dma_buffer *user_bo;
+	user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+	switch (ref_type) {
+	case TTM_REF_SYNCCPU_WRITE:
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+		break;
+	default:
+		BUG();
+	}
+}
+
 /**
  * vmw_user_dmabuf_alloc - Allocate a user dma buffer
  *
@@ -434,24 +495,28 @@
 	}
 
 	ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+			      (dev_priv->has_mob) ?
+			      &vmw_sys_placement :
 			      &vmw_vram_sys_placement, true,
 			      &vmw_user_dmabuf_destroy);
 	if (unlikely(ret != 0))
 		return ret;
 
 	tmp = ttm_bo_reference(&user_bo->dma.base);
-	ret = ttm_base_object_init(tfile,
-				   &user_bo->base,
-				   shareable,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+	ret = ttm_prime_object_init(tfile,
+				    size,
+				    &user_bo->prime,
+				    shareable,
+				    ttm_buffer_type,
+				    &vmw_user_dmabuf_release,
+				    &vmw_user_dmabuf_ref_obj_release);
 	if (unlikely(ret != 0)) {
 		ttm_bo_unref(&tmp);
 		goto out_no_base_object;
 	}
 
 	*p_dma_buf = &user_bo->dma;
-	*handle = user_bo->base.hash.key;
+	*handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
 	return ret;
@@ -473,8 +538,137 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
 		return -EPERM;
 
 	vmw_user_bo = vmw_user_dma_buffer(bo);
-	return (vmw_user_bo->base.tfile == tfile ||
-		vmw_user_bo->base.shareable) ? 0 : -EPERM;
+
+	/* Check that the caller has opened the object. */
+	if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+		return 0;
+
+	DRM_ERROR("Could not grant buffer access.\n");
+	return -EPERM;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+					struct ttm_object_file *tfile,
+					uint32_t flags)
+{
+	struct ttm_buffer_object *bo = &user_bo->dma.base;
+	bool existed;
+	int ret;
+
+	if (flags & drm_vmw_synccpu_allow_cs) {
+		struct ttm_bo_device *bdev = bo->bdev;
+
+		spin_lock(&bdev->fence_lock);
+		ret = ttm_bo_wait(bo, false, true,
+				  !!(flags & drm_vmw_synccpu_dontblock));
+		spin_unlock(&bdev->fence_lock);
+		return ret;
+	}
+
+	ret = ttm_bo_synccpu_write_grab
+		(bo, !!(flags & drm_vmw_synccpu_dontblock));
+	if (unlikely(ret != 0))
+		return ret;
+
+	ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+				 TTM_REF_SYNCCPU_WRITE, &existed);
+	if (ret != 0 || existed)
+		ttm_bo_synccpu_write_release(&user_bo->dma.base);
+
+	return ret;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+					   struct ttm_object_file *tfile,
+					   uint32_t flags)
+{
+	if (!(flags & drm_vmw_synccpu_allow_cs))
+		return ttm_ref_object_base_unref(tfile, handle,
+						 TTM_REF_SYNCCPU_WRITE);
+
+	return 0;
+}
+
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+				  struct drm_file *file_priv)
+{
+	struct drm_vmw_synccpu_arg *arg =
+		(struct drm_vmw_synccpu_arg *) data;
+	struct vmw_dma_buffer *dma_buf;
+	struct vmw_user_dma_buffer *user_bo;
+	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+	int ret;
+
+	if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+	    || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
			       drm_vmw_synccpu_dontblock |
			       drm_vmw_synccpu_allow_cs)) != 0) {
+		DRM_ERROR("Illegal synccpu flags.\n");
+		return -EINVAL;
+	}
+
+	switch (arg->op) {
+	case drm_vmw_synccpu_grab:
+		ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+		if (unlikely(ret != 0))
+			return ret;
+
+		user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+				       dma);
+		ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+		vmw_dmabuf_unreference(&dma_buf);
+		if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+			     ret != -EBUSY)) {
+			DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	case drm_vmw_synccpu_release:
+		ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+						      arg->flags);
+		if (unlikely(ret != 0)) {
+			DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+				  (unsigned int) arg->handle);
+			return ret;
+		}
+		break;
+	default:
+		DRM_ERROR("Invalid synccpu operation.\n");
+		return -EINVAL;
+	}
+
+	return 0;
 }
 
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -487,10 +681,9 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	struct drm_vmw_dmabuf_rep *rep = &arg->rep;
 	struct vmw_dma_buffer *dma_buf;
 	uint32_t handle;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -507,7 +700,7 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
 	vmw_dmabuf_unreference(&dma_buf);
 
 out_no_dmabuf:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 
 	return ret;
 }
@@ -536,14 +729,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 		return -ESRCH;
 	}
 
-	if (unlikely(base->object_type != ttm_buffer_type)) {
+	if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
 		ttm_base_object_unref(&base);
 		printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
 		       (unsigned long)handle);
 		return -EINVAL;
 	}
 
-	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+	vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+				   prime.base);
 	(void)ttm_bo_reference(&vmw_user_bo->dma.base);
 	ttm_base_object_unref(&base);
 	*out = &vmw_user_bo->dma;
@@ -552,7 +746,8 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
 }
 
 int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
-			      struct vmw_dma_buffer *dma_buf)
+			      struct vmw_dma_buffer *dma_buf,
+			      uint32_t *handle)
 {
 	struct vmw_user_dma_buffer *user_bo;
 
@@ -560,7 +755,10 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
 		return -EINVAL;
 
 	user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-	return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+
+	*handle = user_bo->prime.base.hash.key;
+	return ttm_ref_object_add(tfile, &user_bo->prime.base,
+				  TTM_REF_USAGE, NULL);
 }
 
 /*
@@ -679,7 +877,6 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 	struct vmw_resource *tmp;
 	struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
 	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
 	int ret;
 
 	/*
@@ -690,7 +887,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 	if (unlikely(vmw_user_stream_size == 0))
 		vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
 
-	ret = ttm_read_lock(&vmaster->lock, true);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -738,7 +935,7 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
 out_err:
 	vmw_resource_unreference(&res);
 out_unlock:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
 
@@ -775,53 +972,54 @@ err_ref:
 }
 
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * that the arguments have a different format.
+ */
 int vmw_dumb_create(struct drm_file *file_priv,
 		    struct drm_device *dev,
 		    struct drm_mode_create_dumb *args)
 {
 	struct vmw_private *dev_priv = vmw_priv(dev);
-	struct vmw_master *vmaster = vmw_master(file_priv->master);
-	struct vmw_user_dma_buffer *vmw_user_bo;
-	struct ttm_buffer_object *tmp;
+	struct vmw_dma_buffer *dma_buf;
 	int ret;
 
 	args->pitch = args->width * ((args->bpp + 7) / 8);
 	args->size = args->pitch * args->height;
 
-	vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
-	if (vmw_user_bo == NULL)
-		return -ENOMEM;
-
-	ret = ttm_read_lock(&vmaster->lock, true);
-	if (ret != 0) {
-		kfree(vmw_user_bo);
+	ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+	if (unlikely(ret != 0))
 		return ret;
-	}
 
-	ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, args->size,
-			      &vmw_vram_sys_placement, true,
-			      &vmw_user_dmabuf_destroy);
-	if (ret != 0)
-		goto out_no_dmabuf;
-
-	tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-	ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-				   &vmw_user_bo->base,
-				   false,
-				   ttm_buffer_type,
-				   &vmw_user_dmabuf_release, NULL);
+	ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+				    args->size, false, &args->handle,
+				    &dma_buf);
 	if (unlikely(ret != 0))
-		goto out_no_base_object;
-
-	args->handle = vmw_user_bo->base.hash.key;
+		goto out_no_dmabuf;
 
-out_no_base_object:
-	ttm_bo_unref(&tmp);
+	vmw_dmabuf_unreference(&dma_buf);
 out_no_dmabuf:
-	ttm_read_unlock(&vmaster->lock);
+	ttm_read_unlock(&dev_priv->reservation_sem);
 	return ret;
 }
 
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
 int vmw_dumb_map_offset(struct drm_file *file_priv,
 			struct drm_device *dev, uint32_t handle,
 			uint64_t *offset)
@@ -839,6 +1037,15 @@ int vmw_dumb_map_offset(struct drm_file *file_priv,
 	return 0;
 }
 
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
 int vmw_dumb_destroy(struct drm_file *file_priv,
 		     struct drm_device *dev,
 		     uint32_t handle)
@@ -970,7 +1177,7 @@ void vmw_resource_unreserve(struct vmw_resource *res,
 	if (new_backup)
 		res->backup_offset = new_backup_offset;
 
-	if (!res->func->may_evict)
+	if (!res->func->may_evict || res->id == -1)
 		return;
 
 	write_lock(&dev_priv->resource_lock);
@@ -992,7 +1199,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  */
 static int
 vmw_resource_check_buffer(struct vmw_resource *res,
-			  struct ww_acquire_ctx *ticket,
 			  bool interruptible,
 			  struct ttm_validate_buffer *val_buf)
 {
@@ -1009,7 +1215,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	INIT_LIST_HEAD(&val_list);
 	val_buf->bo = ttm_bo_reference(&res->backup->base);
 	list_add_tail(&val_buf->head, &val_list);
-	ret = ttm_eu_reserve_buffers(ticket, &val_list);
+	ret = ttm_eu_reserve_buffers(NULL, &val_list);
 	if (unlikely(ret != 0))
 		goto out_no_reserve;
 
@@ -1027,7 +1233,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
 	return 0;
 
 out_no_validate:
-	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_eu_backoff_reservation(NULL, &val_list);
 out_no_reserve:
 	ttm_bo_unref(&val_buf->bo);
 	if (backup_dirty)
@@ -1072,8 +1278,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
  * @val_buf:        Backup buffer information.
  */
 static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
-				 struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 {
 	struct list_head val_list;
 
@@ -1082,7 +1287,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 
 	INIT_LIST_HEAD(&val_list);
 	list_add_tail(&val_buf->head, &val_list);
-	ttm_eu_backoff_reservation(ticket, &val_list);
+	ttm_eu_backoff_reservation(NULL, &val_list);
 	ttm_bo_unref(&val_buf->bo);
 }
 
@@ -1091,18 +1296,18 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
  * to a backup buffer.
  *
  * @res:            The resource to evict.
+ * @interruptible:  Whether to wait interruptibly.
  */
-int vmw_resource_do_evict(struct vmw_resource *res)
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
 	struct ttm_validate_buffer val_buf;
 	const struct vmw_res_func *func = res->func;
-	struct ww_acquire_ctx ticket;
 	int ret;
 
 	BUG_ON(!func->may_evict);
 
 	val_buf.bo = NULL;
-	ret = vmw_resource_check_buffer(res, &ticket, true, &val_buf);
+	ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
 	if (unlikely(ret != 0))
 		return ret;
 
@@ -1117,7 +1322,7 @@ int vmw_resource_do_evict(struct vmw_resource *res)
 	res->backup_dirty = true;
 	res->res_dirty = false;
 out_no_unbind:
-	vmw_resource_backoff_reservation(&ticket, &val_buf);
+	vmw_resource_backoff_reservation(&val_buf);
 
 	return ret;
 }
@@ -1141,6 +1346,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 	struct vmw_private *dev_priv = res->dev_priv;
 	struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
 	struct ttm_validate_buffer val_buf;
+	unsigned err_count = 0;
 
 	if (likely(!res->func->may_evict))
 		return 0;
@@ -1155,7 +1361,7 @@ int vmw_resource_validate(struct vmw_resource *res)
 
 		write_lock(&dev_priv->resource_lock);
 		if (list_empty(lru_list) || !res->func->may_evict) {
-			DRM_ERROR("Out of device device id entries "
+			DRM_ERROR("Out of device resources "
 				  "for %s.\n", res->func->type_name);
 			ret = -EBUSY;
 			write_unlock(&dev_priv->resource_lock);
@@ -1168,7 +1374,19 @@ int vmw_resource_validate(struct vmw_resource *res)
 
 		list_del_init(&evict_res->lru_head);
 		write_unlock(&dev_priv->resource_lock);
-		vmw_resource_do_evict(evict_res);
+
+		ret = vmw_resource_do_evict(evict_res, true);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (ret == -ERESTARTSYS ||
+			    ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				goto out_no_validate;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
 
@@ -1229,11 +1447,54 @@ void vmw_fence_single_bo(struct ttm_buffer_object *bo,
 * @mem:            The struct ttm_mem_reg indicating to what memory
 *                  region the move is taking place.
 *
- * For now does nothing.
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved when the
+ * buffer is no longer bound to the resource, so @bo:res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
  */
 void vmw_resource_move_notify(struct ttm_buffer_object *bo,
 			      struct ttm_mem_reg *mem)
 {
+	struct vmw_dma_buffer *dma_buf;
+
+	if (mem == NULL)
+		return;
+
+	if (bo->destroy != vmw_dmabuf_bo_free &&
+	    bo->destroy != vmw_user_dmabuf_destroy)
+		return;
+
+	dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
+	if (mem->mem_type != VMW_PL_MOB) {
+		struct vmw_resource *res, *n;
+		struct ttm_bo_device *bdev = bo->bdev;
+		struct ttm_validate_buffer val_buf;
+
+		val_buf.bo = bo;
+
+		list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+			if (unlikely(res->func->unbind == NULL))
+				continue;
+
+			(void) res->func->unbind(res, true, &val_buf);
+			res->backup_dirty = true;
+			res->res_dirty = false;
+			list_del_init(&res->mob_head);
+		}
+
+		spin_lock(&bdev->fence_lock);
+		(void) ttm_bo_wait(bo, false, false, false);
+		spin_unlock(&bdev->fence_lock);
+	}
 }
 
 /**
@@ -1253,13 +1514,15 @@ bool vmw_resource_needs_backup(const struct vmw_resource *res)
 * @type:           The resource type to evict
 *
 * To avoid thrashing starvation or as part of the hibernation sequence,
- * evict all evictable resources of a specific type.
+ * try to evict all evictable resources of a specific type.
 */
 static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 				    enum vmw_res_type type)
 {
 	struct list_head *lru_list = &dev_priv->res_lru[type];
 	struct vmw_resource *evict_res;
+	unsigned err_count = 0;
+	int ret;
 
 	do {
 		write_lock(&dev_priv->resource_lock);
@@ -1272,7 +1535,18 @@ static void vmw_resource_evict_type(struct vmw_private *dev_priv,
 					 lru_head));
 		list_del_init(&evict_res->lru_head);
 		write_unlock(&dev_priv->resource_lock);
-		vmw_resource_do_evict(evict_res);
+
+		ret = vmw_resource_do_evict(evict_res, false);
+		if (unlikely(ret != 0)) {
+			write_lock(&dev_priv->resource_lock);
+			list_add_tail(&evict_res->lru_head, lru_list);
+			write_unlock(&dev_priv->resource_lock);
+			if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+				vmw_resource_unreference(&evict_res);
+				return;
+			}
+		}
+
 		vmw_resource_unreference(&evict_res);
 	} while (1);
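As a usage sketch (not part of the commit above): the new synccpu ioctl is meant to bracket CPU access to a dma buffer, idling previous GPU operations on grab and dropping the TTM_REF_SYNCCPU_WRITE reference on release. The fragment below is a hypothetical userspace caller built on libdrm's drmCommandWrite() and the drm_vmw_synccpu_arg definitions from vmwgfx_drm.h; the fill_buffer() name, the fd/handle/map parameters, and the abbreviated error handling are assumptions for illustration only.

/*
 * Hypothetical example: write to a previously mmap'ed vmwgfx dma buffer,
 * holding a synccpu grab across the CPU access.
 */
#include <string.h>
#include <xf86drm.h>
#include "vmwgfx_drm.h"

static int fill_buffer(int fd, uint32_t handle, void *map, size_t size)
{
	struct drm_vmw_synccpu_arg arg;
	int ret;

	/* Idle the GPU and block further command submission on the buffer. */
	memset(&arg, 0, sizeof(arg));
	arg.op = drm_vmw_synccpu_grab;
	arg.handle = handle;
	arg.flags = drm_vmw_synccpu_read | drm_vmw_synccpu_write;
	ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
	if (ret != 0)
		return ret;

	memset(map, 0, size);	/* CPU access is now safe. */

	/* Drop the grab; a leaked grab is also released when fd closes. */
	arg.op = drm_vmw_synccpu_release;
	return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
}

A grab with drm_vmw_synccpu_dontblock set would instead fail with -EBUSY on a busy buffer, which matches the -EBUSY special-casing in vmw_user_dmabuf_synccpu_ioctl() in the diff.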
