diff options
Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_surface.c')
| -rw-r--r-- | drivers/gpu/drm/vmwgfx/vmwgfx_surface.c | 614 | 
1 file changed, 576 insertions, 38 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c index 58281433974..4ecdbf3e59d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c @@ -36,12 +36,13 @@   * @base:           The TTM base object handling user-space visibility.   * @srf:            The surface metadata.   * @size:           TTM accounting size for the surface. + * @master:         master of the creating client. Used for security check.   */  struct vmw_user_surface { -	struct ttm_base_object base; +	struct ttm_prime_object prime;  	struct vmw_surface srf;  	uint32_t size; -	uint32_t backup_handle; +	struct drm_master *master;  };  /** @@ -68,6 +69,14 @@ static int vmw_legacy_srf_unbind(struct vmw_resource *res,  				 struct ttm_validate_buffer *val_buf);  static int vmw_legacy_srf_create(struct vmw_resource *res);  static int vmw_legacy_srf_destroy(struct vmw_resource *res); +static int vmw_gb_surface_create(struct vmw_resource *res); +static int vmw_gb_surface_bind(struct vmw_resource *res, +			       struct ttm_validate_buffer *val_buf); +static int vmw_gb_surface_unbind(struct vmw_resource *res, +				 bool readback, +				 struct ttm_validate_buffer *val_buf); +static int vmw_gb_surface_destroy(struct vmw_resource *res); +  static const struct vmw_user_resource_conv user_surface_conv = {  	.object_type = VMW_RES_SURFACE, @@ -93,6 +102,18 @@ static const struct vmw_res_func vmw_legacy_surface_func = {  	.unbind = &vmw_legacy_srf_unbind  }; +static const struct vmw_res_func vmw_gb_surface_func = { +	.res_type = vmw_res_surface, +	.needs_backup = true, +	.may_evict = true, +	.type_name = "guest backed surfaces", +	.backup_placement = &vmw_mob_placement, +	.create = vmw_gb_surface_create, +	.destroy = vmw_gb_surface_destroy, +	.bind = vmw_gb_surface_bind, +	.unbind = vmw_gb_surface_unbind +}; +  /**   * struct vmw_surface_dma - SVGA3D DMA command   */ @@ -291,6 +312,11 @@ static void 
vmw_hw_surface_destroy(struct vmw_resource *res)  	struct vmw_surface *srf;  	void *cmd; +	if (res->func->destroy == vmw_gb_surface_destroy) { +		(void) vmw_gb_surface_destroy(res); +		return; +	} +  	if (res->id != -1) {  		cmd = vmw_fifo_reserve(dev_priv, vmw_surface_destroy_size()); @@ -549,12 +575,15 @@ static int vmw_surface_init(struct vmw_private *dev_priv,  	struct vmw_resource *res = &srf->res;  	BUG_ON(res_free == NULL); -	(void) vmw_3d_resource_inc(dev_priv, false); +	if (!dev_priv->has_mob) +		(void) vmw_3d_resource_inc(dev_priv, false);  	ret = vmw_resource_init(dev_priv, res, true, res_free, +				(dev_priv->has_mob) ? &vmw_gb_surface_func :  				&vmw_legacy_surface_func);  	if (unlikely(ret != 0)) { -		vmw_3d_resource_dec(dev_priv, false); +		if (!dev_priv->has_mob) +			vmw_3d_resource_dec(dev_priv, false);  		res_free(res);  		return ret;  	} @@ -580,7 +609,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,  static struct vmw_resource *  vmw_user_surface_base_to_res(struct ttm_base_object *base)  { -	return &(container_of(base, struct vmw_user_surface, base)->srf.res); +	return &(container_of(base, struct vmw_user_surface, +			      prime.base)->srf.res);  }  /** @@ -596,10 +626,12 @@ static void vmw_user_surface_free(struct vmw_resource *res)  	struct vmw_private *dev_priv = srf->res.dev_priv;  	uint32_t size = user_srf->size; +	if (user_srf->master) +		drm_master_put(&user_srf->master);  	kfree(srf->offsets);  	kfree(srf->sizes);  	kfree(srf->snooper.image); -	ttm_base_object_kfree(user_srf, base); +	ttm_prime_object_kfree(user_srf, prime);  	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);  } @@ -616,7 +648,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)  {  	struct ttm_base_object *base = *p_base;  	struct vmw_user_surface *user_srf = -	    container_of(base, struct vmw_user_surface, base); +	    container_of(base, struct vmw_user_surface, prime.base);  	struct vmw_resource *res = &user_srf->srf.res;  
	*p_base = NULL; @@ -669,7 +701,6 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,  	struct vmw_surface_offset *cur_offset;  	uint32_t num_sizes;  	uint32_t size; -	struct vmw_master *vmaster = vmw_master(file_priv->master);  	const struct svga3d_surface_desc *desc;  	if (unlikely(vmw_user_surface_size == 0)) @@ -695,7 +726,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,  		return -EINVAL;  	} -	ret = ttm_read_lock(&vmaster->lock, true); +	ret = ttm_read_lock(&dev_priv->reservation_sem, true);  	if (unlikely(ret != 0))  		return ret; @@ -749,7 +780,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,  	srf->base_size = *srf->sizes;  	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; -	srf->multisample_count = 1; +	srf->multisample_count = 0;  	cur_bo_offset = 0;  	cur_offset = srf->offsets; @@ -790,8 +821,10 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,  	}  	srf->snooper.crtc = NULL; -	user_srf->base.shareable = false; -	user_srf->base.tfile = NULL; +	user_srf->prime.base.shareable = false; +	user_srf->prime.base.tfile = NULL; +	if (drm_is_primary_client(file_priv)) +		user_srf->master = drm_master_get(file_priv->master);  	/**  	 * From this point, the generic resource management functions @@ -802,10 +835,28 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,  	if (unlikely(ret != 0))  		goto out_unlock; +	/* +	 * A gb-aware client referencing a shared surface will +	 * expect a backup buffer to be present. 
+	 */ +	if (dev_priv->has_mob && req->shareable) { +		uint32_t backup_handle; + +		ret = vmw_user_dmabuf_alloc(dev_priv, tfile, +					    res->backup_size, +					    true, +					    &backup_handle, +					    &res->backup); +		if (unlikely(ret != 0)) { +			vmw_resource_unreference(&res); +			goto out_unlock; +		} +	} +  	tmp = vmw_resource_reference(&srf->res); -	ret = ttm_base_object_init(tfile, &user_srf->base, -				   req->shareable, VMW_RES_SURFACE, -				   &vmw_user_surface_base_release, NULL); +	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, +				    req->shareable, VMW_RES_SURFACE, +				    &vmw_user_surface_base_release, NULL);  	if (unlikely(ret != 0)) {  		vmw_resource_unreference(&tmp); @@ -813,21 +864,95 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,  		goto out_unlock;  	} -	rep->sid = user_srf->base.hash.key; +	rep->sid = user_srf->prime.base.hash.key;  	vmw_resource_unreference(&res); -	ttm_read_unlock(&vmaster->lock); +	ttm_read_unlock(&dev_priv->reservation_sem);  	return 0;  out_no_copy:  	kfree(srf->offsets);  out_no_offsets:  	kfree(srf->sizes);  out_no_sizes: -	ttm_base_object_kfree(user_srf, base); +	ttm_prime_object_kfree(user_srf, prime);  out_no_user_srf:  	ttm_mem_global_free(vmw_mem_glob(dev_priv), size);  out_unlock: -	ttm_read_unlock(&vmaster->lock); +	ttm_read_unlock(&dev_priv->reservation_sem); +	return ret; +} + + +static int +vmw_surface_handle_reference(struct vmw_private *dev_priv, +			     struct drm_file *file_priv, +			     uint32_t u_handle, +			     enum drm_vmw_handle_type handle_type, +			     struct ttm_base_object **base_p) +{ +	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; +	struct vmw_user_surface *user_srf; +	uint32_t handle; +	struct ttm_base_object *base; +	int ret; + +	if (handle_type == DRM_VMW_HANDLE_PRIME) { +		ret = ttm_prime_fd_to_handle(tfile, u_handle, &handle); +		if (unlikely(ret != 0)) +			return ret; +	} else { +		if 
(unlikely(drm_is_render_client(file_priv))) { +			DRM_ERROR("Render client refused legacy " +				  "surface reference.\n"); +			return -EACCES; +		} +		handle = u_handle; +	} + +	ret = -EINVAL; +	base = ttm_base_object_lookup_for_ref(dev_priv->tdev, handle); +	if (unlikely(base == NULL)) { +		DRM_ERROR("Could not find surface to reference.\n"); +		goto out_no_lookup; +	} + +	if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE)) { +		DRM_ERROR("Referenced object is not a surface.\n"); +		goto out_bad_resource; +	} + +	if (handle_type != DRM_VMW_HANDLE_PRIME) { +		user_srf = container_of(base, struct vmw_user_surface, +					prime.base); + +		/* +		 * Make sure the surface creator has the same +		 * authenticating master. +		 */ +		if (drm_is_primary_client(file_priv) && +		    user_srf->master != file_priv->master) { +			DRM_ERROR("Trying to reference surface outside of" +				  " master domain.\n"); +			ret = -EACCES; +			goto out_bad_resource; +		} + +		ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL); +		if (unlikely(ret != 0)) { +			DRM_ERROR("Could not add a reference to a surface.\n"); +			goto out_bad_resource; +		} +	} + +	*base_p = base; +	return 0; + +out_bad_resource: +	ttm_base_object_unref(&base); +out_no_lookup: +	if (handle_type == DRM_VMW_HANDLE_PRIME) +		(void) ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE); +  	return ret;  } @@ -842,6 +967,7 @@ out_unlock:  int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,  				struct drm_file *file_priv)  { +	struct vmw_private *dev_priv = vmw_priv(dev);  	union drm_vmw_surface_reference_arg *arg =  	    (union drm_vmw_surface_reference_arg *)data;  	struct drm_vmw_surface_arg *req = &arg->req; @@ -851,26 +977,16 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,  	struct vmw_user_surface *user_srf;  	struct drm_vmw_size __user *user_sizes;  	struct ttm_base_object *base; -	int ret = -EINVAL; - -	base = ttm_base_object_lookup(tfile, req->sid); -	if 
(unlikely(base == NULL)) { -		DRM_ERROR("Could not find surface to reference.\n"); -		return -EINVAL; -	} +	int ret; -	if (unlikely(base->object_type != VMW_RES_SURFACE)) -		goto out_bad_resource; +	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, +					   req->handle_type, &base); +	if (unlikely(ret != 0)) +		return ret; -	user_srf = container_of(base, struct vmw_user_surface, base); +	user_srf = container_of(base, struct vmw_user_surface, prime.base);  	srf = &user_srf->srf; -	ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL); -	if (unlikely(ret != 0)) { -		DRM_ERROR("Could not add a reference to a surface.\n"); -		goto out_no_reference; -	} -  	rep->flags = srf->flags;  	rep->format = srf->format;  	memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels)); @@ -878,15 +994,437 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,  	    rep->size_addr;  	if (user_sizes) -		ret = copy_to_user(user_sizes, srf->sizes, -				   srf->num_sizes * sizeof(*srf->sizes)); +		ret = copy_to_user(user_sizes, &srf->base_size, +				   sizeof(srf->base_size));  	if (unlikely(ret != 0)) {  		DRM_ERROR("copy_to_user failed %p %u\n",  			  user_sizes, srf->num_sizes); +		ttm_ref_object_base_unref(tfile, base->hash.key, TTM_REF_USAGE);  		ret = -EFAULT;  	} + +	ttm_base_object_unref(&base); + +	return ret; +} + +/** + * vmw_gb_surface_create - Send a DefineGBSurface command for the resource. + * + * @res: Pointer to the struct vmw_resource embedded in a struct vmw_surface. 
+ */ +static int vmw_gb_surface_create(struct vmw_resource *res) +{ +	struct vmw_private *dev_priv = res->dev_priv; +	struct vmw_surface *srf = vmw_res_to_srf(res); +	uint32_t cmd_len, submit_len; +	int ret; +	struct { +		SVGA3dCmdHeader header; +		SVGA3dCmdDefineGBSurface body; +	} *cmd; + +	if (likely(res->id != -1)) +		return 0; + +	(void) vmw_3d_resource_inc(dev_priv, false); +	ret = vmw_resource_alloc_id(res); +	if (unlikely(ret != 0)) { +		DRM_ERROR("Failed to allocate a surface id.\n"); +		goto out_no_id; +	} + +	if (unlikely(res->id >= VMWGFX_NUM_GB_SURFACE)) { +		ret = -EBUSY; +		goto out_no_fifo; +	} + +	cmd_len = sizeof(cmd->body); +	submit_len = sizeof(*cmd); +	cmd = vmw_fifo_reserve(dev_priv, submit_len); +	if (unlikely(cmd == NULL)) { +		DRM_ERROR("Failed reserving FIFO space for surface " +			  "creation.\n"); +		ret = -ENOMEM; +		goto out_no_fifo; +	} + +	cmd->header.id = SVGA_3D_CMD_DEFINE_GB_SURFACE; +	cmd->header.size = cmd_len; +	cmd->body.sid = srf->res.id; +	cmd->body.surfaceFlags = srf->flags; +	cmd->body.format = cpu_to_le32(srf->format); +	cmd->body.numMipLevels = srf->mip_levels[0]; +	cmd->body.multisampleCount = srf->multisample_count; +	cmd->body.autogenFilter = srf->autogen_filter; +	cmd->body.size.width = srf->base_size.width; +	cmd->body.size.height = srf->base_size.height; +	cmd->body.size.depth = srf->base_size.depth; +	vmw_fifo_commit(dev_priv, submit_len); + +	return 0; + +out_no_fifo: +	vmw_resource_release_id(res); +out_no_id: +	vmw_3d_resource_dec(dev_priv, false); +	return ret; +} + + +static int vmw_gb_surface_bind(struct vmw_resource *res, +			       struct ttm_validate_buffer *val_buf) +{ +	struct vmw_private *dev_priv = res->dev_priv; +	struct { +		SVGA3dCmdHeader header; +		SVGA3dCmdBindGBSurface body; +	} *cmd1; +	struct { +		SVGA3dCmdHeader header; +		SVGA3dCmdUpdateGBSurface body; +	} *cmd2; +	uint32_t submit_size; +	struct ttm_buffer_object *bo = val_buf->bo; + +	BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + +	submit_size 
= sizeof(*cmd1) + (res->backup_dirty ? sizeof(*cmd2) : 0); + +	cmd1 = vmw_fifo_reserve(dev_priv, submit_size); +	if (unlikely(cmd1 == NULL)) { +		DRM_ERROR("Failed reserving FIFO space for surface " +			  "binding.\n"); +		return -ENOMEM; +	} + +	cmd1->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; +	cmd1->header.size = sizeof(cmd1->body); +	cmd1->body.sid = res->id; +	cmd1->body.mobid = bo->mem.start; +	if (res->backup_dirty) { +		cmd2 = (void *) &cmd1[1]; +		cmd2->header.id = SVGA_3D_CMD_UPDATE_GB_SURFACE; +		cmd2->header.size = sizeof(cmd2->body); +		cmd2->body.sid = res->id; +		res->backup_dirty = false; +	} +	vmw_fifo_commit(dev_priv, submit_size); + +	return 0; +} + +static int vmw_gb_surface_unbind(struct vmw_resource *res, +				 bool readback, +				 struct ttm_validate_buffer *val_buf) +{ +	struct vmw_private *dev_priv = res->dev_priv; +	struct ttm_buffer_object *bo = val_buf->bo; +	struct vmw_fence_obj *fence; + +	struct { +		SVGA3dCmdHeader header; +		SVGA3dCmdReadbackGBSurface body; +	} *cmd1; +	struct { +		SVGA3dCmdHeader header; +		SVGA3dCmdInvalidateGBSurface body; +	} *cmd2; +	struct { +		SVGA3dCmdHeader header; +		SVGA3dCmdBindGBSurface body; +	} *cmd3; +	uint32_t submit_size; +	uint8_t *cmd; + + +	BUG_ON(bo->mem.mem_type != VMW_PL_MOB); + +	submit_size = sizeof(*cmd3) + (readback ? 
sizeof(*cmd1) : sizeof(*cmd2)); +	cmd = vmw_fifo_reserve(dev_priv, submit_size); +	if (unlikely(cmd == NULL)) { +		DRM_ERROR("Failed reserving FIFO space for surface " +			  "unbinding.\n"); +		return -ENOMEM; +	} + +	if (readback) { +		cmd1 = (void *) cmd; +		cmd1->header.id = SVGA_3D_CMD_READBACK_GB_SURFACE; +		cmd1->header.size = sizeof(cmd1->body); +		cmd1->body.sid = res->id; +		cmd3 = (void *) &cmd1[1]; +	} else { +		cmd2 = (void *) cmd; +		cmd2->header.id = SVGA_3D_CMD_INVALIDATE_GB_SURFACE; +		cmd2->header.size = sizeof(cmd2->body); +		cmd2->body.sid = res->id; +		cmd3 = (void *) &cmd2[1]; +	} + +	cmd3->header.id = SVGA_3D_CMD_BIND_GB_SURFACE; +	cmd3->header.size = sizeof(cmd3->body); +	cmd3->body.sid = res->id; +	cmd3->body.mobid = SVGA3D_INVALID_ID; + +	vmw_fifo_commit(dev_priv, submit_size); + +	/* +	 * Create a fence object and fence the backup buffer. +	 */ + +	(void) vmw_execbuf_fence_commands(NULL, dev_priv, +					  &fence, NULL); + +	vmw_fence_single_bo(val_buf->bo, fence); + +	if (likely(fence != NULL)) +		vmw_fence_obj_unreference(&fence); + +	return 0; +} + +static int vmw_gb_surface_destroy(struct vmw_resource *res) +{ +	struct vmw_private *dev_priv = res->dev_priv; +	struct { +		SVGA3dCmdHeader header; +		SVGA3dCmdDestroyGBSurface body; +	} *cmd; + +	if (likely(res->id == -1)) +		return 0; + +	mutex_lock(&dev_priv->binding_mutex); +	vmw_context_binding_res_list_scrub(&res->binding_head); + +	cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd)); +	if (unlikely(cmd == NULL)) { +		DRM_ERROR("Failed reserving FIFO space for surface " +			  "destruction.\n"); +		mutex_unlock(&dev_priv->binding_mutex); +		return -ENOMEM; +	} + +	cmd->header.id = SVGA_3D_CMD_DESTROY_GB_SURFACE; +	cmd->header.size = sizeof(cmd->body); +	cmd->body.sid = res->id; +	vmw_fifo_commit(dev_priv, sizeof(*cmd)); +	mutex_unlock(&dev_priv->binding_mutex); +	vmw_resource_release_id(res); +	vmw_3d_resource_dec(dev_priv, false); + +	return 0; +} + +/** + * vmw_gb_surface_define_ioctl - 
Ioctl function implementing + *                               the user surface define functionality. + * + * @dev:            Pointer to a struct drm_device. + * @data:           Pointer to data copied from / to user-space. + * @file_priv:      Pointer to a drm file private structure. + */ +int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data, +				struct drm_file *file_priv) +{ +	struct vmw_private *dev_priv = vmw_priv(dev); +	struct vmw_user_surface *user_srf; +	struct vmw_surface *srf; +	struct vmw_resource *res; +	struct vmw_resource *tmp; +	union drm_vmw_gb_surface_create_arg *arg = +	    (union drm_vmw_gb_surface_create_arg *)data; +	struct drm_vmw_gb_surface_create_req *req = &arg->req; +	struct drm_vmw_gb_surface_create_rep *rep = &arg->rep; +	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; +	int ret; +	uint32_t size; +	const struct svga3d_surface_desc *desc; +	uint32_t backup_handle; + +	if (unlikely(vmw_user_surface_size == 0)) +		vmw_user_surface_size = ttm_round_pot(sizeof(*user_srf)) + +			128; + +	size = vmw_user_surface_size + 128; + +	desc = svga3dsurface_get_desc(req->format); +	if (unlikely(desc->block_desc == SVGA3DBLOCKDESC_NONE)) { +		DRM_ERROR("Invalid surface format for surface creation.\n"); +		return -EINVAL; +	} + +	ret = ttm_read_lock(&dev_priv->reservation_sem, true); +	if (unlikely(ret != 0)) +		return ret; + +	ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), +				   size, false, true); +	if (unlikely(ret != 0)) { +		if (ret != -ERESTARTSYS) +			DRM_ERROR("Out of graphics memory for surface" +				  " creation.\n"); +		goto out_unlock; +	} + +	user_srf = kzalloc(sizeof(*user_srf), GFP_KERNEL); +	if (unlikely(user_srf == NULL)) { +		ret = -ENOMEM; +		goto out_no_user_srf; +	} + +	srf = &user_srf->srf; +	res = &srf->res; + +	srf->flags = req->svga3d_flags; +	srf->format = req->format; +	srf->scanout = req->drm_surface_flags & drm_vmw_surface_flag_scanout; +	srf->mip_levels[0] = req->mip_levels; +	srf->num_sizes 
= 1; +	srf->sizes = NULL; +	srf->offsets = NULL; +	user_srf->size = size; +	srf->base_size = req->base_size; +	srf->autogen_filter = SVGA3D_TEX_FILTER_NONE; +	srf->multisample_count = req->multisample_count; +	res->backup_size = svga3dsurface_get_serialized_size +	  (srf->format, srf->base_size, srf->mip_levels[0], +	   srf->flags & SVGA3D_SURFACE_CUBEMAP); + +	user_srf->prime.base.shareable = false; +	user_srf->prime.base.tfile = NULL; +	if (drm_is_primary_client(file_priv)) +		user_srf->master = drm_master_get(file_priv->master); + +	/** +	 * From this point, the generic resource management functions +	 * destroy the object on failure. +	 */ + +	ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free); +	if (unlikely(ret != 0)) +		goto out_unlock; + +	if (req->buffer_handle != SVGA3D_INVALID_ID) { +		ret = vmw_user_dmabuf_lookup(tfile, req->buffer_handle, +					     &res->backup); +	} else if (req->drm_surface_flags & +		   drm_vmw_surface_flag_create_buffer) +		ret = vmw_user_dmabuf_alloc(dev_priv, tfile, +					    res->backup_size, +					    req->drm_surface_flags & +					    drm_vmw_surface_flag_shareable, +					    &backup_handle, +					    &res->backup); + +	if (unlikely(ret != 0)) { +		vmw_resource_unreference(&res); +		goto out_unlock; +	} + +	tmp = vmw_resource_reference(&srf->res); +	ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime, +				    req->drm_surface_flags & +				    drm_vmw_surface_flag_shareable, +				    VMW_RES_SURFACE, +				    &vmw_user_surface_base_release, NULL); + +	if (unlikely(ret != 0)) { +		vmw_resource_unreference(&tmp); +		vmw_resource_unreference(&res); +		goto out_unlock; +	} + +	rep->handle = user_srf->prime.base.hash.key; +	rep->backup_size = res->backup_size; +	if (res->backup) { +		rep->buffer_map_handle = +			drm_vma_node_offset_addr(&res->backup->base.vma_node); +		rep->buffer_size = res->backup->base.num_pages * PAGE_SIZE; +		rep->buffer_handle = backup_handle; +	} else { +		rep->buffer_map_handle 
= 0; +		rep->buffer_size = 0; +		rep->buffer_handle = SVGA3D_INVALID_ID; +	} + +	vmw_resource_unreference(&res); + +	ttm_read_unlock(&dev_priv->reservation_sem); +	return 0; +out_no_user_srf: +	ttm_mem_global_free(vmw_mem_glob(dev_priv), size); +out_unlock: +	ttm_read_unlock(&dev_priv->reservation_sem); +	return ret; +} + +/** + * vmw_gb_surface_reference_ioctl - Ioctl function implementing + *                                  the user surface reference functionality. + * + * @dev:            Pointer to a struct drm_device. + * @data:           Pointer to data copied from / to user-space. + * @file_priv:      Pointer to a drm file private structure. + */ +int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data, +				   struct drm_file *file_priv) +{ +	struct vmw_private *dev_priv = vmw_priv(dev); +	union drm_vmw_gb_surface_reference_arg *arg = +	    (union drm_vmw_gb_surface_reference_arg *)data; +	struct drm_vmw_surface_arg *req = &arg->req; +	struct drm_vmw_gb_surface_ref_rep *rep = &arg->rep; +	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile; +	struct vmw_surface *srf; +	struct vmw_user_surface *user_srf; +	struct ttm_base_object *base; +	uint32_t backup_handle; +	int ret = -EINVAL; + +	ret = vmw_surface_handle_reference(dev_priv, file_priv, req->sid, +					   req->handle_type, &base); +	if (unlikely(ret != 0)) +		return ret; + +	user_srf = container_of(base, struct vmw_user_surface, prime.base); +	srf = &user_srf->srf; +	if (srf->res.backup == NULL) { +		DRM_ERROR("Shared GB surface is missing a backup buffer.\n"); +		goto out_bad_resource; +	} + +	mutex_lock(&dev_priv->cmdbuf_mutex); /* Protect res->backup */ +	ret = vmw_user_dmabuf_reference(tfile, srf->res.backup, +					&backup_handle); +	mutex_unlock(&dev_priv->cmdbuf_mutex); + +	if (unlikely(ret != 0)) { +		DRM_ERROR("Could not add a reference to a GB surface " +			  "backup buffer.\n"); +		(void) ttm_ref_object_base_unref(tfile, base->hash.key, +						 TTM_REF_USAGE); +		goto 
out_bad_resource; +	} + +	rep->creq.svga3d_flags = srf->flags; +	rep->creq.format = srf->format; +	rep->creq.mip_levels = srf->mip_levels[0]; +	rep->creq.drm_surface_flags = 0; +	rep->creq.multisample_count = srf->multisample_count; +	rep->creq.autogen_filter = srf->autogen_filter; +	rep->creq.buffer_handle = backup_handle; +	rep->creq.base_size = srf->base_size; +	rep->crep.handle = user_srf->prime.base.hash.key; +	rep->crep.backup_size = srf->res.backup_size; +	rep->crep.buffer_handle = backup_handle; +	rep->crep.buffer_map_handle = +		drm_vma_node_offset_addr(&srf->res.backup->base.vma_node); +	rep->crep.buffer_size = srf->res.backup->base.num_pages * PAGE_SIZE; +  out_bad_resource: -out_no_reference:  	ttm_base_object_unref(&base);  	return ret;  | 
