Diffstat (limited to 'drivers/gpu/drm/vmwgfx/vmwgfx_resource.c')
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_resource.c  |  1757
1 files changed, 1107 insertions, 650 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 36e129f0023..01d68f0a69d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
@@ -26,27 +26,16 @@
**************************************************************************/
#include "vmwgfx_drv.h"
-#include "vmwgfx_drm.h"
-#include "ttm/ttm_object.h"
-#include "ttm/ttm_placement.h"
-#include "drmP.h"
+#include <drm/vmwgfx_drm.h>
+#include <drm/ttm/ttm_object.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/drmP.h>
+#include "vmwgfx_resource_priv.h"
-#define VMW_RES_CONTEXT ttm_driver_type0
-#define VMW_RES_SURFACE ttm_driver_type1
-#define VMW_RES_STREAM ttm_driver_type2
-
-struct vmw_user_context {
- struct ttm_base_object base;
- struct vmw_resource res;
-};
-
-struct vmw_user_surface {
- struct ttm_base_object base;
- struct vmw_surface srf;
-};
+#define VMW_RES_EVICT_ERR_COUNT 10
struct vmw_user_dma_buffer {
- struct ttm_base_object base;
+ struct ttm_prime_object prime;
struct vmw_dma_buffer dma;
};
@@ -65,6 +54,21 @@ struct vmw_user_stream {
struct vmw_stream stream;
};
+
+static uint64_t vmw_user_stream_size;
+
+static const struct vmw_res_func vmw_stream_func = {
+ .res_type = vmw_res_stream,
+ .needs_backup = false,
+ .may_evict = false,
+ .type_name = "video streams",
+ .backup_placement = NULL,
+ .create = NULL,
+ .destroy = NULL,
+ .bind = NULL,
+ .unbind = NULL
+};
+
static inline struct vmw_dma_buffer *
vmw_dma_buffer(struct ttm_buffer_object *bo)
{
@@ -84,24 +88,76 @@ struct vmw_resource *vmw_resource_reference(struct vmw_resource *res)
return res;
}
+struct vmw_resource *
+vmw_resource_reference_unless_doomed(struct vmw_resource *res)
+{
+ return kref_get_unless_zero(&res->kref) ? res : NULL;
+}
+
+/**
+ * vmw_resource_release_id - release a resource id to the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Release the resource id to the resource id manager and set it to -1.
+ */
+void vmw_resource_release_id(struct vmw_resource *res)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+
+ write_lock(&dev_priv->resource_lock);
+ if (res->id != -1)
+ idr_remove(idr, res->id);
+ res->id = -1;
+ write_unlock(&dev_priv->resource_lock);
+}
+
static void vmw_resource_release(struct kref *kref)
{
struct vmw_resource *res =
container_of(kref, struct vmw_resource, kref);
struct vmw_private *dev_priv = res->dev_priv;
+ int id;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
- idr_remove(res->idr, res->id);
+ res->avail = false;
+ list_del_init(&res->lru_head);
write_unlock(&dev_priv->resource_lock);
+ if (res->backup) {
+ struct ttm_buffer_object *bo = &res->backup->base;
+
+ ttm_bo_reserve(bo, false, false, false, 0);
+ if (!list_empty(&res->mob_head) &&
+ res->func->unbind != NULL) {
+ struct ttm_validate_buffer val_buf;
- if (likely(res->hw_destroy != NULL))
+ val_buf.bo = bo;
+ res->func->unbind(res, false, &val_buf);
+ }
+ res->backup_dirty = false;
+ list_del_init(&res->mob_head);
+ ttm_bo_unreserve(bo);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ if (likely(res->hw_destroy != NULL)) {
res->hw_destroy(res);
+ mutex_lock(&dev_priv->binding_mutex);
+ vmw_context_binding_res_list_kill(&res->binding_head);
+ mutex_unlock(&dev_priv->binding_mutex);
+ }
+ id = res->id;
if (res->res_free != NULL)
res->res_free(res);
else
kfree(res);
write_lock(&dev_priv->resource_lock);
+
+ if (id != -1)
+ idr_remove(idr, id);
}
void vmw_resource_unreference(struct vmw_resource **p_res)
@@ -115,33 +171,69 @@ void vmw_resource_unreference(struct vmw_resource **p_res)
write_unlock(&dev_priv->resource_lock);
}
-static int vmw_resource_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- struct idr *idr,
- enum ttm_object_type obj_type,
- void (*res_free) (struct vmw_resource *res))
+
+/**
+ * vmw_resource_alloc_id - allocate a resource id from the id manager.
+ *
+ * @res: Pointer to the resource.
+ *
+ * Allocate the lowest free resource id from the resource id manager, and set
+ * @res->id to that id. Returns 0 on success and -ENOMEM on failure.
+ */
+int vmw_resource_alloc_id(struct vmw_resource *res)
{
+ struct vmw_private *dev_priv = res->dev_priv;
int ret;
+ struct idr *idr = &dev_priv->res_idr[res->func->res_type];
+
+ BUG_ON(res->id != -1);
+
+ idr_preload(GFP_KERNEL);
+ write_lock(&dev_priv->resource_lock);
+
+ ret = idr_alloc(idr, res, 1, 0, GFP_NOWAIT);
+ if (ret >= 0)
+ res->id = ret;
+ write_unlock(&dev_priv->resource_lock);
+ idr_preload_end();
+ return ret < 0 ? ret : 0;
+}
+
+/**
+ * vmw_resource_init - initialize a struct vmw_resource
+ *
+ * @dev_priv: Pointer to a device private struct.
+ * @res: The struct vmw_resource to initialize.
+ * @obj_type: Resource object type.
+ * @delay_id: Boolean whether to defer device id allocation until
+ * the first validation.
+ * @res_free: Resource destructor.
+ * @func: Resource function table.
+ */
+int vmw_resource_init(struct vmw_private *dev_priv, struct vmw_resource *res,
+ bool delay_id,
+ void (*res_free) (struct vmw_resource *res),
+ const struct vmw_res_func *func)
+{
kref_init(&res->kref);
res->hw_destroy = NULL;
res->res_free = res_free;
- res->res_type = obj_type;
- res->idr = idr;
res->avail = false;
res->dev_priv = dev_priv;
-
- do {
- if (unlikely(idr_pre_get(idr, GFP_KERNEL) == 0))
- return -ENOMEM;
-
- write_lock(&dev_priv->resource_lock);
- ret = idr_get_new_above(idr, res, 1, &res->id);
- write_unlock(&dev_priv->resource_lock);
-
- } while (ret == -EAGAIN);
-
- return ret;
+ res->func = func;
+ INIT_LIST_HEAD(&res->lru_head);
+ INIT_LIST_HEAD(&res->mob_head);
+ INIT_LIST_HEAD(&res->binding_head);
+ res->id = -1;
+ res->backup = NULL;
+ res->backup_offset = 0;
+ res->backup_dirty = false;
+ res->res_dirty = false;
+ if (delay_id)
+ return 0;
+ else
+ return vmw_resource_alloc_id(res);
}
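For illustration, under the new scheme each resource type supplies a struct vmw_res_func and hands it to vmw_resource_init(); the stream code later in this patch does exactly that. A minimal sketch for a hypothetical, non-evictable type without a backup buffer (placeholder names, not part of this patch):

    /* Hypothetical example type; reuses an existing res_type value for brevity. */
    static const struct vmw_res_func vmw_example_func = {
            .res_type = vmw_res_stream,
            .needs_backup = false,
            .may_evict = false,
            .type_name = "example resources",
            .backup_placement = NULL,
            .create = NULL,
            .destroy = NULL,
            .bind = NULL,
            .unbind = NULL
    };

    static int vmw_example_init(struct vmw_private *dev_priv,
                                struct vmw_resource *res,
                                void (*res_free) (struct vmw_resource *res))
    {
            /* delay_id == false: allocate the device id now, not at first validation. */
            return vmw_resource_init(dev_priv, res, false, res_free,
                                     &vmw_example_func);
    }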
/**
@@ -156,9 +248,8 @@ static int vmw_resource_init(struct vmw_private *dev_priv,
* Activate basically means that the function vmw_resource_lookup will
* find it.
*/
-
-static void vmw_resource_activate(struct vmw_resource *res,
- void (*hw_destroy) (struct vmw_resource *))
+void vmw_resource_activate(struct vmw_resource *res,
+ void (*hw_destroy) (struct vmw_resource *))
{
struct vmw_private *dev_priv = res->dev_priv;
@@ -188,651 +279,396 @@ struct vmw_resource *vmw_resource_lookup(struct vmw_private *dev_priv,
}
/**
- * Context management:
+ * vmw_user_resource_lookup_handle - lookup a struct vmw_resource from a
+ * TTM user-space handle and perform basic type checks
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @tfile: Pointer to a struct ttm_object_file identifying the caller
+ * @handle: The TTM user-space handle
+ * @converter: Pointer to an object describing the resource type
+ * @p_res: On successful return the location pointed to will contain
+ * a pointer to a refcounted struct vmw_resource.
+ *
+ * If the handle can't be found or is associated with an incorrect resource
+ * type, -EINVAL will be returned.
*/
-
-static void vmw_hw_context_destroy(struct vmw_resource *res)
+int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ const struct vmw_user_resource_conv
+ *converter,
+ struct vmw_resource **p_res)
{
+ struct ttm_base_object *base;
+ struct vmw_resource *res;
+ int ret = -EINVAL;
- struct vmw_private *dev_priv = res->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroyContext body;
- } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv);
-}
-
-static int vmw_context_init(struct vmw_private *dev_priv,
- struct vmw_resource *res,
- void (*res_free) (struct vmw_resource *res))
-{
- int ret;
-
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineContext body;
- } *cmd;
+ base = ttm_base_object_lookup(tfile, handle);
+ if (unlikely(base == NULL))
+ return -EINVAL;
- ret = vmw_resource_init(dev_priv, res, &dev_priv->context_idr,
- VMW_RES_CONTEXT, res_free);
+ if (unlikely(ttm_base_object_type(base) != converter->object_type))
+ goto out_bad_resource;
- if (unlikely(ret != 0)) {
- if (res_free == NULL)
- kfree(res);
- else
- res_free(res);
- return ret;
- }
+ res = converter->base_obj_to_res(base);
- cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
+ read_lock(&dev_priv->resource_lock);
+ if (!res->avail || res->res_free != converter->res_free) {
+ read_unlock(&dev_priv->resource_lock);
+ goto out_bad_resource;
}
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_CONTEXT_DEFINE);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.cid = cpu_to_le32(res->id);
-
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- (void) vmw_3d_resource_inc(dev_priv);
- vmw_resource_activate(res, vmw_hw_context_destroy);
- return 0;
-}
-
-struct vmw_resource *vmw_context_alloc(struct vmw_private *dev_priv)
-{
- struct vmw_resource *res = kmalloc(sizeof(*res), GFP_KERNEL);
- int ret;
-
- if (unlikely(res == NULL))
- return NULL;
-
- ret = vmw_context_init(dev_priv, res, NULL);
- return (ret == 0) ? res : NULL;
-}
+ kref_get(&res->kref);
+ read_unlock(&dev_priv->resource_lock);
-/**
- * User-space context management:
- */
+ *p_res = res;
+ ret = 0;
-static void vmw_user_context_free(struct vmw_resource *res)
-{
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
+out_bad_resource:
+ ttm_base_object_unref(&base);
- kfree(ctx);
+ return ret;
}
/**
- * This function is called when user space has no more references on the
- * base object. It releases the base-object's reference on the resource object.
+ * Helper function that looks up either a surface or a dmabuf.
+ *
+ * The pointers pointed to by out_surf and out_buf need to be NULL.
*/
-
-static void vmw_user_context_base_release(struct ttm_base_object **p_base)
-{
- struct ttm_base_object *base = *p_base;
- struct vmw_user_context *ctx =
- container_of(base, struct vmw_user_context, base);
- struct vmw_resource *res = &ctx->res;
-
- *p_base = NULL;
- vmw_resource_unreference(&res);
-}
-
-int vmw_context_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_resource *res;
- struct vmw_user_context *ctx;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- int ret = 0;
-
- res = vmw_resource_lookup(dev_priv, &dev_priv->context_idr, arg->cid);
- if (unlikely(res == NULL))
- return -EINVAL;
-
- if (res->res_free != &vmw_user_context_free) {
- ret = -EINVAL;
- goto out;
- }
-
- ctx = container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable) {
- ret = -EPERM;
- goto out;
- }
-
- ttm_ref_object_base_unref(tfile, ctx->base.hash.key, TTM_REF_USAGE);
-out:
- vmw_resource_unreference(&res);
- return ret;
-}
-
-int vmw_context_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+int vmw_user_lookup_handle(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t handle,
+ struct vmw_surface **out_surf,
+ struct vmw_dma_buffer **out_buf)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_context *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
struct vmw_resource *res;
- struct vmw_resource *tmp;
- struct drm_vmw_context_arg *arg = (struct drm_vmw_context_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
- if (unlikely(ctx == NULL))
- return -ENOMEM;
-
- res = &ctx->res;
- ctx->base.shareable = false;
- ctx->base.tfile = NULL;
+ BUG_ON(*out_surf || *out_buf);
- ret = vmw_context_init(dev_priv, res, vmw_user_context_free);
- if (unlikely(ret != 0))
- return ret;
-
- tmp = vmw_resource_reference(&ctx->res);
- ret = ttm_base_object_init(tfile, &ctx->base, false, VMW_RES_CONTEXT,
- &vmw_user_context_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- goto out_err;
+ ret = vmw_user_resource_lookup_handle(dev_priv, tfile, handle,
+ user_surface_converter,
+ &res);
+ if (!ret) {
+ *out_surf = vmw_res_to_srf(res);
+ return 0;
}
- arg->cid = res->id;
-out_err:
- vmw_resource_unreference(&res);
+ *out_surf = NULL;
+ ret = vmw_user_dmabuf_lookup(tfile, handle, out_buf);
return ret;
-
}
-int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id)
-{
- struct vmw_resource *res;
- int ret = 0;
-
- read_lock(&dev_priv->resource_lock);
- res = idr_find(&dev_priv->context_idr, id);
- if (res && res->avail) {
- struct vmw_user_context *ctx =
- container_of(res, struct vmw_user_context, res);
- if (ctx->base.tfile != tfile && !ctx->base.shareable)
- ret = -EPERM;
- } else
- ret = -EINVAL;
- read_unlock(&dev_priv->resource_lock);
-
- return ret;
-}
-
-
/**
- * Surface management.
+ * Buffer management.
*/
-static void vmw_hw_surface_destroy(struct vmw_resource *res)
+/**
+ * vmw_dmabuf_acc_size - Calculate the pinned memory usage of buffers
+ *
+ * @dev_priv: Pointer to a struct vmw_private identifying the device.
+ * @size: The requested buffer size.
+ * @user: Whether this is an ordinary dma buffer or a user dma buffer.
+ */
+static size_t vmw_dmabuf_acc_size(struct vmw_private *dev_priv, size_t size,
+ bool user)
{
+ static size_t struct_size, user_struct_size;
+ size_t num_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ size_t page_array_size = ttm_round_pot(num_pages * sizeof(void *));
- struct vmw_private *dev_priv = res->dev_priv;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDestroySurface body;
- } *cmd = vmw_fifo_reserve(dev_priv, sizeof(*cmd));
-
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Failed reserving FIFO space for surface "
- "destruction.\n");
- return;
- }
+ if (unlikely(struct_size == 0)) {
+ size_t backend_size = ttm_round_pot(vmw_tt_size);
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DESTROY);
- cmd->header.size = cpu_to_le32(sizeof(cmd->body));
- cmd->body.sid = cpu_to_le32(res->id);
+ struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_dma_buffer));
+ user_struct_size = backend_size +
+ ttm_round_pot(sizeof(struct vmw_user_dma_buffer));
+ }
- vmw_fifo_commit(dev_priv, sizeof(*cmd));
- vmw_3d_resource_dec(dev_priv);
-}
+ if (dev_priv->map_mode == vmw_dma_alloc_coherent)
+ page_array_size +=
+ ttm_round_pot(num_pages * sizeof(dma_addr_t));
-void vmw_surface_res_free(struct vmw_resource *res)
-{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
-
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(srf);
+ return ((user) ? user_struct_size : struct_size) +
+ page_array_size;
}
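As a rough worked example of this accounting, assuming 4 KiB pages and 64-bit pointers: a 1 MiB request gives num_pages = 256 and page_array_size = ttm_round_pot(256 * sizeof(void *)) = 2048 bytes; under vmw_dma_alloc_coherent another ttm_round_pot(256 * sizeof(dma_addr_t)) = 2048 bytes is added, and the total is that page-array overhead plus the build-dependent rounded struct and TTM backend sizes.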
-int vmw_surface_init(struct vmw_private *dev_priv,
- struct vmw_surface *srf,
- void (*res_free) (struct vmw_resource *res))
+void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
{
- int ret;
- struct {
- SVGA3dCmdHeader header;
- SVGA3dCmdDefineSurface body;
- } *cmd;
- SVGA3dSize *cmd_size;
- struct vmw_resource *res = &srf->res;
- struct drm_vmw_size *src_size;
- size_t submit_size;
- uint32_t cmd_len;
- int i;
-
- BUG_ON(res_free == NULL);
- ret = vmw_resource_init(dev_priv, res, &dev_priv->surface_idr,
- VMW_RES_SURFACE, res_free);
-
- if (unlikely(ret != 0)) {
- res_free(res);
- return ret;
- }
-
- submit_size = sizeof(*cmd) + srf->num_sizes * sizeof(SVGA3dSize);
- cmd_len = sizeof(cmd->body) + srf->num_sizes * sizeof(SVGA3dSize);
-
- cmd = vmw_fifo_reserve(dev_priv, submit_size);
- if (unlikely(cmd == NULL)) {
- DRM_ERROR("Fifo reserve failed for create surface.\n");
- vmw_resource_unreference(&res);
- return -ENOMEM;
- }
-
- cmd->header.id = cpu_to_le32(SVGA_3D_CMD_SURFACE_DEFINE);
- cmd->header.size = cpu_to_le32(cmd_len);
- cmd->body.sid = cpu_to_le32(res->id);
- cmd->body.surfaceFlags = cpu_to_le32(srf->flags);
- cmd->body.format = cpu_to_le32(srf->format);
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i) {
- cmd->body.face[i].numMipLevels =
- cpu_to_le32(srf->mip_levels[i]);
- }
-
- cmd += 1;
- cmd_size = (SVGA3dSize *) cmd;
- src_size = srf->sizes;
-
- for (i = 0; i < srf->num_sizes; ++i, cmd_size++, src_size++) {
- cmd_size->width = cpu_to_le32(src_size->width);
- cmd_size->height = cpu_to_le32(src_size->height);
- cmd_size->depth = cpu_to_le32(src_size->depth);
- }
+ struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- vmw_fifo_commit(dev_priv, submit_size);
- (void) vmw_3d_resource_inc(dev_priv);
- vmw_resource_activate(res, vmw_hw_surface_destroy);
- return 0;
+ kfree(vmw_bo);
}
-static void vmw_user_surface_free(struct vmw_resource *res)
+static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
{
- struct vmw_surface *srf = container_of(res, struct vmw_surface, res);
- struct vmw_user_surface *user_srf =
- container_of(srf, struct vmw_user_surface, srf);
+ struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- kfree(srf->sizes);
- kfree(srf->snooper.image);
- kfree(user_srf);
+ ttm_prime_object_kfree(vmw_user_bo, prime);
}
-int vmw_user_surface_lookup_handle(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, struct vmw_surface **out)
+int vmw_dmabuf_init(struct vmw_private *dev_priv,
+ struct vmw_dma_buffer *vmw_bo,
+ size_t size, struct ttm_placement *placement,
+ bool interruptible,
+ void (*bo_free) (struct ttm_buffer_object *bo))
{
- struct vmw_resource *res;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct ttm_base_object *base;
- int ret = -EINVAL;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
- return -EINVAL;
-
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_resource;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
- res = &srf->res;
-
- read_lock(&dev_priv->resource_lock);
-
- if (!res->avail || res->res_free != &vmw_user_surface_free) {
- read_unlock(&dev_priv->resource_lock);
- goto out_bad_resource;
- }
+ struct ttm_bo_device *bdev = &dev_priv->bdev;
+ size_t acc_size;
+ int ret;
+ bool user = (bo_free == &vmw_user_dmabuf_destroy);
- kref_get(&res->kref);
- read_unlock(&dev_priv->resource_lock);
+ BUG_ON(!bo_free && (!user && (bo_free != vmw_dmabuf_bo_free)));
- *out = srf;
- ret = 0;
+ acc_size = vmw_dmabuf_acc_size(dev_priv, size, user);
+ memset(vmw_bo, 0, sizeof(*vmw_bo));
-out_bad_resource:
- ttm_base_object_unref(&base);
+ INIT_LIST_HEAD(&vmw_bo->res_list);
+ ret = ttm_bo_init(bdev, &vmw_bo->base, size,
+ ttm_bo_type_device, placement,
+ 0, interruptible,
+ NULL, acc_size, NULL, bo_free);
return ret;
}
-static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
+static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
{
+ struct vmw_user_dma_buffer *vmw_user_bo;
struct ttm_base_object *base = *p_base;
- struct vmw_user_surface *user_srf =
- container_of(base, struct vmw_user_surface, base);
- struct vmw_resource *res = &user_srf->srf.res;
+ struct ttm_buffer_object *bo;
*p_base = NULL;
- vmw_resource_unreference(&res);
+
+ if (unlikely(base == NULL))
+ return;
+
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
+ bo = &vmw_user_bo->dma.base;
+ ttm_bo_unref(&bo);
}
-int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+static void vmw_user_dmabuf_ref_obj_release(struct ttm_base_object *base,
+ enum ttm_ref_type ref_type)
{
- struct drm_vmw_surface_arg *arg = (struct drm_vmw_surface_arg *)data;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
-
- return ttm_ref_object_base_unref(tfile, arg->sid, TTM_REF_USAGE);
+ struct vmw_user_dma_buffer *user_bo;
+ user_bo = container_of(base, struct vmw_user_dma_buffer, prime.base);
+
+ switch (ref_type) {
+ case TTM_REF_SYNCCPU_WRITE:
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
+ break;
+ default:
+ BUG();
+ }
}
-int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+/**
+ * vmw_user_dmabuf_alloc - Allocate a user dma buffer
+ *
+ * @dev_priv: Pointer to a struct vmw_private.
+ * @tfile: Pointer to a struct ttm_object_file on which to register the user
+ * object.
+ * @size: Size of the dma buffer.
+ * @shareable: Boolean whether the buffer is shareable with other open files.
+ * @handle: Pointer to where the handle value should be assigned.
+ * @p_dma_buf: Pointer to where the refcounted struct vmw_dma_buffer pointer
+ * should be assigned.
+ */
+int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
+ struct ttm_object_file *tfile,
+ uint32_t size,
+ bool shareable,
+ uint32_t *handle,
+ struct vmw_dma_buffer **p_dma_buf)
{
- struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_surface *user_srf =
- kmalloc(sizeof(*user_srf), GFP_KERNEL);
- struct vmw_surface *srf;
- struct vmw_resource *res;
- struct vmw_resource *tmp;
- union drm_vmw_surface_create_arg *arg =
- (union drm_vmw_surface_create_arg *)data;
- struct drm_vmw_surface_create_req *req = &arg->req;
- struct drm_vmw_surface_arg *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct drm_vmw_size __user *user_sizes;
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_buffer_object *tmp;
int ret;
- int i;
- if (unlikely(user_srf == NULL))
+ user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
+ if (unlikely(user_bo == NULL)) {
+ DRM_ERROR("Failed to allocate a buffer.\n");
return -ENOMEM;
-
- srf = &user_srf->srf;
- res = &srf->res;
-
- srf->flags = req->flags;
- srf->format = req->format;
- srf->scanout = req->scanout;
- memcpy(srf->mip_levels, req->mip_levels, sizeof(srf->mip_levels));
- srf->num_sizes = 0;
- for (i = 0; i < DRM_VMW_MAX_SURFACE_FACES; ++i)
- srf->num_sizes += srf->mip_levels[i];
-
- if (srf->num_sizes > DRM_VMW_MAX_SURFACE_FACES *
- DRM_VMW_MAX_MIP_LEVELS) {
- ret = -EINVAL;
- goto out_err0;
- }
-
- srf->sizes = kmalloc(srf->num_sizes * sizeof(*srf->sizes), GFP_KERNEL);
- if (unlikely(srf->sizes == NULL)) {
- ret = -ENOMEM;
- goto out_err0;
- }
-
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- req->size_addr;
-
- ret = copy_from_user(srf->sizes, user_sizes,
- srf->num_sizes * sizeof(*srf->sizes));
- if (unlikely(ret != 0)) {
- ret = -EFAULT;
- goto out_err1;
- }
-
- if (srf->scanout &&
- srf->num_sizes == 1 &&
- srf->sizes[0].width == 64 &&
- srf->sizes[0].height == 64 &&
- srf->format == SVGA3D_A8R8G8B8) {
-
- srf->snooper.image = kmalloc(64 * 64 * 4, GFP_KERNEL);
- /* clear the image */
- if (srf->snooper.image) {
- memset(srf->snooper.image, 0x00, 64 * 64 * 4);
- } else {
- DRM_ERROR("Failed to allocate cursor_image\n");
- ret = -ENOMEM;
- goto out_err1;
- }
- } else {
- srf->snooper.image = NULL;
}
- srf->snooper.crtc = NULL;
-
- user_srf->base.shareable = false;
- user_srf->base.tfile = NULL;
-
- /**
- * From this point, the generic resource management functions
- * destroy the object on failure.
- */
- ret = vmw_surface_init(dev_priv, srf, vmw_user_surface_free);
+ ret = vmw_dmabuf_init(dev_priv, &user_bo->dma, size,
+ (dev_priv->has_mob) ?
+ &vmw_sys_placement :
+ &vmw_vram_sys_placement, true,
+ &vmw_user_dmabuf_destroy);
if (unlikely(ret != 0))
return ret;
- tmp = vmw_resource_reference(&srf->res);
- ret = ttm_base_object_init(tfile, &user_srf->base,
- req->shareable, VMW_RES_SURFACE,
- &vmw_user_surface_base_release, NULL);
-
- if (unlikely(ret != 0)) {
- vmw_resource_unreference(&tmp);
- vmw_resource_unreference(&res);
- return ret;
- }
-
- rep->sid = user_srf->base.hash.key;
- if (rep->sid == SVGA3D_INVALID_ID)
- DRM_ERROR("Created bad Surface ID.\n");
-
- vmw_resource_unreference(&res);
- return 0;
-out_err1:
- kfree(srf->sizes);
-out_err0:
- kfree(user_srf);
- return ret;
-}
-
-int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- union drm_vmw_surface_reference_arg *arg =
- (union drm_vmw_surface_reference_arg *)data;
- struct drm_vmw_surface_arg *req = &arg->req;
- struct drm_vmw_surface_create_req *rep = &arg->rep;
- struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
- struct vmw_surface *srf;
- struct vmw_user_surface *user_srf;
- struct drm_vmw_size __user *user_sizes;
- struct ttm_base_object *base;
- int ret = -EINVAL;
-
- base = ttm_base_object_lookup(tfile, req->sid);
- if (unlikely(base == NULL)) {
- DRM_ERROR("Could not find surface to reference.\n");
- return -EINVAL;
- }
-
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_resource;
-
- user_srf = container_of(base, struct vmw_user_surface, base);
- srf = &user_srf->srf;
-
- ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
- if (unlikely(ret != 0)) {
- DRM_ERROR("Could not add a reference to a surface.\n");
- goto out_no_reference;
- }
-
- rep->flags = srf->flags;
- rep->format = srf->format;
- memcpy(rep->mip_levels, srf->mip_levels, sizeof(srf->mip_levels));
- user_sizes = (struct drm_vmw_size __user *)(unsigned long)
- rep->size_addr;
-
- if (user_sizes)
- ret = copy_to_user(user_sizes, srf->sizes,
- srf->num_sizes * sizeof(*srf->sizes));
+ tmp = ttm_bo_reference(&user_bo->dma.base);
+ ret = ttm_prime_object_init(tfile,
+ size,
+ &user_bo->prime,
+ shareable,
+ ttm_buffer_type,
+ &vmw_user_dmabuf_release,
+ &vmw_user_dmabuf_ref_obj_release);
if (unlikely(ret != 0)) {
- DRM_ERROR("copy_to_user failed %p %u\n",
- user_sizes, srf->num_sizes);
- ret = -EFAULT;
+ ttm_bo_unref(&tmp);
+ goto out_no_base_object;
}
-out_bad_resource:
-out_no_reference:
- ttm_base_object_unref(&base);
-
- return ret;
-}
-
-int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id)
-{
- struct ttm_base_object *base;
- struct vmw_user_surface *user_srf;
-
- int ret = -EPERM;
-
- base = ttm_base_object_lookup(tfile, handle);
- if (unlikely(base == NULL))
- return -EINVAL;
- if (unlikely(base->object_type != VMW_RES_SURFACE))
- goto out_bad_surface;
+ *p_dma_buf = &user_bo->dma;
+ *handle = user_bo->prime.base.hash.key;
- user_srf = container_of(base, struct vmw_user_surface, base);
- *id = user_srf->srf.res.id;
- ret = 0;
-
-out_bad_surface:
- /**
- * FIXME: May deadlock here when called from the
- * command parsing code.
- */
-
- ttm_base_object_unref(&base);
+out_no_base_object:
return ret;
}
/**
- * Buffer management.
+ * vmw_user_dmabuf_verify_access - verify access permissions on this
+ * buffer object.
+ *
+ * @bo: Pointer to the buffer object being accessed
+ * @tfile: Identifying the caller.
*/
-
-static size_t vmw_dmabuf_acc_size(struct ttm_bo_global *glob,
- unsigned long num_pages)
+int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
+ struct ttm_object_file *tfile)
{
- static size_t bo_user_size = ~0;
+ struct vmw_user_dma_buffer *vmw_user_bo;
- size_t page_array_size =
- (num_pages * sizeof(void *) + PAGE_SIZE - 1) & PAGE_MASK;
+ if (unlikely(bo->destroy != vmw_user_dmabuf_destroy))
+ return -EPERM;
- if (unlikely(bo_user_size == ~0)) {
- bo_user_size = glob->ttm_bo_extra_size +
- ttm_round_pot(sizeof(struct vmw_dma_buffer));
- }
+ vmw_user_bo = vmw_user_dma_buffer(bo);
- return bo_user_size + page_array_size;
-}
+ /* Check that the caller has opened the object. */
+ if (likely(ttm_ref_object_exists(tfile, &vmw_user_bo->prime.base)))
+ return 0;
-void vmw_dmabuf_bo_free(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
-
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
- kfree(vmw_bo);
+ DRM_ERROR("Could not grant buffer access.\n");
+ return -EPERM;
}
-int vmw_dmabuf_init(struct vmw_private *dev_priv,
- struct vmw_dma_buffer *vmw_bo,
- size_t size, struct ttm_placement *placement,
- bool interruptible,
- void (*bo_free) (struct ttm_buffer_object *bo))
+/**
+ * vmw_user_dmabuf_synccpu_grab - Grab a struct vmw_user_dma_buffer for cpu
+ * access, idling previous GPU operations on the buffer and optionally
+ * blocking it for further command submissions.
+ *
+ * @user_bo: Pointer to the buffer object being grabbed for CPU access
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating how the grab should be performed.
+ *
+ * A blocking grab will be automatically released when @tfile is closed.
+ */
+static int vmw_user_dmabuf_synccpu_grab(struct vmw_user_dma_buffer *user_bo,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
{
- struct ttm_bo_device *bdev = &dev_priv->bdev;
- struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
- size_t acc_size;
+ struct ttm_buffer_object *bo = &user_bo->dma.base;
+ bool existed;
int ret;
- BUG_ON(!bo_free);
-
- acc_size =
- vmw_dmabuf_acc_size(bdev->glob,
- (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
+ if (flags & drm_vmw_synccpu_allow_cs) {
+ struct ttm_bo_device *bdev = bo->bdev;
- ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
- if (unlikely(ret != 0)) {
- /* we must free the bo here as
- * ttm_buffer_object_init does so as well */
- bo_free(&vmw_bo->base);
+ spin_lock(&bdev->fence_lock);
+ ret = ttm_bo_wait(bo, false, true,
+ !!(flags & drm_vmw_synccpu_dontblock));
+ spin_unlock(&bdev->fence_lock);
return ret;
}
- memset(vmw_bo, 0, sizeof(*vmw_bo));
+ ret = ttm_bo_synccpu_write_grab
+ (bo, !!(flags & drm_vmw_synccpu_dontblock));
+ if (unlikely(ret != 0))
+ return ret;
- INIT_LIST_HEAD(&vmw_bo->validate_list);
+ ret = ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_SYNCCPU_WRITE, &existed);
+ if (ret != 0 || existed)
+ ttm_bo_synccpu_write_release(&user_bo->dma.base);
- ret = ttm_bo_init(bdev, &vmw_bo->base, size,
- ttm_bo_type_device, placement,
- 0, 0, interruptible,
- NULL, acc_size, bo_free);
return ret;
}
-static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
+/**
+ * vmw_user_dmabuf_synccpu_release - Release a previous grab for CPU access,
+ * and unblock command submission on the buffer if blocked.
+ *
+ * @handle: Handle identifying the buffer object.
+ * @tfile: Identifying the caller.
+ * @flags: Flags indicating the type of release.
+ */
+static int vmw_user_dmabuf_synccpu_release(uint32_t handle,
+ struct ttm_object_file *tfile,
+ uint32_t flags)
{
- struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
- struct ttm_bo_global *glob = bo->glob;
+ if (!(flags & drm_vmw_synccpu_allow_cs))
+ return ttm_ref_object_base_unref(tfile, handle,
+ TTM_REF_SYNCCPU_WRITE);
- ttm_mem_global_free(glob->mem_glob, bo->acc_size);
- kfree(vmw_user_bo);
+ return 0;
}
-static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
+/**
+ * vmw_user_dmabuf_synccpu_ioctl - ioctl function implementing the synccpu
+ * functionality.
+ *
+ * @dev: Identifies the drm device.
+ * @data: Pointer to the ioctl argument.
+ * @file_priv: Identifies the caller.
+ *
+ * This function checks the ioctl arguments for validity and calls the
+ * relevant synccpu functions.
+ */
+int vmw_user_dmabuf_synccpu_ioctl(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
{
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_base_object *base = *p_base;
- struct ttm_buffer_object *bo;
+ struct drm_vmw_synccpu_arg *arg =
+ (struct drm_vmw_synccpu_arg *) data;
+ struct vmw_dma_buffer *dma_buf;
+ struct vmw_user_dma_buffer *user_bo;
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ int ret;
- *p_base = NULL;
+ if ((arg->flags & (drm_vmw_synccpu_read | drm_vmw_synccpu_write)) == 0
+ || (arg->flags & ~(drm_vmw_synccpu_read | drm_vmw_synccpu_write |
+ drm_vmw_synccpu_dontblock |
+ drm_vmw_synccpu_allow_cs)) != 0) {
+ DRM_ERROR("Illegal synccpu flags.\n");
+ return -EINVAL;
+ }
- if (unlikely(base == NULL))
- return;
+ switch (arg->op) {
+ case drm_vmw_synccpu_grab:
+ ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &dma_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer,
+ dma);
+ ret = vmw_user_dmabuf_synccpu_grab(user_bo, tfile, arg->flags);
+ vmw_dmabuf_unreference(&dma_buf);
+ if (unlikely(ret != 0 && ret != -ERESTARTSYS &&
+ ret != -EBUSY)) {
+ DRM_ERROR("Failed synccpu grab on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ case drm_vmw_synccpu_release:
+ ret = vmw_user_dmabuf_synccpu_release(arg->handle, tfile,
+ arg->flags);
+ if (unlikely(ret != 0)) {
+ DRM_ERROR("Failed synccpu release on handle 0x%08x.\n",
+ (unsigned int) arg->handle);
+ return ret;
+ }
+ break;
+ default:
+ DRM_ERROR("Invalid synccpu operation.\n");
+ return -EINVAL;
+ }
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
- bo = &vmw_user_bo->dma.base;
- ttm_bo_unref(&bo);
+ return 0;
}
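A minimal user-space sketch of driving this ioctl (not part of this patch): it assumes the DRM_VMW_SYNCCPU command number and struct drm_vmw_synccpu_arg from the vmwgfx UAPI header, libdrm's drmCommandWrite() wrapper, and an existing CPU mapping of the buffer; error handling beyond the return codes is omitted.

    /* Sketch only: assumes DRM_VMW_SYNCCPU from the vmwgfx UAPI header. */
    #include <stdint.h>
    #include <string.h>
    #include <xf86drm.h>
    #include "vmwgfx_drm.h"

    static int example_cpu_write(int fd, uint32_t handle)
    {
            struct drm_vmw_synccpu_arg arg;
            int ret;

            /* Grab the buffer for CPU writes, failing instead of blocking. */
            memset(&arg, 0, sizeof(arg));
            arg.handle = handle;
            arg.op = drm_vmw_synccpu_grab;
            arg.flags = drm_vmw_synccpu_write | drm_vmw_synccpu_dontblock;
            ret = drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
            if (ret)
                    return ret;

            /* ... CPU writes through the existing mapping go here ... */

            /* Release the grab so command submission is unblocked again. */
            arg.op = drm_vmw_synccpu_release;
            arg.flags = drm_vmw_synccpu_write;
            return drmCommandWrite(fd, DRM_VMW_SYNCCPU, &arg, sizeof(arg));
    }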
int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -843,46 +679,30 @@ int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
(union drm_vmw_alloc_dmabuf_arg *)data;
struct drm_vmw_alloc_dmabuf_req *req = &arg->req;
struct drm_vmw_dmabuf_rep *rep = &arg->rep;
- struct vmw_user_dma_buffer *vmw_user_bo;
- struct ttm_buffer_object *tmp;
- struct vmw_master *vmaster = vmw_master(file_priv->master);
+ struct vmw_dma_buffer *dma_buf;
+ uint32_t handle;
int ret;
- vmw_user_bo = kzalloc(sizeof(*vmw_user_bo), GFP_KERNEL);
- if (unlikely(vmw_user_bo == NULL))
- return -ENOMEM;
-
- ret = ttm_read_lock(&vmaster->lock, true);
- if (unlikely(ret != 0)) {
- kfree(vmw_user_bo);
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
return ret;
- }
- ret = vmw_dmabuf_init(dev_priv, &vmw_user_bo->dma, req->size,
- &vmw_vram_sys_placement, true,
- &vmw_user_dmabuf_destroy);
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ req->size, false, &handle, &dma_buf);
if (unlikely(ret != 0))
- return ret;
+ goto out_no_dmabuf;
- tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
- ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
- &vmw_user_bo->base,
- false,
- ttm_buffer_type,
- &vmw_user_dmabuf_release, NULL);
- if (unlikely(ret != 0)) {
- ttm_bo_unref(&tmp);
- } else {
- rep->handle = vmw_user_bo->base.hash.key;
- rep->map_handle = vmw_user_bo->dma.base.addr_space_offset;
- rep->cur_gmr_id = vmw_user_bo->base.hash.key;
- rep->cur_gmr_offset = 0;
- }
- ttm_bo_unref(&tmp);
+ rep->handle = handle;
+ rep->map_handle = drm_vma_node_offset_addr(&dma_buf->base.vma_node);
+ rep->cur_gmr_id = handle;
+ rep->cur_gmr_offset = 0;
- ttm_read_unlock(&vmaster->lock);
+ vmw_dmabuf_unreference(&dma_buf);
- return 0;
+out_no_dmabuf:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+
+ return ret;
}
int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
@@ -896,27 +716,6 @@ int vmw_dmabuf_unref_ioctl(struct drm_device *dev, void *data,
TTM_REF_USAGE);
}
-uint32_t vmw_dmabuf_validate_node(struct ttm_buffer_object *bo,
- uint32_t cur_validate_node)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- if (likely(vmw_bo->on_validate_list))
- return vmw_bo->cur_validate_node;
-
- vmw_bo->cur_validate_node = cur_validate_node;
- vmw_bo->on_validate_list = true;
-
- return cur_validate_node;
-}
-
-void vmw_dmabuf_validate_clear(struct ttm_buffer_object *bo)
-{
- struct vmw_dma_buffer *vmw_bo = vmw_dma_buffer(bo);
-
- vmw_bo->on_validate_list = false;
-}
-
int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
uint32_t handle, struct vmw_dma_buffer **out)
{
@@ -930,14 +729,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return -ESRCH;
}
- if (unlikely(base->object_type != ttm_buffer_type)) {
+ if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
ttm_base_object_unref(&base);
printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
(unsigned long)handle);
return -EINVAL;
}
- vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+ vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+ prime.base);
(void)ttm_bo_reference(&vmw_user_bo->dma.base);
ttm_base_object_unref(&base);
*out = &vmw_user_bo->dma;
@@ -945,6 +745,22 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
return 0;
}
+int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
+ struct vmw_dma_buffer *dma_buf,
+ uint32_t *handle)
+{
+ struct vmw_user_dma_buffer *user_bo;
+
+ if (dma_buf->base.destroy != vmw_user_dmabuf_destroy)
+ return -EINVAL;
+
+ user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
+
+ *handle = user_bo->prime.base.hash.key;
+ return ttm_ref_object_add(tfile, &user_bo->prime.base,
+ TTM_REF_USAGE, NULL);
+}
+
/*
* Stream management
*/
@@ -969,8 +785,8 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
struct vmw_resource *res = &stream->res;
int ret;
- ret = vmw_resource_init(dev_priv, res, &dev_priv->stream_idr,
- VMW_RES_STREAM, res_free);
+ ret = vmw_resource_init(dev_priv, res, false, res_free,
+ &vmw_stream_func);
if (unlikely(ret != 0)) {
if (res_free == NULL)
@@ -992,16 +808,15 @@ static int vmw_stream_init(struct vmw_private *dev_priv,
return 0;
}
-/**
- * User-space context management:
- */
-
static void vmw_user_stream_free(struct vmw_resource *res)
{
struct vmw_user_stream *stream =
container_of(res, struct vmw_user_stream, stream.res);
+ struct vmw_private *dev_priv = res->dev_priv;
- kfree(stream);
+ ttm_base_object_kfree(stream, base);
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size);
}
/**
@@ -1028,9 +843,11 @@ int vmw_stream_unref_ioctl(struct drm_device *dev, void *data,
struct vmw_user_stream *stream;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct idr *idr = &dev_priv->res_idr[vmw_res_stream];
int ret = 0;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, arg->stream_id);
+
+ res = vmw_resource_lookup(dev_priv, idr, arg->stream_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1055,23 +872,55 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct vmw_private *dev_priv = vmw_priv(dev);
- struct vmw_user_stream *stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ struct vmw_user_stream *stream;
struct vmw_resource *res;
struct vmw_resource *tmp;
struct drm_vmw_stream_arg *arg = (struct drm_vmw_stream_arg *)data;
struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
int ret;
- if (unlikely(stream == NULL))
- return -ENOMEM;
+ /*
+ * Approximate idr memory usage with 128 bytes. It will be limited
+ * by the maximum number of streams anyway.
+ */
+
+ if (unlikely(vmw_user_stream_size == 0))
+ vmw_user_stream_size = ttm_round_pot(sizeof(*stream)) + 128;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size,
+ false, true);
+ if (unlikely(ret != 0)) {
+ if (ret != -ERESTARTSYS)
+ DRM_ERROR("Out of graphics memory for stream"
+ " creation.\n");
+ goto out_unlock;
+ }
+
+
+ stream = kmalloc(sizeof(*stream), GFP_KERNEL);
+ if (unlikely(stream == NULL)) {
+ ttm_mem_global_free(vmw_mem_glob(dev_priv),
+ vmw_user_stream_size);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
res = &stream->stream.res;
stream->base.shareable = false;
stream->base.tfile = NULL;
+ /*
+ * From here on, the destructor takes over resource freeing.
+ */
+
ret = vmw_stream_init(dev_priv, &stream->stream, vmw_user_stream_free);
if (unlikely(ret != 0))
- return ret;
+ goto out_unlock;
tmp = vmw_resource_reference(res);
ret = ttm_base_object_init(tfile, &stream->base, false, VMW_RES_STREAM,
@@ -1085,6 +934,8 @@ int vmw_stream_claim_ioctl(struct drm_device *dev, void *data,
arg->stream_id = res->id;
out_err:
vmw_resource_unreference(&res);
+out_unlock:
+ ttm_read_unlock(&dev_priv->reservation_sem);
return ret;
}
@@ -1096,7 +947,8 @@ int vmw_user_stream_lookup(struct vmw_private *dev_priv,
struct vmw_resource *res;
int ret;
- res = vmw_resource_lookup(dev_priv, &dev_priv->stream_idr, *inout_id);
+ res = vmw_resource_lookup(dev_priv, &dev_priv->res_idr[vmw_res_stream],
+ *inout_id);
if (unlikely(res == NULL))
return -EINVAL;
@@ -1118,3 +970,608 @@ err_ref:
vmw_resource_unreference(&res);
return ret;
}
+
+
+/**
+ * vmw_dumb_create - Create a dumb kms buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @args: Pointer to a struct drm_mode_create_dumb structure
+ *
+ * This is a driver callback for the core drm create_dumb functionality.
+ * Note that this is very similar to the vmw_dmabuf_alloc ioctl, except
+ * that the arguments have a different format.
+ */
+int vmw_dumb_create(struct drm_file *file_priv,
+ struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct vmw_private *dev_priv = vmw_priv(dev);
+ struct vmw_dma_buffer *dma_buf;
+ int ret;
+
+ args->pitch = args->width * ((args->bpp + 7) / 8);
+ args->size = args->pitch * args->height;
+
+ ret = ttm_read_lock(&dev_priv->reservation_sem, true);
+ if (unlikely(ret != 0))
+ return ret;
+
+ ret = vmw_user_dmabuf_alloc(dev_priv, vmw_fpriv(file_priv)->tfile,
+ args->size, false, &args->handle,
+ &dma_buf);
+ if (unlikely(ret != 0))
+ goto out_no_dmabuf;
+
+ vmw_dmabuf_unreference(&dma_buf);
+out_no_dmabuf:
+ ttm_read_unlock(&dev_priv->reservation_sem);
+ return ret;
+}
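As a quick worked example of the pitch and size computed above: a 1024x768 dumb buffer at 32 bpp gets pitch = 1024 * ((32 + 7) / 8) = 4096 bytes and size = 4096 * 768 = 3145728 bytes, which vmw_user_dmabuf_alloc() then backs with a user dma buffer of that size.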
+
+/**
+ * vmw_dumb_map_offset - Return the address space offset of a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ * @offset: The address space offset returned.
+ *
+ * This is a driver callback for the core drm dumb_map_offset functionality.
+ */
+int vmw_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev, uint32_t handle,
+ uint64_t *offset)
+{
+ struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+ struct vmw_dma_buffer *out_buf;
+ int ret;
+
+ ret = vmw_user_dmabuf_lookup(tfile, handle, &out_buf);
+ if (ret != 0)
+ return -EINVAL;
+
+ *offset = drm_vma_node_offset_addr(&out_buf->base.vma_node);
+ vmw_dmabuf_unreference(&out_buf);
+ return 0;
+}
+
+/**
+ * vmw_dumb_destroy - Destroy a dumb buffer
+ *
+ * @file_priv: Pointer to a struct drm_file identifying the caller.
+ * @dev: Pointer to the drm device.
+ * @handle: Handle identifying the dumb buffer.
+ *
+ * This is a driver callback for the core drm dumb_destroy functionality.
+ */
+int vmw_dumb_destroy(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle)
+{
+ return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
+ handle, TTM_REF_USAGE);
+}
+
+/**
+ * vmw_resource_buf_alloc - Allocate a backup buffer for a resource.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ */
+static int vmw_resource_buf_alloc(struct vmw_resource *res,
+ bool interruptible)
+{
+ unsigned long size =
+ (res->backup_size + PAGE_SIZE - 1) & PAGE_MASK;
+ struct vmw_dma_buffer *backup;
+ int ret;
+
+ if (likely(res->backup)) {
+ BUG_ON(res->backup->base.num_pages * PAGE_SIZE < size);
+ return 0;
+ }
+
+ backup = kzalloc(sizeof(*backup), GFP_KERNEL);
+ if (unlikely(backup == NULL))
+ return -ENOMEM;
+
+ ret = vmw_dmabuf_init(res->dev_priv, backup, res->backup_size,
+ res->func->backup_placement,
+ interruptible,
+ &vmw_dmabuf_bo_free);
+ if (unlikely(ret != 0))
+ goto out_no_dmabuf;
+
+ res->backup = backup;
+
+out_no_dmabuf:
+ return ret;
+}
+
+/**
+ * vmw_resource_do_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ * @val_buf: Information about a buffer possibly
+ * containing backup data if a bind operation is needed.
+ *
+ * On hardware resource shortage, this function returns -EBUSY and
+ * should be retried once resources have been freed up.
+ */
+static int vmw_resource_do_validate(struct vmw_resource *res,
+ struct ttm_validate_buffer *val_buf)
+{
+ int ret = 0;
+ const struct vmw_res_func *func = res->func;
+
+ if (unlikely(res->id == -1)) {
+ ret = func->create(res);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ if (func->bind &&
+ ((func->needs_backup && list_empty(&res->mob_head) &&
+ val_buf->bo != NULL) ||
+ (!func->needs_backup && val_buf->bo != NULL))) {
+ ret = func->bind(res, val_buf);
+ if (unlikely(ret != 0))
+ goto out_bind_failed;
+ if (func->needs_backup)
+ list_add_tail(&res->mob_head, &res->backup->res_list);
+ }
+
+ /*
+ * Only do this on write operations, and move to
+ * vmw_resource_unreserve if it can be called after
+ * backup buffers have been unreserved. Otherwise
+ * sort out locking.
+ */
+ res->res_dirty = true;
+
+ return 0;
+
+out_bind_failed:
+ func->destroy(res);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_unreserve - Unreserve a resource previously reserved for
+ * command submission.
+ *
+ * @res: Pointer to the struct vmw_resource to unreserve.
+ * @new_backup: Pointer to new backup buffer if command submission
+ * switched.
+ * @new_backup_offset: New backup offset if @new_backup is !NULL.
+ *
+ * Currently unreserving a resource means putting it back on the device's
+ * resource lru list, so that it can be evicted if necessary.
+ */
+void vmw_resource_unreserve(struct vmw_resource *res,
+ struct vmw_dma_buffer *new_backup,
+ unsigned long new_backup_offset)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+
+ if (!list_empty(&res->lru_head))
+ return;
+
+ if (new_backup && new_backup != res->backup) {
+
+ if (res->backup) {
+ lockdep_assert_held(&res->backup->base.resv->lock.base);
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ res->backup = vmw_dmabuf_reference(new_backup);
+ lockdep_assert_held(&new_backup->base.resv->lock.base);
+ list_add_tail(&res->mob_head, &new_backup->res_list);
+ }
+ if (new_backup)
+ res->backup_offset = new_backup_offset;
+
+ if (!res->func->may_evict || res->id == -1)
+ return;
+
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&res->lru_head,
+ &res->dev_priv->res_lru[res->func->res_type]);
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_check_buffer - Check whether a backup buffer is needed
+ * for a resource and in that case, allocate
+ * one, reserve and validate it.
+ *
+ * @res: The resource for which to allocate a backup buffer.
+ * @interruptible: Whether any sleeps during allocation should be
+ * performed while interruptible.
+ * @val_buf: On successful return contains data about the
+ * reserved and validated backup buffer.
+ */
+static int
+vmw_resource_check_buffer(struct vmw_resource *res,
+ bool interruptible,
+ struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+ bool backup_dirty = false;
+ int ret;
+
+ if (unlikely(res->backup == NULL)) {
+ ret = vmw_resource_buf_alloc(res, interruptible);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ INIT_LIST_HEAD(&val_list);
+ val_buf->bo = ttm_bo_reference(&res->backup->base);
+ list_add_tail(&val_buf->head, &val_list);
+ ret = ttm_eu_reserve_buffers(NULL, &val_list);
+ if (unlikely(ret != 0))
+ goto out_no_reserve;
+
+ if (res->func->needs_backup && list_empty(&res->mob_head))
+ return 0;
+
+ backup_dirty = res->backup_dirty;
+ ret = ttm_bo_validate(&res->backup->base,
+ res->func->backup_placement,
+ true, false);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+
+ return 0;
+
+out_no_validate:
+ ttm_eu_backoff_reservation(NULL, &val_list);
+out_no_reserve:
+ ttm_bo_unref(&val_buf->bo);
+ if (backup_dirty)
+ vmw_dmabuf_unreference(&res->backup);
+
+ return ret;
+}
+
+/**
+ * vmw_resource_reserve - Reserve a resource for command submission
+ *
+ * @res: The resource to reserve.
+ *
+ * This function takes the resource off the LRU list and makes sure
+ * a backup buffer is present for guest-backed resources. However,
+ * the buffer may not be bound to the resource at this point.
+ *
+ */
+int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
+{
+ struct vmw_private *dev_priv = res->dev_priv;
+ int ret;
+
+ write_lock(&dev_priv->resource_lock);
+ list_del_init(&res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+
+ if (res->func->needs_backup && res->backup == NULL &&
+ !no_backup) {
+ ret = vmw_resource_buf_alloc(res, true);
+ if (unlikely(ret != 0))
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * vmw_resource_backoff_reservation - Unreserve and unreference a
+ * backup buffer
+ *
+ * @val_buf: Backup buffer information.
+ */
+static void
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
+{
+ struct list_head val_list;
+
+ if (likely(val_buf->bo == NULL))
+ return;
+
+ INIT_LIST_HEAD(&val_list);
+ list_add_tail(&val_buf->head, &val_list);
+ ttm_eu_backoff_reservation(NULL, &val_list);
+ ttm_bo_unref(&val_buf->bo);
+}
+
+/**
+ * vmw_resource_do_evict - Evict a resource, and transfer its data
+ * to a backup buffer.
+ *
+ * @res: The resource to evict.
+ * @interruptible: Whether to wait interruptible.
+ */
+int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
+{
+ struct ttm_validate_buffer val_buf;
+ const struct vmw_res_func *func = res->func;
+ int ret;
+
+ BUG_ON(!func->may_evict);
+
+ val_buf.bo = NULL;
+ ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
+ if (unlikely(ret != 0))
+ return ret;
+
+ if (unlikely(func->unbind != NULL &&
+ (!func->needs_backup || !list_empty(&res->mob_head)))) {
+ ret = func->unbind(res, res->res_dirty, &val_buf);
+ if (unlikely(ret != 0))
+ goto out_no_unbind;
+ list_del_init(&res->mob_head);
+ }
+ ret = func->destroy(res);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+out_no_unbind:
+ vmw_resource_backoff_reservation(&val_buf);
+
+ return ret;
+}
+
+
+/**
+ * vmw_resource_validate - Make a resource up-to-date and visible
+ * to the device.
+ *
+ * @res: The resource to make visible to the device.
+ *
+ * On successful return, any backup DMA buffer pointed to by @res->backup will
+ * be reserved and validated.
+ * On hardware resource shortage, this function will repeatedly evict
+ * resources of the same type until the validation succeeds.
+ */
+int vmw_resource_validate(struct vmw_resource *res)
+{
+ int ret;
+ struct vmw_resource *evict_res;
+ struct vmw_private *dev_priv = res->dev_priv;
+ struct list_head *lru_list = &dev_priv->res_lru[res->func->res_type];
+ struct ttm_validate_buffer val_buf;
+ unsigned err_count = 0;
+
+ if (likely(!res->func->may_evict))
+ return 0;
+
+ val_buf.bo = NULL;
+ if (res->backup)
+ val_buf.bo = &res->backup->base;
+ do {
+ ret = vmw_resource_do_validate(res, &val_buf);
+ if (likely(ret != -EBUSY))
+ break;
+
+ write_lock(&dev_priv->resource_lock);
+ if (list_empty(lru_list) || !res->func->may_evict) {
+ DRM_ERROR("Out of device device resources "
+ "for %s.\n", res->func->type_name);
+ ret = -EBUSY;
+ write_unlock(&dev_priv->resource_lock);
+ break;
+ }
+
+ evict_res = vmw_resource_reference
+ (list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+
+ write_unlock(&dev_priv->resource_lock);
+
+ ret = vmw_resource_do_evict(evict_res, true);
+ if (unlikely(ret != 0)) {
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&evict_res->lru_head, lru_list);
+ write_unlock(&dev_priv->resource_lock);
+ if (ret == -ERESTARTSYS ||
+ ++err_count > VMW_RES_EVICT_ERR_COUNT) {
+ vmw_resource_unreference(&evict_res);
+ goto out_no_validate;
+ }
+ }
+
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+ if (unlikely(ret != 0))
+ goto out_no_validate;
+ else if (!res->func->needs_backup && res->backup) {
+ list_del_init(&res->mob_head);
+ vmw_dmabuf_unreference(&res->backup);
+ }
+
+ return 0;
+
+out_no_validate:
+ return ret;
+}
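A condensed sketch (not part of this patch) of how a submission path might drive the reserve/validate/unreserve helpers added here; the real flow lives in the execbuf code, which also reserves the backup buffer, takes the required locks and handles the error paths omitted below:

    /* Sketch only; real submission goes through the vmwgfx execbuf path. */
    static int example_submit_with_resource(struct vmw_resource *res,
                                            struct vmw_fence_obj *fence)
    {
            int ret;

            /* Take the resource off the LRU and make sure a backup exists. */
            ret = vmw_resource_reserve(res, false);
            if (ret)
                    return ret;

            /* Create/bind as needed, evicting same-type resources on -EBUSY. */
            ret = vmw_resource_validate(res);
            if (ret)
                    return ret;

            /* ... emit commands referencing res->id ... */

            if (res->backup)
                    vmw_fence_single_bo(&res->backup->base, fence);

            /* Put the resource back on the LRU; no backup switch, hence NULL/0. */
            vmw_resource_unreserve(res, NULL, 0);
            return 0;
    }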
+
+/**
+ * vmw_fence_single_bo - Utility function to fence a single TTM buffer
+ * object without unreserving it.
+ *
+ * @bo: Pointer to the struct ttm_buffer_object to fence.
+ * @fence: Pointer to the fence. If NULL, this function will
+ * insert a fence into the command stream.
+ *
+ * Contrary to the ttm_eu version of this function, it takes only
+ * a single buffer object instead of a list, and it also doesn't
+ * unreserve the buffer object, which needs to be done separately.
+ */
+void vmw_fence_single_bo(struct ttm_buffer_object *bo,
+ struct vmw_fence_obj *fence)
+{
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_bo_driver *driver = bdev->driver;
+ struct vmw_fence_obj *old_fence_obj;
+ struct vmw_private *dev_priv =
+ container_of(bdev, struct vmw_private, bdev);
+
+ if (fence == NULL)
+ vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
+ else
+ driver->sync_obj_ref(fence);
+
+ spin_lock(&bdev->fence_lock);
+
+ old_fence_obj = bo->sync_obj;
+ bo->sync_obj = fence;
+
+ spin_unlock(&bdev->fence_lock);
+
+ if (old_fence_obj)
+ vmw_fence_obj_unreference(&old_fence_obj);
+}
+
+/**
+ * vmw_resource_move_notify - TTM move_notify_callback
+ *
+ * @bo: The TTM buffer object about to move.
+ * @mem: The struct ttm_mem_reg indicating to what memory
+ * region the move is taking place.
+ *
+ * Evicts the Guest Backed hardware resource if the backup
+ * buffer is being moved out of MOB memory.
+ * Note that this function should not race with the resource
+ * validation code as long as it accesses only members of struct
+ * vmw_resource that remain static while bo::res is !NULL and
+ * while we have @bo reserved. struct vmw_resource::backup is *not* a
+ * static member. The resource validation code will take care
+ * to set @bo::res to NULL, while having @bo reserved when the
+ * buffer is no longer bound to the resource, so @bo::res can be
+ * used to determine whether there is a need to unbind and whether
+ * it is safe to unbind.
+ */
+void vmw_resource_move_notify(struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
+{
+ struct vmw_dma_buffer *dma_buf;
+
+ if (mem == NULL)
+ return;
+
+ if (bo->destroy != vmw_dmabuf_bo_free &&
+ bo->destroy != vmw_user_dmabuf_destroy)
+ return;
+
+ dma_buf = container_of(bo, struct vmw_dma_buffer, base);
+
+ if (mem->mem_type != VMW_PL_MOB) {
+ struct vmw_resource *res, *n;
+ struct ttm_bo_device *bdev = bo->bdev;
+ struct ttm_validate_buffer val_buf;
+
+ val_buf.bo = bo;
+
+ list_for_each_entry_safe(res, n, &dma_buf->res_list, mob_head) {
+
+ if (unlikely(res->func->unbind == NULL))
+ continue;
+
+ (void) res->func->unbind(res, true, &val_buf);
+ res->backup_dirty = true;
+ res->res_dirty = false;
+ list_del_init(&res->mob_head);
+ }
+
+ spin_lock(&bdev->fence_lock);
+ (void) ttm_bo_wait(bo, false, false, false);
+ spin_unlock(&bdev->fence_lock);
+ }
+}
+
+/**
+ * vmw_resource_needs_backup - Return whether a resource needs a backup buffer.
+ *
+ * @res: The resource being queried.
+ */
+bool vmw_resource_needs_backup(const struct vmw_resource *res)
+{
+ return res->func->needs_backup;
+}
+
+/**
+ * vmw_resource_evict_type - Evict all resources of a specific type
+ *
+ * @dev_priv: Pointer to a device private struct
+ * @type: The resource type to evict
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * try to evict all evictable resources of a specific type.
+ */
+static void vmw_resource_evict_type(struct vmw_private *dev_priv,
+ enum vmw_res_type type)
+{
+ struct list_head *lru_list = &dev_priv->res_lru[type];
+ struct vmw_resource *evict_res;
+ unsigned err_count = 0;
+ int ret;
+
+ do {
+ write_lock(&dev_priv->resource_lock);
+
+ if (list_empty(lru_list))
+ goto out_unlock;
+
+ evict_res = vmw_resource_reference(
+ list_first_entry(lru_list, struct vmw_resource,
+ lru_head));
+ list_del_init(&evict_res->lru_head);
+ write_unlock(&dev_priv->resource_lock);
+
+ ret = vmw_resource_do_evict(evict_res, false);
+ if (unlikely(ret != 0)) {
+ write_lock(&dev_priv->resource_lock);
+ list_add_tail(&evict_res->lru_head, lru_list);
+ write_unlock(&dev_priv->resource_lock);
+ if (++err_count > VMW_RES_EVICT_ERR_COUNT) {
+ vmw_resource_unreference(&evict_res);
+ return;
+ }
+ }
+
+ vmw_resource_unreference(&evict_res);
+ } while (1);
+
+out_unlock:
+ write_unlock(&dev_priv->resource_lock);
+}
+
+/**
+ * vmw_resource_evict_all - Evict all evictable resources
+ *
+ * @dev_priv: Pointer to a device private struct
+ *
+ * To avoid thrashing or starvation, or as part of the hibernation sequence,
+ * evict all evictable resources. In particular this means that all
+ * guest-backed resources that are registered with the device are
+ * evicted and the OTable becomes clean.
+ */
+void vmw_resource_evict_all(struct vmw_private *dev_priv)
+{
+ enum vmw_res_type type;
+
+ mutex_lock(&dev_priv->cmdbuf_mutex);
+
+ for (type = 0; type < vmw_res_max; ++type)
+ vmw_resource_evict_type(dev_priv, type);
+
+ mutex_unlock(&dev_priv->cmdbuf_mutex);
+}