Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--	drivers/gpu/drm/drm_gem.c	421
1 file changed, 421 insertions, 0 deletions
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
new file mode 100644
index 00000000000..ccd1afdede0
--- /dev/null
+++ b/drivers/gpu/drm/drm_gem.c
@@ -0,0 +1,421 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ *    Eric Anholt <eric@anholt.net>
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/uaccess.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/module.h>
+#include <linux/mman.h>
+#include <linux/pagemap.h>
+#include "drmP.h"
+
+/** @file drm_gem.c
+ *
+ * This file provides some of the base ioctls and library routines for
+ * the graphics memory manager implemented by each device driver.
+ *
+ * Because various devices have different requirements in terms of
+ * synchronization and migration strategies, implementing that is left up to
+ * the driver, and all that the general API provides should be generic --
+ * allocating objects, reading/writing data with the cpu, freeing objects.
+ * Even there, platform-dependent optimizations for reading/writing data with
+ * the CPU mean we'll likely hook those out to driver-specific calls.  However,
+ * the DRI2 implementation wants to have at least allocate/mmap be generic.
+ *
+ * The goal was to have swap-backed object allocation managed through
+ * struct file.  However, file descriptors as handles to a struct file have
+ * two major failings:
+ * - Process limits prevent more than 1024 or so being used at a time by
+ *   default.
+ * - Inability to allocate high fds will aggravate the X Server's select()
+ *   handling, and likely that of many GL client applications as well.
+ *
+ * This led to a plan of using our own integer IDs (called handles, following
+ * DRM terminology) to mimic fds, and implement the fd syscalls we need as
+ * ioctls.  The objects themselves will still include the struct file so
+ * that we can transition to fds if the required kernel infrastructure shows
+ * up at a later date, and as our interface with shmfs for memory allocation.
+ */
+
+/**
+ * Initialize the GEM device fields
+ */
+
+int
+drm_gem_init(struct drm_device *dev)
+{
+	spin_lock_init(&dev->object_name_lock);
+	idr_init(&dev->object_name_idr);
+	atomic_set(&dev->object_count, 0);
+	atomic_set(&dev->object_memory, 0);
+	atomic_set(&dev->pin_count, 0);
+	atomic_set(&dev->pin_memory, 0);
+	atomic_set(&dev->gtt_count, 0);
+	atomic_set(&dev->gtt_memory, 0);
+	return 0;
+}
+
+/**
+ * Allocate a GEM object of the specified size with shmfs backing store
+ */
+struct drm_gem_object *
+drm_gem_object_alloc(struct drm_device *dev, size_t size)
+{
+	struct drm_gem_object *obj;
+
+	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
+
+	obj = kcalloc(1, sizeof(*obj), GFP_KERNEL);
+
+	obj->dev = dev;
+	obj->filp = shmem_file_setup("drm mm object", size, 0);
+	if (IS_ERR(obj->filp)) {
+		kfree(obj);
+		return NULL;
+	}
+
+	kref_init(&obj->refcount);
+	kref_init(&obj->handlecount);
+	obj->size = size;
+	if (dev->driver->gem_init_object != NULL &&
+	    dev->driver->gem_init_object(obj) != 0) {
+		fput(obj->filp);
+		kfree(obj);
+		return NULL;
+	}
+	atomic_inc(&dev->object_count);
+	atomic_add(obj->size, &dev->object_memory);
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_alloc);
+
+/**
+ * Removes the mapping from handle to filp for this object.
+ */
+static int
+drm_gem_handle_delete(struct drm_file *filp, int handle)
+{
+	struct drm_device *dev;
+	struct drm_gem_object *obj;
+
+	/* This is gross. The idr system doesn't let us try a delete and
+	 * return an error code.  It just spews if you fail at deleting.
+	 * So, we have to grab a lock around finding the object and then
+	 * doing the delete on it and dropping the refcount, or the user
+	 * could race us to double-decrement the refcount and cause a
+	 * use-after-free later.  Given the frequency of our handle lookups,
+	 * we may want to use ida for number allocation and a hash table
+	 * for the pointers, anyway.
+	 */
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return -EINVAL;
+	}
+	dev = obj->dev;
+
+	/* Release reference and decrement refcount. */
+	idr_remove(&filp->object_idr, handle);
+	spin_unlock(&filp->table_lock);
+
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_handle_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+
+	return 0;
+}
+
+/**
+ * Create a handle for this object. This adds a handle reference
+ * to the object, which includes a regular reference count. Callers
+ * will likely want to dereference the object afterwards.
+ */
+int
+drm_gem_handle_create(struct drm_file *file_priv,
+		       struct drm_gem_object *obj,
+		       int *handlep)
+{
+	int	ret;
+
+	/*
+	 * Get the user-visible handle using idr.
+	 */
+again:
+	/* ensure there is space available to allocate a handle */
+	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	/* do the allocation under our spinlock */
+	spin_lock(&file_priv->table_lock);
+	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep);
+	spin_unlock(&file_priv->table_lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	if (ret != 0)
+		return ret;
+
+	drm_gem_object_handle_reference(obj);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+/** Returns a reference to the object named by the handle. */
+struct drm_gem_object *
+drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
+		      int handle)
+{
+	struct drm_gem_object *obj;
+
+	spin_lock(&filp->table_lock);
+
+	/* Check if we currently have a reference on the object */
+	obj = idr_find(&filp->object_idr, handle);
+	if (obj == NULL) {
+		spin_unlock(&filp->table_lock);
+		return NULL;
+	}
+
+	drm_gem_object_reference(obj);
+
+	spin_unlock(&filp->table_lock);
+
+	return obj;
+}
+EXPORT_SYMBOL(drm_gem_object_lookup);
+
+/**
+ * Releases the handle to an mm object.
+ */
+int
+drm_gem_close_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_close *args = data;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	ret = drm_gem_handle_delete(file_priv, args->handle);
+
+	return ret;
+}
+
+/**
+ * Create a global name for an object, returning the name.
+ *
+ * Note that the name does not hold a reference; when the object
+ * is freed, the name goes away.
+ */
+int
+drm_gem_flink_ioctl(struct drm_device *dev, void *data,
+		    struct drm_file *file_priv)
+{
+	struct drm_gem_flink *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+	if (obj == NULL)
+		return -EBADF;
+
+again:
+	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0)
+		return -ENOMEM;
+
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		args->name = obj->name;
+		spin_unlock(&dev->object_name_lock);
+		return 0;
+	}
+	ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
+				 &obj->name);
+	spin_unlock(&dev->object_name_lock);
+	if (ret == -EAGAIN)
+		goto again;
+
+	if (ret != 0) {
+		mutex_lock(&dev->struct_mutex);
+		drm_gem_object_unreference(obj);
+		mutex_unlock(&dev->struct_mutex);
+		return ret;
+	}
+
+	/*
+	 * Leave the reference from the lookup around as the
+	 * name table now holds one
+	 */
+	args->name = (uint64_t) obj->name;
+
+	return 0;
+}
+
+/**
+ * Open an object using the global name, returning a handle and the size.
+ *
+ * This handle (of course) holds a reference to the object, so the object
+ * will not go away until the handle is deleted.
+ */
+int
+drm_gem_open_ioctl(struct drm_device *dev, void *data,
+		   struct drm_file *file_priv)
+{
+	struct drm_gem_open *args = data;
+	struct drm_gem_object *obj;
+	int ret;
+	int handle;
+
+	if (!(dev->driver->driver_features & DRIVER_GEM))
+		return -ENODEV;
+
+	spin_lock(&dev->object_name_lock);
+	obj = idr_find(&dev->object_name_idr, (int) args->name);
+	if (obj)
+		drm_gem_object_reference(obj);
+	spin_unlock(&dev->object_name_lock);
+	if (!obj)
+		return -ENOENT;
+
+	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	mutex_lock(&dev->struct_mutex);
+	drm_gem_object_unreference(obj);
+	mutex_unlock(&dev->struct_mutex);
+	if (ret)
+		return ret;
+
+	args->handle = handle;
+	args->size = obj->size;
+
+	return 0;
+}
+
+/**
+ * Called at device open time, sets up the structure for handling refcounting
+ * of mm objects.
+ */
+void
+drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
+{
+	idr_init(&file_private->object_idr);
+	spin_lock_init(&file_private->table_lock);
+}
+
+/**
+ * Called at device close to release the file's
+ * handle references on objects.
+ */
+static int
+drm_gem_object_release_handle(int id, void *ptr, void *data)
+{
+	struct drm_gem_object *obj = ptr;
+
+	drm_gem_object_handle_unreference(obj);
+
+	return 0;
+}
+
+/**
+ * Called at close time when the filp is going away.
+ *
+ * Releases any remaining references on objects by this filp.
+ */
+void
+drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
+{
+	mutex_lock(&dev->struct_mutex);
+	idr_for_each(&file_private->object_idr,
+		     &drm_gem_object_release_handle, NULL);
+
+	idr_destroy(&file_private->object_idr);
+	mutex_unlock(&dev->struct_mutex);
+}
+
+/**
+ * Called after the last reference to the object has been lost.
+ *
+ * Frees the object
+ */
+void
+drm_gem_object_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = (struct drm_gem_object *) kref;
+	struct drm_device *dev = obj->dev;
+
+	BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+
+	if (dev->driver->gem_free_object != NULL)
+		dev->driver->gem_free_object(obj);
+
+	fput(obj->filp);
+	atomic_dec(&dev->object_count);
+	atomic_sub(obj->size, &dev->object_memory);
+	kfree(obj);
+}
+EXPORT_SYMBOL(drm_gem_object_free);
+
+/**
+ * Called after the last handle to the object has been closed
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory
+ */
+void
+drm_gem_object_handle_free(struct kref *kref)
+{
+	struct drm_gem_object *obj = container_of(kref,
+						  struct drm_gem_object,
+						  handlecount);
+	struct drm_device *dev = obj->dev;
+
+	/* Remove any name for this object */
+	spin_lock(&dev->object_name_lock);
+	if (obj->name) {
+		idr_remove(&dev->object_name_idr, obj->name);
+		spin_unlock(&dev->object_name_lock);
+		/*
+		 * The object name held a reference to this object, drop
+		 * that now.
+		 */
+		drm_gem_object_unreference(obj);
+	} else
+		spin_unlock(&dev->object_name_lock);
+
+}
+EXPORT_SYMBOL(drm_gem_object_handle_free);
+
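Allocation policy is intentionally left to drivers, so the usual consumer of the routines above is a driver "create" ioctl that pairs drm_gem_object_alloc() with drm_gem_handle_create(). The sketch below is illustrative only and is not part of this patch: the foo_* names and the struct drm_foo_gem_create layout are invented placeholders, and the reference handling follows the convention suggested by the comments above (once the handle is published to userspace, it, rather than the allocating caller, keeps the object alive).

/* Hypothetical driver glue, not part of this patch.  The foo_* names and
 * the args structure are placeholders for driver-defined uapi.
 */
#include "drmP.h"

struct drm_foo_gem_create {
	uint64_t size;
	uint32_t handle;
	uint32_t pad;
};

int
foo_gem_create_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *file_priv)
{
	struct drm_foo_gem_create *args = data;
	struct drm_gem_object *obj;
	int ret;
	int handle;

	/* Objects are whole pages; drm_gem_object_alloc() BUG()s otherwise. */
	args->size = roundup(args->size, PAGE_SIZE);

	obj = drm_gem_object_alloc(dev, args->size);
	if (obj == NULL)
		return -ENOMEM;

	/* Publish a handle to userspace; this takes its own references. */
	ret = drm_gem_handle_create(file_priv, obj, &handle);

	/* Drop the allocation's references: the handle (and any later flink
	 * name) is what keeps the object alive from here on.
	 */
	mutex_lock(&dev->struct_mutex);
	drm_gem_object_handle_unreference(obj);
	mutex_unlock(&dev->struct_mutex);

	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}

Userspace then refers to the buffer only by the returned handle; drm_gem_close_ioctl() above is what tears that mapping down again.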

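drm_gem_object_alloc() and drm_gem_object_free() delegate per-object setup and teardown to the optional gem_init_object and gem_free_object driver hooks seen above. A minimal, hypothetical pair of hooks might only manage private per-object state, roughly as sketched below; the foo_* names are placeholders, and the use of a driver_private pointer on the object is an assumption about the accompanying drmP.h changes, which are not shown in this diff.

/* Hypothetical driver hooks, not part of this patch.  Assumes the GEM
 * object carries a driver_private pointer for per-object driver state.
 */
struct foo_obj_priv {
	int tiling_mode;	/* illustrative driver-specific state */
};

static int foo_gem_init_object(struct drm_gem_object *obj)
{
	struct foo_obj_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (priv == NULL)
		return -ENOMEM;	/* drm_gem_object_alloc() unwinds the object */

	obj->driver_private = priv;
	return 0;
}

static void foo_gem_free_object(struct drm_gem_object *obj)
{
	/* Reached from drm_gem_object_free() with struct_mutex held. */
	kfree(obj->driver_private);
}

/* Wired up in the driver's struct drm_driver alongside the DRIVER_GEM
 * feature flag checked by the ioctls above:
 *
 *	.driver_features = DRIVER_GEM | ...,
 *	.gem_init_object = foo_gem_init_object,
 *	.gem_free_object = foo_gem_free_object,
 */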