Diffstat (limited to 'drivers/gpu/drm/drm_gem.c')
-rw-r--r--	drivers/gpu/drm/drm_gem.c	679
1 file changed, 495 insertions(+), 184 deletions(-)
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index ea1c4b019eb..f7d71190aad 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -34,7 +34,10 @@
 #include <linux/module.h>
 #include <linux/mman.h>
 #include <linux/pagemap.h>
-#include "drmP.h"
+#include <linux/shmem_fs.h>
+#include <linux/dma-buf.h>
+#include <drm/drmP.h>
+#include <drm/drm_vma_manager.h>
 
 /** @file drm_gem.c
  *
@@ -82,36 +85,27 @@
 #endif
 
 /**
- * Initialize the GEM device fields
+ * drm_gem_init - Initialize the GEM device fields
+ * @dev: drm_device structure to initialize
  */
-
 int
 drm_gem_init(struct drm_device *dev)
 {
-	struct drm_gem_mm *mm;
+	struct drm_vma_offset_manager *vma_offset_manager;
 
-	spin_lock_init(&dev->object_name_lock);
+	mutex_init(&dev->object_name_lock);
 	idr_init(&dev->object_name_idr);
 
-	mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
-	if (!mm) {
+	vma_offset_manager = kzalloc(sizeof(*vma_offset_manager), GFP_KERNEL);
+	if (!vma_offset_manager) {
 		DRM_ERROR("out of memory\n");
 		return -ENOMEM;
 	}
 
-	dev->mm_private = mm;
-
-	if (drm_ht_create(&mm->offset_hash, 19)) {
-		kfree(mm);
-		return -ENOMEM;
-	}
-
-	if (drm_mm_init(&mm->offset_manager, DRM_FILE_PAGE_OFFSET_START,
-			DRM_FILE_PAGE_OFFSET_SIZE)) {
-		drm_ht_remove(&mm->offset_hash);
-		kfree(mm);
-		return -ENOMEM;
-	}
+	dev->vma_offset_manager = vma_offset_manager;
+	drm_vma_offset_manager_init(vma_offset_manager,
+				    DRM_FILE_PAGE_OFFSET_START,
+				    DRM_FILE_PAGE_OFFSET_SIZE);
 
 	return 0;
 }
@@ -119,69 +113,139 @@ drm_gem_init(struct drm_device *dev)
 void
 drm_gem_destroy(struct drm_device *dev)
 {
-	struct drm_gem_mm *mm = dev->mm_private;
-	drm_mm_takedown(&mm->offset_manager);
-	drm_ht_remove(&mm->offset_hash);
-	kfree(mm);
-	dev->mm_private = NULL;
+	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
+	kfree(dev->vma_offset_manager);
+	dev->vma_offset_manager = NULL;
 }
 
 /**
- * Initialize an already allocate GEM object of the specified size with
+ * drm_gem_object_init - initialize an allocated shmem-backed GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
+ * Initialize an already allocated GEM object of the specified size with
  * shmfs backing store.
  */
 int drm_gem_object_init(struct drm_device *dev,
 			struct drm_gem_object *obj, size_t size)
 {
+	struct file *filp;
+
+	drm_gem_private_object_init(dev, obj, size);
+
+	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	obj->filp = filp;
+
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_object_init);
+
+/**
+ * drm_gem_private_object_init - initialize an allocated private GEM object
+ * @dev: drm_device the object should be initialized for
+ * @obj: drm_gem_object to initialize
+ * @size: object size
+ *
+ * Initialize an already allocated GEM object of the specified size with
+ * no GEM provided backing store. Instead the caller is responsible for
+ * backing the object and handling it.
+ */
+void drm_gem_private_object_init(struct drm_device *dev,
+				 struct drm_gem_object *obj, size_t size)
+{
 	BUG_ON((size & (PAGE_SIZE - 1)) != 0);
 
 	obj->dev = dev;
-	obj->filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
-	if (IS_ERR(obj->filp))
-		return -ENOMEM;
+	obj->filp = NULL;
 
 	kref_init(&obj->refcount);
-	atomic_set(&obj->handle_count, 0);
+	obj->handle_count = 0;
 	obj->size = size;
+	drm_vma_node_reset(&obj->vma_node);
+}
+EXPORT_SYMBOL(drm_gem_private_object_init);
 
-	return 0;
+static void
+drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
+{
+	/*
+	 * Note: obj->dma_buf can't disappear as long as we still hold a
+	 * handle reference in obj->handle_count.
+	 */
+	mutex_lock(&filp->prime.lock);
+	if (obj->dma_buf) {
+		drm_prime_remove_buf_handle_locked(&filp->prime,
+						   obj->dma_buf);
+	}
+	mutex_unlock(&filp->prime.lock);
 }
-EXPORT_SYMBOL(drm_gem_object_init);
 
 /**
- * Allocate a GEM object of the specified size with shmfs backing store
+ * drm_gem_object_handle_free - release resources bound to userspace handles
+ * @obj: GEM object to clean up.
+ *
+ * Called after the last handle to the object has been closed.
+ *
+ * Removes any name for the object. Note that this must be
+ * called before drm_gem_object_free or we'll be touching
+ * freed memory.
  */
-struct drm_gem_object *
-drm_gem_object_alloc(struct drm_device *dev, size_t size)
+static void drm_gem_object_handle_free(struct drm_gem_object *obj)
 {
-	struct drm_gem_object *obj;
+	struct drm_device *dev = obj->dev;
 
-	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-	if (!obj)
-		goto free;
+	/* Remove any name for this object */
+	if (obj->name) {
+		idr_remove(&dev->object_name_idr, obj->name);
+		obj->name = 0;
+	}
+}
+
+static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
+{
+	/* Unbreak the reference cycle if we have an exported dma_buf. */
+	if (obj->dma_buf) {
+		dma_buf_put(obj->dma_buf);
+		obj->dma_buf = NULL;
+	}
+}
 
-	if (drm_gem_object_init(dev, obj, size) != 0)
-		goto free;
+static void
+drm_gem_object_handle_unreference_unlocked(struct drm_gem_object *obj)
+{
+	if (WARN_ON(obj->handle_count == 0))
+		return;
 
-	if (dev->driver->gem_init_object != NULL &&
-	    dev->driver->gem_init_object(obj) != 0) {
-		goto fput;
+	/*
+	 * Must bump handle count first as this may be the last
+	 * ref, in which case the object would disappear before we
+	 * checked for a name.
+	 */
+
+	mutex_lock(&obj->dev->object_name_lock);
+	if (--obj->handle_count == 0) {
+		drm_gem_object_handle_free(obj);
+		drm_gem_object_exported_dma_buf_free(obj);
 	}
-	return obj;
-fput:
-	/* Object_init mangles the global counters - readjust them. */
-	fput(obj->filp);
-free:
-	kfree(obj);
-	return NULL;
+	mutex_unlock(&obj->dev->object_name_lock);
+
+	drm_gem_object_unreference_unlocked(obj);
 }
-EXPORT_SYMBOL(drm_gem_object_alloc);
 
 /**
- * Removes the mapping from handle to filp for this object.
+ * drm_gem_handle_delete - deletes the given file-private handle
+ * @filp: drm file-private structure to use for the handle look up
+ * @handle: userspace handle to delete
+ *
+ * Removes the GEM handle from the @filp lookup table and if this is the last
+ * handle also cleans up linked resources like GEM names.
 */
-static int
+int
 drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 {
 	struct drm_device *dev;
@@ -210,12 +274,97 @@ drm_gem_handle_delete(struct drm_file *filp, u32 handle)
 	idr_remove(&filp->object_idr, handle);
 	spin_unlock(&filp->table_lock);
 
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_gem_remove_prime_handles(obj, filp);
+	drm_vma_node_revoke(&obj->vma_node, filp->filp);
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, filp);
 	drm_gem_object_handle_unreference_unlocked(obj);
 
 	return 0;
 }
+EXPORT_SYMBOL(drm_gem_handle_delete);
+
+/**
+ * drm_gem_dumb_destroy - dumb fb callback helper for GEM-based drivers
+ * @file: drm file-private structure to remove the dumb handle from
+ * @dev: corresponding drm_device
+ * @handle: the dumb handle to remove
+ *
+ * This implements the ->dumb_destroy KMS driver callback for drivers which
+ * use GEM to manage their backing storage.
+ */
+int drm_gem_dumb_destroy(struct drm_file *file,
+			 struct drm_device *dev,
+			 uint32_t handle)
+{
+	return drm_gem_handle_delete(file, handle);
+}
+EXPORT_SYMBOL(drm_gem_dumb_destroy);
+
+/**
+ * drm_gem_handle_create_tail - internal function to create a handle
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
+ *
+ * This expects the dev->object_name_lock to be held already and will drop it
+ * before returning. Used to avoid races in establishing new handles when
+ * importing an object from either a flink name or a dma-buf.
+ */
+int
+drm_gem_handle_create_tail(struct drm_file *file_priv,
+			   struct drm_gem_object *obj,
+			   u32 *handlep)
+{
+	struct drm_device *dev = obj->dev;
+	int ret;
+
+	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
+
+	/*
+	 * Get the user-visible handle using idr.  Preload and perform
+	 * allocation under our spinlock.
+	 */
+	idr_preload(GFP_KERNEL);
+	spin_lock(&file_priv->table_lock);
+
+	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
+	drm_gem_object_reference(obj);
+	obj->handle_count++;
+	spin_unlock(&file_priv->table_lock);
+	idr_preload_end();
+	mutex_unlock(&dev->object_name_lock);
+	if (ret < 0) {
+		drm_gem_object_handle_unreference_unlocked(obj);
+		return ret;
+	}
+	*handlep = ret;
+
+	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
+	if (ret) {
+		drm_gem_handle_delete(file_priv, *handlep);
+		return ret;
+	}
+
+	if (dev->driver->gem_open_object) {
+		ret = dev->driver->gem_open_object(obj, file_priv);
+		if (ret) {
+			drm_gem_handle_delete(file_priv, *handlep);
+			return ret;
+		}
+	}
+
+	return 0;
+}
 
 /**
+ * drm_gem_handle_create - create a gem handle for an object
+ * @file_priv: drm file-private structure to register the handle for
+ * @obj: object to register
+ * @handlep: pointer to return the created handle to the caller
+ *
  * Create a handle for this object. This adds a handle reference
  * to the object, which includes a regular reference count. Callers
  * will likely want to dereference the object afterwards.
@@ -225,30 +374,160 @@ drm_gem_handle_create(struct drm_file *file_priv,
 		       struct drm_gem_object *obj,
 		       u32 *handlep)
 {
-	int	ret;
+	mutex_lock(&obj->dev->object_name_lock);
 
-	/*
-	 * Get the user-visible handle using idr.
+	return drm_gem_handle_create_tail(file_priv, obj, handlep);
+}
+EXPORT_SYMBOL(drm_gem_handle_create);
+
+
+/**
+ * drm_gem_free_mmap_offset - release a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
+ */
+void
+drm_gem_free_mmap_offset(struct drm_gem_object *obj)
+{
+	struct drm_device *dev = obj->dev;
+
+	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
+}
+EXPORT_SYMBOL(drm_gem_free_mmap_offset);
+
+/**
+ * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
+ * @obj: obj in question
+ * @size: the virtual size
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj, in cases where
+ * the virtual size differs from the physical size (ie. obj->size).  Otherwise
+ * just use drm_gem_create_mmap_offset().
+ */
+int
+drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
+{
+	struct drm_device *dev = obj->dev;
+
+	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
+				  size / PAGE_SIZE);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);
+
+/**
+ * drm_gem_create_mmap_offset - create a fake mmap offset for an object
+ * @obj: obj in question
+ *
+ * GEM memory mapping works by handing back to userspace a fake mmap offset
+ * it can use in a subsequent mmap(2) call.  The DRM core code then looks
+ * up the object based on the offset and sets up the various memory mapping
+ * structures.
+ *
+ * This routine allocates and attaches a fake offset for @obj.
+ */
+int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
+{
+	return drm_gem_create_mmap_offset_size(obj, obj->size);
+}
+EXPORT_SYMBOL(drm_gem_create_mmap_offset);
+
+/**
+ * drm_gem_get_pages - helper to allocate backing pages for a GEM object
+ * from shmem
+ * @obj: obj in question
+ * @gfpmask: gfp mask of requested pages
+ */
+struct page **drm_gem_get_pages(struct drm_gem_object *obj, gfp_t gfpmask)
+{
+	struct inode *inode;
+	struct address_space *mapping;
+	struct page *p, **pages;
+	int i, npages;
+
+	/* This is the shared memory object that backs the GEM resource */
+	inode = file_inode(obj->filp);
+	mapping = inode->i_mapping;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
 	 */
-again:
-	/* ensure there is space available to allocate a handle */
-	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
-		return -ENOMEM;
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
 
-	/* do the allocation under our spinlock */
-	spin_lock(&file_priv->table_lock);
-	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
-	spin_unlock(&file_priv->table_lock);
-	if (ret == -EAGAIN)
-		goto again;
+	npages = obj->size >> PAGE_SHIFT;
 
-	if (ret != 0)
-		return ret;
+	pages = drm_malloc_ab(npages, sizeof(struct page *));
+	if (pages == NULL)
+		return ERR_PTR(-ENOMEM);
 
-	drm_gem_object_handle_reference(obj);
-	return 0;
+	gfpmask |= mapping_gfp_mask(mapping);
+
+	for (i = 0; i < npages; i++) {
+		p = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
+		if (IS_ERR(p))
+			goto fail;
+		pages[i] = p;
+
+		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
+		 * correct region during swapin. Note that this requires
+		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
+		 * so shmem can relocate pages during swapin if required.
+		 */
+		BUG_ON((gfpmask & __GFP_DMA32) &&
+				(page_to_pfn(p) >= 0x00100000UL));
+	}
+
+	return pages;
+
+fail:
+	while (i--)
+		page_cache_release(pages[i]);
+
+	drm_free_large(pages);
+	return ERR_CAST(p);
 }
-EXPORT_SYMBOL(drm_gem_handle_create);
+EXPORT_SYMBOL(drm_gem_get_pages);
+
+/**
+ * drm_gem_put_pages - helper to free backing pages for a GEM object
+ * @obj: obj in question
+ * @pages: pages to free
+ * @dirty: if true, pages will be marked as dirty
+ * @accessed: if true, the pages will be marked as accessed
+ */
+void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
+		bool dirty, bool accessed)
+{
+	int i, npages;
+
+	/* We already BUG_ON() for non-page-aligned sizes in
+	 * drm_gem_object_init(), so we should never hit this unless
+	 * driver author is doing something really wrong:
+	 */
+	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);
+
+	npages = obj->size >> PAGE_SHIFT;
+
+	for (i = 0; i < npages; i++) {
+		if (dirty)
+			set_page_dirty(pages[i]);
+
+		if (accessed)
+			mark_page_accessed(pages[i]);
+
+		/* Undo the reference we took when populating the table */
+		page_cache_release(pages[i]);
+	}
+
+	drm_free_large(pages);
+}
+EXPORT_SYMBOL(drm_gem_put_pages);
 
 /** Returns a reference to the object named by the handle. */
 struct drm_gem_object *
@@ -275,6 +554,11 @@ drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp,
 
 EXPORT_SYMBOL(drm_gem_object_lookup);
 
 /**
+ * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
  * Releases the handle to an mm object.
 */
 int
@@ -293,6 +577,11 @@ drm_gem_close_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
+ * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
  * Create a global name for an object, returning the name.
  *
  * Note that the name does not hold a reference; when the object
@@ -313,39 +602,38 @@ drm_gem_flink_ioctl(struct drm_device *dev, void *data,
 	if (obj == NULL)
 		return -ENOENT;
 
-again:
-	if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) {
-		ret = -ENOMEM;
+	mutex_lock(&dev->object_name_lock);
+	idr_preload(GFP_KERNEL);
+	/* prevent races with concurrent gem_close. */
+	if (obj->handle_count == 0) {
+		ret = -ENOENT;
 		goto err;
 	}
 
-	spin_lock(&dev->object_name_lock);
 	if (!obj->name) {
-		ret = idr_get_new_above(&dev->object_name_idr, obj, 1,
-					&obj->name);
-		args->name = (uint64_t) obj->name;
-		spin_unlock(&dev->object_name_lock);
-
-		if (ret == -EAGAIN)
-			goto again;
-
-		if (ret != 0)
+		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_NOWAIT);
+		if (ret < 0)
 			goto err;
 
-		/* Allocate a reference for the name table.  */
-		drm_gem_object_reference(obj);
-	} else {
-		args->name = (uint64_t) obj->name;
-		spin_unlock(&dev->object_name_lock);
-		ret = 0;
+		obj->name = ret;
 	}
 
+	args->name = (uint64_t) obj->name;
+	ret = 0;
+
 err:
+	idr_preload_end();
+	mutex_unlock(&dev->object_name_lock);
 	drm_gem_object_unreference_unlocked(obj);
 	return ret;
 }
 
 /**
+ * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
+ * @dev: drm_device
+ * @data: ioctl data
+ * @file_priv: drm file-private structure
+ *
  * Open an object using the global name, returning a handle and the size.
  *
  * This handle (of course) holds a reference to the object, so the object
@@ -363,15 +651,17 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 	if (!(dev->driver->driver_features & DRIVER_GEM))
 		return -ENODEV;
 
-	spin_lock(&dev->object_name_lock);
+	mutex_lock(&dev->object_name_lock);
 	obj = idr_find(&dev->object_name_idr, (int) args->name);
-	if (obj)
+	if (obj) {
 		drm_gem_object_reference(obj);
-	spin_unlock(&dev->object_name_lock);
-	if (!obj)
+	} else {
+		mutex_unlock(&dev->object_name_lock);
 		return -ENOENT;
+	}
 
-	ret = drm_gem_handle_create(file_priv, obj, &handle);
+	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
+	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
 	drm_gem_object_unreference_unlocked(obj);
 	if (ret)
 		return ret;
@@ -383,6 +673,10 @@ drm_gem_open_ioctl(struct drm_device *dev, void *data,
 }
 
 /**
+ * drm_gem_open - initializes GEM file-private structures at devnode open time
+ * @dev: drm_device which is being opened by userspace
+ * @file_private: drm file-private structure to set up
+ *
  * Called at device open time, sets up the structure for handling refcounting
  * of mm objects.
  */
@@ -393,14 +687,23 @@ drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
 	spin_lock_init(&file_private->table_lock);
 }
 
-/**
+/*
  * Called at device close to release the file's
 * handle references on objects.
 */
 static int
 drm_gem_object_release_handle(int id, void *ptr, void *data)
 {
+	struct drm_file *file_priv = data;
 	struct drm_gem_object *obj = ptr;
+	struct drm_device *dev = obj->dev;
+
+	if (drm_core_check_feature(dev, DRIVER_PRIME))
+		drm_gem_remove_prime_handles(obj, file_priv);
+	drm_vma_node_revoke(&obj->vma_node, file_priv->filp);
+
+	if (dev->driver->gem_close_object)
+		dev->driver->gem_close_object(obj, file_priv);
 
 	drm_gem_object_handle_unreference_unlocked(obj);
 
@@ -408,6 +711,10 @@ drm_gem_object_release_handle(int id, void *ptr, void *data)
 }
 
 /**
+ * drm_gem_release - release file-private GEM resources
+ * @dev: drm_device which is being closed by userspace
+ * @file_private: drm file-private structure to clean up
+ *
  * Called at close time when the filp is going away.
  *
  * Releases any remaining references on objects by this filp.
@@ -416,20 +723,26 @@ void
 drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
 {
 	idr_for_each(&file_private->object_idr,
-		     &drm_gem_object_release_handle, NULL);
-
-	idr_remove_all(&file_private->object_idr);
+		     &drm_gem_object_release_handle, file_private);
 	idr_destroy(&file_private->object_idr);
 }
 
 void
 drm_gem_object_release(struct drm_gem_object *obj)
 {
-	fput(obj->filp);
+	WARN_ON(obj->dma_buf);
+
+	if (obj->filp)
+		fput(obj->filp);
+
+	drm_gem_free_mmap_offset(obj);
 }
 EXPORT_SYMBOL(drm_gem_object_release);
 
 /**
+ * drm_gem_object_free - free a GEM object
+ * @kref: kref of the object to free
+ *
  * Called after the last reference to the object has been lost.
 * Must be called holding struct_mutex
 *
@@ -448,41 +761,6 @@ drm_gem_object_free(struct kref *kref)
 }
 EXPORT_SYMBOL(drm_gem_object_free);
 
-static void drm_gem_object_ref_bug(struct kref *list_kref)
-{
-	BUG();
-}
-
-/**
- * Called after the last handle to the object has been closed
- *
- * Removes any name for the object. Note that this must be
- * called before drm_gem_object_free or we'll be touching
- * freed memory
- */
-void drm_gem_object_handle_free(struct drm_gem_object *obj)
-{
-	struct drm_device *dev = obj->dev;
-
-	/* Remove any name for this object */
-	spin_lock(&dev->object_name_lock);
-	if (obj->name) {
-		idr_remove(&dev->object_name_idr, obj->name);
-		obj->name = 0;
-		spin_unlock(&dev->object_name_lock);
-		/*
-		 * The object name held a reference to this object, drop
-		 * that now.
-		 *
-		 * This cannot be the last reference, since the handle holds one too.
-		 */
-		kref_put(&obj->refcount, drm_gem_object_ref_bug);
-	} else
-		spin_unlock(&dev->object_name_lock);
-
-}
-EXPORT_SYMBOL(drm_gem_object_handle_free);
-
 void drm_gem_vm_open(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
@@ -490,7 +768,7 @@ void drm_gem_vm_open(struct vm_area_struct *vma)
 	drm_gem_object_reference(obj);
 
 	mutex_lock(&obj->dev->struct_mutex);
-	drm_vm_open_locked(vma);
+	drm_vm_open_locked(obj->dev, vma);
 	mutex_unlock(&obj->dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_open);
@@ -498,14 +776,72 @@ EXPORT_SYMBOL(drm_gem_vm_open);
 void drm_gem_vm_close(struct vm_area_struct *vma)
 {
 	struct drm_gem_object *obj = vma->vm_private_data;
+	struct drm_device *dev = obj->dev;
 
-	mutex_lock(&obj->dev->struct_mutex);
-	drm_vm_close_locked(vma);
+	mutex_lock(&dev->struct_mutex);
+	drm_vm_close_locked(obj->dev, vma);
 	drm_gem_object_unreference(obj);
-	mutex_unlock(&obj->dev->struct_mutex);
+	mutex_unlock(&dev->struct_mutex);
 }
 EXPORT_SYMBOL(drm_gem_vm_close);
 
+/**
+ * drm_gem_mmap_obj - memory map a GEM object
+ * @obj: the GEM object to map
+ * @obj_size: the object size to be mapped, in bytes
+ * @vma: VMA for the area to be mapped
+ *
+ * Set up the VMA to prepare mapping of the GEM object using the gem_vm_ops
+ * provided by the driver. Depending on their requirements, drivers can either
+ * provide a fault handler in their gem_vm_ops (in which case any accesses to
+ * the object will be trapped, to perform migration, GTT binding, surface
+ * register allocation, or performance monitoring), or mmap the buffer memory
+ * synchronously after calling drm_gem_mmap_obj.
+ *
+ * This function is mainly intended to implement the DMABUF mmap operation, when
+ * the GEM object is not looked up based on its fake offset. To implement the
+ * DRM mmap operation, drivers should use the drm_gem_mmap() function.
+ *
+ * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
+ * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
+ * callers must verify access restrictions before calling this helper.
+ *
+ * NOTE: This function has to be protected with dev->struct_mutex
+ *
+ * Return 0 on success or -EINVAL if the object size is smaller than the VMA
+ * size, or if no gem_vm_ops are provided.
+ */
+int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
+		     struct vm_area_struct *vma)
+{
+	struct drm_device *dev = obj->dev;
+
+	lockdep_assert_held(&dev->struct_mutex);
+
+	/* Check for valid size. */
+	if (obj_size < vma->vm_end - vma->vm_start)
+		return -EINVAL;
+
+	if (!dev->driver->gem_vm_ops)
+		return -EINVAL;
+
+	vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
+	vma->vm_ops = dev->driver->gem_vm_ops;
+	vma->vm_private_data = obj;
+	vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
+
+	/* Take a ref for this mapping of the object, so that the fault
+	 * handler can dereference the mmap offset's pointer to the object.
+	 * This reference is cleaned up by the corresponding vm_close
+	 * (which should happen whether the vma was created by this call, or
+	 * by a vm_open due to mremap or partial unmap or whatever).
+	 */
+	drm_gem_object_reference(obj);
+
+	drm_vm_open_locked(dev, vma);
+	return 0;
+}
+EXPORT_SYMBOL(drm_gem_mmap_obj);
 
 /**
  * drm_gem_mmap - memory map routine for GEM objects
@@ -515,65 +851,40 @@ EXPORT_SYMBOL(drm_gem_vm_close);
  * If a driver supports GEM object mapping, mmap calls on the DRM file
  * descriptor will end up here.
  *
- * If we find the object based on the offset passed in (vma->vm_pgoff will
+ * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
  * contain the fake offset we created when the GTT map ioctl was called on
- * the object), we set up the driver fault handler so that any accesses
- * to the object can be trapped, to perform migration, GTT binding, surface
- * register allocation, or performance monitoring.
+ * the object) and map it with a call to drm_gem_mmap_obj().
+ *
+ * If the caller is not granted access to the buffer object, the mmap will fail
+ * with EACCES. Please see the vma manager for more information.
 */
 int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
 {
 	struct drm_file *priv = filp->private_data;
 	struct drm_device *dev = priv->minor->dev;
-	struct drm_gem_mm *mm = dev->mm_private;
-	struct drm_local_map *map = NULL;
 	struct drm_gem_object *obj;
-	struct drm_hash_item *hash;
-	int ret = 0;
+	struct drm_vma_offset_node *node;
+	int ret;
+
+	if (drm_device_is_unplugged(dev))
+		return -ENODEV;
 
 	mutex_lock(&dev->struct_mutex);
 
-	if (drm_ht_find_item(&mm->offset_hash, vma->vm_pgoff, &hash)) {
+	node = drm_vma_offset_exact_lookup(dev->vma_offset_manager,
+					   vma->vm_pgoff,
+					   vma_pages(vma));
+	if (!node) {
 		mutex_unlock(&dev->struct_mutex);
 		return drm_mmap(filp, vma);
+	} else if (!drm_vma_node_is_allowed(node, filp)) {
+		mutex_unlock(&dev->struct_mutex);
+		return -EACCES;
 	}
 
-	map = drm_hash_entry(hash, struct drm_map_list, hash)->map;
-	if (!map ||
-	    ((map->flags & _DRM_RESTRICTED) && !capable(CAP_SYS_ADMIN))) {
-		ret =  -EPERM;
-		goto out_unlock;
-	}
-
-	/* Check for valid size. */
-	if (map->size < vma->vm_end - vma->vm_start) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	obj = map->handle;
-	if (!obj->dev->driver->gem_vm_ops) {
-		ret = -EINVAL;
-		goto out_unlock;
-	}
-
-	vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
-	vma->vm_ops = obj->dev->driver->gem_vm_ops;
-	vma->vm_private_data = map->handle;
-	vma->vm_page_prot =  pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
-
-	/* Take a ref for this mapping of the object, so that the fault
-	 * handler can dereference the mmap offset's pointer to the object.
-	 * This reference is cleaned up by the corresponding vm_close
-	 * (which should happen whether the vma was created by this call, or
-	 * by a vm_open due to mremap or partial unmap or whatever).
-	 */
-	drm_gem_object_reference(obj);
-
-	vma->vm_file = filp;	/* Needed for drm_vm_open() */
-	drm_vm_open_locked(vma);
+	obj = container_of(node, struct drm_gem_object, vma_node);
+	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT, vma);
 
-out_unlock:
 	mutex_unlock(&dev->struct_mutex);
 
 	return ret;
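
Taken together, the reworked init and handle helpers change the canonical driver flow for creating a shmem-backed object and handing it to userspace (note the patch also removes drm_gem_object_alloc(), so drivers allocate the object themselves). Below is a minimal sketch of a dumb-buffer create callback against the post-patch API; the foo_* names are hypothetical and error handling is abbreviated:

#include <drm/drmP.h>

static int foo_dumb_create(struct drm_file *file_priv,
			   struct drm_device *dev,
			   struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *obj;
	u32 handle;
	int ret;

	args->pitch = args->width * DIV_ROUND_UP(args->bpp, 8);
	args->size = PAGE_ALIGN(args->pitch * args->height);

	obj = kzalloc(sizeof(*obj), GFP_KERNEL);
	if (!obj)
		return -ENOMEM;

	/* shmem-backed init; drm_gem_private_object_init() would be used
	 * instead if the driver provides its own backing store */
	ret = drm_gem_object_init(dev, obj, args->size);
	if (ret) {
		kfree(obj);
		return ret;
	}

	/* publish a userspace handle; this takes its own reference and
	 * bumps obj->handle_count under dev->object_name_lock */
	ret = drm_gem_handle_create(file_priv, obj, &handle);

	/* drop the initial reference; the handle keeps the object alive */
	drm_gem_object_unreference_unlocked(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}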
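The fake-offset helpers pair with a map-offset callback in the driver. Here is a sketch of a hypothetical ->dumb_map_offset implementation; foo_* is illustrative, while drm_gem_create_mmap_offset() comes from this patch and drm_vma_node_offset_addr() from <drm/drm_vma_manager.h>:

static int foo_dumb_map_offset(struct drm_file *file_priv,
			       struct drm_device *dev, u32 handle,
			       u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (!obj)
		return -ENOENT;

	/* allocate a fake offset in dev->vma_offset_manager */
	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	/* the value userspace passes to mmap(2) as the file offset */
	*offset = drm_vma_node_offset_addr(&obj->vma_node);

out:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}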
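drm_gem_get_pages()/drm_gem_put_pages() are building blocks for drivers that pin shmem pages around device access. A minimal sketch, assuming a hypothetical foo driver that embeds the base object and caches the page array:

struct foo_gem_object {
	struct drm_gem_object base;
	struct page **pages;
};

static int foo_pin_pages(struct foo_gem_object *fobj)
{
	/* populates the shmem mapping; each page comes back referenced */
	struct page **pages = drm_gem_get_pages(&fobj->base, GFP_KERNEL);

	if (IS_ERR(pages))
		return PTR_ERR(pages);

	fobj->pages = pages;
	return 0;
}

static void foo_unpin_pages(struct foo_gem_object *fobj)
{
	/* dirty = true so shmem writes the pages back out on reclaim */
	drm_gem_put_pages(&fobj->base, fobj->pages, true, false);
	fobj->pages = NULL;
}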
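As the drm_gem_mmap_obj() kerneldoc notes, the helper exists mainly so a dma-buf ->mmap callback can reuse the GEM mapping path without the fake-offset lookup or access checks. A sketch, assuming the exporter stored the GEM object in dma_buf->priv and taking dev->struct_mutex as the NOTE above requires:

static int foo_dmabuf_mmap(struct dma_buf *dma_buf,
			   struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->dev;
	int ret;

	mutex_lock(&dev->struct_mutex);
	/* no access checks here: holding the dma-buf fd implies access */
	ret = drm_gem_mmap_obj(obj, obj->size, vma);
	mutex_unlock(&dev->struct_mutex);

	return ret;
}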
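For reference, the GEM_FLINK/GEM_OPEN ioctls reworked above implement the classic global-name sharing flow between two DRM file descriptors. A userspace-side sketch using the uapi structs (error handling omitted; note that after this patch the name holds no reference of its own, only handles do):

#include <sys/ioctl.h>
#include <drm/drm.h>

static int share_bo(int export_fd, int import_fd, __u32 handle,
		    __u32 *out_handle)
{
	struct drm_gem_flink flink = { .handle = handle };
	struct drm_gem_open open_arg = { 0 };

	/* give the object a global name on the exporting fd */
	if (ioctl(export_fd, DRM_IOCTL_GEM_FLINK, &flink))
		return -1;

	/* the importing fd opens it by name, getting its own handle */
	open_arg.name = flink.name;
	if (ioctl(import_fd, DRM_IOCTL_GEM_OPEN, &open_arg))
		return -1;

	*out_handle = open_arg.handle;
	return 0;
}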
