Diffstat (limited to 'drivers/gpu/drm/drm_bufs.c')
-rw-r--r--	drivers/gpu/drm/drm_bufs.c	44
1 file changed, 19 insertions, 25 deletions
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 471e051d295..68175b54504 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -261,7 +261,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		struct drm_agp_mem *entry;
 		int valid = 0;
 
-		if (!drm_core_has_AGP(dev)) {
+		if (!dev->agp) {
 			kfree(map);
 			return -EINVAL;
 		}
@@ -303,9 +303,6 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 
 		break;
 	}
-	case _DRM_GEM:
-		DRM_ERROR("tried to addmap GEM object\n");
-		break;
 	case _DRM_SCATTER_GATHER:
 		if (!dev->sg) {
 			kfree(map);
@@ -366,7 +363,7 @@ static int drm_addmap_core(struct drm_device * dev, resource_size_t offset,
 		list->master = dev->primary->master;
 	*maplist = list;
 	return 0;
-	}
+}
 
 int drm_addmap(struct drm_device * dev, resource_size_t offset,
 	       unsigned int size, enum drm_map_type type,
@@ -483,9 +480,6 @@ int drm_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
 		dmah.size = map->size;
 		__drm_pci_free(dev, &dmah);
 		break;
-	case _DRM_GEM:
-		DRM_ERROR("tried to rmmap GEM object\n");
-		break;
 	}
 	kfree(map);
 
@@ -662,13 +656,13 @@ int drm_addbufs_agp(struct drm_device * dev, struct drm_buf_desc * request)
 		DRM_DEBUG("zone invalid\n");
 		return -EINVAL;
 	}
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -811,13 +805,13 @@ int drm_addbufs_pci(struct drm_device * dev, struct drm_buf_desc * request)
 	page_order = order - PAGE_SHIFT > 0 ? order - PAGE_SHIFT : 0;
 	total = PAGE_SIZE << page_order;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -1021,13 +1015,13 @@ static int drm_addbufs_sg(struct drm_device * dev, struct drm_buf_desc * request
 	if (order < DRM_MIN_ORDER || order > DRM_MAX_ORDER)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (dev->buf_use) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	atomic_inc(&dev->buf_alloc);
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	mutex_lock(&dev->struct_mutex);
 	entry = &dma->bufs[order];
@@ -1181,7 +1175,7 @@ int drm_addbufs(struct drm_device *dev, void *data,
  * \param arg pointer to a drm_buf_info structure.
  * \return zero on success or a negative number on failure.
  *
- * Increments drm_device::buf_use while holding the drm_device::count_lock
+ * Increments drm_device::buf_use while holding the drm_device::buf_lock
  * lock, preventing of allocating more buffers after this call. Information
  * about each requested buffer is then copied into user space.
  */
@@ -1202,13 +1196,13 @@ int drm_infobufs(struct drm_device *dev, void *data,
 	if (!dma)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	++dev->buf_use;		/* Can't allocate more after this call */
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	for (i = 0, count = 0; i < DRM_MAX_ORDER + 1; i++) {
 		if (dma->bufs[i].buf_count)
@@ -1387,16 +1381,16 @@ int drm_mapbufs(struct drm_device *dev, void *data,
 	if (!dma)
 		return -EINVAL;
 
-	spin_lock(&dev->count_lock);
+	spin_lock(&dev->buf_lock);
 	if (atomic_read(&dev->buf_alloc)) {
-		spin_unlock(&dev->count_lock);
+		spin_unlock(&dev->buf_lock);
 		return -EBUSY;
 	}
 	dev->buf_use++;		/* Can't allocate more after this call */
-	spin_unlock(&dev->count_lock);
+	spin_unlock(&dev->buf_lock);
 
 	if (request->count >= dma->buf_count) {
-		if ((drm_core_has_AGP(dev) && (dma->flags & _DRM_DMA_USE_AGP))
+		if ((dev->agp && (dma->flags & _DRM_DMA_USE_AGP))
 		    || (drm_core_check_feature(dev, DRIVER_SG)
 			&& (dma->flags & _DRM_DMA_USE_SG))) {
 			struct drm_local_map *map = dev->agp_buffer_map;
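
Every drm_addbufs_* hunk in this patch rewrites the same entry guard, so for orientation here is a minimal sketch of that pattern under the post-patch names. The struct and function below (example_dev, example_addbufs_guard) are stand-ins invented for illustration and are not part of drm_bufs.c; only the field names and the lock/check/increment sequence come from the diff.

/*
 * Minimal sketch (not kernel source) of the guard pattern the hunks above
 * converge on. The struct is reduced to the three fields the diff touches;
 * in the real driver they live in struct drm_device.
 */
#include <linux/spinlock.h>
#include <linux/atomic.h>
#include <linux/errno.h>

struct example_dev {			/* stand-in for struct drm_device */
	spinlock_t buf_lock;		/* was count_lock before this patch */
	int buf_use;			/* set once buffers are handed to userspace */
	atomic_t buf_alloc;		/* allocations currently in flight */
};

/* Hypothetical helper mirroring the entry check in drm_addbufs_{agp,pci,sg}(). */
static int example_addbufs_guard(struct example_dev *dev)
{
	spin_lock(&dev->buf_lock);
	if (dev->buf_use) {
		/* buf_use was already raised, e.g. by drm_infobufs()/drm_mapbufs() */
		spin_unlock(&dev->buf_lock);
		return -EBUSY;
	}
	atomic_inc(&dev->buf_alloc);	/* non-zero buf_alloc keeps buf_use from rising */
	spin_unlock(&dev->buf_lock);
	return 0;
}

Note that in every hunk the renamed lock only ever serializes buf_use against buf_alloc, which is consistent with narrowing the old count_lock name to buf_lock.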
