author	Thomas Hellstrom <thellstrom@vmware.com>	2011-08-31 07:42:53 +0000
committer	Dave Airlie <airlied@redhat.com>	2011-09-01 09:38:07 +0100
commit	fb17f18993071cc230ec8ddb6dd3dd9932d2dba2 (patch)
tree	c44f01b1f015cce50e7c4d12045dc8815618f253 /drivers
parent	05730b32a78dab4bed8fb7ccc64c53d9fcf31e9d (diff)
vmwgfx: Restrict number of GMR pages to device limit
When GMR2 is available, make sure we restrict the number of used GMR
pages to the limit indicated by the device. This is done by failing a
GMRID allocation if the total number of GMR pages exceeds the limit.
As a result, TTM will then start evicting buffers in GMR memory on an
LRU basis until the allocation succeeds.

Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com>
Reviewed-by: Jakob Bornecrantz <jakob@vmware.com>
Signed-off-by: Dave Airlie <airlied@redhat.com>
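In outline, the accounting this patch introduces is a capped page counter protected by a lock: reserve pages when a GMR id is handed out, refuse (so TTM can evict and retry) when the device limit would be exceeded, and give the pages back when the id is released. The sketch below is a minimal userspace illustration of that idea, not driver code: gmr_pool, gmr_try_reserve_pages and gmr_release_pages are invented names standing in for struct vmwgfx_gmrid_man and its get_node/put_node callbacks, and a pthread mutex stands in for the kernel spinlock.

/*
 * Minimal standalone sketch of the GMR page accounting described above.
 * All names here are illustrative stand-ins, not the vmwgfx code.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct gmr_pool {
	pthread_mutex_t lock;		/* stands in for the kernel spinlock */
	uint32_t max_gmr_pages;		/* device limit; 0 means "not reported" */
	uint32_t used_gmr_pages;	/* pages currently bound to GMRs */
};

/*
 * Try to account num_pages against the device limit.  Returns false when
 * the limit would be exceeded; in the driver that corresponds to get_node
 * returning with mem->mm_node == NULL so that TTM evicts and retries.
 */
static bool gmr_try_reserve_pages(struct gmr_pool *pool, uint32_t num_pages)
{
	bool ok = true;

	pthread_mutex_lock(&pool->lock);
	if (pool->max_gmr_pages > 0 &&
	    pool->used_gmr_pages + num_pages > pool->max_gmr_pages)
		ok = false;
	else
		pool->used_gmr_pages += num_pages;
	pthread_mutex_unlock(&pool->lock);

	return ok;
}

/* Give the pages back when the GMR id is released (the put_node side). */
static void gmr_release_pages(struct gmr_pool *pool, uint32_t num_pages)
{
	pthread_mutex_lock(&pool->lock);
	pool->used_gmr_pages -= num_pages;
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct gmr_pool pool = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.max_gmr_pages = 1024,	/* pretend the device reported 1024 pages */
	};

	printf("reserve 800 -> %s\n", gmr_try_reserve_pages(&pool, 800) ? "ok" : "over limit");
	printf("reserve 400 -> %s\n", gmr_try_reserve_pages(&pool, 400) ? "ok" : "over limit");
	gmr_release_pages(&pool, 800);	/* eviction frees pages... */
	printf("reserve 400 -> %s\n", gmr_try_reserve_pages(&pool, 400) ? "ok" : "over limit");
	return 0;
}

The patch itself differs in one detail: it adds bo->num_pages to used_gmr_pages up front and backs the addition out at out_err_locked on failure, so the limit check and the GMR id allocation share one locked section; the sketch checks before adding purely for brevity. Either way, a refused reservation means vmw_gmrid_man_get_node() returns 0 with mem->mm_node left NULL, which TTM answers by evicting GMR-bound buffers on an LRU basis and retrying, as described above.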
Diffstat (limited to 'drivers')
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.c	12
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_drv.h	2
-rw-r--r--	drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c	46
3 files changed, 49 insertions, 11 deletions
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 3d6a073e130..77e77b2b9d8 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -322,6 +322,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
 		dev_priv->max_gmr_ids =
 			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
 	}
+	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+		dev_priv->max_gmr_pages =
+			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
+		dev_priv->memory_size =
+			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
+	}
 
 	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
 	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
@@ -338,6 +344,12 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
DRM_INFO("Max GMR descriptors is %u\n",
(unsigned)dev_priv->max_gmr_descriptors);
}
+ if (dev_priv->capabilities & SVGA_CAP_GMR2) {
+ DRM_INFO("Max number of GMR pages is %u\n",
+ (unsigned)dev_priv->max_gmr_pages);
+ DRM_INFO("Max dedicated hypervisor graphics memory is %u\n",
+ (unsigned)dev_priv->memory_size);
+ }
DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
dev_priv->vram_start, dev_priv->vram_size / 1024);
DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 95b75000f0c..323fc10de2d 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -190,6 +190,8 @@ struct vmw_private {
 	uint32_t capabilities;
 	uint32_t max_gmr_descriptors;
 	uint32_t max_gmr_ids;
+	uint32_t max_gmr_pages;
+	uint32_t memory_size;
 	bool has_gmr;
 	struct mutex hw_mutex;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ac6e0d1bd62..5f717152cff 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -40,6 +40,8 @@ struct vmwgfx_gmrid_man {
 	spinlock_t lock;
 	struct ida gmr_ida;
 	uint32_t max_gmr_ids;
+	uint32_t max_gmr_pages;
+	uint32_t used_gmr_pages;
 };
 
 static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
@@ -49,33 +51,50 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
 {
 	struct vmwgfx_gmrid_man *gman =
 		(struct vmwgfx_gmrid_man *)man->priv;
-	int ret;
+	int ret = 0;
 	int id;
 
 	mem->mm_node = NULL;
 
-	do {
-		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0))
-			return -ENOMEM;
+	spin_lock(&gman->lock);
+
+	if (gman->max_gmr_pages > 0) {
+		gman->used_gmr_pages += bo->num_pages;
+		if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
+			goto out_err_locked;
+	}
 
+	do {
+		spin_unlock(&gman->lock);
+		if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
+			ret = -ENOMEM;
+			goto out_err;
+		}
 		spin_lock(&gman->lock);
-		ret = ida_get_new(&gman->gmr_ida, &id);
 
+		ret = ida_get_new(&gman->gmr_ida, &id);
 		if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
 			ida_remove(&gman->gmr_ida, id);
-			spin_unlock(&gman->lock);
-			return 0;
+			ret = 0;
+			goto out_err_locked;
 		}
-
-		spin_unlock(&gman->lock);
-
 	} while (ret == -EAGAIN);
 
 	if (likely(ret == 0)) {
 		mem->mm_node = gman;
 		mem->start = id;
-	}
+		mem->num_pages = bo->num_pages;
+	} else
+		goto out_err_locked;
+
+	spin_unlock(&gman->lock);
+	return 0;
+
+out_err:
+	spin_lock(&gman->lock);
+out_err_locked:
+	gman->used_gmr_pages -= bo->num_pages;
+	spin_unlock(&gman->lock);
 	return ret;
 }
@@ -88,6 +107,7 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
 	if (mem->mm_node) {
 		spin_lock(&gman->lock);
 		ida_remove(&gman->gmr_ida, mem->start);
+		gman->used_gmr_pages -= mem->num_pages;
 		spin_unlock(&gman->lock);
 		mem->mm_node = NULL;
 	}
@@ -96,6 +116,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
 static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 			      unsigned long p_size)
 {
+	struct vmw_private *dev_priv =
+		container_of(man->bdev, struct vmw_private, bdev);
 	struct vmwgfx_gmrid_man *gman =
 		kzalloc(sizeof(*gman), GFP_KERNEL);
@@ -103,6 +125,8 @@ static int vmw_gmrid_man_init(struct ttm_mem_type_manager *man,
 		return -ENOMEM;
 
 	spin_lock_init(&gman->lock);
+	gman->max_gmr_pages = dev_priv->max_gmr_pages;
+	gman->used_gmr_pages = 0;
 	ida_init(&gman->gmr_ida);
 	gman->max_gmr_ids = p_size;
 	man->priv = (void *) gman;