Diffstat (limited to 'drivers/gpu/drm/gma500/gtt.c')
-rw-r--r--	drivers/gpu/drm/gma500/gtt.c	136
1 file changed, 86 insertions, 50 deletions
diff --git a/drivers/gpu/drm/gma500/gtt.c b/drivers/gpu/drm/gma500/gtt.c
index 04a371aceb3..592d205a008 100644
--- a/drivers/gpu/drm/gma500/gtt.c
+++ b/drivers/gpu/drm/gma500/gtt.c
@@ -22,6 +22,7 @@
 #include <drm/drmP.h>
 #include <linux/shmem_fs.h>
 #include "psb_drv.h"
+#include "blitter.h"
 
 
 /*
@@ -80,7 +81,8 @@ static u32 __iomem *psb_gtt_entry(struct drm_device *dev, struct gtt_range *r)
  * the GTT. This is protected via the gtt mutex which the caller
  * must hold.
  */
-static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
+static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r,
+			  int resume)
 {
 	u32 __iomem *gtt_slot;
 	u32 pte;
@@ -97,16 +99,20 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
 	gtt_slot = psb_gtt_entry(dev, r);
 	pages = r->pages;
 
-	/* Make sure changes are visible to the GPU */
-	set_pages_array_wc(pages, r->npage);
+	if (!resume) {
+		/* Make sure changes are visible to the GPU */
+		set_pages_array_wc(pages, r->npage);
+	}
 
 	/* Write our page entries into the GTT itself */
 	for (i = r->roll; i < r->npage; i++) {
-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+				       PSB_MMU_CACHED_MEMORY);
 		iowrite32(pte, gtt_slot++);
 	}
 	for (i = 0; i < r->roll; i++) {
-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+				       PSB_MMU_CACHED_MEMORY);
 		iowrite32(pte, gtt_slot++);
 	}
 	/* Make sure all the entries are set before we return */
@@ -124,7 +130,7 @@ static int psb_gtt_insert(struct drm_device *dev, struct gtt_range *r)
  * page table entries with the dummy page. This is protected via the gtt
  * mutex which the caller must hold.
  */
-static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
+void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	u32 __iomem *gtt_slot;
@@ -134,7 +140,8 @@ static void psb_gtt_remove(struct drm_device *dev, struct gtt_range *r)
 	WARN_ON(r->stolen);
 
 	gtt_slot = psb_gtt_entry(dev, r);
-	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page), 0);
+	pte = psb_gtt_mask_pte(page_to_pfn(dev_priv->scratch_page),
+			       PSB_MMU_CACHED_MEMORY);
 
 	for (i = 0; i < r->npage; i++)
 		iowrite32(pte, gtt_slot++);
@@ -173,11 +180,13 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
 	gtt_slot = psb_gtt_entry(dev, r);
 
 	for (i = r->roll; i < r->npage; i++) {
-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+				       PSB_MMU_CACHED_MEMORY);
 		iowrite32(pte, gtt_slot++);
 	}
 	for (i = 0; i < r->roll; i++) {
-		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]), 0);
+		pte = psb_gtt_mask_pte(page_to_pfn(r->pages[i]),
+				       PSB_MMU_CACHED_MEMORY);
 		iowrite32(pte, gtt_slot++);
 	}
 	ioread32(gtt_slot - 1);
@@ -193,37 +202,18 @@ void psb_gtt_roll(struct drm_device *dev, struct gtt_range *r, int roll)
  */
 static int psb_gtt_attach_pages(struct gtt_range *gt)
 {
-	struct inode *inode;
-	struct address_space *mapping;
-	int i;
-	struct page *p;
-	int pages = gt->gem.size / PAGE_SIZE;
+	struct page **pages;
 
 	WARN_ON(gt->pages);
 
-	/* This is the shared memory object that backs the GEM resource */
-	inode = gt->gem.filp->f_path.dentry->d_inode;
-	mapping = inode->i_mapping;
+	pages = drm_gem_get_pages(&gt->gem, 0);
+	if (IS_ERR(pages))
+		return PTR_ERR(pages);
 
-	gt->pages = kmalloc(pages * sizeof(struct page *), GFP_KERNEL);
-	if (gt->pages == NULL)
-		return -ENOMEM;
-	gt->npage = pages;
+	gt->npage = gt->gem.size / PAGE_SIZE;
+	gt->pages = pages;
 
-	for (i = 0; i < pages; i++) {
-		p = shmem_read_mapping_page(mapping, i);
-		if (IS_ERR(p))
-			goto err;
-		gt->pages[i] = p;
-	}
 	return 0;
-
-err:
-	while (i--)
-		page_cache_release(gt->pages[i]);
-	kfree(gt->pages);
-	gt->pages = NULL;
-	return PTR_ERR(p);
 }
 
 /**
@@ -237,13 +227,7 @@ err:
  */
 static void psb_gtt_detach_pages(struct gtt_range *gt)
 {
-	int i;
-	for (i = 0; i < gt->npage; i++) {
-		/* FIXME: do we need to force dirty */
-		set_page_dirty(gt->pages[i]);
-		page_cache_release(gt->pages[i]);
-	}
-	kfree(gt->pages);
+	drm_gem_put_pages(&gt->gem, gt->pages, true, false);
 	gt->pages = NULL;
 }
 
@@ -262,6 +246,7 @@ int psb_gtt_pin(struct gtt_range *gt)
 	int ret = 0;
 	struct drm_device *dev = gt->gem.dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 gpu_base = dev_priv->gtt.gatt_start;
 
 	mutex_lock(&dev_priv->gtt_mutex);
 
@@ -269,11 +254,14 @@ int psb_gtt_pin(struct gtt_range *gt)
 		ret = psb_gtt_attach_pages(gt);
 		if (ret < 0)
 			goto out;
-		ret = psb_gtt_insert(dev, gt);
+		ret = psb_gtt_insert(dev, gt, 0);
 		if (ret < 0) {
 			psb_gtt_detach_pages(gt);
 			goto out;
 		}
+		psb_mmu_insert_pages(psb_mmu_get_default_pd(dev_priv->mmu),
+				     gt->pages, (gpu_base + gt->offset),
+				     gt->npage, 0, 0, PSB_MMU_CACHED_MEMORY);
 	}
 	gt->in_gart++;
 out:
@@ -296,16 +284,30 @@ void psb_gtt_unpin(struct gtt_range *gt)
 {
 	struct drm_device *dev = gt->gem.dev;
 	struct drm_psb_private *dev_priv = dev->dev_private;
+	u32 gpu_base = dev_priv->gtt.gatt_start;
+	int ret;
 
+	/* While holding the gtt_mutex no new blits can be initiated */
 	mutex_lock(&dev_priv->gtt_mutex);
 
+	/* Wait for any possible usage of the memory to be finished */
+	ret = gma_blt_wait_idle(dev_priv);
+	if (ret) {
+		DRM_ERROR("Failed to idle the blitter, unpin failed!");
+		goto out;
+	}
+
 	WARN_ON(!gt->in_gart);
 
 	gt->in_gart--;
 	if (gt->in_gart == 0 && gt->stolen == 0) {
+		psb_mmu_remove_pages(psb_mmu_get_default_pd(dev_priv->mmu),
+				     (gpu_base + gt->offset), gt->npage, 0, 0);
 		psb_gtt_remove(dev, gt);
 		psb_gtt_detach_pages(gt);
 	}
+
+out:
 	mutex_unlock(&dev_priv->gtt_mutex);
 }
 
@@ -328,7 +330,7 @@ void psb_gtt_unpin(struct gtt_range *gt)
 * as in use.
 */
struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
-						const char *name, int backed)
+				      const char *name, int backed, u32 align)
 {
 	struct drm_psb_private *dev_priv = dev->dev_private;
 	struct gtt_range *gt;
@@ -356,7 +358,7 @@ struct gtt_range *psb_gtt_alloc_range(struct drm_device *dev, int len,
 	/* Ensure this is set for non GEM objects */
 	gt->gem.dev = dev;
 	ret = allocate_resource(dev_priv->gtt_mem, &gt->resource,
-				len, start, end, PAGE_SIZE, NULL, NULL);
+				len, start, end, align, NULL, NULL);
 	if (ret == 0) {
 		gt->offset = gt->resource.start - r->start;
 		return gt;
@@ -421,9 +423,11 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 	int ret = 0;
 	uint32_t pte;
 
-	mutex_init(&dev_priv->gtt_mutex);
+	if (!resume) {
+		mutex_init(&dev_priv->gtt_mutex);
+		psb_gtt_alloc(dev);
+	}
 
-	psb_gtt_alloc(dev);
 	pg = &dev_priv->gtt;
 
 	/* Enable the GTT */
@@ -505,7 +509,8 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 	/*
 	 *	Map the GTT and the stolen memory area
	 */
-	dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
+	if (!resume)
+		dev_priv->gtt_map = ioremap_nocache(pg->gtt_phys_start,
						gtt_pages << PAGE_SHIFT);
 	if (!dev_priv->gtt_map) {
 		dev_err(dev->dev, "Failure to map gtt.\n");
@@ -513,7 +518,10 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 		goto out_err;
 	}
 
-	dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base, stolen_size);
+	if (!resume)
+		dev_priv->vram_addr = ioremap_wc(dev_priv->stolen_base,
+						 stolen_size);
+
 	if (!dev_priv->vram_addr) {
 		dev_err(dev->dev, "Failure to map stolen base.\n");
 		ret = -ENOMEM;
@@ -529,7 +537,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
 	dev_dbg(dev->dev, "Set up %d stolen pages starting at 0x%08x, GTT offset %dK\n",
		num_pages, pfn_base << PAGE_SHIFT, 0);
 	for (i = 0; i < num_pages; ++i) {
-		pte = psb_gtt_mask_pte(pfn_base + i, 0);
+		pte = psb_gtt_mask_pte(pfn_base + i, PSB_MMU_CACHED_MEMORY);
 		iowrite32(pte, dev_priv->gtt_map + i);
 	}
 
@@ -538,7 +546,7 @@ int psb_gtt_init(struct drm_device *dev, int resume)
	 */
 
 	pfn_base = page_to_pfn(dev_priv->scratch_page);
-	pte = psb_gtt_mask_pte(pfn_base, 0);
+	pte = psb_gtt_mask_pte(pfn_base, PSB_MMU_CACHED_MEMORY);
 	for (; i < gtt_pages; ++i)
 		iowrite32(pte, dev_priv->gtt_map + i);
 
@@ -549,3 +557,31 @@ out_err:
 	psb_gtt_takedown(dev);
 	return ret;
 }
+
+int psb_gtt_restore(struct drm_device *dev)
+{
+	struct drm_psb_private *dev_priv = dev->dev_private;
+	struct resource *r = dev_priv->gtt_mem->child;
+	struct gtt_range *range;
+	unsigned int restored = 0, total = 0, size = 0;
+
+	/* On resume, the gtt_mutex is already initialized */
+	mutex_lock(&dev_priv->gtt_mutex);
+	psb_gtt_init(dev, 1);
+
+	while (r != NULL) {
+		range = container_of(r, struct gtt_range, resource);
+		if (range->pages) {
+			psb_gtt_insert(dev, range, 1);
+			size += range->resource.end - range->resource.start;
+			restored++;
+		}
+		r = r->sibling;
+		total++;
+	}
+	mutex_unlock(&dev_priv->gtt_mutex);
+	DRM_DEBUG_DRIVER("Restored %u of %u gtt ranges (%u KB)", restored,
+			 total, (size / 1024));
+
+	return 0;
+}
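For context, here is a rough sketch (not part of the patch) of how a caller elsewhere in the driver might use the reworked interfaces: psb_gtt_alloc_range() now takes an explicit alignment, psb_gtt_pin() additionally maps the backing pages through the GPU MMU, and psb_gtt_restore() rewrites the GTT entries after resume. The helpers example_alloc_scanout() and example_resume() are hypothetical names invented for this illustration.

/*
 * Illustrative sketch only, assuming the gma500 headers below; the
 * psb_gtt_* calls are the ones touched in the diff above.
 */
#include <drm/drmP.h>
#include "psb_drv.h"
#include "gtt.h"

static struct gtt_range *example_alloc_scanout(struct drm_device *dev, int len)
{
	struct gtt_range *gt;

	/* Alignment is now an explicit argument; PAGE_SIZE keeps the old
	 * behaviour, larger values can be passed where needed. */
	gt = psb_gtt_alloc_range(dev, len, "scanout", 1 /* backed */, PAGE_SIZE);
	if (gt == NULL)
		return NULL;

	/* Pinning now also maps the backing pages through the GPU MMU at
	 * gatt_start + gt->offset using PSB_MMU_CACHED_MEMORY PTEs. */
	if (psb_gtt_pin(gt) < 0) {
		psb_gtt_free_range(dev, gt);
		return NULL;
	}
	return gt;
}

static int example_resume(struct drm_device *dev)
{
	/* Re-enables the GTT (psb_gtt_init(dev, 1)) and rewrites the page
	 * table entries of every range that still has pages attached. */
	return psb_gtt_restore(dev);
}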
