Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_sgdma.c')
-rw-r--r-- | drivers/gpu/drm/nouveau/nouveau_sgdma.c | 46
1 file changed, 7 insertions, 39 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
index 1d6ee8b5515..491767fe4fc 100644
--- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c
@@ -97,7 +97,6 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 
 	NV_DEBUG(dev, "pg=0x%lx\n", mem->mm_node->start);
 
-	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
 	pte = nouveau_sgdma_pte(nvbe->dev, mem->mm_node->start << PAGE_SHIFT);
 	nvbe->pte_start = pte;
 	for (i = 0; i < nvbe->nr_pages; i++) {
@@ -116,24 +115,11 @@ nouveau_sgdma_bind(struct ttm_backend *be, struct ttm_mem_reg *mem)
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.finish_access(nvbe->dev);
+	dev_priv->engine.instmem.flush(nvbe->dev);
 
 	if (dev_priv->card_type == NV_50) {
-		nv_wr32(dev, 0x100c80, 0x00050001);
-		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
-			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
-			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
-				 nv_rd32(dev, 0x100c80));
-			return -EBUSY;
-		}
-
-		nv_wr32(dev, 0x100c80, 0x00000001);
-		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
-			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
-			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
-				 nv_rd32(dev, 0x100c80));
-			return -EBUSY;
-		}
+		nv50_vm_flush(dev, 5); /* PGRAPH */
+		nv50_vm_flush(dev, 0); /* PFIFO */
 	}
 
 	nvbe->bound = true;
@@ -154,7 +140,6 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 	if (!nvbe->bound)
 		return 0;
 
-	dev_priv->engine.instmem.prepare_access(nvbe->dev, true);
 	pte = nvbe->pte_start;
 	for (i = 0; i < nvbe->nr_pages; i++) {
 		dma_addr_t dma_offset = dev_priv->gart_info.sg_dummy_bus;
@@ -170,24 +155,11 @@ nouveau_sgdma_unbind(struct ttm_backend *be)
 			dma_offset += NV_CTXDMA_PAGE_SIZE;
 		}
 	}
-	dev_priv->engine.instmem.finish_access(nvbe->dev);
+	dev_priv->engine.instmem.flush(nvbe->dev);
 
 	if (dev_priv->card_type == NV_50) {
-		nv_wr32(dev, 0x100c80, 0x00050001);
-		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
-			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
-			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
-				 nv_rd32(dev, 0x100c80));
-			return -EBUSY;
-		}
-
-		nv_wr32(dev, 0x100c80, 0x00000001);
-		if (!nv_wait(0x100c80, 0x00000001, 0x00000000)) {
-			NV_ERROR(dev, "timeout: (0x100c80 & 1) == 0 (2)\n");
-			NV_ERROR(dev, "0x100c80 = 0x%08x\n",
-				 nv_rd32(dev, 0x100c80));
-			return -EBUSY;
-		}
+		nv50_vm_flush(dev, 5);
+		nv50_vm_flush(dev, 0);
 	}
 
 	nvbe->bound = false;
@@ -272,7 +244,6 @@ nouveau_sgdma_init(struct drm_device *dev)
 		pci_map_page(dev->pdev, dev_priv->gart_info.sg_dummy_page, 0,
 			     PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
 
-	dev_priv->engine.instmem.prepare_access(dev, true);
 	if (dev_priv->card_type < NV_50) {
 		/* Maybe use NV_DMA_TARGET_AGP for PCIE? NVIDIA do this, and
 		 * confirmed to work on c51.  Perhaps means NV_DMA_TARGET_PCIE
@@ -294,7 +265,7 @@ nouveau_sgdma_init(struct drm_device *dev)
 			nv_wo32(dev, gpuobj, (i+4)/4, 0);
 		}
 	}
-	dev_priv->engine.instmem.finish_access(dev);
+	dev_priv->engine.instmem.flush(dev);
 
 	dev_priv->gart_info.type = NOUVEAU_GART_SGDMA;
 	dev_priv->gart_info.aper_base = 0;
@@ -325,14 +296,11 @@ nouveau_sgdma_get_page(struct drm_device *dev, uint32_t offset, uint32_t *page)
 {
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct nouveau_gpuobj *gpuobj = dev_priv->gart_info.sg_ctxdma;
-	struct nouveau_instmem_engine *instmem = &dev_priv->engine.instmem;
 	int pte;
 
 	pte = (offset >> NV_CTXDMA_PAGE_SHIFT);
 	if (dev_priv->card_type < NV_50) {
-		instmem->prepare_access(dev, false);
 		*page = nv_ro32(dev, gpuobj, (pte + 2)) & ~NV_CTXDMA_PAGE_MASK;
-		instmem->finish_access(dev);
 		return 0;
 	}
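For context on the deleted hunks: the old NV50 path wrote 0x00050001 and 0x00000001 to register 0x100c80 and busy-waited for bit 0 to clear, once per engine; the patch replaces each open-coded sequence with a call to nv50_vm_flush(dev, engine) and swaps the prepare_access/finish_access bracketing for the new instmem flush() hook. The sketch below is a standalone userspace model of that write-then-poll pattern, under loud assumptions: fake_regs, nv_wr32, nv_rd32 and nv_wait here are simplified stand-ins modelled on the driver's helpers, not the kernel accessors, and the "hardware" clears the busy bit itself.

/*
 * Standalone sketch of the VM-flush pattern removed by this patch.
 * All helpers below are hypothetical stubs for illustration only.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NV50_VM_FLUSH_REG 0x100c80   /* register the deleted code polled */

static uint32_t fake_regs[1];        /* pretend MMIO: just the flush register */

static void nv_wr32(uint32_t reg, uint32_t val)
{
	(void)reg;
	fake_regs[0] = val;
}

static uint32_t nv_rd32(uint32_t reg)
{
	(void)reg;
	/* Fake hardware: a read observes the flush completing. */
	fake_regs[0] &= ~1u;
	return fake_regs[0];
}

/* Poll until (reg & mask) == val, mirroring the deleted nv_wait() loop. */
static bool nv_wait(uint32_t reg, uint32_t mask, uint32_t val)
{
	for (int i = 0; i < 100000; i++) {
		if ((nv_rd32(reg) & mask) == val)
			return true;
	}
	return false;
}

/* Old pattern: kick a flush for one engine, then busy-wait on bit 0. */
static int old_style_vm_flush(uint32_t engine)
{
	nv_wr32(NV50_VM_FLUSH_REG, (engine << 16) | 1); /* 5 -> 0x00050001 */
	if (!nv_wait(NV50_VM_FLUSH_REG, 0x00000001, 0x00000000)) {
		fprintf(stderr, "timeout waiting for VM flush\n");
		return -1;
	}
	return 0;
}

int main(void)
{
	/* The patch replaces two copies of this sequence with
	 * nv50_vm_flush(dev, 5) (PGRAPH) and nv50_vm_flush(dev, 0) (PFIFO). */
	if (old_style_vm_flush(5) || old_style_vm_flush(0))
		return 1;
	printf("both flushes completed\n");
	return 0;
}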