Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_bo.c	725
1 file changed, 387 insertions(+), 338 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 7f80ed52356..b6dc85c614b 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -27,31 +27,124 @@
* Jeremy Kolb <jkolb@brandeis.edu>
*/
-#include "drmP.h"
-#include "ttm/ttm_page_alloc.h"
+#include <core/engine.h>
+#include <linux/swiotlb.h>
+
+#include <subdev/fb.h>
+#include <subdev/vm.h>
+#include <subdev/bar.h>
#include "nouveau_drm.h"
-#include "nouveau_drv.h"
#include "nouveau_dma.h"
-#include "nouveau_mm.h"
-#include "nouveau_vm.h"
#include "nouveau_fence.h"
-#include "nouveau_ramht.h"
-#include <linux/log2.h>
-#include <linux/slab.h>
+#include "nouveau_bo.h"
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
+
+/*
+ * NV10-NV40 tiling helpers
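+ *
+ * Tile regions are programmed into PFB and mirrored into the engines
+ * that honour tiling (GR, MPEG); a region can only be recycled once
+ * the fence covering its previous user has signalled.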
+ */
+
+static void
+nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg,
+ u32 addr, u32 size, u32 pitch, u32 flags)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ int i = reg - drm->tile.reg;
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_fb_tile *tile = &pfb->tile.region[i];
+ struct nouveau_engine *engine;
+
+ nouveau_fence_unref(&reg->fence);
+
+ if (tile->pitch)
+ pfb->tile.fini(pfb, i, tile);
+
+ if (pitch)
+ pfb->tile.init(pfb, i, addr, size, pitch, flags, tile);
+
+ pfb->tile.prog(pfb, i, tile);
+
+ if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR)))
+ engine->tile_prog(engine, i);
+ if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG)))
+ engine->tile_prog(engine, i);
+}
+
+static struct nouveau_drm_tile *
+nv10_bo_get_tile_region(struct drm_device *dev, int i)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_drm_tile *tile = &drm->tile.reg[i];
+
+ spin_lock(&drm->tile.lock);
+
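+	/* a region is free for reuse once it is unused and any fence left
+	 * by its previous owner has signalled
+	 */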
+ if (!tile->used &&
+ (!tile->fence || nouveau_fence_done(tile->fence)))
+ tile->used = true;
+ else
+ tile = NULL;
+
+ spin_unlock(&drm->tile.lock);
+ return tile;
+}
+
+static void
+nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile,
+ struct nouveau_fence *fence)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+
+ if (tile) {
+ spin_lock(&drm->tile.lock);
+ tile->fence = nouveau_fence_ref(fence);
+ tile->used = false;
+ spin_unlock(&drm->tile.lock);
+ }
+}
+
+static struct nouveau_drm_tile *
+nv10_bo_set_tiling(struct drm_device *dev, u32 addr,
+ u32 size, u32 pitch, u32 flags)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ struct nouveau_drm_tile *tile, *found = NULL;
+ int i;
+
+ for (i = 0; i < pfb->tile.regions; i++) {
+ tile = nv10_bo_get_tile_region(dev, i);
+
+ if (pitch && !found) {
+ found = tile;
+ continue;
+
+ } else if (tile && pfb->tile.region[i].pitch) {
+ /* Kill an unused tile region. */
+ nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0);
+ }
+
+ nv10_bo_put_tile_region(dev, tile, NULL);
+ }
+
+ if (found)
+ nv10_bo_update_tile_region(dev, found, addr, size,
+ pitch, flags);
+ return found;
+}
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
- if (unlikely(nvbo->gem))
+ if (unlikely(nvbo->gem.filp))
DRM_ERROR("bo %p still attached to GEM object\n", bo);
-
- nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
+ WARN_ON(nvbo->pin_refcnt > 0);
+ nv10_bo_put_tile_region(dev, nvbo->tile, NULL);
kfree(nvbo);
}
@@ -59,23 +152,24 @@ static void
nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags,
int *align, int *size)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_device *device = nv_device(drm->device);
- if (dev_priv->card_type < NV_50) {
+ if (device->card_type < NV_50) {
if (nvbo->tile_mode) {
- if (dev_priv->chipset >= 0x40) {
+ if (device->chipset >= 0x40) {
*align = 65536;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x30) {
+ } else if (device->chipset >= 0x30) {
*align = 32768;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x20) {
+ } else if (device->chipset >= 0x20) {
*align = 16384;
*size = roundup(*size, 64 * nvbo->tile_mode);
- } else if (dev_priv->chipset >= 0x10) {
+ } else if (device->chipset >= 0x10) {
*align = 16384;
*size = roundup(*size, 32 * nvbo->tile_mode);
}
@@ -94,11 +188,22 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
struct sg_table *sg,
struct nouveau_bo **pnvbo)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_drm *drm = nouveau_drm(dev);
struct nouveau_bo *nvbo;
size_t acc_size;
int ret;
int type = ttm_bo_type_device;
+ int lpg_shift = 12;
+ int max_size;
+
+ if (drm->client.base.vm)
+ lpg_shift = drm->client.base.vm->vmm->lpg_shift;
+ max_size = INT_MAX & ~((1 << lpg_shift) - 1);
+
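+	/* reject non-positive sizes and anything past the large-page-aligned
+	 * INT_MAX computed above
+	 */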
+ if (size <= 0 || size > max_size) {
+ nv_warn(drm, "skipped size %x\n", (u32)size);
+ return -EINVAL;
+ }
if (sg)
type = ttm_bo_type_sg;
@@ -111,24 +216,24 @@ nouveau_bo_new(struct drm_device *dev, int size, int align,
INIT_LIST_HEAD(&nvbo->vma_list);
nvbo->tile_mode = tile_mode;
nvbo->tile_flags = tile_flags;
- nvbo->bo.bdev = &dev_priv->ttm.bdev;
+ nvbo->bo.bdev = &drm->ttm.bdev;
nvbo->page_shift = 12;
- if (dev_priv->bar1_vm) {
+ if (drm->client.base.vm) {
if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024)
- nvbo->page_shift = dev_priv->bar1_vm->lpg_shift;
+ nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift;
}
nouveau_bo_fixup_align(nvbo, flags, &align, &size);
nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
nouveau_bo_placement_set(nvbo, flags, 0);
- acc_size = ttm_bo_dma_acc_size(&dev_priv->ttm.bdev, size,
+ acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size,
sizeof(struct nouveau_bo));
- ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
+ ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size,
type, &nvbo->placement,
- align >> PAGE_SHIFT, 0, false, NULL, acc_size, sg,
+ align >> PAGE_SHIFT, false, NULL, acc_size, sg,
nouveau_bo_del_ttm);
if (ret) {
/* ttm will call nouveau_bo_del_ttm if it fails.. */
@@ -155,10 +260,12 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags)
static void
set_placement_range(struct nouveau_bo *nvbo, uint32_t type)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
- int vram_pages = dev_priv->vram_size >> PAGE_SHIFT;
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_fb *pfb = nouveau_fb(drm->device);
+ u32 vram_pages = pfb->ram->size >> PAGE_SHIFT;
- if (dev_priv->card_type == NV_10 &&
+ if ((nv_device(drm->device)->card_type == NV_10 ||
+ nv_device(drm->device)->card_type == NV_11) &&
nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) &&
nvbo->bo.mem.num_pages < vram_pages / 4) {
/*
@@ -198,76 +305,77 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy)
int
nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
int ret;
+ ret = ttm_bo_reserve(bo, false, false, false, 0);
+ if (ret)
+ goto out;
+
if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) {
- NV_ERROR(nouveau_bdev(bo->bdev)->dev,
- "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
+ NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo,
1 << bo->mem.mem_type, memtype);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
if (nvbo->pin_refcnt++)
- return 0;
-
- ret = ttm_bo_reserve(bo, false, false, false, 0);
- if (ret)
goto out;
nouveau_bo_placement_set(nvbo, memtype, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- dev_priv->fb_aper_free -= bo->mem.size;
+ drm->gem.vram_available -= bo->mem.size;
break;
case TTM_PL_TT:
- dev_priv->gart_info.aper_free -= bo->mem.size;
+ drm->gem.gart_available -= bo->mem.size;
break;
default:
break;
}
}
- ttm_bo_unreserve(bo);
out:
- if (unlikely(ret))
- nvbo->pin_refcnt--;
+ ttm_bo_unreserve(bo);
return ret;
}
int
nouveau_bo_unpin(struct nouveau_bo *nvbo)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev);
+ struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev);
struct ttm_buffer_object *bo = &nvbo->bo;
- int ret;
-
- if (--nvbo->pin_refcnt)
- return 0;
+ int ret, ref;
ret = ttm_bo_reserve(bo, false, false, false, 0);
if (ret)
return ret;
+ ref = --nvbo->pin_refcnt;
+ WARN_ON_ONCE(ref < 0);
+ if (ref)
+ goto out;
+
nouveau_bo_placement_set(nvbo, bo->mem.placement, 0);
- ret = nouveau_bo_validate(nvbo, false, false, false);
+ ret = nouveau_bo_validate(nvbo, false, false);
if (ret == 0) {
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- dev_priv->fb_aper_free += bo->mem.size;
+ drm->gem.vram_available += bo->mem.size;
break;
case TTM_PL_TT:
- dev_priv->gart_info.aper_free += bo->mem.size;
+ drm->gem.gart_available += bo->mem.size;
break;
default:
break;
}
}
+out:
ttm_bo_unreserve(bo);
return ret;
}
@@ -295,12 +403,12 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo)
int
nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible,
- bool no_wait_reserve, bool no_wait_gpu)
+ bool no_wait_gpu)
{
int ret;
- ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, interruptible,
- no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
+ interruptible, no_wait_gpu);
if (ret)
return ret;
@@ -356,30 +464,20 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
}
static struct ttm_tt *
-nouveau_ttm_tt_create(struct ttm_bo_device *bdev,
- unsigned long size, uint32_t page_flags,
- struct page *dummy_read_page)
+nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
+ uint32_t page_flags, struct page *dummy_read)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
-
- switch (dev_priv->gart_info.type) {
#if __OS_HAS_AGP
- case NOUVEAU_GART_AGP:
- return ttm_agp_tt_create(bdev, dev->agp->bridge,
- size, page_flags, dummy_read_page);
-#endif
- case NOUVEAU_GART_PDMA:
- case NOUVEAU_GART_HW:
- return nouveau_sgdma_create_ttm(bdev, size, page_flags,
- dummy_read_page);
- default:
- NV_ERROR(dev, "Unknown GART type %d\n",
- dev_priv->gart_info.type);
- break;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct drm_device *dev = drm->dev;
+
+ if (drm->agp.stat == ENABLED) {
+ return ttm_agp_tt_create(bdev, dev->agp->bridge, size,
+ page_flags, dummy_read);
}
+#endif
- return NULL;
+ return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read);
}
static int
@@ -393,8 +491,7 @@ static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
switch (type) {
case TTM_PL_SYSTEM:
@@ -403,7 +500,7 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_CACHED;
break;
case TTM_PL_VRAM:
- if (dev_priv->card_type >= NV_50) {
+ if (nv_device(drm->device)->card_type >= NV_50) {
man->func = &nouveau_vram_manager;
man->io_reserve_fastpath = false;
man->use_io_reserve_lru = true;
@@ -417,32 +514,28 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
man->default_caching = TTM_PL_FLAG_WC;
break;
case TTM_PL_TT:
- if (dev_priv->card_type >= NV_50)
+ if (nv_device(drm->device)->card_type >= NV_50)
man->func = &nouveau_gart_manager;
else
+ if (drm->agp.stat != ENABLED)
+ man->func = &nv04_gart_manager;
+ else
man->func = &ttm_bo_manager_func;
- switch (dev_priv->gart_info.type) {
- case NOUVEAU_GART_AGP:
+
+ if (drm->agp.stat == ENABLED) {
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
man->available_caching = TTM_PL_FLAG_UNCACHED |
TTM_PL_FLAG_WC;
man->default_caching = TTM_PL_FLAG_WC;
- break;
- case NOUVEAU_GART_PDMA:
- case NOUVEAU_GART_HW:
+ } else {
man->flags = TTM_MEMTYPE_FLAG_MAPPABLE |
TTM_MEMTYPE_FLAG_CMA;
man->available_caching = TTM_PL_MASK_CACHING;
man->default_caching = TTM_PL_FLAG_CACHED;
- break;
- default:
- NV_ERROR(dev, "Unknown GART type: %d\n",
- dev_priv->gart_info.type);
- return -EINVAL;
}
+
break;
default:
- NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type);
return -EINVAL;
}
return 0;
@@ -467,26 +560,15 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
}
-/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access
- * TTM_PL_{VRAM,TT} directly.
- */
-
static int
-nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan,
- struct nouveau_bo *nvbo, bool evict,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+nve0_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- struct nouveau_fence *fence = NULL;
- int ret;
-
- ret = nouveau_fence_new(chan, &fence);
- if (ret)
- return ret;
-
- ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict,
- no_wait_reserve, no_wait_gpu, new_mem);
- nouveau_fence_unref(&fence);
+ int ret = RING_SPACE(chan, 2);
+ if (ret == 0) {
+ BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle & 0x0000ffff);
+ FIRE_RING (chan);
+ }
return ret;
}
@@ -676,20 +758,14 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nv50_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
- &chan->m2mf_ntfy);
+ int ret = RING_SPACE(chan, 6);
if (ret == 0) {
- ret = RING_SPACE(chan, 6);
- if (ret == 0) {
- BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
- BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
- OUT_RING (chan, NvNotify0);
- OUT_RING (chan, NvDmaFB);
- OUT_RING (chan, NvDmaFB);
- } else {
- nouveau_ramht_remove(chan, NvNotify0);
- }
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 3);
+ OUT_RING (chan, NvNotify0);
+ OUT_RING (chan, NvDmaFB);
+ OUT_RING (chan, NvDmaFB);
}
return ret;
@@ -700,25 +776,25 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem)
{
struct nouveau_mem *node = old_mem->mm_node;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 length = (new_mem->num_pages << PAGE_SHIFT);
u64 src_offset = node->vma[0].offset;
u64 dst_offset = node->vma[1].offset;
+ int src_tiled = !!node->memtype;
+ int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype;
int ret;
while (length) {
u32 amount, stride, height;
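+		/* 18 dwords for the copy itself, plus 6 more per tiled surface */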
+ ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled));
+ if (ret)
+ return ret;
+
amount = min(length, (u64)(4 * 1024 * 1024));
stride = 16 * 4;
height = amount / stride;
- if (new_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
-
+ if (src_tiled) {
BEGIN_NV04(chan, NvSubCopy, 0x0200, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
@@ -728,19 +804,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, 0);
OUT_RING (chan, 0);
} else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x0200, 1);
OUT_RING (chan, 1);
}
- if (old_mem->mem_type == TTM_PL_VRAM &&
- nouveau_bo_tile_layout(nvbo)) {
- ret = RING_SPACE(chan, 8);
- if (ret)
- return ret;
-
+ if (dst_tiled) {
BEGIN_NV04(chan, NvSubCopy, 0x021c, 7);
OUT_RING (chan, 0);
OUT_RING (chan, 0);
@@ -750,18 +817,10 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
OUT_RING (chan, 0);
OUT_RING (chan, 0);
} else {
- ret = RING_SPACE(chan, 2);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x021c, 1);
OUT_RING (chan, 1);
}
- ret = RING_SPACE(chan, 14);
- if (ret)
- return ret;
-
BEGIN_NV04(chan, NvSubCopy, 0x0238, 2);
OUT_RING (chan, upper_32_bits(src_offset));
OUT_RING (chan, upper_32_bits(dst_offset));
@@ -788,16 +847,12 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
static int
nv04_bo_move_init(struct nouveau_channel *chan, u32 handle)
{
- int ret = nouveau_notifier_alloc(chan, NvNotify0, 32, 0xfe0, 0x1000,
- &chan->m2mf_ntfy);
+ int ret = RING_SPACE(chan, 4);
if (ret == 0) {
- ret = RING_SPACE(chan, 4);
- if (ret == 0) {
- BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
- OUT_RING (chan, handle);
- BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
- OUT_RING (chan, NvNotify0);
- }
+ BEGIN_NV04(chan, NvSubCopy, 0x0000, 1);
+ OUT_RING (chan, handle);
+ BEGIN_NV04(chan, NvSubCopy, 0x0180, 1);
+ OUT_RING (chan, NvNotify0);
}
return ret;
@@ -808,8 +863,8 @@ nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo,
struct nouveau_channel *chan, struct ttm_mem_reg *mem)
{
if (mem->mem_type == TTM_PL_TT)
- return chan->gart_handle;
- return chan->vram_handle;
+ return NvDmaTT;
+ return NvDmaFB;
}
static int
@@ -859,70 +914,72 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo,
}
static int
-nouveau_vma_getmap(struct nouveau_channel *chan, struct nouveau_bo *nvbo,
- struct ttm_mem_reg *mem, struct nouveau_vma *vma)
+nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo,
+ struct ttm_mem_reg *mem)
{
- struct nouveau_mem *node = mem->mm_node;
+ struct nouveau_mem *old_node = bo->mem.mm_node;
+ struct nouveau_mem *new_node = mem->mm_node;
+ u64 size = (u64)mem->num_pages << PAGE_SHIFT;
int ret;
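+	/* map both the old and new backing store into the client VM so the
+	 * copy engine can see both ends of the transfer
+	 */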
- ret = nouveau_vm_get(chan->vm, mem->num_pages << PAGE_SHIFT,
- node->page_shift, NV_MEM_ACCESS_RO, vma);
+ ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift,
+ NV_MEM_ACCESS_RW, &old_node->vma[0]);
if (ret)
return ret;
- if (mem->mem_type == TTM_PL_VRAM)
- nouveau_vm_map(vma, node);
- else
- nouveau_vm_map_sg(vma, 0, mem->num_pages << PAGE_SHIFT, node);
+ ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift,
+ NV_MEM_ACCESS_RW, &old_node->vma[1]);
+ if (ret) {
+ nouveau_vm_put(&old_node->vma[0]);
+ return ret;
+ }
+ nouveau_vm_map(&old_node->vma[0], old_node);
+ nouveau_vm_map(&old_node->vma[1], new_node);
return 0;
}
static int
nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct nouveau_channel *chan = chan = dev_priv->channel;
- struct nouveau_bo *nvbo = nouveau_bo(bo);
- struct ttm_mem_reg *old_mem = &bo->mem;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct nouveau_channel *chan = drm->ttm.chan;
+ struct nouveau_fence *fence;
int ret;
- mutex_lock_nested(&chan->mutex, NOUVEAU_KCHANNEL_MUTEX);
-
/* create temporary vmas for the transfer and attach them to the
* old nouveau_mem node, these will get cleaned up after ttm has
* destroyed the ttm_mem_reg
*/
- if (dev_priv->card_type >= NV_50) {
- struct nouveau_mem *node = old_mem->mm_node;
-
- ret = nouveau_vma_getmap(chan, nvbo, old_mem, &node->vma[0]);
- if (ret)
- goto out;
-
- ret = nouveau_vma_getmap(chan, nvbo, new_mem, &node->vma[1]);
+ if (nv_device(drm->device)->card_type >= NV_50) {
+ ret = nouveau_bo_move_prep(drm, bo, new_mem);
if (ret)
- goto out;
+ return ret;
}
- ret = dev_priv->ttm.move(chan, bo, &bo->mem, new_mem);
+ mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING);
+ ret = nouveau_fence_sync(bo->sync_obj, chan);
if (ret == 0) {
- ret = nouveau_bo_move_accel_cleanup(chan, nvbo, evict,
- no_wait_reserve,
- no_wait_gpu, new_mem);
+ ret = drm->ttm.move(chan, bo, &bo->mem, new_mem);
+ if (ret == 0) {
+ ret = nouveau_fence_new(chan, false, &fence);
+ if (ret == 0) {
+ ret = ttm_bo_move_accel_cleanup(bo, fence,
+ evict,
+ no_wait_gpu,
+ new_mem);
+ nouveau_fence_unref(&fence);
+ }
+ }
}
-
-out:
- mutex_unlock(&chan->mutex);
+ mutex_unlock(&chan->cli->mutex);
return ret;
}
void
-nouveau_bo_move_init(struct nouveau_channel *chan)
+nouveau_bo_move_init(struct nouveau_drm *drm)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
static const struct {
const char *name;
int engine;
@@ -932,7 +989,8 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
struct ttm_mem_reg *, struct ttm_mem_reg *);
int (*init)(struct nouveau_channel *, u32 handle);
} _methods[] = {
- { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
+ { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init },
+ { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init },
{ "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init },
{ "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init },
@@ -947,25 +1005,40 @@ nouveau_bo_move_init(struct nouveau_channel *chan)
int ret;
do {
+ struct nouveau_object *object;
+ struct nouveau_channel *chan;
u32 handle = (mthd->engine << 16) | mthd->oclass;
- ret = nouveau_gpuobj_gr_new(chan, handle, mthd->oclass);
+
+ if (mthd->engine)
+ chan = drm->cechan;
+ else
+ chan = drm->channel;
+ if (chan == NULL)
+ continue;
+
+ ret = nouveau_object_new(nv_object(drm), chan->handle, handle,
+ mthd->oclass, NULL, 0, &object);
if (ret == 0) {
ret = mthd->init(chan, handle);
- if (ret == 0) {
- dev_priv->ttm.move = mthd->exec;
- name = mthd->name;
- break;
+ if (ret) {
+ nouveau_object_del(nv_object(drm),
+ chan->handle, handle);
+ continue;
}
+
+ drm->ttm.move = mthd->exec;
+ drm->ttm.chan = chan;
+ name = mthd->name;
+ break;
}
} while ((++mthd)->exec);
- NV_INFO(chan->dev, "MM: using %s for buffer copies\n", name);
+ NV_INFO(drm, "MM: using %s for buffer copies\n", name);
}
static int
nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -978,7 +1051,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
@@ -986,11 +1059,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr,
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem);
out:
ttm_bo_mem_put(bo, &tmp_mem);
return ret;
@@ -998,8 +1071,7 @@ out:
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
struct ttm_placement placement;
@@ -1012,15 +1084,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
tmp_mem = *new_mem;
tmp_mem.mm_node = NULL;
- ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
+ ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu);
if (ret)
return ret;
- ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
+ ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
if (ret)
goto out;
- ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
+ ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem);
if (ret)
goto out;
@@ -1040,19 +1112,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
return;
list_for_each_entry(vma, &nvbo->vma_list, head) {
- if (new_mem && new_mem->mem_type == TTM_PL_VRAM) {
+ if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM &&
+ (new_mem->mem_type == TTM_PL_VRAM ||
+ nvbo->page_shift != vma->vm->vmm->lpg_shift)) {
nouveau_vm_map(vma, new_mem->mm_node);
- } else
- if (new_mem && new_mem->mem_type == TTM_PL_TT &&
- nvbo->page_shift == vma->vm->spg_shift) {
- if (((struct nouveau_mem *)new_mem->mm_node)->sg)
- nouveau_vm_map_sg_table(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
- else
- nouveau_vm_map_sg(vma, 0, new_mem->
- num_pages << PAGE_SHIFT,
- new_mem->mm_node);
} else {
nouveau_vm_unmap(vma);
}
@@ -1061,10 +1124,10 @@ nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem)
static int
nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
- struct nouveau_tile_reg **new_tile)
+ struct nouveau_drm_tile **new_tile)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = drm->dev;
struct nouveau_bo *nvbo = nouveau_bo(bo);
u64 offset = new_mem->start << PAGE_SHIFT;
@@ -1072,8 +1135,8 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
if (new_mem->mem_type != TTM_PL_VRAM)
return 0;
- if (dev_priv->card_type >= NV_10) {
- *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size,
+ if (nv_device(drm->device)->card_type >= NV_10) {
+ *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size,
nvbo->tile_mode,
nvbo->tile_flags);
}
@@ -1083,28 +1146,27 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem,
static void
nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo,
- struct nouveau_tile_reg *new_tile,
- struct nouveau_tile_reg **old_tile)
+ struct nouveau_drm_tile *new_tile,
+ struct nouveau_drm_tile **old_tile)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
+ struct drm_device *dev = drm->dev;
- nv10_mem_put_tile_region(dev, *old_tile, bo->sync_obj);
+ nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj);
*old_tile = new_tile;
}
static int
nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
- bool no_wait_reserve, bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+ bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
struct ttm_mem_reg *old_mem = &bo->mem;
- struct nouveau_tile_reg *new_tile = NULL;
+ struct nouveau_drm_tile *new_tile = NULL;
int ret = 0;
- if (dev_priv->card_type < NV_50) {
+ if (nv_device(drm->device)->card_type < NV_50) {
ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile);
if (ret)
return ret;
@@ -1118,28 +1180,30 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr,
goto out;
}
- /* CPU copy if we have no accelerated method available */
- if (!dev_priv->ttm.move) {
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
- goto out;
- }
-
/* Hardware assisted copy. */
- if (new_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
- else if (old_mem->mem_type == TTM_PL_SYSTEM)
- ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
- else
- ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem);
-
- if (!ret)
- goto out;
+ if (drm->ttm.move) {
+ if (new_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flipd(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ else if (old_mem->mem_type == TTM_PL_SYSTEM)
+ ret = nouveau_bo_move_flips(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ else
+ ret = nouveau_bo_move_m2mf(bo, evict, intr,
+ no_wait_gpu, new_mem);
+ if (!ret)
+ goto out;
+ }
/* Fallback to software copy. */
- ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem);
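+	/* wait for in-flight GPU work to complete before the CPU touches
+	 * the buffer
+	 */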
+ spin_lock(&bo->bdev->fence_lock);
+ ret = ttm_bo_wait(bo, true, intr, no_wait_gpu);
+ spin_unlock(&bo->bdev->fence_lock);
+ if (ret == 0)
+ ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
out:
- if (dev_priv->card_type < NV_50) {
+ if (nv_device(drm->device)->card_type < NV_50) {
if (ret)
nouveau_bo_vm_cleanup(bo, NULL, &new_tile);
else
@@ -1152,15 +1216,18 @@ out:
static int
nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
- return 0;
+ struct nouveau_bo *nvbo = nouveau_bo(bo);
+
+ return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp);
}
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
- struct drm_device *dev = dev_priv->dev;
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_mem *node = mem->mm_node;
+ struct drm_device *dev = drm->dev;
int ret;
mem->bus.addr = NULL;
@@ -1176,48 +1243,30 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
return 0;
case TTM_PL_TT:
#if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ if (drm->agp.stat == ENABLED) {
mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = dev_priv->gart_info.aper_base;
- mem->bus.is_iomem = true;
+ mem->bus.base = drm->agp.base;
+ mem->bus.is_iomem = !dev->agp->cant_use_aperture;
}
#endif
- break;
- case TTM_PL_VRAM:
- {
- struct nouveau_mem *node = mem->mm_node;
- u8 page_shift;
-
- if (!dev_priv->bar1_vm) {
- mem->bus.offset = mem->start << PAGE_SHIFT;
- mem->bus.base = pci_resource_start(dev->pdev, 1);
- mem->bus.is_iomem = true;
+ if (nv_device(drm->device)->card_type < NV_50 || !node->memtype)
+ /* untiled */
break;
- }
-
- if (dev_priv->card_type >= NV_C0)
- page_shift = node->page_shift;
- else
- page_shift = 12;
+ /* fallthrough, tiled memory */
+ case TTM_PL_VRAM:
+ mem->bus.offset = mem->start << PAGE_SHIFT;
+ mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1);
+ mem->bus.is_iomem = true;
+ if (nv_device(drm->device)->card_type >= NV_50) {
+ struct nouveau_bar *bar = nouveau_bar(drm->device);
- ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
- page_shift, NV_MEM_ACCESS_RW,
- &node->bar_vma);
- if (ret)
- return ret;
+ ret = bar->umap(bar, node, NV_MEM_ACCESS_RW,
+ &node->bar_vma);
+ if (ret)
+ return ret;
- nouveau_vm_map(&node->bar_vma, node);
- if (ret) {
- nouveau_vm_put(&node->bar_vma);
- return ret;
+ mem->bus.offset = node->bar_vma.offset;
}
-
- mem->bus.offset = node->bar_vma.offset;
- if (dev_priv->card_type == NV_50) /*XXX*/
- mem->bus.offset -= 0x0020000000ULL;
- mem->bus.base = pci_resource_start(dev->pdev, 1);
- mem->bus.is_iomem = true;
- }
break;
default:
return -EINVAL;
@@ -1228,50 +1277,61 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bdev);
+ struct nouveau_bar *bar = nouveau_bar(drm->device);
struct nouveau_mem *node = mem->mm_node;
- if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
- return;
-
if (!node->bar_vma.node)
return;
- nouveau_vm_unmap(&node->bar_vma);
- nouveau_vm_put(&node->bar_vma);
+ bar->unmap(bar, &node->bar_vma);
}
static int
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo)
{
- struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
+ struct nouveau_drm *drm = nouveau_bdev(bo->bdev);
struct nouveau_bo *nvbo = nouveau_bo(bo);
+ struct nouveau_device *device = nv_device(drm->device);
+ u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT;
+ int ret;
/* as long as the bo isn't in vram, and isn't tiled, we've got
* nothing to do here.
*/
if (bo->mem.mem_type != TTM_PL_VRAM) {
- if (dev_priv->card_type < NV_50 ||
+ if (nv_device(drm->device)->card_type < NV_50 ||
!nouveau_bo_tile_layout(nvbo))
return 0;
+
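+		/* tiled buffers in system memory can't be mapped directly,
+		 * migrate them to GART first
+		 */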
+ if (bo->mem.mem_type == TTM_PL_SYSTEM) {
+ nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0);
+
+ ret = nouveau_bo_validate(nvbo, false, false);
+ if (ret)
+ return ret;
+ }
+ return 0;
}
/* make sure bo is in mappable vram */
- if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages)
+ if (nv_device(drm->device)->card_type >= NV_50 ||
+ bo->mem.start + bo->mem.num_pages < mappable)
return 0;
nvbo->placement.fpfn = 0;
- nvbo->placement.lpfn = dev_priv->fb_mappable_pages;
+ nvbo->placement.lpfn = mappable;
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0);
- return nouveau_bo_validate(nvbo, false, true, false);
+ return nouveau_bo_validate(nvbo, false, false);
}
static int
nouveau_ttm_tt_populate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
- struct drm_nouveau_private *dev_priv;
+ struct nouveau_drm *drm;
+ struct nouveau_device *device;
struct drm_device *dev;
unsigned i;
int r;
@@ -1288,11 +1348,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
return 0;
}
- dev_priv = nouveau_bdev(ttm->bdev);
- dev = dev_priv->dev;
+ drm = nouveau_bdev(ttm->bdev);
+ device = nv_device(drm->device);
+ dev = drm->dev;
#if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ if (drm->agp.stat == ENABLED) {
return ttm_agp_tt_populate(ttm);
}
#endif
@@ -1309,13 +1370,12 @@ nouveau_ttm_tt_populate(struct ttm_tt *ttm)
}
for (i = 0; i < ttm->num_pages; i++) {
- ttm_dma->dma_address[i] = pci_map_page(dev->pdev, ttm->pages[i],
- 0, PAGE_SIZE,
- PCI_DMA_BIDIRECTIONAL);
- if (pci_dma_mapping_error(dev->pdev, ttm_dma->dma_address[i])) {
+ ttm_dma->dma_address[i] = nv_device_map_page(device,
+ ttm->pages[i]);
+ if (!ttm_dma->dma_address[i]) {
while (--i) {
- pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ nv_device_unmap_page(device,
+ ttm_dma->dma_address[i]);
ttm_dma->dma_address[i] = 0;
}
ttm_pool_unpopulate(ttm);
@@ -1329,7 +1389,8 @@ static void
nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
struct ttm_dma_tt *ttm_dma = (void *)ttm;
- struct drm_nouveau_private *dev_priv;
+ struct nouveau_drm *drm;
+ struct nouveau_device *device;
struct drm_device *dev;
unsigned i;
bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
@@ -1337,11 +1398,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
if (slave)
return;
- dev_priv = nouveau_bdev(ttm->bdev);
- dev = dev_priv->dev;
+ drm = nouveau_bdev(ttm->bdev);
+ device = nv_device(drm->device);
+ dev = drm->dev;
#if __OS_HAS_AGP
- if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
+ if (drm->agp.stat == ENABLED) {
ttm_agp_tt_unpopulate(ttm);
return;
}
@@ -1356,8 +1418,7 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
for (i = 0; i < ttm->num_pages; i++) {
if (ttm_dma->dma_address[i]) {
- pci_unmap_page(dev->pdev, ttm_dma->dma_address[i],
- PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+ nv_device_unmap_page(device, ttm_dma->dma_address[i]);
}
}
@@ -1367,14 +1428,12 @@ nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm)
void
nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence)
{
+ struct nouveau_fence *new_fence = nouveau_fence_ref(fence);
struct nouveau_fence *old_fence = NULL;
- if (likely(fence))
- nouveau_fence_ref(fence);
-
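+	/* grab the new reference before publishing it under the fence lock */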
spin_lock(&nvbo->bo.bdev->fence_lock);
old_fence = nvbo->bo.sync_obj;
- nvbo->bo.sync_obj = fence;
+ nvbo->bo.sync_obj = new_fence;
spin_unlock(&nvbo->bo.bdev->fence_lock);
nouveau_fence_unref(&old_fence);
@@ -1393,19 +1452,19 @@ nouveau_bo_fence_ref(void *sync_obj)
}
static bool
-nouveau_bo_fence_signalled(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_signalled(void *sync_obj)
{
return nouveau_fence_done(sync_obj);
}
static int
-nouveau_bo_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
{
return nouveau_fence_wait(sync_obj, lazy, intr);
}
static int
-nouveau_bo_fence_flush(void *sync_obj, void *sync_arg)
+nouveau_bo_fence_flush(void *sync_obj)
{
return 0;
}
@@ -1447,7 +1506,6 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
struct nouveau_vma *vma)
{
const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
- struct nouveau_mem *node = nvbo->bo.mem.mm_node;
int ret;
ret = nouveau_vm_get(vm, size, nvbo->page_shift,
@@ -1455,14 +1513,10 @@ nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
if (ret)
return ret;
- if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
+	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+ (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+ nvbo->page_shift != vma->vm->vmm->lpg_shift))
nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
- else if (nvbo->bo.mem.mem_type == TTM_PL_TT) {
- if (node->sg)
- nouveau_vm_map_sg_table(vma, 0, size, node);
- else
- nouveau_vm_map_sg(vma, 0, size, node);
- }
list_add_tail(&vma->head, &nvbo->vma_list);
vma->refcount = 1;
@@ -1473,13 +1527,8 @@ void
nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
if (vma->node) {
- if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM) {
- spin_lock(&nvbo->bo.bdev->fence_lock);
- ttm_bo_wait(&nvbo->bo, false, false, false);
- spin_unlock(&nvbo->bo.bdev->fence_lock);
+ if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
nouveau_vm_unmap(vma);
- }
-
nouveau_vm_put(vma);
list_del(&vma->head);
}