Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_bo.c')
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_bo.c | 1250
1 file changed, 912 insertions(+), 338 deletions(-)
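
This patch ports nouveau_bo.c from the old drm_nouveau_private layer to the new nouveau_drm core: the NV10-NV40 tiling-region helpers move into this file, buffer moves gain per-generation copy-engine backends selected at driver load, and TTM page population/DMA mapping is handled by the driver itself. One externally visible change is the nouveau_bo_new() signature; a minimal caller-side sketch (hypothetical helper, assuming only the prototype shown in this diff) looks like:

/* Hypothetical caller illustrating the new nouveau_bo_new() signature:
 * the channel, no_vm and mappable parameters are gone, and an optional
 * sg_table is accepted for prime-imported objects.
 */
static int
example_alloc_vram_bo(struct drm_device *dev, struct nouveau_bo **pnvbo)
{
	return nouveau_bo_new(dev, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
			      0 /* tile_mode */, 0 /* tile_flags */,
			      NULL /* sg */, pnvbo);
}
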
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index c41e1c200ef..b6dc85c614b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -27,139 +27,218 @@ * Jeremy Kolb <jkolb@brandeis.edu> */ -#include "drmP.h" +#include <core/engine.h> +#include <linux/swiotlb.h> + +#include <subdev/fb.h> +#include <subdev/vm.h> +#include <subdev/bar.h> #include "nouveau_drm.h" -#include "nouveau_drv.h" #include "nouveau_dma.h" +#include "nouveau_fence.h" + +#include "nouveau_bo.h" +#include "nouveau_ttm.h" +#include "nouveau_gem.h" -#include <linux/log2.h> -#include <linux/slab.h> +/* + * NV10-NV40 tiling helpers + */ + +static void +nv10_bo_update_tile_region(struct drm_device *dev, struct nouveau_drm_tile *reg, + u32 addr, u32 size, u32 pitch, u32 flags) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + int i = reg - drm->tile.reg; + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_fb_tile *tile = &pfb->tile.region[i]; + struct nouveau_engine *engine; + + nouveau_fence_unref(®->fence); + + if (tile->pitch) + pfb->tile.fini(pfb, i, tile); + + if (pitch) + pfb->tile.init(pfb, i, addr, size, pitch, flags, tile); + + pfb->tile.prog(pfb, i, tile); + + if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_GR))) + engine->tile_prog(engine, i); + if ((engine = nouveau_engine(pfb, NVDEV_ENGINE_MPEG))) + engine->tile_prog(engine, i); +} + +static struct nouveau_drm_tile * +nv10_bo_get_tile_region(struct drm_device *dev, int i) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_drm_tile *tile = &drm->tile.reg[i]; + + spin_lock(&drm->tile.lock); + + if (!tile->used && + (!tile->fence || nouveau_fence_done(tile->fence))) + tile->used = true; + else + tile = NULL; + + spin_unlock(&drm->tile.lock); + return tile; +} + +static void +nv10_bo_put_tile_region(struct drm_device *dev, struct nouveau_drm_tile *tile, + struct nouveau_fence *fence) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + + if (tile) { + spin_lock(&drm->tile.lock); + tile->fence = nouveau_fence_ref(fence); + tile->used = false; + spin_unlock(&drm->tile.lock); + } +} + +static struct nouveau_drm_tile * +nv10_bo_set_tiling(struct drm_device *dev, u32 addr, + u32 size, u32 pitch, u32 flags) +{ + struct nouveau_drm *drm = nouveau_drm(dev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + struct nouveau_drm_tile *tile, *found = NULL; + int i; + + for (i = 0; i < pfb->tile.regions; i++) { + tile = nv10_bo_get_tile_region(dev, i); + + if (pitch && !found) { + found = tile; + continue; + + } else if (tile && pfb->tile.region[i].pitch) { + /* Kill an unused tile region. 
*/ + nv10_bo_update_tile_region(dev, tile, 0, 0, 0, 0); + } + + nv10_bo_put_tile_region(dev, tile, NULL); + } + + if (found) + nv10_bo_update_tile_region(dev, found, addr, size, + pitch, flags); + return found; +} static void nouveau_bo_del_ttm(struct ttm_buffer_object *bo) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct drm_device *dev = dev_priv->dev; + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct drm_device *dev = drm->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); - if (unlikely(nvbo->gem)) + if (unlikely(nvbo->gem.filp)) DRM_ERROR("bo %p still attached to GEM object\n", bo); - - if (nvbo->tile) - nv10_mem_expire_tiling(dev, nvbo->tile, NULL); - + WARN_ON(nvbo->pin_refcnt > 0); + nv10_bo_put_tile_region(dev, nvbo->tile, NULL); kfree(nvbo); } static void -nouveau_bo_fixup_align(struct drm_device *dev, - uint32_t tile_mode, uint32_t tile_flags, +nouveau_bo_fixup_align(struct nouveau_bo *nvbo, u32 flags, int *align, int *size) { - struct drm_nouveau_private *dev_priv = dev->dev_private; - - /* - * Some of the tile_flags have a periodic structure of N*4096 bytes, - * align to to that as well as the page size. Align the size to the - * appropriate boundaries. This does imply that sizes are rounded up - * 3-7 pages, so be aware of this and do not waste memory by allocating - * many small buffers. - */ - if (dev_priv->card_type == NV_50) { - uint32_t block_size = dev_priv->vram_size >> 15; - int i; - - switch (tile_flags) { - case 0x1800: - case 0x2800: - case 0x4800: - case 0x7a00: - if (is_power_of_2(block_size)) { - for (i = 1; i < 10; i++) { - *align = 12 * i * block_size; - if (!(*align % 65536)) - break; - } - } else { - for (i = 1; i < 10; i++) { - *align = 8 * i * block_size; - if (!(*align % 65536)) - break; - } - } - *size = roundup(*size, *align); - break; - default: - break; - } + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); + struct nouveau_device *device = nv_device(drm->device); - } else { - if (tile_mode) { - if (dev_priv->chipset >= 0x40) { + if (device->card_type < NV_50) { + if (nvbo->tile_mode) { + if (device->chipset >= 0x40) { *align = 65536; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); - } else if (dev_priv->chipset >= 0x30) { + } else if (device->chipset >= 0x30) { *align = 32768; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); - } else if (dev_priv->chipset >= 0x20) { + } else if (device->chipset >= 0x20) { *align = 16384; - *size = roundup(*size, 64 * tile_mode); + *size = roundup(*size, 64 * nvbo->tile_mode); - } else if (dev_priv->chipset >= 0x10) { + } else if (device->chipset >= 0x10) { *align = 16384; - *size = roundup(*size, 32 * tile_mode); + *size = roundup(*size, 32 * nvbo->tile_mode); } } + } else { + *size = roundup(*size, (1 << nvbo->page_shift)); + *align = max((1 << nvbo->page_shift), *align); } - /* ALIGN works only on powers of two. 
*/ *size = roundup(*size, PAGE_SIZE); - - if (dev_priv->card_type == NV_50) { - *size = roundup(*size, 65536); - *align = max(65536, *align); - } } int -nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan, - int size, int align, uint32_t flags, uint32_t tile_mode, - uint32_t tile_flags, bool no_vm, bool mappable, +nouveau_bo_new(struct drm_device *dev, int size, int align, + uint32_t flags, uint32_t tile_mode, uint32_t tile_flags, + struct sg_table *sg, struct nouveau_bo **pnvbo) { - struct drm_nouveau_private *dev_priv = dev->dev_private; + struct nouveau_drm *drm = nouveau_drm(dev); struct nouveau_bo *nvbo; - int ret = 0; + size_t acc_size; + int ret; + int type = ttm_bo_type_device; + int lpg_shift = 12; + int max_size; + + if (drm->client.base.vm) + lpg_shift = drm->client.base.vm->vmm->lpg_shift; + max_size = INT_MAX & ~((1 << lpg_shift) - 1); + + if (size <= 0 || size > max_size) { + nv_warn(drm, "skipped size %x\n", (u32)size); + return -EINVAL; + } + + if (sg) + type = ttm_bo_type_sg; nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL); if (!nvbo) return -ENOMEM; INIT_LIST_HEAD(&nvbo->head); INIT_LIST_HEAD(&nvbo->entry); - nvbo->mappable = mappable; - nvbo->no_vm = no_vm; + INIT_LIST_HEAD(&nvbo->vma_list); nvbo->tile_mode = tile_mode; nvbo->tile_flags = tile_flags; - nvbo->bo.bdev = &dev_priv->ttm.bdev; + nvbo->bo.bdev = &drm->ttm.bdev; - nouveau_bo_fixup_align(dev, tile_mode, nouveau_bo_tile_layout(nvbo), - &align, &size); - align >>= PAGE_SHIFT; + nvbo->page_shift = 12; + if (drm->client.base.vm) { + if (!(flags & TTM_PL_FLAG_TT) && size > 256 * 1024) + nvbo->page_shift = drm->client.base.vm->vmm->lpg_shift; + } + nouveau_bo_fixup_align(nvbo, flags, &align, &size); + nvbo->bo.mem.num_pages = size >> PAGE_SHIFT; nouveau_bo_placement_set(nvbo, flags, 0); - nvbo->channel = chan; - ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size, - ttm_bo_type_device, &nvbo->placement, align, 0, - false, NULL, size, nouveau_bo_del_ttm); + acc_size = ttm_bo_dma_acc_size(&drm->ttm.bdev, size, + sizeof(struct nouveau_bo)); + + ret = ttm_bo_init(&drm->ttm.bdev, &nvbo->bo, size, + type, &nvbo->placement, + align >> PAGE_SHIFT, false, NULL, acc_size, sg, + nouveau_bo_del_ttm); if (ret) { /* ttm will call nouveau_bo_del_ttm if it fails.. */ return ret; } - nvbo->channel = NULL; *pnvbo = nvbo; return 0; @@ -181,18 +260,20 @@ set_placement_list(uint32_t *pl, unsigned *n, uint32_t type, uint32_t flags) static void set_placement_range(struct nouveau_bo *nvbo, uint32_t type) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); - - if (dev_priv->card_type == NV_10 && - nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM)) { + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); + struct nouveau_fb *pfb = nouveau_fb(drm->device); + u32 vram_pages = pfb->ram->size >> PAGE_SHIFT; + + if ((nv_device(drm->device)->card_type == NV_10 || + nv_device(drm->device)->card_type == NV_11) && + nvbo->tile_mode && (type & TTM_PL_FLAG_VRAM) && + nvbo->bo.mem.num_pages < vram_pages / 4) { /* * Make sure that the color and depth buffers are handled * by independent memory controller units. Up to a 9x * speed up when alpha-blending and depth-test are enabled * at the same time. 
*/ - int vram_pages = dev_priv->vram_size >> PAGE_SHIFT; - if (nvbo->tile_flags & NOUVEAU_GEM_TILE_ZETA) { nvbo->placement.fpfn = vram_pages / 2; nvbo->placement.lpfn = ~0; @@ -224,76 +305,77 @@ nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t type, uint32_t busy) int nouveau_bo_pin(struct nouveau_bo *nvbo, uint32_t memtype) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; int ret; + ret = ttm_bo_reserve(bo, false, false, false, 0); + if (ret) + goto out; + if (nvbo->pin_refcnt && !(memtype & (1 << bo->mem.mem_type))) { - NV_ERROR(nouveau_bdev(bo->bdev)->dev, - "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo, + NV_ERROR(drm, "bo %p pinned elsewhere: 0x%08x vs 0x%08x\n", bo, 1 << bo->mem.mem_type, memtype); - return -EINVAL; + ret = -EINVAL; + goto out; } if (nvbo->pin_refcnt++) - return 0; - - ret = ttm_bo_reserve(bo, false, false, false, 0); - if (ret) goto out; nouveau_bo_placement_set(nvbo, memtype, 0); - ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); + ret = nouveau_bo_validate(nvbo, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: - dev_priv->fb_aper_free -= bo->mem.size; + drm->gem.vram_available -= bo->mem.size; break; case TTM_PL_TT: - dev_priv->gart_info.aper_free -= bo->mem.size; + drm->gem.gart_available -= bo->mem.size; break; default: break; } } - ttm_bo_unreserve(bo); out: - if (unlikely(ret)) - nvbo->pin_refcnt--; + ttm_bo_unreserve(bo); return ret; } int nouveau_bo_unpin(struct nouveau_bo *nvbo) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(nvbo->bo.bdev); + struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; - int ret; - - if (--nvbo->pin_refcnt) - return 0; + int ret, ref; ret = ttm_bo_reserve(bo, false, false, false, 0); if (ret) return ret; + ref = --nvbo->pin_refcnt; + WARN_ON_ONCE(ref < 0); + if (ref) + goto out; + nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); - ret = ttm_bo_validate(bo, &nvbo->placement, false, false, false); + ret = nouveau_bo_validate(nvbo, false, false); if (ret == 0) { switch (bo->mem.mem_type) { case TTM_PL_VRAM: - dev_priv->fb_aper_free += bo->mem.size; + drm->gem.vram_available += bo->mem.size; break; case TTM_PL_TT: - dev_priv->gart_info.aper_free += bo->mem.size; + drm->gem.gart_available += bo->mem.size; break; default: break; } } +out: ttm_bo_unreserve(bo); return ret; } @@ -319,6 +401,20 @@ nouveau_bo_unmap(struct nouveau_bo *nvbo) ttm_bo_kunmap(&nvbo->kmap); } +int +nouveau_bo_validate(struct nouveau_bo *nvbo, bool interruptible, + bool no_wait_gpu) +{ + int ret; + + ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement, + interruptible, no_wait_gpu); + if (ret) + return ret; + + return 0; +} + u16 nouveau_bo_rd16(struct nouveau_bo *nvbo, unsigned index) { @@ -367,26 +463,21 @@ nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val) *mem = val; } -static struct ttm_backend * -nouveau_bo_create_ttm_backend_entry(struct ttm_bo_device *bdev) +static struct ttm_tt * +nouveau_ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size, + uint32_t page_flags, struct page *dummy_read) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); - struct drm_device *dev = dev_priv->dev; - - switch (dev_priv->gart_info.type) { #if __OS_HAS_AGP - case NOUVEAU_GART_AGP: - return ttm_agp_backend_init(bdev, dev->agp->bridge); -#endif - case NOUVEAU_GART_SGDMA: - return nouveau_sgdma_init_ttm(dev); - 
default: - NV_ERROR(dev, "Unknown GART type %d\n", - dev_priv->gart_info.type); - break; + struct nouveau_drm *drm = nouveau_bdev(bdev); + struct drm_device *dev = drm->dev; + + if (drm->agp.stat == ENABLED) { + return ttm_agp_tt_create(bdev, dev->agp->bridge, size, + page_flags, dummy_read); } +#endif - return NULL; + return nouveau_sgdma_create_ttm(bdev, size, page_flags, dummy_read); } static int @@ -400,8 +491,7 @@ static int nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, struct ttm_mem_type_manager *man) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); - struct drm_device *dev = dev_priv->dev; + struct nouveau_drm *drm = nouveau_bdev(bdev); switch (type) { case TTM_PL_SYSTEM: @@ -410,40 +500,42 @@ nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type, man->default_caching = TTM_PL_FLAG_CACHED; break; case TTM_PL_VRAM: - man->func = &ttm_bo_manager_func; + if (nv_device(drm->device)->card_type >= NV_50) { + man->func = &nouveau_vram_manager; + man->io_reserve_fastpath = false; + man->use_io_reserve_lru = true; + } else { + man->func = &ttm_bo_manager_func; + } man->flags = TTM_MEMTYPE_FLAG_FIXED | TTM_MEMTYPE_FLAG_MAPPABLE; man->available_caching = TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_WC; man->default_caching = TTM_PL_FLAG_WC; - if (dev_priv->card_type == NV_50) - man->gpu_offset = 0x40000000; - else - man->gpu_offset = 0; break; case TTM_PL_TT: - man->func = &ttm_bo_manager_func; - switch (dev_priv->gart_info.type) { - case NOUVEAU_GART_AGP: + if (nv_device(drm->device)->card_type >= NV_50) + man->func = &nouveau_gart_manager; + else + if (drm->agp.stat != ENABLED) + man->func = &nv04_gart_manager; + else + man->func = &ttm_bo_manager_func; + + if (drm->agp.stat == ENABLED) { man->flags = TTM_MEMTYPE_FLAG_MAPPABLE; - man->available_caching = TTM_PL_FLAG_UNCACHED; - man->default_caching = TTM_PL_FLAG_UNCACHED; - break; - case NOUVEAU_GART_SGDMA: + man->available_caching = TTM_PL_FLAG_UNCACHED | + TTM_PL_FLAG_WC; + man->default_caching = TTM_PL_FLAG_WC; + } else { man->flags = TTM_MEMTYPE_FLAG_MAPPABLE | TTM_MEMTYPE_FLAG_CMA; man->available_caching = TTM_PL_MASK_CACHING; man->default_caching = TTM_PL_FLAG_CACHED; - break; - default: - NV_ERROR(dev, "Unknown GART type: %d\n", - dev_priv->gart_info.type); - return -EINVAL; } - man->gpu_offset = dev_priv->vm_gart_base; + break; default: - NV_ERROR(dev, "Unsupported memory type %u\n", (unsigned)type); return -EINVAL; } return 0; @@ -468,99 +560,242 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl) } -/* GPU-assisted copy using NV_MEMORY_TO_MEMORY_FORMAT, can access - * TTM_PL_{VRAM,TT} directly. 
- */ +static int +nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) +{ + int ret = RING_SPACE(chan, 2); + if (ret == 0) { + BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); + OUT_RING (chan, handle & 0x0000ffff); + FIRE_RING (chan); + } + return ret; +} static int -nouveau_bo_move_accel_cleanup(struct nouveau_channel *chan, - struct nouveau_bo *nvbo, bool evict, - bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) +nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + int ret = RING_SPACE(chan, 10); + if (ret == 0) { + BEGIN_NVC0(chan, NvSubCopy, 0x0400, 8); + OUT_RING (chan, upper_32_bits(node->vma[0].offset)); + OUT_RING (chan, lower_32_bits(node->vma[0].offset)); + OUT_RING (chan, upper_32_bits(node->vma[1].offset)); + OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, new_mem->num_pages); + BEGIN_IMC0(chan, NvSubCopy, 0x0300, 0x0386); + } + return ret; +} + +static int +nvc0_bo_move_init(struct nouveau_channel *chan, u32 handle) { - struct nouveau_fence *fence = NULL; + int ret = RING_SPACE(chan, 2); + if (ret == 0) { + BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); + OUT_RING (chan, handle); + } + return ret; +} + +static int +nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; + u32 page_count = new_mem->num_pages; int ret; - ret = nouveau_fence_new(chan, &fence, true); - if (ret) - return ret; + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 8191) ? 8191 : page_count; - if (nvbo->channel) { - ret = nouveau_fence_sync(fence, nvbo->channel); + ret = RING_SPACE(chan, 11); if (ret) - goto out; + return ret; + + BEGIN_NVC0(chan, NvSubCopy, 0x030c, 8); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, upper_32_bits(dst_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, line_count); + BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); + OUT_RING (chan, 0x00000110); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); } - ret = ttm_bo_move_accel_cleanup(&nvbo->bo, fence, NULL, evict, - no_wait_reserve, no_wait_gpu, new_mem); -out: - nouveau_fence_unref((void *)&fence); - return ret; + return 0; } -static inline uint32_t -nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, - struct nouveau_channel *chan, struct ttm_mem_reg *mem) +static int +nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { - struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_mem *node = old_mem->mm_node; + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; + u32 page_count = new_mem->num_pages; + int ret; + + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 2047) ? 
2047 : page_count; + + ret = RING_SPACE(chan, 12); + if (ret) + return ret; - if (nvbo->no_vm) { - if (mem->mem_type == TTM_PL_TT) - return NvDmaGART; - return NvDmaVRAM; + BEGIN_NVC0(chan, NvSubCopy, 0x0238, 2); + OUT_RING (chan, upper_32_bits(dst_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + BEGIN_NVC0(chan, NvSubCopy, 0x030c, 6); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, PAGE_SIZE); /* src_pitch */ + OUT_RING (chan, PAGE_SIZE); /* dst_pitch */ + OUT_RING (chan, PAGE_SIZE); /* line_length */ + OUT_RING (chan, line_count); + BEGIN_NVC0(chan, NvSubCopy, 0x0300, 1); + OUT_RING (chan, 0x00100110); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); } - if (mem->mem_type == TTM_PL_TT) - return chan->gart_handle; - return chan->vram_handle; + return 0; } static int -nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, +nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct nouveau_bo *nvbo = nouveau_bo(bo); - u64 length = (new_mem->num_pages << PAGE_SHIFT); - u64 src_offset, dst_offset; + struct nouveau_mem *node = old_mem->mm_node; + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; + u32 page_count = new_mem->num_pages; int ret; - src_offset = old_mem->start << PAGE_SHIFT; - dst_offset = new_mem->start << PAGE_SHIFT; - if (!nvbo->no_vm) { - if (old_mem->mem_type == TTM_PL_VRAM) - src_offset += dev_priv->vm_vram_base; - else - src_offset += dev_priv->vm_gart_base; + page_count = new_mem->num_pages; + while (page_count) { + int line_count = (page_count > 8191) ? 
8191 : page_count; - if (new_mem->mem_type == TTM_PL_VRAM) - dst_offset += dev_priv->vm_vram_base; - else - dst_offset += dev_priv->vm_gart_base; + ret = RING_SPACE(chan, 11); + if (ret) + return ret; + + BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); + OUT_RING (chan, upper_32_bits(src_offset)); + OUT_RING (chan, lower_32_bits(src_offset)); + OUT_RING (chan, upper_32_bits(dst_offset)); + OUT_RING (chan, lower_32_bits(dst_offset)); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, PAGE_SIZE); + OUT_RING (chan, line_count); + BEGIN_NV04(chan, NvSubCopy, 0x0300, 1); + OUT_RING (chan, 0x00000110); + + page_count -= line_count; + src_offset += (PAGE_SIZE * line_count); + dst_offset += (PAGE_SIZE * line_count); } - ret = RING_SPACE(chan, 3); - if (ret) - return ret; + return 0; +} - BEGIN_RING(chan, NvSubM2MF, 0x0184, 2); - OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); - OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); +static int +nv98_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + int ret = RING_SPACE(chan, 7); + if (ret == 0) { + BEGIN_NV04(chan, NvSubCopy, 0x0320, 6); + OUT_RING (chan, upper_32_bits(node->vma[0].offset)); + OUT_RING (chan, lower_32_bits(node->vma[0].offset)); + OUT_RING (chan, upper_32_bits(node->vma[1].offset)); + OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, 0x00000000 /* COPY */); + OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); + } + return ret; +} + +static int +nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + int ret = RING_SPACE(chan, 7); + if (ret == 0) { + BEGIN_NV04(chan, NvSubCopy, 0x0304, 6); + OUT_RING (chan, new_mem->num_pages << PAGE_SHIFT); + OUT_RING (chan, upper_32_bits(node->vma[0].offset)); + OUT_RING (chan, lower_32_bits(node->vma[0].offset)); + OUT_RING (chan, upper_32_bits(node->vma[1].offset)); + OUT_RING (chan, lower_32_bits(node->vma[1].offset)); + OUT_RING (chan, 0x00000000 /* MODE_COPY, QUERY_NONE */); + } + return ret; +} + +static int +nv50_bo_move_init(struct nouveau_channel *chan, u32 handle) +{ + int ret = RING_SPACE(chan, 6); + if (ret == 0) { + BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); + OUT_RING (chan, handle); + BEGIN_NV04(chan, NvSubCopy, 0x0180, 3); + OUT_RING (chan, NvNotify0); + OUT_RING (chan, NvDmaFB); + OUT_RING (chan, NvDmaFB); + } + + return ret; +} + +static int +nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, + struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) +{ + struct nouveau_mem *node = old_mem->mm_node; + u64 length = (new_mem->num_pages << PAGE_SHIFT); + u64 src_offset = node->vma[0].offset; + u64 dst_offset = node->vma[1].offset; + int src_tiled = !!node->memtype; + int dst_tiled = !!((struct nouveau_mem *)new_mem->mm_node)->memtype; + int ret; while (length) { u32 amount, stride, height; + ret = RING_SPACE(chan, 18 + 6 * (src_tiled + dst_tiled)); + if (ret) + return ret; + amount = min(length, (u64)(4 * 1024 * 1024)); stride = 16 * 4; height = amount / stride; - if (new_mem->mem_type == TTM_PL_VRAM && - nouveau_bo_tile_layout(nvbo)) { - ret = RING_SPACE(chan, 8); - if (ret) - return ret; - - BEGIN_RING(chan, NvSubM2MF, 0x0200, 7); + if (src_tiled) { + BEGIN_NV04(chan, NvSubCopy, 0x0200, 7); OUT_RING (chan, 0); OUT_RING (chan, 0); 
OUT_RING (chan, stride); @@ -569,20 +804,11 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, OUT_RING (chan, 0); OUT_RING (chan, 0); } else { - ret = RING_SPACE(chan, 2); - if (ret) - return ret; - - BEGIN_RING(chan, NvSubM2MF, 0x0200, 1); + BEGIN_NV04(chan, NvSubCopy, 0x0200, 1); OUT_RING (chan, 1); } - if (old_mem->mem_type == TTM_PL_VRAM && - nouveau_bo_tile_layout(nvbo)) { - ret = RING_SPACE(chan, 8); - if (ret) - return ret; - - BEGIN_RING(chan, NvSubM2MF, 0x021c, 7); + if (dst_tiled) { + BEGIN_NV04(chan, NvSubCopy, 0x021c, 7); OUT_RING (chan, 0); OUT_RING (chan, 0); OUT_RING (chan, stride); @@ -591,22 +817,14 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, OUT_RING (chan, 0); OUT_RING (chan, 0); } else { - ret = RING_SPACE(chan, 2); - if (ret) - return ret; - - BEGIN_RING(chan, NvSubM2MF, 0x021c, 1); + BEGIN_NV04(chan, NvSubCopy, 0x021c, 1); OUT_RING (chan, 1); } - ret = RING_SPACE(chan, 14); - if (ret) - return ret; - - BEGIN_RING(chan, NvSubM2MF, 0x0238, 2); + BEGIN_NV04(chan, NvSubCopy, 0x0238, 2); OUT_RING (chan, upper_32_bits(src_offset)); OUT_RING (chan, upper_32_bits(dst_offset)); - BEGIN_RING(chan, NvSubM2MF, 0x030c, 8); + BEGIN_NV04(chan, NvSubCopy, 0x030c, 8); OUT_RING (chan, lower_32_bits(src_offset)); OUT_RING (chan, lower_32_bits(dst_offset)); OUT_RING (chan, stride); @@ -615,7 +833,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, OUT_RING (chan, height); OUT_RING (chan, 0x00000101); OUT_RING (chan, 0x00000000); - BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); OUT_RING (chan, 0); length -= amount; @@ -627,6 +845,29 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, } static int +nv04_bo_move_init(struct nouveau_channel *chan, u32 handle) +{ + int ret = RING_SPACE(chan, 4); + if (ret == 0) { + BEGIN_NV04(chan, NvSubCopy, 0x0000, 1); + OUT_RING (chan, handle); + BEGIN_NV04(chan, NvSubCopy, 0x0180, 1); + OUT_RING (chan, NvNotify0); + } + + return ret; +} + +static inline uint32_t +nouveau_bo_mem_ctxdma(struct ttm_buffer_object *bo, + struct nouveau_channel *chan, struct ttm_mem_reg *mem) +{ + if (mem->mem_type == TTM_PL_TT) + return NvDmaTT; + return NvDmaFB; +} + +static int nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct ttm_mem_reg *old_mem, struct ttm_mem_reg *new_mem) { @@ -639,7 +880,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, if (ret) return ret; - BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); + BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_DMA_SOURCE, 2); OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, old_mem)); OUT_RING (chan, nouveau_bo_mem_ctxdma(bo, chan, new_mem)); @@ -651,7 +892,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, if (ret) return ret; - BEGIN_RING(chan, NvSubM2MF, + BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_OFFSET_IN, 8); OUT_RING (chan, src_offset); OUT_RING (chan, dst_offset); @@ -661,7 +902,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, OUT_RING (chan, line_count); OUT_RING (chan, 0x00000101); OUT_RING (chan, 0x00000000); - BEGIN_RING(chan, NvSubM2MF, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); + BEGIN_NV04(chan, NvSubCopy, NV_MEMORY_TO_MEMORY_FORMAT_NOP, 1); OUT_RING (chan, 0); page_count -= line_count; @@ -673,33 +914,131 @@ nv04_bo_move_m2mf(struct 
nouveau_channel *chan, struct ttm_buffer_object *bo, } static int -nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, - bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) +nouveau_bo_move_prep(struct nouveau_drm *drm, struct ttm_buffer_object *bo, + struct ttm_mem_reg *mem) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct nouveau_bo *nvbo = nouveau_bo(bo); - struct nouveau_channel *chan; + struct nouveau_mem *old_node = bo->mem.mm_node; + struct nouveau_mem *new_node = mem->mm_node; + u64 size = (u64)mem->num_pages << PAGE_SHIFT; int ret; - chan = nvbo->channel; - if (!chan || nvbo->no_vm) - chan = dev_priv->channel; - - if (dev_priv->card_type < NV_50) - ret = nv04_bo_move_m2mf(chan, bo, &bo->mem, new_mem); - else - ret = nv50_bo_move_m2mf(chan, bo, &bo->mem, new_mem); + ret = nouveau_vm_get(nv_client(drm)->vm, size, old_node->page_shift, + NV_MEM_ACCESS_RW, &old_node->vma[0]); if (ret) return ret; - return nouveau_bo_move_accel_cleanup(chan, nvbo, evict, no_wait_reserve, no_wait_gpu, new_mem); + ret = nouveau_vm_get(nv_client(drm)->vm, size, new_node->page_shift, + NV_MEM_ACCESS_RW, &old_node->vma[1]); + if (ret) { + nouveau_vm_put(&old_node->vma[0]); + return ret; + } + + nouveau_vm_map(&old_node->vma[0], old_node); + nouveau_vm_map(&old_node->vma[1], new_node); + return 0; +} + +static int +nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, + bool no_wait_gpu, struct ttm_mem_reg *new_mem) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_channel *chan = drm->ttm.chan; + struct nouveau_fence *fence; + int ret; + + /* create temporary vmas for the transfer and attach them to the + * old nouveau_mem node, these will get cleaned up after ttm has + * destroyed the ttm_mem_reg + */ + if (nv_device(drm->device)->card_type >= NV_50) { + ret = nouveau_bo_move_prep(drm, bo, new_mem); + if (ret) + return ret; + } + + mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); + ret = nouveau_fence_sync(bo->sync_obj, chan); + if (ret == 0) { + ret = drm->ttm.move(chan, bo, &bo->mem, new_mem); + if (ret == 0) { + ret = nouveau_fence_new(chan, false, &fence); + if (ret == 0) { + ret = ttm_bo_move_accel_cleanup(bo, fence, + evict, + no_wait_gpu, + new_mem); + nouveau_fence_unref(&fence); + } + } + } + mutex_unlock(&chan->cli->mutex); + return ret; +} + +void +nouveau_bo_move_init(struct nouveau_drm *drm) +{ + static const struct { + const char *name; + int engine; + u32 oclass; + int (*exec)(struct nouveau_channel *, + struct ttm_buffer_object *, + struct ttm_mem_reg *, struct ttm_mem_reg *); + int (*init)(struct nouveau_channel *, u32 handle); + } _methods[] = { + { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, + { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, + { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, nvc0_bo_move_init }, + { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, + { "COPY", 0, 0x85b5, nva3_bo_move_copy, nv50_bo_move_init }, + { "CRYPT", 0, 0x74c1, nv84_bo_move_exec, nv50_bo_move_init }, + { "M2MF", 0, 0x9039, nvc0_bo_move_m2mf, nvc0_bo_move_init }, + { "M2MF", 0, 0x5039, nv50_bo_move_m2mf, nv50_bo_move_init }, + { "M2MF", 0, 0x0039, nv04_bo_move_m2mf, nv04_bo_move_init }, + {}, + { "CRYPT", 0, 0x88b4, nv98_bo_move_exec, nv50_bo_move_init }, + }, *mthd = _methods; + const char *name = "CPU"; + int ret; + + do { + struct nouveau_object *object; + struct nouveau_channel *chan; + u32 handle = (mthd->engine << 16) | mthd->oclass; + + if 
(mthd->engine) + chan = drm->cechan; + else + chan = drm->channel; + if (chan == NULL) + continue; + + ret = nouveau_object_new(nv_object(drm), chan->handle, handle, + mthd->oclass, NULL, 0, &object); + if (ret == 0) { + ret = mthd->init(chan, handle); + if (ret) { + nouveau_object_del(nv_object(drm), + chan->handle, handle); + continue; + } + + drm->ttm.move = mthd->exec; + drm->ttm.chan = chan; + name = mthd->name; + break; + } + } while ((++mthd)->exec); + + NV_INFO(drm, "MM: using %s for buffer copies\n", name); } static int nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) + bool no_wait_gpu, struct ttm_mem_reg *new_mem) { u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; struct ttm_placement placement; @@ -712,7 +1051,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); if (ret) return ret; @@ -720,11 +1059,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, bool intr, if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, &tmp_mem); + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, &tmp_mem); if (ret) goto out; - ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, new_mem); out: ttm_bo_mem_put(bo, &tmp_mem); return ret; @@ -732,8 +1071,7 @@ out: static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) + bool no_wait_gpu, struct ttm_mem_reg *new_mem) { u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING; struct ttm_placement placement; @@ -746,15 +1084,15 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr, tmp_mem = *new_mem; tmp_mem.mm_node = NULL; - ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu); + ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_gpu); if (ret) return ret; - ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, &tmp_mem); + ret = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem); if (ret) goto out; - ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); + ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_gpu, new_mem); if (ret) goto out; @@ -763,36 +1101,44 @@ out: return ret; } -static int -nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, - struct nouveau_tile_reg **new_tile) +static void +nouveau_bo_move_ntfy(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct drm_device *dev = dev_priv->dev; struct nouveau_bo *nvbo = nouveau_bo(bo); - uint64_t offset; - int ret; + struct nouveau_vma *vma; - if (nvbo->no_vm || new_mem->mem_type != TTM_PL_VRAM) { - /* Nothing to do. */ - *new_tile = NULL; - return 0; + /* ttm can now (stupidly) pass the driver bos it didn't create... 
*/ + if (bo->destroy != nouveau_bo_del_ttm) + return; + + list_for_each_entry(vma, &nvbo->vma_list, head) { + if (new_mem && new_mem->mem_type != TTM_PL_SYSTEM && + (new_mem->mem_type == TTM_PL_VRAM || + nvbo->page_shift != vma->vm->vmm->lpg_shift)) { + nouveau_vm_map(vma, new_mem->mm_node); + } else { + nouveau_vm_unmap(vma); + } } +} - offset = new_mem->start << PAGE_SHIFT; +static int +nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, + struct nouveau_drm_tile **new_tile) +{ + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct drm_device *dev = drm->dev; + struct nouveau_bo *nvbo = nouveau_bo(bo); + u64 offset = new_mem->start << PAGE_SHIFT; - if (dev_priv->card_type == NV_50) { - ret = nv50_mem_vm_bind_linear(dev, - offset + dev_priv->vm_vram_base, - new_mem->size, - nouveau_bo_tile_layout(nvbo), - offset); - if (ret) - return ret; + *new_tile = NULL; + if (new_mem->mem_type != TTM_PL_VRAM) + return 0; - } else if (dev_priv->card_type >= NV_10) { - *new_tile = nv10_mem_set_tiling(dev, offset, new_mem->size, - nvbo->tile_mode); + if (nv_device(drm->device)->card_type >= NV_10) { + *new_tile = nv10_bo_set_tiling(dev, offset, new_mem->size, + nvbo->tile_mode, + nvbo->tile_flags); } return 0; @@ -800,35 +1146,31 @@ nouveau_bo_vm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem, static void nouveau_bo_vm_cleanup(struct ttm_buffer_object *bo, - struct nouveau_tile_reg *new_tile, - struct nouveau_tile_reg **old_tile) + struct nouveau_drm_tile *new_tile, + struct nouveau_drm_tile **old_tile) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); - struct drm_device *dev = dev_priv->dev; + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct drm_device *dev = drm->dev; - if (dev_priv->card_type >= NV_10 && - dev_priv->card_type < NV_50) { - if (*old_tile) - nv10_mem_expire_tiling(dev, *old_tile, bo->sync_obj); - - *old_tile = new_tile; - } + nv10_bo_put_tile_region(dev, *old_tile, bo->sync_obj); + *old_tile = new_tile; } static int nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, - bool no_wait_reserve, bool no_wait_gpu, - struct ttm_mem_reg *new_mem) + bool no_wait_gpu, struct ttm_mem_reg *new_mem) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); struct ttm_mem_reg *old_mem = &bo->mem; - struct nouveau_tile_reg *new_tile = NULL; + struct nouveau_drm_tile *new_tile = NULL; int ret = 0; - ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); - if (ret) - return ret; + if (nv_device(drm->device)->card_type < NV_50) { + ret = nouveau_bo_vm_bind(bo, new_mem, &new_tile); + if (ret) + return ret; + } /* Fake bo copy. */ if (old_mem->mem_type == TTM_PL_SYSTEM && !bo->ttm) { @@ -838,31 +1180,35 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, bool intr, goto out; } - /* Software copy if the card isn't up and running yet. */ - if (!dev_priv->channel) { - ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); - goto out; - } - /* Hardware assisted copy. 
*/ - if (new_mem->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flipd(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); - else if (old_mem->mem_type == TTM_PL_SYSTEM) - ret = nouveau_bo_move_flips(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); - else - ret = nouveau_bo_move_m2mf(bo, evict, intr, no_wait_reserve, no_wait_gpu, new_mem); - - if (!ret) - goto out; + if (drm->ttm.move) { + if (new_mem->mem_type == TTM_PL_SYSTEM) + ret = nouveau_bo_move_flipd(bo, evict, intr, + no_wait_gpu, new_mem); + else if (old_mem->mem_type == TTM_PL_SYSTEM) + ret = nouveau_bo_move_flips(bo, evict, intr, + no_wait_gpu, new_mem); + else + ret = nouveau_bo_move_m2mf(bo, evict, intr, + no_wait_gpu, new_mem); + if (!ret) + goto out; + } /* Fallback to software copy. */ - ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, new_mem); + spin_lock(&bo->bdev->fence_lock); + ret = ttm_bo_wait(bo, true, intr, no_wait_gpu); + spin_unlock(&bo->bdev->fence_lock); + if (ret == 0) + ret = ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem); out: - if (ret) - nouveau_bo_vm_cleanup(bo, NULL, &new_tile); - else - nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); + if (nv_device(drm->device)->card_type < NV_50) { + if (ret) + nouveau_bo_vm_cleanup(bo, NULL, &new_tile); + else + nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); + } return ret; } @@ -870,15 +1216,19 @@ out: static int nouveau_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp) { - return 0; + struct nouveau_bo *nvbo = nouveau_bo(bo); + + return drm_vma_node_verify_access(&nvbo->gem.vma_node, filp); } static int nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; - struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); - struct drm_device *dev = dev_priv->dev; + struct nouveau_drm *drm = nouveau_bdev(bdev); + struct nouveau_mem *node = mem->mm_node; + struct drm_device *dev = drm->dev; + int ret; mem->bus.addr = NULL; mem->bus.offset = 0; @@ -893,17 +1243,30 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) return 0; case TTM_PL_TT: #if __OS_HAS_AGP - if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { + if (drm->agp.stat == ENABLED) { mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = dev_priv->gart_info.aper_base; - mem->bus.is_iomem = true; + mem->bus.base = drm->agp.base; + mem->bus.is_iomem = !dev->agp->cant_use_aperture; } #endif - break; + if (nv_device(drm->device)->card_type < NV_50 || !node->memtype) + /* untiled */ + break; + /* fallthrough, tiled memory */ case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; - mem->bus.base = pci_resource_start(dev->pdev, 1); + mem->bus.base = nv_device_resource_start(nouveau_dev(dev), 1); mem->bus.is_iomem = true; + if (nv_device(drm->device)->card_type >= NV_50) { + struct nouveau_bar *bar = nouveau_bar(drm->device); + + ret = bar->umap(bar, node, NV_MEM_ACCESS_RW, + &node->bar_vma); + if (ret) + return ret; + + mem->bus.offset = node->bar_vma.offset; + } break; default: return -EINVAL; @@ -914,48 +1277,259 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) static void nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { + struct nouveau_drm *drm = nouveau_bdev(bdev); + struct nouveau_bar *bar = nouveau_bar(drm->device); + struct nouveau_mem *node = mem->mm_node; + + if (!node->bar_vma.node) + return; + + bar->unmap(bar, &node->bar_vma); } static int 
nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) { - struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev); + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); struct nouveau_bo *nvbo = nouveau_bo(bo); + struct nouveau_device *device = nv_device(drm->device); + u32 mappable = nv_device_resource_len(device, 1) >> PAGE_SHIFT; + int ret; /* as long as the bo isn't in vram, and isn't tiled, we've got * nothing to do here. */ if (bo->mem.mem_type != TTM_PL_VRAM) { - if (dev_priv->card_type < NV_50 || + if (nv_device(drm->device)->card_type < NV_50 || !nouveau_bo_tile_layout(nvbo)) return 0; + + if (bo->mem.mem_type == TTM_PL_SYSTEM) { + nouveau_bo_placement_set(nvbo, TTM_PL_TT, 0); + + ret = nouveau_bo_validate(nvbo, false, false); + if (ret) + return ret; + } + return 0; } /* make sure bo is in mappable vram */ - if (bo->mem.start + bo->mem.num_pages < dev_priv->fb_mappable_pages) + if (nv_device(drm->device)->card_type >= NV_50 || + bo->mem.start + bo->mem.num_pages < mappable) return 0; nvbo->placement.fpfn = 0; - nvbo->placement.lpfn = dev_priv->fb_mappable_pages; - nouveau_bo_placement_set(nvbo, TTM_PL_VRAM, 0); - return ttm_bo_validate(bo, &nvbo->placement, false, true, false); + nvbo->placement.lpfn = mappable; + nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_VRAM, 0); + return nouveau_bo_validate(nvbo, false, false); +} + +static int +nouveau_ttm_tt_populate(struct ttm_tt *ttm) +{ + struct ttm_dma_tt *ttm_dma = (void *)ttm; + struct nouveau_drm *drm; + struct nouveau_device *device; + struct drm_device *dev; + unsigned i; + int r; + bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + + if (ttm->state != tt_unpopulated) + return 0; + + if (slave && ttm->sg) { + /* make userspace faulting work */ + drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, + ttm_dma->dma_address, ttm->num_pages); + ttm->state = tt_unbound; + return 0; + } + + drm = nouveau_bdev(ttm->bdev); + device = nv_device(drm->device); + dev = drm->dev; + +#if __OS_HAS_AGP + if (drm->agp.stat == ENABLED) { + return ttm_agp_tt_populate(ttm); + } +#endif + +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + return ttm_dma_populate((void *)ttm, dev->dev); + } +#endif + + r = ttm_pool_populate(ttm); + if (r) { + return r; + } + + for (i = 0; i < ttm->num_pages; i++) { + ttm_dma->dma_address[i] = nv_device_map_page(device, + ttm->pages[i]); + if (!ttm_dma->dma_address[i]) { + while (--i) { + nv_device_unmap_page(device, + ttm_dma->dma_address[i]); + ttm_dma->dma_address[i] = 0; + } + ttm_pool_unpopulate(ttm); + return -EFAULT; + } + } + return 0; +} + +static void +nouveau_ttm_tt_unpopulate(struct ttm_tt *ttm) +{ + struct ttm_dma_tt *ttm_dma = (void *)ttm; + struct nouveau_drm *drm; + struct nouveau_device *device; + struct drm_device *dev; + unsigned i; + bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); + + if (slave) + return; + + drm = nouveau_bdev(ttm->bdev); + device = nv_device(drm->device); + dev = drm->dev; + +#if __OS_HAS_AGP + if (drm->agp.stat == ENABLED) { + ttm_agp_tt_unpopulate(ttm); + return; + } +#endif + +#ifdef CONFIG_SWIOTLB + if (swiotlb_nr_tbl()) { + ttm_dma_unpopulate((void *)ttm, dev->dev); + return; + } +#endif + + for (i = 0; i < ttm->num_pages; i++) { + if (ttm_dma->dma_address[i]) { + nv_device_unmap_page(device, ttm_dma->dma_address[i]); + } + } + + ttm_pool_unpopulate(ttm); +} + +void +nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence) +{ + struct nouveau_fence *new_fence = nouveau_fence_ref(fence); + struct nouveau_fence *old_fence = NULL; + + 
spin_lock(&nvbo->bo.bdev->fence_lock);
+	old_fence = nvbo->bo.sync_obj;
+	nvbo->bo.sync_obj = new_fence;
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	nouveau_fence_unref(&old_fence);
+}
+
+static void
+nouveau_bo_fence_unref(void **sync_obj)
+{
+	nouveau_fence_unref((struct nouveau_fence **)sync_obj);
+}
+
+static void *
+nouveau_bo_fence_ref(void *sync_obj)
+{
+	return nouveau_fence_ref(sync_obj);
+}
+
+static bool
+nouveau_bo_fence_signalled(void *sync_obj)
+{
+	return nouveau_fence_done(sync_obj);
+}
+
+static int
+nouveau_bo_fence_wait(void *sync_obj, bool lazy, bool intr)
+{
+	return nouveau_fence_wait(sync_obj, lazy, intr);
+}
+
+static int
+nouveau_bo_fence_flush(void *sync_obj)
+{
+	return 0;
+}

 struct ttm_bo_driver nouveau_bo_driver = {
-	.create_ttm_backend_entry = nouveau_bo_create_ttm_backend_entry,
+	.ttm_tt_create = &nouveau_ttm_tt_create,
+	.ttm_tt_populate = &nouveau_ttm_tt_populate,
+	.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
 	.invalidate_caches = nouveau_bo_invalidate_caches,
 	.init_mem_type = nouveau_bo_init_mem_type,
 	.evict_flags = nouveau_bo_evict_flags,
+	.move_notify = nouveau_bo_move_ntfy,
 	.move = nouveau_bo_move,
 	.verify_access = nouveau_bo_verify_access,
-	.sync_obj_signaled = nouveau_fence_signalled,
-	.sync_obj_wait = nouveau_fence_wait,
-	.sync_obj_flush = nouveau_fence_flush,
-	.sync_obj_unref = nouveau_fence_unref,
-	.sync_obj_ref = nouveau_fence_ref,
+	.sync_obj_signaled = nouveau_bo_fence_signalled,
+	.sync_obj_wait = nouveau_bo_fence_wait,
+	.sync_obj_flush = nouveau_bo_fence_flush,
+	.sync_obj_unref = nouveau_bo_fence_unref,
+	.sync_obj_ref = nouveau_bo_fence_ref,
 	.fault_reserve_notify = &nouveau_ttm_fault_reserve_notify,
 	.io_mem_reserve = &nouveau_ttm_io_mem_reserve,
 	.io_mem_free = &nouveau_ttm_io_mem_free,
 };

+struct nouveau_vma *
+nouveau_bo_vma_find(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
+{
+	struct nouveau_vma *vma;
+	list_for_each_entry(vma, &nvbo->vma_list, head) {
+		if (vma->vm == vm)
+			return vma;
+	}
+
+	return NULL;
+}
+
+int
+nouveau_bo_vma_add(struct nouveau_bo *nvbo, struct nouveau_vm *vm,
+		   struct nouveau_vma *vma)
+{
+	const u32 size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+	int ret;
+
+	ret = nouveau_vm_get(vm, size, nvbo->page_shift,
+			     NV_MEM_ACCESS_RW, vma);
+	if (ret)
+		return ret;
+
+	if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM &&
+	    (nvbo->bo.mem.mem_type == TTM_PL_VRAM ||
+	     nvbo->page_shift != vma->vm->vmm->lpg_shift))
+		nouveau_vm_map(vma, nvbo->bo.mem.mm_node);
+
+	list_add_tail(&vma->head, &nvbo->vma_list);
+	vma->refcount = 1;
+	return 0;
+}
+
+void
+nouveau_bo_vma_del(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+	if (vma->node) {
+		if (nvbo->bo.mem.mem_type != TTM_PL_SYSTEM)
+			nouveau_vm_unmap(vma);
+		nouveau_vm_put(vma);
+		list_del(&vma->head);
+	}
+}

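Editor's notes on the patterns above follow; each sketch is illustrative, written against stated assumptions, and not part of the commit.

nv10_bo_get_tile_region()/nv10_bo_put_tile_region() arbitrate a fixed pool of hardware tile regions: a region may be claimed only if it is unused and its previous user's fence has signalled, and releasing it records the caller's fence so the region is not reprogrammed while the GPU may still reference it. The same pattern in a generic standalone form (the struct names and fence helpers are stand-ins for the nouveau_fence_* calls):

/* Fence-gated slot reuse, as in nv10_bo_get/put_tile_region(). */
struct slot {
	bool used;
	struct fence *fence;		/* last user's fence, NULL if idle */
};

struct pool {
	spinlock_t lock;
	struct slot slot[8];
};

static struct slot *
slot_get(struct pool *pool, int i)
{
	struct slot *slot = &pool->slot[i];

	spin_lock(&pool->lock);
	if (!slot->used && (!slot->fence || fence_done(slot->fence)))
		slot->used = true;	/* claimed for the new user */
	else
		slot = NULL;		/* busy, or prior GPU work pending */
	spin_unlock(&pool->lock);
	return slot;
}

static void
slot_put(struct pool *pool, struct slot *slot, struct fence *fence)
{
	if (slot) {
		spin_lock(&pool->lock);
		slot->fence = fence_ref(fence);	/* gates the next claim */
		slot->used = false;
		spin_unlock(&pool->lock);
	}
}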
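The pre-NV50 branch of nouveau_bo_fixup_align() rounds the buffer size up to a multiple of 64 * tile_mode (32 * tile_mode before NV20) because the tiling hardware works at that granularity; since tile_mode need not be a power of two, the divide-and-multiply roundup() is required rather than a bitmask. The same arithmetic as a self-contained userspace check, using the chipset cutoffs from the diff:

#include <stdio.h>

/* Round x up to a multiple of y without assuming y is a power of two,
 * mirroring the kernel's roundup() used by nouveau_bo_fixup_align(). */
static unsigned roundup_any(unsigned x, unsigned y)
{
	return ((x + y - 1) / y) * y;
}

static void fixup_align_pre_nv50(unsigned chipset, unsigned tile_mode,
				 unsigned *align, unsigned *size)
{
	if (!tile_mode)
		return;
	if (chipset >= 0x40) {
		*align = 65536;
		*size = roundup_any(*size, 64 * tile_mode);
	} else if (chipset >= 0x30) {
		*align = 32768;
		*size = roundup_any(*size, 64 * tile_mode);
	} else if (chipset >= 0x20) {
		*align = 16384;
		*size = roundup_any(*size, 64 * tile_mode);
	} else if (chipset >= 0x10) {
		*align = 16384;
		*size = roundup_any(*size, 32 * tile_mode);
	}
}

int main(void)
{
	unsigned align = 0, size = 100000;

	fixup_align_pre_nv50(0x40, 4, &align, &size);
	printf("align=%u size=%u\n", align, size); /* align=65536 size=100096 */
	return 0;
}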
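nouveau_bo_move_init() probes a priority-ordered table of copy methods (Kepler COPY/GRCE, Fermi COPY0/COPY1, the NVA3 copy engine, crypto-engine blits, then the M2MF paths down to NV04): the first entry whose channel object can be created and initialised supplies drm->ttm.move; if none can, moves fall back to the CPU. Note that the empty {} terminator in _methods[] sits before the 0x88b4 CRYPT entry, leaving that method unreachable, apparently deliberately. The table walk reduced to a self-contained sketch with invented names:

/* First-usable-wins method probing, as in nouveau_bo_move_init(). */
typedef int (*move_fn)(void *chan, void *src, void *dst);

struct move_method {
	const char *name;
	int (*probe)(void *chan);	/* create + init the engine object */
	move_fn exec;			/* the copy implementation */
};

static move_fn
select_move(const struct move_method *mthd, void *chan, const char **name)
{
	for (; mthd->exec; mthd++) {	/* NULL exec terminates the table */
		if (mthd->probe(chan) == 0) {
			*name = mthd->name;	/* first success wins */
			return mthd->exec;
		}
	}
	*name = "CPU";			/* no engine: memcpy fallback */
	return NULL;
}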
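nouveau_ttm_tt_populate() DMA-maps every page after the pool allocation and must unwind the mappings already made if one fails. One detail worth flagging: an unwind written as while (--i) never executes for index 0 (and would wrap around if the very first mapping failed), so the first page's mapping leaks; the conventional form is while (i--). A standalone sketch of the pattern, with map_page()/unmap_page() as hypothetical stand-ins for nv_device_map_page()/nv_device_unmap_page():

/* All-or-nothing DMA mapping with unwind. Note the post-decrement in
 * the unwind loop so index 0 is also released on failure. */
static int map_all(struct page **pages, dma_addr_t *dma, unsigned n)
{
	unsigned i;

	for (i = 0; i < n; i++) {
		dma[i] = map_page(pages[i]);
		if (!dma[i]) {
			while (i--) {	/* unwind, including index 0 */
				unmap_page(dma[i]);
				dma[i] = 0;
			}
			return -EFAULT;
		}
	}
	return 0;
}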
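nouveau_bo_fence() publishes a new sync object by taking the reference before the swap and dropping the old reference only after releasing the spinlock, keeping the critical section down to the pointer exchange. The ordering in isolation (a sketch; fence_ref()/fence_unref() stand in for the nouveau_fence_* helpers):

/* Publish-then-release: swap the pointer under the lock, free outside. */
struct my_bo {
	spinlock_t fence_lock;
	struct fence *sync_obj;
};

static void
bo_set_fence(struct my_bo *bo, struct fence *fence)
{
	struct fence *new_fence = fence_ref(fence);	/* ref before publish */
	struct fence *old_fence;

	spin_lock(&bo->fence_lock);
	old_fence = bo->sync_obj;
	bo->sync_obj = new_fence;
	spin_unlock(&bo->fence_lock);

	fence_unref(&old_fence);	/* teardown happens outside the lock */
}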
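Finally, nouveau_bo_move_ntfy() and nouveau_bo_vma_add() share one mapping predicate: a VMA stays mapped only while the bo has backing store (not TTM_PL_SYSTEM) and either sits in VRAM or is mapped with small pages (page_shift differing from the VM's large-page shift), apparently because large-page mappings are only usable for VRAM. Factored into a helper for clarity (a sketch; the patch open-codes the condition at both sites):

/* Keep-mapped predicate from nouveau_bo_move_ntfy()/nouveau_bo_vma_add().
 * lpg_shift is the VM's large-page shift (vma->vm->vmm->lpg_shift). */
static bool
vma_keep_mapped(u32 mem_type, u32 page_shift, u32 lpg_shift)
{
	if (mem_type == TTM_PL_SYSTEM)
		return false;			/* nothing to map */
	return mem_type == TTM_PL_VRAM ||	/* VRAM: any page size */
	       page_shift != lpg_shift;		/* GART: small pages only */
}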