Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r--	drivers/gpu/drm/ttm/ttm_bo_util.c | 359
1 file changed, 237 insertions(+), 122 deletions(-)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 2ecf7d0c64f..1df856f7856 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -28,32 +28,27 @@
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
  */
 
-#include "ttm/ttm_bo_driver.h"
-#include "ttm/ttm_placement.h"
+#include <drm/ttm/ttm_bo_driver.h>
+#include <drm/ttm/ttm_placement.h>
+#include <drm/drm_vma_manager.h>
 #include <linux/io.h>
 #include <linux/highmem.h>
 #include <linux/wait.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 #include <linux/module.h>
 
 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
 {
-	struct ttm_mem_reg *old_mem = &bo->mem;
-
-	if (old_mem->mm_node) {
-		spin_lock(&bo->glob->lru_lock);
-		drm_mm_put_block(old_mem->mm_node);
-		spin_unlock(&bo->glob->lru_lock);
-	}
-	old_mem->mm_node = NULL;
+	ttm_bo_mem_put(bo, &bo->mem);
 }
 
 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
-		    bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		    bool evict,
+		    bool no_wait_gpu, struct ttm_mem_reg *new_mem)
 {
 	struct ttm_tt *ttm = bo->ttm;
 	struct ttm_mem_reg *old_mem = &bo->mem;
-	uint32_t save_flags = old_mem->placement;
 	int ret;
 
 	if (old_mem->mem_type != TTM_PL_SYSTEM) {
@@ -62,7 +57,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 		ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
 				TTM_PL_MASK_MEM);
 		old_mem->mem_type = TTM_PL_SYSTEM;
-		save_flags = old_mem->placement;
 	}
 
 	ret = ttm_tt_set_placement_caching(ttm, new_mem->placement);
@@ -77,49 +71,166 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible)
+{
+	if (likely(man->io_reserve_fastpath))
+		return 0;
+
+	if (interruptible)
+		return mutex_lock_interruptible(&man->io_reserve_mutex);
+
+	mutex_lock(&man->io_reserve_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(ttm_mem_io_lock);
+
+void ttm_mem_io_unlock(struct ttm_mem_type_manager *man)
+{
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	mutex_unlock(&man->io_reserve_mutex);
+}
+EXPORT_SYMBOL(ttm_mem_io_unlock);
+
+static int ttm_mem_io_evict(struct ttm_mem_type_manager *man)
+{
+	struct ttm_buffer_object *bo;
+
+	if (!man->use_io_reserve_lru || list_empty(&man->io_reserve_lru))
+		return -EAGAIN;
+
+	bo = list_first_entry(&man->io_reserve_lru,
+			      struct ttm_buffer_object,
+			      io_reserve_lru);
+	list_del_init(&bo->io_reserve_lru);
+	ttm_bo_unmap_virtual_locked(bo);
+
+	return 0;
+}
+
+
+int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
+		       struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+	int ret = 0;
+
+	if (!bdev->driver->io_mem_reserve)
+		return 0;
+	if (likely(man->io_reserve_fastpath))
+		return bdev->driver->io_mem_reserve(bdev, mem);
+
+	if (bdev->driver->io_mem_reserve &&
+	    mem->bus.io_reserved_count++ == 0) {
+retry:
+		ret = bdev->driver->io_mem_reserve(bdev, mem);
+		if (ret == -EAGAIN) {
+			ret = ttm_mem_io_evict(man);
+			if (ret == 0)
+				goto retry;
+		}
+	}
+	return ret;
+}
+EXPORT_SYMBOL(ttm_mem_io_reserve);
+
+void ttm_mem_io_free(struct ttm_bo_device *bdev,
+		     struct ttm_mem_reg *mem)
+{
+	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
+
+	if (likely(man->io_reserve_fastpath))
+		return;
+
+	if (bdev->driver->io_mem_reserve &&
+	    --mem->bus.io_reserved_count == 0 &&
+	    bdev->driver->io_mem_free)
+		bdev->driver->io_mem_free(bdev, mem);
+
+}
+EXPORT_SYMBOL(ttm_mem_io_free);
+
+int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+	int ret;
+
+	if (!mem->bus.io_reserved_vm) {
+		struct ttm_mem_type_manager *man =
+			&bo->bdev->man[mem->mem_type];
+
+		ret = ttm_mem_io_reserve(bo->bdev, mem);
+		if (unlikely(ret != 0))
+			return ret;
+		mem->bus.io_reserved_vm = true;
+		if (man->use_io_reserve_lru)
+			list_add_tail(&bo->io_reserve_lru,
+				      &man->io_reserve_lru);
+	}
+	return 0;
+}
+
+void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
+{
+	struct ttm_mem_reg *mem = &bo->mem;
+
+	if (mem->bus.io_reserved_vm) {
+		mem->bus.io_reserved_vm = false;
+		list_del_init(&bo->io_reserve_lru);
+		ttm_mem_io_free(bo->bdev, mem);
+	}
+}
+
+static int ttm_mem_reg_ioremap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			void **virtual)
 {
 	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
 	int ret;
 	void *addr;
 
 	*virtual = NULL;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset, &bus_size);
-	if (ret || bus_size == 0)
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bdev, mem);
+	ttm_mem_io_unlock(man);
+	if (ret || !mem->bus.is_iomem)
 		return ret;
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
-		addr = (void *)(((u8 *) man->io_addr) + bus_offset);
-	else {
+	if (mem->bus.addr) {
+		addr = mem->bus.addr;
+	} else {
 		if (mem->placement & TTM_PL_FLAG_WC)
-			addr = ioremap_wc(bus_base + bus_offset, bus_size);
+			addr = ioremap_wc(mem->bus.base + mem->bus.offset, mem->bus.size);
 		else
-			addr = ioremap_nocache(bus_base + bus_offset, bus_size);
-		if (!addr)
+			addr = ioremap_nocache(mem->bus.base + mem->bus.offset, mem->bus.size);
+		if (!addr) {
+			(void) ttm_mem_io_lock(man, false);
+			ttm_mem_io_free(bdev, mem);
+			ttm_mem_io_unlock(man);
 			return -ENOMEM;
+		}
 	}
 	*virtual = addr;
 	return 0;
 }
 
-void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
+static void ttm_mem_reg_iounmap(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem,
 			 void *virtual)
 {
 	struct ttm_mem_type_manager *man;
 
 	man = &bdev->man[mem->mem_type];
 
-	if (virtual && (man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP))
+	if (virtual && mem->bus.addr == NULL)
 		iounmap(virtual);
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(bdev, mem);
+	ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -139,7 +250,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 				unsigned long page,
 				pgprot_t prot)
 {
-	struct page *d = ttm_tt_get_page(ttm, page);
+	struct page *d = ttm->pages[page];
 	void *dst;
 
 	if (!d)
@@ -148,7 +259,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 
 	src = (void *)((unsigned long)src + (page << PAGE_SHIFT));
 
 #ifdef CONFIG_X86
-	dst = kmap_atomic_prot(d, KM_USER0, prot);
+	dst = kmap_atomic_prot(d, prot);
 #else
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
 		dst = vmap(&d, 1, 0, prot);
@@ -161,7 +272,7 @@ static int ttm_copy_io_ttm_page(struct ttm_tt *ttm, void *src,
 	memcpy_fromio(dst, src, PAGE_SIZE);
 
 #ifdef CONFIG_X86
-	kunmap_atomic(dst, KM_USER0);
+	kunmap_atomic(dst);
 #else
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
 		vunmap(dst);
@@ -176,7 +287,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 				unsigned long page,
 				pgprot_t prot)
 {
-	struct page *s = ttm_tt_get_page(ttm, page);
+	struct page *s = ttm->pages[page];
 	void *src;
 
 	if (!s)
@@ -184,7 +295,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 
 	dst = (void *)((unsigned long)dst + (page << PAGE_SHIFT));
 
 #ifdef CONFIG_X86
-	src = kmap_atomic_prot(s, KM_USER0, prot);
+	src = kmap_atomic_prot(s, prot);
 #else
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
 		src = vmap(&s, 1, 0, prot);
@@ -197,7 +308,7 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 	memcpy_toio(dst, src, PAGE_SIZE);
 
 #ifdef CONFIG_X86
-	kunmap_atomic(src, KM_USER0);
+	kunmap_atomic(src);
 #else
 	if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))
 		vunmap(src);
@@ -209,7 +320,8 @@ static int ttm_copy_ttm_io_page(struct ttm_tt *ttm, void *dst,
 }
 
 int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
-		       bool evict, bool no_wait, struct ttm_mem_reg *new_mem)
+		       bool evict, bool no_wait_gpu,
+		       struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
@@ -219,7 +331,6 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	void *old_iomap;
 	void *new_iomap;
 	int ret;
-	uint32_t save_flags = old_mem->placement;
 	unsigned long i;
 	unsigned long page;
 	unsigned long add = 0;
@@ -232,17 +343,36 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	if (ret)
 		goto out;
 
+	/*
+	 * Single TTM move. NOP.
+	 */
 	if (old_iomap == NULL && new_iomap == NULL)
 		goto out2;
-	if (old_iomap == NULL && ttm == NULL)
+
+	/*
+	 * Don't move nonexistent data. Clear destination instead.
+	 */
+	if (old_iomap == NULL &&
+	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+			     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
+		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
+	}
+
+	/*
+	 * TTM might be null for moves within the same region.
+	 */
+	if (ttm && ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			goto out1;
+	}
 
 	add = 0;
 	dir = 1;
 
 	if ((old_mem->mem_type == new_mem->mem_type) &&
-	    (new_mem->mm_node->start <
-	     old_mem->mm_node->start + old_mem->mm_node->size)) {
+	    (new_mem->start < old_mem->start + old_mem->size)) {
 		dir = -1;
 		add = new_mem->num_pages - 1;
 	}
@@ -266,11 +396,9 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	}
 	mb();
 out2:
-	ttm_bo_free_old_node(bo);
-
+	old_copy = *old_mem;
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
 
 	if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) && (ttm != NULL)) {
 		ttm_tt_unbind(ttm);
@@ -279,9 +407,15 @@ out2:
 	}
 
 out1:
-	ttm_mem_reg_iounmap(bdev, new_mem, new_iomap);
+	ttm_mem_reg_iounmap(bdev, old_mem, new_iomap);
 out:
 	ttm_mem_reg_iounmap(bdev, &old_copy, old_iomap);
+
+	/*
+	 * On error, keep the mm node!
+	 */
+	if (!ret)
+		ttm_bo_mem_put(bo, &old_copy);
 	return ret;
 }
 EXPORT_SYMBOL(ttm_bo_move_memcpy);
@@ -312,8 +446,9 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	struct ttm_buffer_object *fbo;
 	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_bo_driver *driver = bdev->driver;
+	int ret;
 
-	fbo = kzalloc(sizeof(*fbo), GFP_KERNEL);
+	fbo = kmalloc(sizeof(*fbo), GFP_KERNEL);
 	if (!fbo)
 		return -ENOMEM;
 
@@ -324,19 +459,27 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
 	 * TODO: Explicit member copy would probably be better here.
 	 */
 
-	spin_lock_init(&fbo->lock);
-	init_waitqueue_head(&fbo->event_queue);
 	INIT_LIST_HEAD(&fbo->ddestroy);
 	INIT_LIST_HEAD(&fbo->lru);
 	INIT_LIST_HEAD(&fbo->swap);
-	fbo->vm_node = NULL;
+	INIT_LIST_HEAD(&fbo->io_reserve_lru);
+	drm_vma_node_reset(&fbo->vma_node);
 	atomic_set(&fbo->cpu_writers, 0);
 
-	fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
-	if (fbo->mem.mm_node)
-		fbo->mem.mm_node->private = (void *)fbo;
+	spin_lock(&bdev->fence_lock);
+	if (bo->sync_obj)
+		fbo->sync_obj = driver->sync_obj_ref(bo->sync_obj);
+	else
+		fbo->sync_obj = NULL;
+	spin_unlock(&bdev->fence_lock);
 	kref_init(&fbo->list_kref);
 	kref_init(&fbo->kref);
 	fbo->destroy = &ttm_transfered_destroy;
+	fbo->acc_size = 0;
+	fbo->resv = &fbo->ttm_resv;
+	reservation_object_init(fbo->resv);
+	ret = ww_mutex_trylock(&fbo->resv->lock);
+	WARN_ON(!ret);
 
 	*new_obj = fbo;
 	return 0;
@@ -363,7 +506,7 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 	else
 		tmp = pgprot_noncached(tmp);
 #endif
-#if defined(__sparc__)
+#if defined(__sparc__) || defined(__mips__)
 	if (!(caching_flags & TTM_PL_FLAG_CACHED))
 		tmp = pgprot_noncached(tmp);
 #endif
@@ -372,26 +515,23 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
 EXPORT_SYMBOL(ttm_io_prot);
 
 static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
-			  unsigned long bus_base,
-			  unsigned long bus_offset,
-			  unsigned long bus_size,
+			  unsigned long offset,
+			  unsigned long size,
 			  struct ttm_bo_kmap_obj *map)
 {
-	struct ttm_bo_device *bdev = bo->bdev;
 	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
 
-	if (!(man->flags & TTM_MEMTYPE_FLAG_NEEDS_IOREMAP)) {
+	if (bo->mem.bus.addr) {
 		map->bo_kmap_type = ttm_bo_map_premapped;
-		map->virtual = (void *)(((u8 *) man->io_addr) + bus_offset);
+		map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset);
 	} else {
 		map->bo_kmap_type = ttm_bo_map_iomap;
 		if (mem->placement & TTM_PL_FLAG_WC)
-			map->virtual = ioremap_wc(bus_base + bus_offset,
-						  bus_size);
+			map->virtual = ioremap_wc(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						  size);
 		else
-			map->virtual = ioremap_nocache(bus_base + bus_offset,
-						       bus_size);
+			map->virtual = ioremap_nocache(bo->mem.bus.base + bo->mem.bus.offset + offset,
+						       size);
 	}
 	return (!map->virtual) ? -ENOMEM : 0;
 }
@@ -403,10 +543,16 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 {
 	struct ttm_mem_reg *mem = &bo->mem;
 	pgprot_t prot;
 	struct ttm_tt *ttm = bo->ttm;
-	struct page *d;
-	int i;
+	int ret;
 
 	BUG_ON(!ttm);
+
+	if (ttm->state == tt_unpopulated) {
+		ret = ttm->bdev->driver->ttm_tt_populate(ttm);
+		if (ret)
+			return ret;
+	}
+
 	if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) {
 		/*
 		 * We're mapping a single page, and the desired
@@ -414,18 +560,9 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 		 */
 		map->bo_kmap_type = ttm_bo_map_kmap;
-		map->page = ttm_tt_get_page(ttm, start_page);
+		map->page = ttm->pages[start_page];
 		map->virtual = kmap(map->page);
 	} else {
-		/*
-		 * Populate the part we're mapping;
-		 */
-		for (i = start_page; i < start_page + num_pages; ++i) {
-			d = ttm_tt_get_page(ttm, i);
-			if (!d)
-				return -ENOMEM;
-		}
-
 		/*
 		 * We need to use vmap to get the desired page protection
 		 * or to make the buffer object look contiguous.
@@ -444,37 +581,43 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
 		unsigned long start_page, unsigned long num_pages,
 		struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+	unsigned long offset, size;
 	int ret;
-	unsigned long bus_base;
-	unsigned long bus_offset;
-	unsigned long bus_size;
 
 	BUG_ON(!list_empty(&bo->swap));
 	map->virtual = NULL;
+	map->bo = bo;
 	if (num_pages > bo->num_pages)
 		return -EINVAL;
 	if (start_page > bo->num_pages)
 		return -EINVAL;
 #if 0
-	if (num_pages > 1 && !DRM_SUSER(DRM_CURPROC))
+	if (num_pages > 1 && !capable(CAP_SYS_ADMIN))
 		return -EPERM;
 #endif
-	ret = ttm_bo_pci_offset(bo->bdev, &bo->mem, &bus_base,
-				&bus_offset, &bus_size);
+	(void) ttm_mem_io_lock(man, false);
+	ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
+	ttm_mem_io_unlock(man);
 	if (ret)
 		return ret;
-	if (bus_size == 0) {
+	if (!bo->mem.bus.is_iomem) {
 		return ttm_bo_kmap_ttm(bo, start_page, num_pages, map);
 	} else {
-		bus_offset += start_page << PAGE_SHIFT;
-		bus_size = num_pages << PAGE_SHIFT;
-		return ttm_bo_ioremap(bo, bus_base, bus_offset, bus_size, map);
+		offset = start_page << PAGE_SHIFT;
+		size = num_pages << PAGE_SHIFT;
+		return ttm_bo_ioremap(bo, offset, size, map);
 	}
 }
 EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
+	struct ttm_buffer_object *bo = map->bo;
+	struct ttm_mem_type_manager *man =
+		&bo->bdev->man[bo->mem.mem_type];
+
 	if (!map->virtual)
 		return;
 	switch (map->bo_kmap_type) {
@@ -492,44 +635,18 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 	default:
 		BUG();
 	}
+	(void) ttm_mem_io_lock(man, false);
+	ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
+	ttm_mem_io_unlock(man);
 	map->virtual = NULL;
 	map->page = NULL;
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_pfn_prot(struct ttm_buffer_object *bo,
-		    unsigned long dst_offset,
-		    unsigned long *pfn, pgprot_t *prot)
-{
-	struct ttm_mem_reg *mem = &bo->mem;
-	struct ttm_bo_device *bdev = bo->bdev;
-	unsigned long bus_offset;
-	unsigned long bus_size;
-	unsigned long bus_base;
-	int ret;
-	ret = ttm_bo_pci_offset(bdev, mem, &bus_base, &bus_offset,
-			&bus_size);
-	if (ret)
-		return -EINVAL;
-	if (bus_size != 0)
-		*pfn = (bus_base + bus_offset + dst_offset) >> PAGE_SHIFT;
-	else
-		if (!bo->ttm)
-			return -EINVAL;
-		else
-			*pfn = page_to_pfn(ttm_tt_get_page(bo->ttm,
-							   dst_offset >>
-							   PAGE_SHIFT));
-	*prot = (mem->placement & TTM_PL_FLAG_CACHED) ?
-		PAGE_KERNEL : ttm_io_prot(mem->placement, PAGE_KERNEL);
-
-	return 0;
-}
-
 int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 			      void *sync_obj,
-			      void *sync_obj_arg,
-			      bool evict, bool no_wait,
+			      bool evict,
+			      bool no_wait_gpu,
 			      struct ttm_mem_reg *new_mem)
 {
 	struct ttm_bo_device *bdev = bo->bdev;
@@ -537,32 +654,30 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 	struct ttm_mem_type_manager *man = &bdev->man[new_mem->mem_type];
 	struct ttm_mem_reg *old_mem = &bo->mem;
 	int ret;
-	uint32_t save_flags = old_mem->placement;
 	struct ttm_buffer_object *ghost_obj;
 	void *tmp_obj = NULL;
 
-	spin_lock(&bo->lock);
+	spin_lock(&bdev->fence_lock);
 	if (bo->sync_obj) {
 		tmp_obj = bo->sync_obj;
 		bo->sync_obj = NULL;
 	}
 	bo->sync_obj = driver->sync_obj_ref(sync_obj);
-	bo->sync_obj_arg = sync_obj_arg;
 	if (evict) {
 		ret = ttm_bo_wait(bo, false, false, false);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 		if (ret)
 			return ret;
 
-		ttm_bo_free_old_node(bo);
 		if ((man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
 		    (bo->ttm != NULL)) {
 			ttm_tt_unbind(bo->ttm);
 			ttm_tt_destroy(bo->ttm);
 			bo->ttm = NULL;
 		}
+		ttm_bo_free_old_node(bo);
 	} else {
 		/**
 		 * This should help pipeline ordinary buffer moves.
@@ -573,7 +688,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 		 */
 
 		set_bit(TTM_BO_PRIV_FLAG_MOVING, &bo->priv_flags);
-		spin_unlock(&bo->lock);
+		spin_unlock(&bdev->fence_lock);
 		if (tmp_obj)
 			driver->sync_obj_unref(&tmp_obj);
 
@@ -598,7 +713,7 @@ int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
 
 	*old_mem = *new_mem;
 	new_mem->mm_node = NULL;
-	ttm_flag_masked(&save_flags, new_mem->placement, TTM_PL_MASK_MEMTYPE);
+
 	return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
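
Note on the new API (editor's sketch): the hunks above replace the old
ttm_bo_pci_offset()/TTM_MEMTYPE_FLAG_NEEDS_IOREMAP scheme with a
driver-backed io_mem_reserve()/io_mem_free() pair, serialized by
ttm_mem_io_lock()/ttm_mem_io_unlock(). A minimal caller, mirroring what
ttm_bo_kmap() does in this patch, would look roughly like the following;
the helper name example_access_iomem() is hypothetical, and the snippet
assumes the TTM structures exactly as they stand after this diff:

	#include <drm/ttm/ttm_bo_driver.h>

	/* Hypothetical helper: reserve the bo's I/O window, use it,
	 * and release it again, following the ttm_bo_kmap() pattern
	 * introduced by this patch. */
	static int example_access_iomem(struct ttm_buffer_object *bo)
	{
		struct ttm_mem_type_manager *man =
			&bo->bdev->man[bo->mem.mem_type];
		int ret;

		/* Take the per-manager io_reserve mutex (a no-op when
		 * the manager advertises io_reserve_fastpath) and ask
		 * the driver to make bo->mem.bus valid. */
		(void) ttm_mem_io_lock(man, false);
		ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
		ttm_mem_io_unlock(man);
		if (ret)
			return ret;

		/* bo->mem.bus.{base,offset,size,is_iomem,addr} may be
		 * consulted here, e.g. for an ioremap of the range. */

		/* Balance io_reserved_count so the driver may release
		 * the window again. */
		(void) ttm_mem_io_lock(man, false);
		ttm_mem_io_free(bo->bdev, &bo->mem);
		ttm_mem_io_unlock(man);
		return 0;
	}

The refcount in mem->bus.io_reserved_count is what lets kmap, vmap and
CPU faults share a single reserved window, while ttm_mem_io_evict()
gives the slow path a way to steal a window from an idle buffer when
the driver's io_mem_reserve() returns -EAGAIN.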
