Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
 -rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c  1082
 1 file changed, 508 insertions(+), 574 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 6ac804b0c9f..c90c0dc0afe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -23,156 +23,231 @@
  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
  *
  */
-#include "drmP.h"
-#include "drm.h"
-#include "nouveau_drv.h"
+#include <subdev/fb.h>
+
 #include "nouveau_drm.h"
 #include "nouveau_dma.h"
+#include "nouveau_fence.h"
+#include "nouveau_abi16.h"
+
+#include "nouveau_ttm.h"
+#include "nouveau_gem.h"
 
-#define nouveau_gem_pushbuf_sync(chan) 0
+void
+nouveau_gem_object_del(struct drm_gem_object *gem)
+{
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct ttm_buffer_object *bo = &nvbo->bo;
+
+	if (gem->import_attach)
+		drm_prime_gem_destroy(gem, nvbo->bo.sg);
+
+	drm_gem_object_release(gem);
+
+	/* reset filp so nouveau_bo_del_ttm() can test for it */
+	gem->filp = NULL;
+	ttm_bo_unref(&bo);
+}
 
 int
-nouveau_gem_object_new(struct drm_gem_object *gem)
+nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
-	return 0;
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct nouveau_vma *vma;
+	int ret;
+
+	if (!cli->base.vm)
+		return 0;
+
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	if (ret)
+		return ret;
+
+	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	if (!vma) {
+		vma = kzalloc(sizeof(*vma), GFP_KERNEL);
+		if (!vma) {
+			ret = -ENOMEM;
+			goto out;
+		}
+
+		ret = nouveau_bo_vma_add(nvbo, cli->base.vm, vma);
+		if (ret) {
+			kfree(vma);
+			goto out;
+		}
+	} else {
+		vma->refcount++;
+	}
+
+out:
+	ttm_bo_unreserve(&nvbo->bo);
+	return ret;
+}
+
+static void
+nouveau_gem_object_delete(void *data)
+{
+	struct nouveau_vma *vma = data;
+	nouveau_vm_unmap(vma);
+	nouveau_vm_put(vma);
+	kfree(vma);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
+	struct nouveau_fence *fence = NULL;
+
+	list_del(&vma->head);
+
+	if (mapped) {
+		spin_lock(&nvbo->bo.bdev->fence_lock);
+		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
+	}
+
+	if (fence) {
+		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
+	} else {
+		if (mapped)
+			nouveau_vm_unmap(vma);
+		nouveau_vm_put(vma);
+		kfree(vma);
+	}
+	nouveau_fence_unref(&fence);
 }
 
 void
-nouveau_gem_object_del(struct drm_gem_object *gem)
+nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
-	struct ttm_buffer_object *bo = &nvbo->bo;
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct nouveau_vma *vma;
+	int ret;
 
-	if (!nvbo)
+	if (!cli->base.vm)
 		return;
 
-	nvbo->gem = NULL;
-	if (unlikely(nvbo->cpu_filp))
-		ttm_bo_synccpu_write_release(bo);
+	ret = ttm_bo_reserve(&nvbo->bo, false, false, false, 0);
+	if (ret)
+		return;
 
-	if (unlikely(nvbo->pin_refcnt)) {
-		nvbo->pin_refcnt = 1;
-		nouveau_bo_unpin(nvbo);
+	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+	if (vma) {
+		if (--vma->refcount == 0)
+			nouveau_gem_object_unmap(nvbo, vma);
 	}
-
-	ttm_bo_unref(&bo);
+	ttm_bo_unreserve(&nvbo->bo);
 }
 
 int
-nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
-		int size, int align, uint32_t flags, uint32_t tile_mode,
-		uint32_t tile_flags, bool no_vm, bool mappable,
+nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
+		uint32_t tile_mode, uint32_t tile_flags,
 		struct nouveau_bo **pnvbo)
 {
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct nouveau_bo *nvbo;
+	u32 flags = 0;
 	int ret;
 
-	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
-			     tile_flags, no_vm, mappable, pnvbo);
+	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
+		flags |= TTM_PL_FLAG_VRAM;
+	if (domain & NOUVEAU_GEM_DOMAIN_GART)
+		flags |= TTM_PL_FLAG_TT;
+	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
+		flags |= TTM_PL_FLAG_SYSTEM;
+
+	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
+			     tile_flags, NULL, pnvbo);
 	if (ret)
 		return ret;
 	nvbo = *pnvbo;
 
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
+	/* we restrict allowed domains on nv50+ to only the types
+	 * that were requested at creation time.  not possibly on
+	 * earlier chips without busting the ABI.
+	 */
+	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
+			      NOUVEAU_GEM_DOMAIN_GART;
+	if (nv_device(drm->device)->card_type >= NV_50)
+		nvbo->valid_domains &= domain;
+
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	if (ret) {
 		nouveau_bo_ref(NULL, pnvbo);
 		return -ENOMEM;
 	}
 
-	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
-	nvbo->gem->driver_private = nvbo;
+	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
 	return 0;
 }
 
 static int
-nouveau_gem_info(struct drm_gem_object *gem, struct drm_nouveau_gem_info *rep)
+nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
+		 struct drm_nouveau_gem_info *rep)
 {
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
+	struct nouveau_vma *vma;
 
 	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
 		rep->domain = NOUVEAU_GEM_DOMAIN_GART;
 	else
 		rep->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
 	rep->offset = nvbo->bo.offset;
-	rep->map_handle = nvbo->mappable ? nvbo->bo.addr_space_offset : 0;
-	rep->tile_mode = nvbo->tile_mode;
-	rep->tile_flags = nvbo->tile_flags;
-	return 0;
-}
+	if (cli->base.vm) {
+		vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
+		if (!vma)
+			return -EINVAL;
 
-static bool
-nouveau_gem_tile_flags_valid(struct drm_device *dev, uint32_t tile_flags) {
-	switch (tile_flags) {
-	case 0x0000:
-	case 0x1800:
-	case 0x2800:
-	case 0x4800:
-	case 0x7000:
-	case 0x7400:
-	case 0x7a00:
-	case 0xe000:
-		break;
-	default:
-		NV_ERROR(dev, "bad page flags: 0x%08x\n", tile_flags);
-		return false;
+		rep->offset = vma->offset;
 	}
 
-	return true;
+	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
+	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
+	rep->tile_mode = nvbo->tile_mode;
+	rep->tile_flags = nvbo->tile_flags;
+	return 0;
 }
 
 int
 nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
+	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
-	struct nouveau_channel *chan = NULL;
-	uint32_t flags = 0;
 	int ret = 0;
 
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
-		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;
-
-	if (req->channel_hint) {
-		NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel_hint,
-						     file_priv, chan);
-	}
-
-	if (req->info.domain & NOUVEAU_GEM_DOMAIN_VRAM)
-		flags |= TTM_PL_FLAG_VRAM;
-	if (req->info.domain & NOUVEAU_GEM_DOMAIN_GART)
-		flags |= TTM_PL_FLAG_TT;
-	if (!flags || req->info.domain & NOUVEAU_GEM_DOMAIN_CPU)
-		flags |= TTM_PL_FLAG_SYSTEM;
-
-	if (!nouveau_gem_tile_flags_valid(dev, req->info.tile_flags))
+	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
+		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
+	}
 
-	ret = nouveau_gem_new(dev, chan, req->info.size, req->align, flags,
-			      req->info.tile_mode, req->info.tile_flags, false,
-			      (req->info.domain & NOUVEAU_GEM_DOMAIN_MAPPABLE),
-			      &nvbo);
+	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
+			      req->info.tile_flags, &nvbo);
 	if (ret)
 		return ret;
 
-	ret = nouveau_gem_info(nvbo->gem, &req->info);
-	if (ret)
-		goto out;
-
-	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_handle_unreference(nvbo->gem);
-	mutex_unlock(&dev->struct_mutex);
+	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
+	if (ret == 0) {
+		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
+		if (ret)
+			drm_gem_handle_delete(file_priv, req->info.handle);
+	}
 
-	if (ret)
-		drm_gem_object_unreference(nvbo->gem);
+	/* drop reference from allocate - handle holds it now */
+	drm_gem_object_unreference_unlocked(&nvbo->gem);
 	return ret;
 }
 
@@ -180,42 +255,37 @@ static int
 nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 		       uint32_t write_domains, uint32_t valid_domains)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
-	uint64_t flags;
+	uint32_t domains = valid_domains & nvbo->valid_domains &
+		(write_domains ? write_domains : read_domains);
+	uint32_t pref_flags = 0, valid_flags = 0;
 
-	if (!valid_domains || (!read_domains && !write_domains))
+	if (!domains)
 		return -EINVAL;
 
-	if (write_domains) {
-		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-		    (write_domains & NOUVEAU_GEM_DOMAIN_VRAM))
-			flags = TTM_PL_FLAG_VRAM;
-		else
-		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
-		    (write_domains & NOUVEAU_GEM_DOMAIN_GART))
-			flags = TTM_PL_FLAG_TT;
-		else
-			return -EINVAL;
-	} else {
-		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-		    bo->mem.mem_type == TTM_PL_VRAM)
-			flags = TTM_PL_FLAG_VRAM;
-		else
-		if ((valid_domains & NOUVEAU_GEM_DOMAIN_GART) &&
-		    (read_domains & NOUVEAU_GEM_DOMAIN_GART) &&
-		    bo->mem.mem_type == TTM_PL_TT)
-			flags = TTM_PL_FLAG_TT;
-		else
-		if ((valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
-		    (read_domains & NOUVEAU_GEM_DOMAIN_VRAM))
-			flags = TTM_PL_FLAG_VRAM;
-		else
-			flags = TTM_PL_FLAG_TT;
-	}
+	if (valid_domains & NOUVEAU_GEM_DOMAIN_VRAM)
+		valid_flags |= TTM_PL_FLAG_VRAM;
+
+	if (valid_domains & NOUVEAU_GEM_DOMAIN_GART)
+		valid_flags |= TTM_PL_FLAG_TT;
+
+	if ((domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
+	    bo->mem.mem_type == TTM_PL_VRAM)
+		pref_flags |= TTM_PL_FLAG_VRAM;
+
+	else if ((domains & NOUVEAU_GEM_DOMAIN_GART) &&
+		 bo->mem.mem_type == TTM_PL_TT)
+		pref_flags |= TTM_PL_FLAG_TT;
+
+	else if (domains & NOUVEAU_GEM_DOMAIN_VRAM)
+		pref_flags |= TTM_PL_FLAG_VRAM;
+
+	else
+		pref_flags |= TTM_PL_FLAG_TT;
+
+	nouveau_bo_placement_set(nvbo, pref_flags, valid_flags);
 
-	nouveau_bo_placement_set(nvbo, flags);
 	return 0;
 }
 
@@ -223,39 +293,47 @@ struct validate_op {
 	struct list_head vram_list;
 	struct list_head gart_list;
 	struct list_head both_list;
+	struct ww_acquire_ctx ticket;
 };
 
 static void
-validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
+		   struct ww_acquire_ctx *ticket)
 {
 	struct list_head *entry, *tmp;
 	struct nouveau_bo *nvbo;
 
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
-		if (likely(fence)) {
-			struct nouveau_fence *prev_fence;
-
-			spin_lock(&nvbo->bo.lock);
-			prev_fence = nvbo->bo.sync_obj;
-			nvbo->bo.sync_obj = nouveau_fence_ref(fence);
-			spin_unlock(&nvbo->bo.lock);
-			nouveau_fence_unref((void *)&prev_fence);
+
+		if (likely(fence))
+			nouveau_bo_fence(nvbo, fence);
+
+		if (unlikely(nvbo->validate_mapped)) {
+			ttm_bo_kunmap(&nvbo->kmap);
+			nvbo->validate_mapped = false;
 		}
 
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
-		ttm_bo_unreserve(&nvbo->bo);
-		drm_gem_object_unreference(nvbo->gem);
+		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
+		drm_gem_object_unreference_unlocked(&nvbo->gem);
 	}
 }
 
 static void
-validate_fini(struct validate_op *op, struct nouveau_fence* fence)
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
 {
-	validate_fini_list(&op->vram_list, fence);
-	validate_fini_list(&op->gart_list, fence);
-	validate_fini_list(&op->both_list, fence);
+	validate_fini_list(&op->vram_list, fence, &op->ticket);
+	validate_fini_list(&op->gart_list, fence, &op->ticket);
+	validate_fini_list(&op->both_list, fence, &op->ticket);
+}
+
+static void
+validate_fini(struct validate_op *op, struct nouveau_fence *fence)
+{
+	validate_fini_no_ticket(op, fence);
+	ww_acquire_fini(&op->ticket);
 }
 
 static int
@@ -263,16 +341,16 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
 	      int nr_buffers, struct validate_op *op)
 {
-	struct drm_device *dev = chan->dev;
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	uint32_t sequence;
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct drm_device *dev = chan->drm->dev;
 	int trycnt = 0;
 	int ret, i;
+	struct nouveau_bo *res_bo = NULL;
 
-	sequence = atomic_add_return(1, &dev_priv->ttm.validate_sequence);
+	ww_acquire_init(&op->ticket, &reservation_ww_class);
 retry:
 	if (++trycnt > 100000) {
-		NV_ERROR(dev, "%s failed and gave up.\n", __func__);
+		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
 	}
 
@@ -283,30 +361,47 @@ retry:
 
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
-			NV_ERROR(dev, "Unknown handle 0x%08x\n", b->handle);
+			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
-			return -EINVAL;
+			return -ENOENT;
+		}
+		nvbo = nouveau_gem_object(gem);
+		if (nvbo == res_bo) {
+			res_bo = NULL;
+			drm_gem_object_unreference_unlocked(gem);
+			continue;
 		}
-		nvbo = gem->driver_private;
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
-			NV_ERROR(dev, "multiple instances of buffer %d on "
+			NV_ERROR(cli, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
+			drm_gem_object_unreference_unlocked(gem);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
 
-		ret = ttm_bo_reserve(&nvbo->bo, false, false, true, sequence);
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
 		if (ret) {
-			validate_fini(op, NULL);
-			if (ret == -EAGAIN)
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, false);
-			drm_gem_object_unreference(gem);
-			if (ret)
+			validate_fini_no_ticket(op, NULL);
+			if (unlikely(ret == -EDEADLK)) {
+				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+							      &op->ticket);
+				if (!ret)
+					res_bo = nvbo;
+			}
+			if (unlikely(ret)) {
+				ww_acquire_done(&op->ticket);
+				ww_acquire_fini(&op->ticket);
+				drm_gem_object_unreference_unlocked(gem);
+				if (ret != -ERESTARTSYS)
+					NV_ERROR(cli, "fail reserve\n");
 				return ret;
-			goto retry;
+			}
 		}
 
+		b->user_priv = (uint64_t)(unsigned long)nvbo;
 		nvbo->reserved_by = file_priv;
 		nvbo->pbbo_index = i;
 		if ((b->valid_domains & NOUVEAU_GEM_DOMAIN_VRAM) &&
@@ -319,36 +414,45 @@ retry:
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 			list_add_tail(&nvbo->entry, &op->gart_list);
 		else {
-			NV_ERROR(dev, "invalid valid domains: 0x%08x\n",
+			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
 			list_add_tail(&nvbo->entry, &op->both_list);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
+		if (nvbo == res_bo)
+			goto retry;
+	}
 
-		if (unlikely(atomic_read(&nvbo->bo.cpu_writers) > 0)) {
-			validate_fini(op, NULL);
+	ww_acquire_done(&op->ticket);
+	return 0;
+}
 
-			if (nvbo->cpu_filp == file_priv) {
-				NV_ERROR(dev, "bo %p mapped by process trying "
-					      "to validate it!\n", nvbo);
-				return -EINVAL;
-			}
+static int
+validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
+{
+	struct nouveau_fence *fence = NULL;
+	int ret = 0;
 
-			ret = ttm_bo_wait_cpu(&nvbo->bo, false);
-			if (ret)
-				return ret;
-			goto retry;
-		}
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+
+	if (fence) {
+		ret = nouveau_fence_sync(fence, chan);
+		nouveau_fence_unref(&fence);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int
-validate_list(struct nouveau_channel *chan, struct list_head *list,
-	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
+	      uint64_t user_pbbo_ptr)
 {
+	struct nouveau_drm *drm = chan->drm;
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
 				(void __force __user *)(uintptr_t)user_pbbo_ptr;
 	struct nouveau_bo *nvbo;
@@ -356,46 +460,48 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 	list_for_each_entry(nvbo, list, entry) {
 		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
-		struct nouveau_fence *prev_fence = nvbo->bo.sync_obj;
 
-		if (prev_fence && nouveau_fence_channel(prev_fence) != chan) {
-			spin_lock(&nvbo->bo.lock);
-			ret = ttm_bo_wait(&nvbo->bo, false, false, false);
-			spin_unlock(&nvbo->bo.lock);
-			if (unlikely(ret))
-				return ret;
-		}
-
-		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
-		if (unlikely(ret))
+		if (unlikely(ret)) {
+			NV_ERROR(cli, "fail set_domain\n");
 			return ret;
+		}
 
-		nvbo->channel = chan;
-		ret = ttm_bo_validate(&nvbo->bo, &nvbo->placement,
-				      false, false);
-		nvbo->channel = NULL;
-		if (unlikely(ret))
+		ret = nouveau_bo_validate(nvbo, true, false);
+		if (unlikely(ret)) {
+			if (ret != -ERESTARTSYS)
+				NV_ERROR(cli, "fail ttm_validate\n");
 			return ret;
+		}
 
-		if (nvbo->bo.offset == b->presumed_offset &&
-		    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
-		     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
-		      b->presumed_domain & NOUVEAU_GEM_DOMAIN_GART)))
-			continue;
+		ret = validate_sync(chan, nvbo);
+		if (unlikely(ret)) {
+			NV_ERROR(cli, "fail post-validate sync\n");
+			return ret;
+		}
 
-		if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_GART;
-		else
-			b->presumed_domain = NOUVEAU_GEM_DOMAIN_VRAM;
-		b->presumed_offset = nvbo->bo.offset;
-		b->presumed_ok = 0;
-		relocs++;
+		if (nv_device(drm->device)->card_type < NV_50) {
+			if (nvbo->bo.offset == b->presumed.offset &&
+			    ((nvbo->bo.mem.mem_type == TTM_PL_VRAM &&
+			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_VRAM) ||
+			     (nvbo->bo.mem.mem_type == TTM_PL_TT &&
+			      b->presumed.domain & NOUVEAU_GEM_DOMAIN_GART)))
+				continue;
 
-		if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index], b, sizeof(*b)))
-			return -EFAULT;
+			if (nvbo->bo.mem.mem_type == TTM_PL_TT)
+				b->presumed.domain = NOUVEAU_GEM_DOMAIN_GART;
+			else
+				b->presumed.domain = NOUVEAU_GEM_DOMAIN_VRAM;
+			b->presumed.offset = nvbo->bo.offset;
+			b->presumed.valid = 0;
+			relocs++;
+
+			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
+					 &b->presumed, sizeof(b->presumed)))
+				return -EFAULT;
+		}
 	}
 
 	return relocs;
@@ -408,6 +514,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     uint64_t user_buffers, int nr_buffers,
 			     struct validate_op *op, int *apply_relocs)
 {
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -418,25 +525,34 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 		return 0;
 
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
-	if (unlikely(ret))
+	if (unlikely(ret)) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate_init\n");
 		return ret;
+	}
 
-	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
 	relocs += ret;
 
-	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
 	relocs += ret;
 
-	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
@@ -446,18 +562,31 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	return 0;
 }
 
+static inline void
+u_free(void *addr)
+{
+	if (!is_vmalloc_addr(addr))
+		kfree(addr);
+	else
+		vfree(addr);
+}
+
 static inline void *
 u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 {
 	void *mem;
 	void __user *userptr = (void __force __user *)(uintptr_t)user;
 
-	mem = kmalloc(nmemb * size, GFP_KERNEL);
+	size *= nmemb;
+
+	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (!mem)
+		mem = vmalloc(size);
 	if (!mem)
 		return ERR_PTR(-ENOMEM);
 
-	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
-		kfree(mem);
+	if (copy_from_user(mem, userptr, size)) {
+		u_free(mem);
 		return ERR_PTR(-EFAULT);
 	}
 
@@ -465,62 +594,85 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct nouveau_channel *chan, int nr_bo,
-				struct drm_nouveau_gem_pushbuf_bo *bo,
-				unsigned nr_relocs, uint64_t ptr_relocs,
-				unsigned nr_dwords, unsigned first_dword,
-				uint32_t *pushbuf, bool is_iomem)
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
+				struct drm_nouveau_gem_pushbuf *req,
+				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
-	struct drm_device *dev = chan->dev;
 	int ret = 0;
 	unsigned i;
 
-	reloc = u_memcpya(ptr_relocs, nr_relocs, sizeof(*reloc));
+	reloc = u_memcpya(req->relocs, req->nr_relocs, sizeof(*reloc));
 	if (IS_ERR(reloc))
 		return PTR_ERR(reloc);
 
-	for (i = 0; i < nr_relocs; i++) {
+	for (i = 0; i < req->nr_relocs; i++) {
 		struct drm_nouveau_gem_pushbuf_reloc *r = &reloc[i];
 		struct drm_nouveau_gem_pushbuf_bo *b;
+		struct nouveau_bo *nvbo;
 		uint32_t data;
 
-		if (r->bo_index >= nr_bo || r->reloc_index < first_dword ||
-		    r->reloc_index >= first_dword + nr_dwords) {
-			NV_ERROR(dev, "Bad relocation %d\n", i);
-			NV_ERROR(dev, "  bo: %d max %d\n", r->bo_index, nr_bo);
-			NV_ERROR(dev, "  id: %d max %d\n", r->reloc_index, nr_dwords);
+		if (unlikely(r->bo_index > req->nr_buffers)) {
+			NV_ERROR(cli, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
 
 		b = &bo[r->bo_index];
-		if (b->presumed_ok)
+		if (b->presumed.valid)
 			continue;
 
+		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+			NV_ERROR(cli, "reloc container bo index invalid\n");
+			ret = -EINVAL;
+			break;
+		}
+		nvbo = (void *)(unsigned long)bo[r->reloc_bo_index].user_priv;
+
+		if (unlikely(r->reloc_bo_offset + 4 >
+			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
+			NV_ERROR(cli, "reloc outside of bo\n");
+			ret = -EINVAL;
+			break;
+		}
+
+		if (!nvbo->kmap.virtual) {
+			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
+					  &nvbo->kmap);
+			if (ret) {
+				NV_ERROR(cli, "failed kmap for reloc\n");
+				break;
+			}
+			nvbo->validate_mapped = true;
+		}
+
 		if (r->flags & NOUVEAU_GEM_RELOC_LOW)
-			data = b->presumed_offset + r->data;
+			data = b->presumed.offset + r->data;
 		else
 		if (r->flags & NOUVEAU_GEM_RELOC_HIGH)
-			data = (b->presumed_offset + r->data) >> 32;
+			data = (b->presumed.offset + r->data) >> 32;
 		else
 			data = r->data;
 
 		if (r->flags & NOUVEAU_GEM_RELOC_OR) {
-			if (b->presumed_domain == NOUVEAU_GEM_DOMAIN_GART)
+			if (b->presumed.domain == NOUVEAU_GEM_DOMAIN_GART)
 				data |= r->tor;
 			else
 				data |= r->vor;
 		}
 
-		if (is_iomem)
-			iowrite32_native(data, (void __force __iomem *)
-						&pushbuf[r->reloc_index]);
-		else
-			pushbuf[r->reloc_index] = data;
+		spin_lock(&nvbo->bo.bdev->fence_lock);
+		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
+		if (ret) {
+			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
+			break;
+		}
+
+		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
-	kfree(reloc);
+	u_free(reloc);
	return ret;
 }
 
@@ -528,293 +680,189 @@ int
 nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
+	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
+	struct nouveau_abi16_chan *temp;
+	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_nouveau_gem_pushbuf *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
-	struct nouveau_channel *chan;
+	struct drm_nouveau_gem_pushbuf_push *push;
+	struct drm_nouveau_gem_pushbuf_bo *bo;
+	struct nouveau_channel *chan = NULL;
 	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	uint32_t *pushbuf = NULL;
-	int ret = 0, do_reloc = 0, i;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
-	if (req->nr_dwords >= chan->dma.max ||
-	    req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  dwords : %d max %d\n", req->nr_dwords,
-			 chan->dma.max - 1);
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
-		return -EINVAL;
-	}
+	struct nouveau_fence *fence = NULL;
+	int i, j, ret = 0, do_reloc = 0;
 
-	pushbuf = u_memcpya(req->dwords, req->nr_dwords, sizeof(uint32_t));
-	if (IS_ERR(pushbuf))
-		return PTR_ERR(pushbuf);
+	if (unlikely(!abi16))
+		return -ENOMEM;
 
-	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo)) {
-		kfree(pushbuf);
-		return PTR_ERR(bo);
+	list_for_each_entry(temp, &abi16->channels, head) {
+		if (temp->chan->handle == (NVDRM_CHAN | req->channel)) {
+			chan = temp->chan;
+			break;
+		}
 	}
 
-	mutex_lock(&dev->struct_mutex);
+	if (!chan)
+		return nouveau_abi16_put(abi16, -ENOENT);
 
-	/* Validate buffer list */
-	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
-					   req->nr_buffers, &op, &do_reloc);
-	if (ret)
-		goto out;
+	req->vram_available = drm->gem.vram_available;
+	req->gart_available = drm->gem.gart_available;
+	if (unlikely(req->nr_push == 0))
+		goto out_next;
 
-	/* Apply any relocations that are required */
-	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers,
-						      bo, req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords, 0,
-						      pushbuf, false);
-		if (ret)
-			goto out;
+	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
+		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
+			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
+		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
-	/* Emit push buffer to the hw
-	 */
-	ret = RING_SPACE(chan, req->nr_dwords);
-	if (ret)
-		goto out;
-
-	OUT_RINGp(chan, pushbuf, req->nr_dwords);
-
-	ret = nouveau_fence_new(chan, &fence, true);
-	if (ret) {
-		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
-		WIND_RING(chan);
-		goto out;
+	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
+		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
+			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
+		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
-	if (nouveau_gem_pushbuf_sync(chan)) {
-		ret = nouveau_fence_wait(fence, NULL, false, false);
-		if (ret) {
-			for (i = 0; i < req->nr_dwords; i++)
-				NV_ERROR(dev, "0x%08x\n", pushbuf[i]);
-			NV_ERROR(dev, "^^ above push buffer is fail :(\n");
-		}
+	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
+		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
+			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
+		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
-out:
-	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
-	mutex_unlock(&dev->struct_mutex);
-	kfree(pushbuf);
-	kfree(bo);
-	return ret;
-}
-
-#define PUSHBUF_CAL (dev_priv->card_type >= NV_20)
-
-int
-nouveau_gem_ioctl_pushbuf_call(struct drm_device *dev, void *data,
-			       struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-	struct drm_nouveau_gem_pushbuf_bo *bo = NULL;
-	struct nouveau_channel *chan;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *pbbo;
-	struct validate_op op;
-	struct nouveau_fence* fence = 0;
-	int i, ret = 0, do_reloc = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-	NOUVEAU_GET_USER_CHANNEL_WITH_RETURN(req->channel, file_priv, chan);
-
-	if (unlikely(req->handle == 0))
-		goto out_next;
-
-	if (req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS ||
-	    req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS) {
-		NV_ERROR(dev, "Pushbuf config exceeds limits:\n");
-		NV_ERROR(dev, "  buffers: %d max %d\n", req->nr_buffers,
-			 NOUVEAU_GEM_MAX_BUFFERS);
-		NV_ERROR(dev, "  relocs : %d max %d\n", req->nr_relocs,
-			 NOUVEAU_GEM_MAX_RELOCS);
-		return -EINVAL;
-	}
+	push = u_memcpya(req->push, req->nr_push, sizeof(*push));
+	if (IS_ERR(push))
+		return nouveau_abi16_put(abi16, PTR_ERR(push));
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
-	if (IS_ERR(bo))
-		return PTR_ERR(bo);
+	if (IS_ERR(bo)) {
+		u_free(push);
+		return nouveau_abi16_put(abi16, PTR_ERR(bo));
+	}
 
-	mutex_lock(&dev->struct_mutex);
+	/* Ensure all push buffers are on validate list */
+	for (i = 0; i < req->nr_push; i++) {
+		if (push[i].bo_index >= req->nr_buffers) {
+			NV_ERROR(cli, "push %d buffer not in list\n", i);
+			ret = -EINVAL;
+			goto out_prevalid;
+		}
+	}
 
 	/* Validate buffer list */
 	ret = nouveau_gem_pushbuf_validate(chan, file_priv, bo, req->buffers,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
-		NV_ERROR(dev, "validate: %d\n", ret);
-		goto out;
-	}
-
-	/* Validate DMA push buffer */
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem) {
-		NV_ERROR(dev, "Unknown pb handle 0x%08x\n", req->handle);
-		ret = -EINVAL;
-		goto out;
-	}
-	pbbo = nouveau_gem_object(gem);
-
-	if ((req->offset & 3) || req->nr_dwords < 2 ||
-	    (unsigned long)req->offset > (unsigned long)pbbo->bo.mem.size ||
-	    (unsigned long)req->nr_dwords >
-	     ((unsigned long)(pbbo->bo.mem.size - req->offset ) >> 2)) {
-		NV_ERROR(dev, "pb call misaligned or out of bounds: "
-			      "%d + %d * 4 > %ld\n",
-			 req->offset, req->nr_dwords, pbbo->bo.mem.size);
-		ret = -EINVAL;
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	ret = ttm_bo_reserve(&pbbo->bo, false, false, true,
-			     chan->fence.sequence);
-	if (ret) {
-		NV_ERROR(dev, "resv pb: %d\n", ret);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	nouveau_bo_placement_set(pbbo, 1 << chan->pushbuf_bo->bo.mem.mem_type);
-	ret = ttm_bo_validate(&pbbo->bo, &pbbo->placement, false, false);
-	if (ret) {
-		NV_ERROR(dev, "validate pb: %d\n", ret);
-		ttm_bo_unreserve(&pbbo->bo);
-		drm_gem_object_unreference(gem);
-		goto out;
-	}
-
-	list_add_tail(&pbbo->entry, &op.both_list);
-
-	/* If presumed return address doesn't match, we need to map the
-	 * push buffer and fix it..
-	 */
-	if (!PUSHBUF_CAL) {
-		uint32_t retaddy;
-
-		if (chan->dma.free < 4 + NOUVEAU_DMA_SKIPS) {
-			ret = nouveau_dma_wait(chan, 4 + NOUVEAU_DMA_SKIPS);
-			if (ret) {
-				NV_ERROR(dev, "jmp_space: %d\n", ret);
-				goto out;
-			}
-		}
-
-		retaddy  = chan->pushbuf_base + ((chan->dma.cur + 2) << 2);
-		retaddy |= 0x20000000;
-		if (retaddy != req->suffix0) {
-			req->suffix0 = retaddy;
-			do_reloc = 1;
-		}
+		if (ret != -ERESTARTSYS)
+			NV_ERROR(cli, "validate: %d\n", ret);
+		goto out_prevalid;
 	}
 
 	/* Apply any relocations that are required */
 	if (do_reloc) {
-		void *pbvirt;
-		bool is_iomem;
-		ret = ttm_bo_kmap(&pbbo->bo, 0, pbbo->bo.mem.num_pages,
-				  &pbbo->kmap);
+		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
 		if (ret) {
-			NV_ERROR(dev, "kmap pb: %d\n", ret);
+			NV_ERROR(cli, "reloc apply: %d\n", ret);
 			goto out;
 		}
+	}
 
-		pbvirt = ttm_kmap_obj_virtual(&pbbo->kmap, &is_iomem);
-		ret = nouveau_gem_pushbuf_reloc_apply(chan, req->nr_buffers, bo,
-						      req->nr_relocs,
-						      req->relocs,
-						      req->nr_dwords,
-						      req->offset / 4,
-						      pbvirt, is_iomem);
-
-		if (!PUSHBUF_CAL) {
-			nouveau_bo_wr32(pbbo,
-					req->offset / 4 + req->nr_dwords - 2,
-					req->suffix0);
-		}
-
-		ttm_bo_kunmap(&pbbo->kmap);
+	if (chan->dma.ib_max) {
+		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
-			NV_ERROR(dev, "reloc apply: %d\n", ret);
+			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
-	}
 
-	if (PUSHBUF_CAL) {
-		ret = RING_SPACE(chan, 2);
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			nv50_dma_push(chan, nvbo, push[i].offset,
+				      push[i].length);
+		}
+	} else
+	if (nv_device(drm->device)->chipset >= 0x25) {
+		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
-			NV_ERROR(dev, "cal_space: %d\n", ret);
+			NV_ERROR(cli, "cal_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				  req->offset) | 2);
-		OUT_RING(chan, 0);
+
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+
+			OUT_RING(chan, (nvbo->bo.offset + push[i].offset) | 2);
+			OUT_RING(chan, 0);
+		}
 	} else {
-		ret = RING_SPACE(chan, 2 + NOUVEAU_DMA_SKIPS);
+		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
-			NV_ERROR(dev, "jmp_space: %d\n", ret);
+			NV_ERROR(cli, "jmp_space: %d\n", ret);
 			goto out;
 		}
-		OUT_RING(chan, ((pbbo->bo.mem.mm_node->start << PAGE_SHIFT) +
-				  req->offset) | 0x20000000);
-		OUT_RING(chan, 0);
 
-		/* Space the jumps apart with NOPs. */
-		for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
+		for (i = 0; i < req->nr_push; i++) {
+			struct nouveau_bo *nvbo = (void *)(unsigned long)
+				bo[push[i].bo_index].user_priv;
+			uint32_t cmd;
+
+			cmd = chan->push.vma.offset + ((chan->dma.cur + 2) << 2);
+			cmd |= 0x20000000;
+			if (unlikely(cmd != req->suffix0)) {
+				if (!nvbo->kmap.virtual) {
+					ret = ttm_bo_kmap(&nvbo->bo, 0,
+							  nvbo->bo.mem.
+							  num_pages,
+							  &nvbo->kmap);
+					if (ret) {
+						WIND_RING(chan);
+						goto out;
+					}
+					nvbo->validate_mapped = true;
+				}
+
+				nouveau_bo_wr32(nvbo, (push[i].offset +
+						push[i].length - 8) / 4, cmd);
+			}
+
+			OUT_RING(chan, 0x20000000 |
+				      (nvbo->bo.offset + push[i].offset));
 			OUT_RING(chan, 0);
+			for (j = 0; j < NOUVEAU_DMA_SKIPS; j++)
+				OUT_RING(chan, 0);
+		}
 	}
 
-	ret = nouveau_fence_new(chan, &fence, true);
+	ret = nouveau_fence_new(chan, false, &fence);
 	if (ret) {
-		NV_ERROR(dev, "error fencing pushbuf: %d\n", ret);
+		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		goto out;
 	}
 
 out:
 	validate_fini(&op, fence);
-	nouveau_fence_unref((void**)&fence);
-	mutex_unlock(&dev->struct_mutex);
-	kfree(bo);
+	nouveau_fence_unref(&fence);
+
+out_prevalid:
+	u_free(bo);
+	u_free(push);
 
 out_next:
-	if (PUSHBUF_CAL) {
+	if (chan->dma.ib_max) {
+		req->suffix0 = 0x00000000;
+		req->suffix1 = 0x00000000;
+	} else
+	if (nv_device(drm->device)->chipset >= 0x25) {
 		req->suffix0 = 0x00020000;
 		req->suffix1 = 0x00000000;
 	} else {
 		req->suffix0 = 0x20000000 |
-			      (chan->pushbuf_base + ((chan->dma.cur + 2) << 2));
+			      (chan->push.vma.offset + ((chan->dma.cur + 2) << 2));
 		req->suffix1 = 0x00000000;
 	}
 
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_pushbuf_call2(struct drm_device *dev, void *data,
-				struct drm_file *file_priv)
-{
-	struct drm_nouveau_private *dev_priv = dev->dev_private;
-	struct drm_nouveau_gem_pushbuf_call *req = data;
-
-	req->vram_available = dev_priv->fb_aper_free;
-	req->gart_available = dev_priv->gart_info.aper_free;
-
-	return nouveau_gem_ioctl_pushbuf_call(dev, data, file_priv);
+	return nouveau_abi16_put(abi16, ret);
 }
 
 static inline uint32_t
@@ -831,74 +879,6 @@ domain_to_ttm(struct nouveau_bo *nvbo, uint32_t domain)
 }
 
 int
-nouveau_gem_ioctl_pin(struct drm_device *dev, void *data,
-		      struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = 0;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
-		NV_ERROR(dev, "pin only allowed without kernel modesetting\n");
-		return -EINVAL;
-	}
-
-	if (!DRM_SUSER(DRM_CURPROC))
-		return -EPERM;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-	nvbo = nouveau_gem_object(gem);
-
-	ret = nouveau_bo_pin(nvbo, domain_to_ttm(nvbo, req->domain));
-	if (ret)
-		goto out;
-
-	req->offset = nvbo->bo.offset;
-	if (nvbo->bo.mem.mem_type == TTM_PL_TT)
-		req->domain = NOUVEAU_GEM_DOMAIN_GART;
-	else
-		req->domain = NOUVEAU_GEM_DOMAIN_VRAM;
-
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
-nouveau_gem_ioctl_unpin(struct drm_device *dev, void *data,
-			struct drm_file *file_priv)
-{
-	struct drm_nouveau_gem_pin *req = data;
-	struct drm_gem_object *gem;
-	int ret;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	if (drm_core_check_feature(dev, DRIVER_MODESET))
-		return -EINVAL;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return -EINVAL;
-
-	ret = nouveau_bo_unpin(nouveau_gem_object(gem));
-
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-
-	return ret;
-}
-
-int
 nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
@@ -908,34 +888,15 @@ nouveau_gem_ioctl_cpu_prep(struct drm_device *dev, void *data,
 	bool no_wait = !!(req->flags & NOUVEAU_GEM_CPU_PREP_NOWAIT);
 	int ret = -EINVAL;
 
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
 	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
 	if (!gem)
-		return ret;
+		return -ENOENT;
 	nvbo = nouveau_gem_object(gem);
 
-	if (nvbo->cpu_filp) {
-		if (nvbo->cpu_filp == file_priv)
-			goto out;
-
-		ret = ttm_bo_wait_cpu(&nvbo->bo, no_wait);
-		if (ret)
-			goto out;
-	}
-
-	if (req->flags & NOUVEAU_GEM_CPU_PREP_NOBLOCK) {
-		ret = ttm_bo_wait(&nvbo->bo, false, false, no_wait);
-	} else {
-		ret = ttm_bo_synccpu_write_grab(&nvbo->bo, no_wait);
-		if (ret == 0)
-			nvbo->cpu_filp = file_priv;
-	}
-
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	spin_lock(&nvbo->bo.bdev->fence_lock);
+	ret = ttm_bo_wait(&nvbo->bo, true, true, no_wait);
+	spin_unlock(&nvbo->bo.bdev->fence_lock);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
 
@@ -943,30 +904,7 @@ int
 nouveau_gem_ioctl_cpu_fini(struct drm_device *dev, void *data,
 			   struct drm_file *file_priv)
 {
-	struct drm_nouveau_gem_cpu_prep *req = data;
-	struct drm_gem_object *gem;
-	struct nouveau_bo *nvbo;
-	int ret = -EINVAL;
-
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
-	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
-	if (!gem)
-		return ret;
-	nvbo = nouveau_gem_object(gem);
-
-	if (nvbo->cpu_filp != file_priv)
-		goto out;
-	nvbo->cpu_filp = NULL;
-
-	ttm_bo_synccpu_write_release(&nvbo->bo);
-	ret = 0;
-
-out:
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
-	return ret;
+	return 0;
 }
 
 int
@@ -977,16 +915,12 @@ nouveau_gem_ioctl_info(struct drm_device *dev, void *data,
 	struct drm_nouveau_gem_info *req = data;
 	struct drm_gem_object *gem;
 	int ret;
 
-	NOUVEAU_CHECK_INITIALISED_WITH_RETURN;
-
 	gem = drm_gem_object_lookup(dev, file_priv, req->handle);
 	if (!gem)
-		return -EINVAL;
+		return -ENOENT;
 
-	ret = nouveau_gem_info(gem, req);
-	mutex_lock(&dev->struct_mutex);
-	drm_gem_object_unreference(gem);
-	mutex_unlock(&dev->struct_mutex);
+	ret = nouveau_gem_info(file_priv, gem, req);
+	drm_gem_object_unreference_unlocked(gem);
 	return ret;
 }
