Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_gem.c')
-rw-r--r--	drivers/gpu/drm/nouveau/nouveau_gem.c	269
1 file changed, 159 insertions, 110 deletions
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index 5e2f52158f1..c90c0dc0afe 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -24,8 +24,6 @@
  *
  */
 
-#include <linux/dma-buf.h>
-
 #include <subdev/fb.h>
 
 #include "nouveau_drm.h"
@@ -36,34 +34,20 @@
 #include "nouveau_ttm.h"
 #include "nouveau_gem.h"
 
-int
-nouveau_gem_object_new(struct drm_gem_object *gem)
-{
-	return 0;
-}
-
 void
 nouveau_gem_object_del(struct drm_gem_object *gem)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 
-	if (!nvbo)
-		return;
-	nvbo->gem = NULL;
-
-	if (unlikely(nvbo->pin_refcnt)) {
-		nvbo->pin_refcnt = 1;
-		nouveau_bo_unpin(nvbo);
-	}
-
 	if (gem->import_attach)
 		drm_prime_gem_destroy(gem, nvbo->bo.sg);
 
-	ttm_bo_unref(&bo);
-
 	drm_gem_object_release(gem);
-	kfree(gem);
+
+	/* reset filp so nouveau_bo_del_ttm() can test for it */
+	gem->filp = NULL;
+	ttm_bo_unref(&bo);
 }
 
 int
@@ -103,6 +87,40 @@ out:
 	return ret;
 }
 
+static void
+nouveau_gem_object_delete(void *data)
+{
+	struct nouveau_vma *vma = data;
+	nouveau_vm_unmap(vma);
+	nouveau_vm_put(vma);
+	kfree(vma);
+}
+
+static void
+nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
+{
+	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
+	struct nouveau_fence *fence = NULL;
+
+	list_del(&vma->head);
+
+	if (mapped) {
+		spin_lock(&nvbo->bo.bdev->fence_lock);
+		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+		spin_unlock(&nvbo->bo.bdev->fence_lock);
+	}
+
+	if (fence) {
+		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
+	} else {
+		if (mapped)
+			nouveau_vm_unmap(vma);
+		nouveau_vm_put(vma);
+		kfree(vma);
+	}
+	nouveau_fence_unref(&fence);
+}
+
 void
 nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 {
@@ -120,10 +138,8 @@ nouveau_gem_object_close(struct drm_gem_object *gem, struct drm_file *file_priv)
 
 	vma = nouveau_bo_vma_find(nvbo, cli->base.vm);
 	if (vma) {
-		if (--vma->refcount == 0) {
-			nouveau_bo_vma_del(nvbo, vma);
-			kfree(vma);
-		}
+		if (--vma->refcount == 0)
+			nouveau_gem_object_unmap(nvbo, vma);
 	}
 	ttm_bo_unreserve(&nvbo->bo);
 }
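
Note: nouveau_gem_object_unmap() above defers the actual VMA teardown until the GPU is done with the buffer. It snapshots the buffer's current sync object under fence_lock; if a fence is pending, the unmap/put/kfree runs later from that fence via nouveau_fence_work(), otherwise it runs immediately. Reduced to the bare pattern (a sketch built only from the calls in the hunk above):

    static void cleanup(void *data)        /* runs once the fence signals */
    {
        struct nouveau_vma *vma = data;
        nouveau_vm_unmap(vma);
        nouveau_vm_put(vma);
        kfree(vma);
    }

    /* caller holds the bo reservation */
    spin_lock(&nvbo->bo.bdev->fence_lock);
    fence = nouveau_fence_ref(nvbo->bo.sync_obj);   /* NULL when idle */
    spin_unlock(&nvbo->bo.bdev->fence_lock);

    if (fence)
        nouveau_fence_work(fence, cleanup, vma);    /* deferred to fence */
    else
        cleanup(vma);                               /* safe right away */
    nouveau_fence_unref(&fence);
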
@@ -160,14 +176,15 @@ nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
 	if (nv_device(drm->device)->card_type >= NV_50)
 		nvbo->valid_domains &= domain;
 
-	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
-	if (!nvbo->gem) {
+	/* Initialize the embedded gem-object. We return a single gem-reference
+	 * to the caller, instead of a normal nouveau_bo ttm reference. */
+	ret = drm_gem_object_init(dev, &nvbo->gem, nvbo->bo.mem.size);
+	if (ret) {
 		nouveau_bo_ref(NULL, pnvbo);
 		return -ENOMEM;
 	}
 
-	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
-	nvbo->gem->driver_private = nvbo;
+	nvbo->bo.persistent_swap_storage = nvbo->gem.filp;
 	return 0;
 }
 
@@ -194,7 +211,7 @@ nouveau_gem_info(struct drm_file *file_priv, struct drm_gem_object *gem,
 	}
 
 	rep->size = nvbo->bo.mem.num_pages << PAGE_SHIFT;
-	rep->map_handle = nvbo->bo.addr_space_offset;
+	rep->map_handle = drm_vma_node_offset_addr(&nvbo->bo.vma_node);
 	rep->tile_mode = nvbo->tile_mode;
 	rep->tile_flags = nvbo->tile_flags;
 	return 0;
@@ -205,15 +222,14 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 		      struct drm_file *file_priv)
 {
 	struct nouveau_drm *drm = nouveau_drm(dev);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_fb *pfb = nouveau_fb(drm->device);
 	struct drm_nouveau_gem_new *req = data;
 	struct nouveau_bo *nvbo = NULL;
 	int ret = 0;
 
-	drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;
-
 	if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
-		NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
+		NV_ERROR(cli, "bad page flags: 0x%08x\n", req->info.tile_flags);
 		return -EINVAL;
 	}
 
@@ -223,15 +239,15 @@ nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
 	if (ret)
 		return ret;
 
-	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
+	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
 	if (ret == 0) {
-		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
+		ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
 		if (ret)
 			drm_gem_handle_delete(file_priv, req->info.handle);
 	}
 
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_unreference_unlocked(nvbo->gem);
+	drm_gem_object_unreference_unlocked(&nvbo->gem);
 	return ret;
 }
 
@@ -239,7 +255,7 @@ static int
 nouveau_gem_set_domain(struct drm_gem_object *gem, uint32_t read_domains,
 		       uint32_t write_domains, uint32_t valid_domains)
 {
-	struct nouveau_bo *nvbo = gem->driver_private;
+	struct nouveau_bo *nvbo = nouveau_gem_object(gem);
 	struct ttm_buffer_object *bo = &nvbo->bo;
 	uint32_t domains = valid_domains & nvbo->valid_domains &
		(write_domains ? write_domains : read_domains);
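
Note: with the embedded gem object, nvbo->gem is a struct member rather than a separately allocated pointer, gem->driver_private goes away, and nouveau_gem_object() recovers the containing buffer from a gem pointer. That helper lives in nouveau_gem.h, not in this file; under the layout this diff implies it is presumably a container_of() wrapper along these lines (a sketch, not quoted from the patch):

    /* accessor assumed by the hunks above (defined in nouveau_gem.h) */
    static inline struct nouveau_bo *
    nouveau_gem_object(struct drm_gem_object *gem)
    {
        return gem ? container_of(gem, struct nouveau_bo, gem) : NULL;
    }

One allocation and one lifetime instead of two: the single gem reference handed back by nouveau_gem_new() now pins the whole nouveau_bo, which is why nouveau_gem_object_del() clears gem->filp and drops the ttm reference last.
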
@@ -277,10 +293,12 @@ struct validate_op {
 	struct list_head vram_list;
 	struct list_head gart_list;
 	struct list_head both_list;
+	struct ww_acquire_ctx ticket;
 };
 
 static void
-validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
+validate_fini_list(struct list_head *list, struct nouveau_fence *fence,
+		   struct ww_acquire_ctx *ticket)
 {
 	struct list_head *entry, *tmp;
 	struct nouveau_bo *nvbo;
@@ -288,7 +306,8 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 	list_for_each_safe(entry, tmp, list) {
 		nvbo = list_entry(entry, struct nouveau_bo, entry);
 
-		nouveau_bo_fence(nvbo, fence);
+		if (likely(fence))
+			nouveau_bo_fence(nvbo, fence);
 
 		if (unlikely(nvbo->validate_mapped)) {
 			ttm_bo_kunmap(&nvbo->kmap);
@@ -297,17 +316,24 @@ validate_fini_list(struct list_head *list, struct nouveau_fence *fence)
 
 		list_del(&nvbo->entry);
 		nvbo->reserved_by = NULL;
-		ttm_bo_unreserve(&nvbo->bo);
-		drm_gem_object_unreference_unlocked(nvbo->gem);
+		ttm_bo_unreserve_ticket(&nvbo->bo, ticket);
+		drm_gem_object_unreference_unlocked(&nvbo->gem);
 	}
 }
 
 static void
-validate_fini(struct validate_op *op, struct nouveau_fence* fence)
+validate_fini_no_ticket(struct validate_op *op, struct nouveau_fence *fence)
+{
+	validate_fini_list(&op->vram_list, fence, &op->ticket);
+	validate_fini_list(&op->gart_list, fence, &op->ticket);
+	validate_fini_list(&op->both_list, fence, &op->ticket);
+}
+
+static void
+validate_fini(struct validate_op *op, struct nouveau_fence *fence)
 {
-	validate_fini_list(&op->vram_list, fence);
-	validate_fini_list(&op->gart_list, fence);
-	validate_fini_list(&op->both_list, fence);
+	validate_fini_no_ticket(op, fence);
+	ww_acquire_fini(&op->ticket);
 }
 
 static int
@@ -315,16 +341,16 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
 	      struct drm_nouveau_gem_pushbuf_bo *pbbo,
 	      int nr_buffers, struct validate_op *op)
 {
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct drm_device *dev = chan->drm->dev;
-	struct nouveau_drm *drm = nouveau_drm(dev);
-	uint32_t sequence;
 	int trycnt = 0;
 	int ret, i;
+	struct nouveau_bo *res_bo = NULL;
 
-	sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
-
+	ww_acquire_init(&op->ticket, &reservation_ww_class);
+
 retry:
 	if (++trycnt > 100000) {
-		NV_ERROR(drm, "%s failed and gave up.\n", __func__);
+		NV_ERROR(cli, "%s failed and gave up.\n", __func__);
 		return -EINVAL;
 	}
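
Note: the validate path now rides on the ww_mutex ("wound/wait") machinery instead of the old driver-local validate_sequence counter: a single ww_acquire_ctx ticket spans the whole multi-buffer reservation, which is what makes cross-submitter deadlocks detectable rather than merely timed out. The ticket's lifecycle, extracted from the hunks above (a sketch; error paths omitted):

    struct ww_acquire_ctx ticket;

    ww_acquire_init(&ticket, &reservation_ww_class);
    /* reserve every buffer, passing &ticket to ttm_bo_reserve() */
    ww_acquire_done(&ticket);   /* promise: no further locks will be taken */
    /* ... validate, submit, fence ... */
    /* unreserve every buffer via ttm_bo_unreserve_ticket(bo, &ticket) */
    ww_acquire_fini(&ticket);   /* all locks dropped */

validate_fini() ends the ticket after unreserving the three lists; validate_fini_no_ticket() exists for the mid-acquisition back-off, where the reservations must be dropped but the ticket has to stay live for the slowpath retry below.
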
@@ -335,32 +361,44 @@ retry:
 
 		gem = drm_gem_object_lookup(dev, file_priv, b->handle);
 		if (!gem) {
-			NV_ERROR(drm, "Unknown handle 0x%08x\n", b->handle);
+			NV_ERROR(cli, "Unknown handle 0x%08x\n", b->handle);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
 			return -ENOENT;
 		}
-		nvbo = gem->driver_private;
+		nvbo = nouveau_gem_object(gem);
+		if (nvbo == res_bo) {
+			res_bo = NULL;
+			drm_gem_object_unreference_unlocked(gem);
+			continue;
+		}
 
 		if (nvbo->reserved_by && nvbo->reserved_by == file_priv) {
-			NV_ERROR(drm, "multiple instances of buffer %d on "
+			NV_ERROR(cli, "multiple instances of buffer %d on "
 				      "validation list\n", b->handle);
 			drm_gem_object_unreference_unlocked(gem);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
 
-		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, sequence);
+		ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
 		if (ret) {
-			validate_fini(op, NULL);
-			if (unlikely(ret == -EAGAIN))
-				ret = ttm_bo_wait_unreserved(&nvbo->bo, true);
-			drm_gem_object_unreference_unlocked(gem);
+			validate_fini_no_ticket(op, NULL);
+			if (unlikely(ret == -EDEADLK)) {
+				ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
+							      &op->ticket);
+				if (!ret)
+					res_bo = nvbo;
+			}
 			if (unlikely(ret)) {
+				ww_acquire_done(&op->ticket);
+				ww_acquire_fini(&op->ticket);
+				drm_gem_object_unreference_unlocked(gem);
 				if (ret != -ERESTARTSYS)
-					NV_ERROR(drm, "fail reserve\n");
+					NV_ERROR(cli, "fail reserve\n");
 				return ret;
 			}
-			goto retry;
 		}
 
 		b->user_priv = (uint64_t)(unsigned long)nvbo;
@@ -376,14 +414,18 @@ retry:
 		if (b->valid_domains & NOUVEAU_GEM_DOMAIN_GART)
 			list_add_tail(&nvbo->entry, &op->gart_list);
 		else {
-			NV_ERROR(drm, "invalid valid domains: 0x%08x\n",
+			NV_ERROR(cli, "invalid valid domains: 0x%08x\n",
 				 b->valid_domains);
 			list_add_tail(&nvbo->entry, &op->both_list);
+			ww_acquire_done(&op->ticket);
 			validate_fini(op, NULL);
 			return -EINVAL;
 		}
+		if (nvbo == res_bo)
+			goto retry;
 	}
 
+	ww_acquire_done(&op->ticket);
 	return 0;
 }
 
@@ -394,8 +436,7 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
 	int ret = 0;
 
 	spin_lock(&nvbo->bo.bdev->fence_lock);
-	if (nvbo->bo.sync_obj)
-		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
+	fence = nouveau_fence_ref(nvbo->bo.sync_obj);
 	spin_unlock(&nvbo->bo.bdev->fence_lock);
 
 	if (fence) {
@@ -407,8 +448,9 @@ validate_sync(struct nouveau_channel *chan, struct nouveau_bo *nvbo)
 }
 
 static int
-validate_list(struct nouveau_channel *chan, struct list_head *list,
-	      struct drm_nouveau_gem_pushbuf_bo *pbbo, uint64_t user_pbbo_ptr)
+validate_list(struct nouveau_channel *chan, struct nouveau_cli *cli,
+	      struct list_head *list, struct drm_nouveau_gem_pushbuf_bo *pbbo,
+	      uint64_t user_pbbo_ptr)
 {
 	struct nouveau_drm *drm = chan->drm;
 	struct drm_nouveau_gem_pushbuf_bo __user *upbbo =
@@ -419,30 +461,24 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 	list_for_each_entry(nvbo, list, entry) {
 		struct drm_nouveau_gem_pushbuf_bo *b = &pbbo[nvbo->pbbo_index];
 
-		ret = validate_sync(chan, nvbo);
-		if (unlikely(ret)) {
-			NV_ERROR(drm, "fail pre-validate sync\n");
-			return ret;
-		}
-
-		ret = nouveau_gem_set_domain(nvbo->gem, b->read_domains,
+		ret = nouveau_gem_set_domain(&nvbo->gem, b->read_domains,
 					     b->write_domains,
 					     b->valid_domains);
 		if (unlikely(ret)) {
-			NV_ERROR(drm, "fail set_domain\n");
+			NV_ERROR(cli, "fail set_domain\n");
 			return ret;
 		}
 
-		ret = nouveau_bo_validate(nvbo, true, false, false);
+		ret = nouveau_bo_validate(nvbo, true, false);
 		if (unlikely(ret)) {
 			if (ret != -ERESTARTSYS)
-				NV_ERROR(drm, "fail ttm_validate\n");
+				NV_ERROR(cli, "fail ttm_validate\n");
 			return ret;
 		}
 
 		ret = validate_sync(chan, nvbo);
 		if (unlikely(ret)) {
-			NV_ERROR(drm, "fail post-validate sync\n");
+			NV_ERROR(cli, "fail post-validate sync\n");
 			return ret;
 		}
 
@@ -462,7 +498,7 @@ validate_list(struct nouveau_channel *chan, struct list_head *list,
 			b->presumed.valid = 0;
 			relocs++;
 
-			if (DRM_COPY_TO_USER(&upbbo[nvbo->pbbo_index].presumed,
+			if (copy_to_user(&upbbo[nvbo->pbbo_index].presumed,
 					     &b->presumed, sizeof(b->presumed)))
 				return -EFAULT;
 		}
@@ -478,7 +514,7 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 			     uint64_t user_buffers, int nr_buffers,
 			     struct validate_op *op, int *apply_relocs)
 {
-	struct nouveau_drm *drm = chan->drm;
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	int ret, relocs = 0;
 
 	INIT_LIST_HEAD(&op->vram_list);
@@ -491,32 +527,32 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	ret = validate_init(chan, file_priv, pbbo, nr_buffers, op);
 	if (unlikely(ret)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(drm, "validate_init\n");
+			NV_ERROR(cli, "validate_init\n");
 		return ret;
 	}
 
-	ret = validate_list(chan, &op->vram_list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->vram_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(drm, "validate vram_list\n");
+			NV_ERROR(cli, "validate vram_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
 	relocs += ret;
 
-	ret = validate_list(chan, &op->gart_list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->gart_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(drm, "validate gart_list\n");
+			NV_ERROR(cli, "validate gart_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
 	relocs += ret;
 
-	ret = validate_list(chan, &op->both_list, pbbo, user_buffers);
+	ret = validate_list(chan, cli, &op->both_list, pbbo, user_buffers);
 	if (unlikely(ret < 0)) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(drm, "validate both_list\n");
+			NV_ERROR(cli, "validate both_list\n");
 		validate_fini(op, NULL);
 		return ret;
 	}
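
Note: with a ticket, ttm_bo_reserve() fails with -EDEADLK (rather than the old -EAGAIN) when taking this buffer would deadlock against a concurrent submitter. The new back-off in validate_init(), restated with comments (condensed from the hunk above, not verbatim):

    ret = ttm_bo_reserve(&nvbo->bo, true, false, true, &op->ticket);
    if (ret) {
        validate_fini_no_ticket(op, NULL);      /* drop everything we hold */
        if (ret == -EDEADLK) {
            /* sleep until the contended bo can be taken on our ticket */
            ret = ttm_bo_reserve_slowpath(&nvbo->bo, true, &op->ticket);
            if (!ret)
                res_bo = nvbo;                  /* keep it across restart */
        }
        if (ret)
            goto err;           /* ww_acquire_done+fini, drop gem ref */
    }
    /* ... later, after res_bo has been put on its list ... */
    if (nvbo == res_bo)
        goto retry;             /* re-reserve the buffers we backed off */

Because res_bo stays reserved and listed across the restart, the `nvbo == res_bo` check at the top of the loop skips it (and drops the extra gem reference from the fresh lookup) instead of reserving it twice.
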
@@ -526,18 +562,31 @@ nouveau_gem_pushbuf_validate(struct nouveau_channel *chan,
 	return 0;
 }
 
+static inline void
+u_free(void *addr)
+{
+	if (!is_vmalloc_addr(addr))
+		kfree(addr);
+	else
+		vfree(addr);
+}
+
 static inline void *
 u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 {
 	void *mem;
 	void __user *userptr = (void __force __user *)(uintptr_t)user;
 
-	mem = kmalloc(nmemb * size, GFP_KERNEL);
+	size *= nmemb;
+
+	mem = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+	if (!mem)
+		mem = vmalloc(size);
 	if (!mem)
 		return ERR_PTR(-ENOMEM);
 
-	if (DRM_COPY_FROM_USER(mem, userptr, nmemb * size)) {
-		kfree(mem);
+	if (copy_from_user(mem, userptr, size)) {
+		u_free(mem);
 		return ERR_PTR(-EFAULT);
 	}
 
@@ -545,11 +594,10 @@ u_memcpya(uint64_t user, unsigned nmemb, unsigned size)
 }
 
 static int
-nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
+nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
 				struct drm_nouveau_gem_pushbuf *req,
 				struct drm_nouveau_gem_pushbuf_bo *bo)
 {
-	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_nouveau_gem_pushbuf_reloc *reloc = NULL;
 	int ret = 0;
 	unsigned i;
@@ -565,7 +613,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 		uint32_t data;
 
 		if (unlikely(r->bo_index > req->nr_buffers)) {
-			NV_ERROR(drm, "reloc bo index invalid\n");
+			NV_ERROR(cli, "reloc bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -575,7 +623,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 			continue;
 
 		if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
-			NV_ERROR(drm, "reloc container bo index invalid\n");
+			NV_ERROR(cli, "reloc container bo index invalid\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -583,7 +631,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 
 		if (unlikely(r->reloc_bo_offset + 4 >
 			     nvbo->bo.mem.num_pages << PAGE_SHIFT)) {
-			NV_ERROR(drm, "reloc outside of bo\n");
+			NV_ERROR(cli, "reloc outside of bo\n");
 			ret = -EINVAL;
 			break;
 		}
@@ -592,7 +640,7 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 			ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.mem.num_pages,
 					  &nvbo->kmap);
 			if (ret) {
-				NV_ERROR(drm, "failed kmap for reloc\n");
+				NV_ERROR(cli, "failed kmap for reloc\n");
 				break;
 			}
 			nvbo->validate_mapped = true;
@@ -617,14 +665,14 @@ nouveau_gem_pushbuf_reloc_apply(struct drm_device *dev,
 		ret = ttm_bo_wait(&nvbo->bo, false, false, false);
 		spin_unlock(&nvbo->bo.bdev->fence_lock);
 		if (ret) {
-			NV_ERROR(drm, "reloc wait_idle failed: %d\n", ret);
+			NV_ERROR(cli, "reloc wait_idle failed: %d\n", ret);
 			break;
 		}
 
 		nouveau_bo_wr32(nvbo, r->reloc_bo_offset >> 2, data);
 	}
 
-	kfree(reloc);
+	u_free(reloc);
 	return ret;
 }
 
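Note: u_memcpya() previously used a bare kmalloc(), which can fail for the large, userspace-controlled arrays a pushbuf submit may carry. It now tries kmalloc() quietly and falls back to vmalloc(), with u_free() choosing the matching release via is_vmalloc_addr(). The same idiom in isolation (later kernels package it as kvmalloc()/kvfree(); the function names below are illustrative only):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *big_copy_buf(size_t size)
    {
        /* physically contiguous first, without the allocation-failure splat */
        void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
        if (!p)
            p = vmalloc(size);  /* virtually contiguous fallback */
        return p;
    }

    static void big_copy_free(void *p)
    {
        if (is_vmalloc_addr(p))
            vfree(p);
        else
            kfree(p);
    }

The `size *= nmemb` multiply is unchecked, but it is only reachable after nr_buffers, nr_relocs and nr_push have been bounded by the NOUVEAU_GEM_MAX_* checks in nouveau_gem_ioctl_pushbuf() below.
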
@@ -633,6 +681,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 			  struct drm_file *file_priv)
 {
 	struct nouveau_abi16 *abi16 = nouveau_abi16_get(file_priv, dev);
+	struct nouveau_cli *cli = nouveau_cli(file_priv);
 	struct nouveau_abi16_chan *temp;
 	struct nouveau_drm *drm = nouveau_drm(dev);
 	struct drm_nouveau_gem_pushbuf *req = data;
@@ -662,19 +711,19 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		goto out_next;
 
 	if (unlikely(req->nr_push > NOUVEAU_GEM_MAX_PUSH)) {
-		NV_ERROR(drm, "pushbuf push count exceeds limit: %d max %d\n",
+		NV_ERROR(cli, "pushbuf push count exceeds limit: %d max %d\n",
 			 req->nr_push, NOUVEAU_GEM_MAX_PUSH);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_buffers > NOUVEAU_GEM_MAX_BUFFERS)) {
-		NV_ERROR(drm, "pushbuf bo count exceeds limit: %d max %d\n",
+		NV_ERROR(cli, "pushbuf bo count exceeds limit: %d max %d\n",
 			 req->nr_buffers, NOUVEAU_GEM_MAX_BUFFERS);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
 
 	if (unlikely(req->nr_relocs > NOUVEAU_GEM_MAX_RELOCS)) {
-		NV_ERROR(drm, "pushbuf reloc count exceeds limit: %d max %d\n",
+		NV_ERROR(cli, "pushbuf reloc count exceeds limit: %d max %d\n",
 			 req->nr_relocs, NOUVEAU_GEM_MAX_RELOCS);
 		return nouveau_abi16_put(abi16, -EINVAL);
 	}
@@ -685,14 +734,14 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 
 	bo = u_memcpya(req->buffers, req->nr_buffers, sizeof(*bo));
 	if (IS_ERR(bo)) {
-		kfree(push);
+		u_free(push);
 		return nouveau_abi16_put(abi16, PTR_ERR(bo));
 	}
 
 	/* Ensure all push buffers are on validate list */
 	for (i = 0; i < req->nr_push; i++) {
 		if (push[i].bo_index >= req->nr_buffers) {
-			NV_ERROR(drm, "push %d buffer not in list\n", i);
+			NV_ERROR(cli, "push %d buffer not in list\n", i);
 			ret = -EINVAL;
 			goto out_prevalid;
 		}
@@ -703,15 +752,15 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 					   req->nr_buffers, &op, &do_reloc);
 	if (ret) {
 		if (ret != -ERESTARTSYS)
-			NV_ERROR(drm, "validate: %d\n", ret);
+			NV_ERROR(cli, "validate: %d\n", ret);
 		goto out_prevalid;
 	}
 
 	/* Apply any relocations that are required */
 	if (do_reloc) {
-		ret = nouveau_gem_pushbuf_reloc_apply(dev, req, bo);
+		ret = nouveau_gem_pushbuf_reloc_apply(cli, req, bo);
 		if (ret) {
-			NV_ERROR(drm, "reloc apply: %d\n", ret);
+			NV_ERROR(cli, "reloc apply: %d\n", ret);
 			goto out;
 		}
 	}
@@ -719,7 +768,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (chan->dma.ib_max) {
 		ret = nouveau_dma_wait(chan, req->nr_push + 1, 16);
 		if (ret) {
-			NV_ERROR(drm, "nv50cal_space: %d\n", ret);
+			NV_ERROR(cli, "nv50cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -734,7 +783,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	if (nv_device(drm->device)->chipset >= 0x25) {
 		ret = RING_SPACE(chan, req->nr_push * 2);
 		if (ret) {
-			NV_ERROR(drm, "cal_space: %d\n", ret);
+			NV_ERROR(cli, "cal_space: %d\n", ret);
 			goto out;
 		}
 
@@ -748,7 +797,7 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 	} else {
 		ret = RING_SPACE(chan, req->nr_push * (2 + NOUVEAU_DMA_SKIPS));
 		if (ret) {
-			NV_ERROR(drm, "jmp_space: %d\n", ret);
+			NV_ERROR(cli, "jmp_space: %d\n", ret);
 			goto out;
 		}
 
@@ -784,9 +833,9 @@ nouveau_gem_ioctl_pushbuf(struct drm_device *dev, void *data,
 		}
 	}
 
-	ret = nouveau_fence_new(chan, &fence);
+	ret = nouveau_fence_new(chan, false, &fence);
 	if (ret) {
-		NV_ERROR(drm, "error fencing pushbuf: %d\n", ret);
+		NV_ERROR(cli, "error fencing pushbuf: %d\n", ret);
 		WIND_RING(chan);
 		goto out;
 	}
@@ -796,8 +845,8 @@ out:
 	nouveau_fence_unref(&fence);
 
 out_prevalid:
-	kfree(bo);
-	kfree(push);
+	u_free(bo);
+	u_free(push);
 
 out_next:
 	if (chan->dma.ib_max) {
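
Note: taken together, the submission path after this diff looks like the outline below (a condensed sketch, not verbatim code; the out:/out_prevalid: unwinding is unchanged context that the hunks only brush against):

    validate_init(chan, file_priv, bo, nr, &op);    /* ww_acquire_init +
                                                       reserve all bos, with
                                                       EDEADLK slowpath retry */
    /* ww_acquire_done(&op.ticket) ends the acquisition phase */
    nouveau_gem_pushbuf_reloc_apply(cli, req, bo);  /* only if do_reloc */
    /* copy push buffers into the channel (ib/call/jump variants above) */
    ret = nouveau_fence_new(chan, false, &fence);
    if (ret)
        WIND_RING(chan);        /* rewind put pointer; submission never runs */
    validate_fini(&op, fence);  /* fence each bo, unreserve on the ticket,
                                   ww_acquire_fini() */

The fence attached by validate_fini() is also what later lets nouveau_gem_object_unmap() defer VMA teardown safely, and validate_fini_list() now tolerates a NULL fence for the error paths that unwind before a fence exists.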
