about summary refs log tree commit diff
path: root/drivers/gpu/drm/nouveau/nouveau_fence.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_fence.c')
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 378
1 file changed, 227 insertions(+), 151 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index faddf53ff9e..ab5ea3b0d66 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -24,185 +24,240 @@
*
*/
-#include "drmP.h"
-#include "drm.h"
+#include <drm/drmP.h>
-#include "nouveau_drv.h"
-#include "nouveau_dma.h"
+#include <linux/ktime.h>
+#include <linux/hrtimer.h>
-#define USE_REFCNT (dev_priv->card_type >= NV_10)
+#include "nouveau_drm.h"
+#include "nouveau_dma.h"
+#include "nouveau_fence.h"
-struct nouveau_fence {
- struct nouveau_channel *channel;
- struct kref refcount;
- struct list_head entry;
+#include <engine/fifo.h>
- uint32_t sequence;
- bool signalled;
+struct fence_work {
+ struct work_struct base;
+ struct list_head head;
+ void (*func)(void *);
+ void *data;
};
-static inline struct nouveau_fence *
-nouveau_fence(void *sync_obj)
+static void
+nouveau_fence_signal(struct nouveau_fence *fence)
{
- return (struct nouveau_fence *)sync_obj;
+ struct fence_work *work, *temp;
+
+ list_for_each_entry_safe(work, temp, &fence->work, head) {
+ schedule_work(&work->base);
+ list_del(&work->head);
+ }
+
+ fence->channel = NULL;
+ list_del(&fence->head);
}
-static void
-nouveau_fence_del(struct kref *ref)
+void
+nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
- struct nouveau_fence *fence =
- container_of(ref, struct nouveau_fence, refcount);
+ struct nouveau_fence *fence, *fnext;
+ spin_lock(&fctx->lock);
+ list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ nouveau_fence_signal(fence);
+ }
+ spin_unlock(&fctx->lock);
+}
- kfree(fence);
+void
+nouveau_fence_context_new(struct nouveau_fence_chan *fctx)
+{
+ INIT_LIST_HEAD(&fctx->flip);
+ INIT_LIST_HEAD(&fctx->pending);
+ spin_lock_init(&fctx->lock);
+}
+
+static void
+nouveau_fence_work_handler(struct work_struct *kwork)
+{
+ struct fence_work *work = container_of(kwork, typeof(*work), base);
+ work->func(work->data);
+ kfree(work);
}
void
-nouveau_fence_update(struct nouveau_channel *chan)
+nouveau_fence_work(struct nouveau_fence *fence,
+ void (*func)(void *), void *data)
{
- struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
- struct list_head *entry, *tmp;
- struct nouveau_fence *fence;
- uint32_t sequence;
+ struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fence_chan *fctx;
+ struct fence_work *work = NULL;
- if (USE_REFCNT)
- sequence = nvchan_rd32(chan, 0x48);
- else
- sequence = chan->fence.last_sequence_irq;
+ if (nouveau_fence_done(fence)) {
+ func(data);
+ return;
+ }
- if (chan->fence.sequence_ack == sequence)
+ fctx = chan->fence;
+ work = kmalloc(sizeof(*work), GFP_KERNEL);
+ if (!work) {
+ WARN_ON(nouveau_fence_wait(fence, false, false));
+ func(data);
return;
- chan->fence.sequence_ack = sequence;
+ }
- list_for_each_safe(entry, tmp, &chan->fence.pending) {
- fence = list_entry(entry, struct nouveau_fence, entry);
+ spin_lock(&fctx->lock);
+ if (!fence->channel) {
+ spin_unlock(&fctx->lock);
+ kfree(work);
+ func(data);
+ return;
+ }
- sequence = fence->sequence;
- fence->signalled = true;
- list_del(&fence->entry);
- kref_put(&fence->refcount, nouveau_fence_del);
+ INIT_WORK(&work->base, nouveau_fence_work_handler);
+ work->func = func;
+ work->data = data;
+ list_add(&work->head, &fence->work);
+ spin_unlock(&fctx->lock);
+}
- if (sequence == chan->fence.sequence_ack)
+static void
+nouveau_fence_update(struct nouveau_channel *chan)
+{
+ struct nouveau_fence_chan *fctx = chan->fence;
+ struct nouveau_fence *fence, *fnext;
+
+ spin_lock(&fctx->lock);
+ list_for_each_entry_safe(fence, fnext, &fctx->pending, head) {
+ if (fctx->read(chan) < fence->sequence)
break;
+
+ nouveau_fence_signal(fence);
+ nouveau_fence_unref(&fence);
}
+ spin_unlock(&fctx->lock);
}
int
-nouveau_fence_new(struct nouveau_channel *chan, struct nouveau_fence **pfence,
- bool emit)
+nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- struct nouveau_fence *fence;
- int ret = 0;
+ struct nouveau_fence_chan *fctx = chan->fence;
+ int ret;
- fence = kzalloc(sizeof(*fence), GFP_KERNEL);
- if (!fence)
- return -ENOMEM;
- kref_init(&fence->refcount);
- fence->channel = chan;
+ fence->channel = chan;
+ fence->timeout = jiffies + (15 * HZ);
+ fence->sequence = ++fctx->sequence;
- if (emit)
- ret = nouveau_fence_emit(fence);
+ ret = fctx->emit(fence);
+ if (!ret) {
+ kref_get(&fence->kref);
+ spin_lock(&fctx->lock);
+ list_add_tail(&fence->head, &fctx->pending);
+ spin_unlock(&fctx->lock);
+ }
- if (ret)
- nouveau_fence_unref((void *)&fence);
- *pfence = fence;
return ret;
}
-struct nouveau_channel *
-nouveau_fence_channel(struct nouveau_fence *fence)
+bool
+nouveau_fence_done(struct nouveau_fence *fence)
{
- return fence ? fence->channel : NULL;
+ if (fence->channel)
+ nouveau_fence_update(fence->channel);
+ return !fence->channel;
}
-int
-nouveau_fence_emit(struct nouveau_fence *fence)
+static int
+nouveau_fence_wait_uevent_handler(void *data, u32 type, int index)
+{
+ struct nouveau_fence_priv *priv = data;
+ wake_up_all(&priv->waiting);
+ return NVKM_EVENT_KEEP;
+}
+
+static int
+nouveau_fence_wait_uevent(struct nouveau_fence *fence, bool intr)
+
{
- struct drm_nouveau_private *dev_priv = fence->channel->dev->dev_private;
struct nouveau_channel *chan = fence->channel;
- unsigned long flags;
- int ret;
+ struct nouveau_fifo *pfifo = nouveau_fifo(chan->drm->device);
+ struct nouveau_fence_priv *priv = chan->drm->fence;
+ struct nouveau_eventh *handler;
+ int ret = 0;
- ret = RING_SPACE(chan, 2);
+ ret = nouveau_event_new(pfifo->uevent, 1, 0,
+ nouveau_fence_wait_uevent_handler,
+ priv, &handler);
if (ret)
return ret;
- if (unlikely(chan->fence.sequence == chan->fence.sequence_ack - 1)) {
- spin_lock_irqsave(&chan->fence.lock, flags);
- nouveau_fence_update(chan);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
+ nouveau_event_get(handler);
+
+ if (fence->timeout) {
+ unsigned long timeout = fence->timeout - jiffies;
+
+ if (time_before(jiffies, fence->timeout)) {
+ if (intr) {
+ ret = wait_event_interruptible_timeout(
+ priv->waiting,
+ nouveau_fence_done(fence),
+ timeout);
+ } else {
+ ret = wait_event_timeout(priv->waiting,
+ nouveau_fence_done(fence),
+ timeout);
+ }
+ }
- BUG_ON(chan->fence.sequence ==
- chan->fence.sequence_ack - 1);
+ if (ret >= 0) {
+ fence->timeout = jiffies + ret;
+ if (time_after_eq(jiffies, fence->timeout))
+ ret = -EBUSY;
+ }
+ } else {
+ if (intr) {
+ ret = wait_event_interruptible(priv->waiting,
+ nouveau_fence_done(fence));
+ } else {
+ wait_event(priv->waiting, nouveau_fence_done(fence));
+ }
}
- fence->sequence = ++chan->fence.sequence;
-
- kref_get(&fence->refcount);
- spin_lock_irqsave(&chan->fence.lock, flags);
- list_add_tail(&fence->entry, &chan->fence.pending);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
-
- BEGIN_RING(chan, NvSubSw, USE_REFCNT ? 0x0050 : 0x0150, 1);
- OUT_RING(chan, fence->sequence);
- FIRE_RING(chan);
+ nouveau_event_ref(NULL, &handler);
+ if (unlikely(ret < 0))
+ return ret;
return 0;
}
-void
-nouveau_fence_unref(void **sync_obj)
-{
- struct nouveau_fence *fence = nouveau_fence(*sync_obj);
-
- if (fence)
- kref_put(&fence->refcount, nouveau_fence_del);
- *sync_obj = NULL;
-}
-
-void *
-nouveau_fence_ref(void *sync_obj)
-{
- struct nouveau_fence *fence = nouveau_fence(sync_obj);
-
- kref_get(&fence->refcount);
- return sync_obj;
-}
-
-bool
-nouveau_fence_signalled(void *sync_obj, void *sync_arg)
-{
- struct nouveau_fence *fence = nouveau_fence(sync_obj);
- struct nouveau_channel *chan = fence->channel;
- unsigned long flags;
-
- if (fence->signalled)
- return true;
-
- spin_lock_irqsave(&chan->fence.lock, flags);
- nouveau_fence_update(chan);
- spin_unlock_irqrestore(&chan->fence.lock, flags);
- return fence->signalled;
-}
-
int
-nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
+nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
- unsigned long timeout = jiffies + (3 * DRM_HZ);
+ struct nouveau_channel *chan = fence->channel;
+ struct nouveau_fence_priv *priv = chan ? chan->drm->fence : NULL;
+ unsigned long sleep_time = NSEC_PER_MSEC / 1000;
+ ktime_t t;
int ret = 0;
- __set_current_state(intr ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
-
- while (1) {
- if (nouveau_fence_signalled(sync_obj, sync_arg))
- break;
+ while (priv && priv->uevent && lazy && !nouveau_fence_done(fence)) {
+ ret = nouveau_fence_wait_uevent(fence, intr);
+ if (ret < 0)
+ return ret;
+ }
- if (time_after_eq(jiffies, timeout)) {
+ while (!nouveau_fence_done(fence)) {
+ if (fence->timeout && time_after_eq(jiffies, fence->timeout)) {
ret = -EBUSY;
break;
}
- if (lazy)
- schedule_timeout(1);
+ __set_current_state(intr ? TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+ if (lazy) {
+ t = ktime_set(0, sleep_time);
+ schedule_hrtimeout(&t, HRTIMER_MODE_REL);
+ sleep_time *= 2;
+ if (sleep_time > NSEC_PER_MSEC)
+ sleep_time = NSEC_PER_MSEC;
+ }
if (intr && signal_pending(current)) {
ret = -ERESTARTSYS;
@@ -211,52 +266,73 @@ nouveau_fence_wait(void *sync_obj, void *sync_arg, bool lazy, bool intr)
}
__set_current_state(TASK_RUNNING);
-
return ret;
}
int
-nouveau_fence_flush(void *sync_obj, void *sync_arg)
+nouveau_fence_sync(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
- return 0;
+ struct nouveau_fence_chan *fctx = chan->fence;
+ struct nouveau_channel *prev;
+ int ret = 0;
+
+ prev = fence ? fence->channel : NULL;
+ if (prev) {
+ if (unlikely(prev != chan && !nouveau_fence_done(fence))) {
+ ret = fctx->sync(fence, prev, chan);
+ if (unlikely(ret))
+ ret = nouveau_fence_wait(fence, true, false);
+ }
+ }
+
+ return ret;
}
-void
-nouveau_fence_handler(struct drm_device *dev, int channel)
+static void
+nouveau_fence_del(struct kref *kref)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_channel *chan = NULL;
-
- if (channel >= 0 && channel < dev_priv->engine.fifo.channels)
- chan = dev_priv->fifos[channel];
+ struct nouveau_fence *fence = container_of(kref, typeof(*fence), kref);
+ kfree(fence);
+}
- if (chan) {
- spin_lock_irq(&chan->fence.lock);
- nouveau_fence_update(chan);
- spin_unlock_irq(&chan->fence.lock);
- }
+void
+nouveau_fence_unref(struct nouveau_fence **pfence)
+{
+ if (*pfence)
+ kref_put(&(*pfence)->kref, nouveau_fence_del);
+ *pfence = NULL;
}
-int
-nouveau_fence_init(struct nouveau_channel *chan)
+struct nouveau_fence *
+nouveau_fence_ref(struct nouveau_fence *fence)
{
- INIT_LIST_HEAD(&chan->fence.pending);
- spin_lock_init(&chan->fence.lock);
- return 0;
+ if (fence)
+ kref_get(&fence->kref);
+ return fence;
}
-void
-nouveau_fence_fini(struct nouveau_channel *chan)
+int
+nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
+ struct nouveau_fence **pfence)
{
- struct list_head *entry, *tmp;
struct nouveau_fence *fence;
+ int ret = 0;
- list_for_each_safe(entry, tmp, &chan->fence.pending) {
- fence = list_entry(entry, struct nouveau_fence, entry);
+ if (unlikely(!chan->fence))
+ return -ENODEV;
- fence->signalled = true;
- list_del(&fence->entry);
- kref_put(&fence->refcount, nouveau_fence_del);
- }
-}
+ fence = kzalloc(sizeof(*fence), GFP_KERNEL);
+ if (!fence)
+ return -ENOMEM;
+ INIT_LIST_HEAD(&fence->work);
+ fence->sysmem = sysmem;
+ kref_init(&fence->kref);
+
+ ret = nouveau_fence_emit(fence, chan);
+ if (ret)
+ nouveau_fence_unref(&fence);
+
+ *pfence = fence;
+ return ret;
+}