Diffstat (limited to 'drivers/gpu/drm/nouveau/nouveau_display.c')
 drivers/gpu/drm/nouveau/nouveau_display.c | 781 ++++++++++++++++++++++++-----
 1 file changed, 679 insertions(+), 102 deletions(-)
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 505c6bfb4d7..47ad74255bf 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -24,14 +24,177 @@
*
*/
-#include "drmP.h"
-#include "drm_crtc_helper.h"
-#include "nouveau_drv.h"
-#include "nouveau_fb.h"
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+
#include "nouveau_fbcon.h"
-#include "nouveau_hw.h"
+#include "dispnv04/hw.h"
#include "nouveau_crtc.h"
#include "nouveau_dma.h"
+#include "nouveau_gem.h"
+#include "nouveau_connector.h"
+#include "nv50_display.h"
+
+#include "nouveau_fence.h"
+
+#include <engine/disp.h>
+
+#include <core/class.h>
+
+static int
+nouveau_display_vblank_handler(void *data, u32 type, int head)
+{
+ struct nouveau_drm *drm = data;
+ drm_handle_vblank(drm->dev, head);
+ return NVKM_EVENT_KEEP;
+}
+
+int
+nouveau_display_vblank_enable(struct drm_device *dev, int head)
+{
+ struct nouveau_display *disp = nouveau_display(dev);
+ if (disp) {
+ nouveau_event_get(disp->vblank[head]);
+ return 0;
+ }
+ return -EIO;
+}
+
+void
+nouveau_display_vblank_disable(struct drm_device *dev, int head)
+{
+ struct nouveau_display *disp = nouveau_display(dev);
+ if (disp)
+ nouveau_event_put(disp->vblank[head]);
+}
+
+static inline int
+calc(int blanks, int blanke, int total, int line)
+{
+ if (blanke >= blanks) {
+ if (line >= blanks)
+ line -= total;
+ } else {
+ if (line >= blanks)
+ line -= total;
+ line -= blanke + 1;
+ }
+ return line;
+}
+
+int
+nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime)
+{
+ const u32 mthd = NV04_DISP_SCANOUTPOS + nouveau_crtc(crtc)->index;
+ struct nouveau_display *disp = nouveau_display(crtc->dev);
+ struct nv04_display_scanoutpos args;
+ int ret, retry = 1;
+
+ do {
+ ret = nv_exec(disp->core, mthd, &args, sizeof(args));
+ if (ret != 0)
+ return 0;
+
+ if (args.vline) {
+ ret |= DRM_SCANOUTPOS_ACCURATE;
+ ret |= DRM_SCANOUTPOS_VALID;
+ break;
+ }
+
+ if (retry) ndelay(crtc->linedur_ns);
+ } while (retry--);
+
+ *hpos = args.hline;
+ *vpos = calc(args.vblanks, args.vblanke, args.vtotal, args.vline);
+ if (stime) *stime = ns_to_ktime(args.time[0]);
+ if (etime) *etime = ns_to_ktime(args.time[1]);
+
+ if (*vpos < 0)
+ ret |= DRM_SCANOUTPOS_INVBL;
+ return ret;
+}
+
+int
+nouveau_display_scanoutpos(struct drm_device *dev, int head, unsigned int flags,
+ int *vpos, int *hpos, ktime_t *stime, ktime_t *etime)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (nouveau_crtc(crtc)->index == head) {
+ return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+ stime, etime);
+ }
+ }
+
+ return 0;
+}
+
+int
+nouveau_display_vblstamp(struct drm_device *dev, int head, int *max_error,
+ struct timeval *time, unsigned flags)
+{
+ struct drm_crtc *crtc;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ if (nouveau_crtc(crtc)->index == head) {
+ return drm_calc_vbltimestamp_from_scanoutpos(dev,
+ head, max_error, time, flags, crtc,
+ &crtc->hwmode);
+ }
+ }
+
+ return -EINVAL;
+}
+
+static void
+nouveau_display_vblank_fini(struct drm_device *dev)
+{
+ struct nouveau_display *disp = nouveau_display(dev);
+ int i;
+
+ drm_vblank_cleanup(dev);
+
+ if (disp->vblank) {
+ for (i = 0; i < dev->mode_config.num_crtc; i++)
+ nouveau_event_ref(NULL, &disp->vblank[i]);
+ kfree(disp->vblank);
+ disp->vblank = NULL;
+ }
+}
+
+static int
+nouveau_display_vblank_init(struct drm_device *dev)
+{
+ struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_disp *pdisp = nouveau_disp(drm->device);
+ int ret, i;
+
+ disp->vblank = kzalloc(dev->mode_config.num_crtc *
+ sizeof(*disp->vblank), GFP_KERNEL);
+ if (!disp->vblank)
+ return -ENOMEM;
+
+ for (i = 0; i < dev->mode_config.num_crtc; i++) {
+ ret = nouveau_event_new(pdisp->vblank, 1, i,
+ nouveau_display_vblank_handler,
+ drm, &disp->vblank[i]);
+ if (ret) {
+ nouveau_display_vblank_fini(dev);
+ return ret;
+ }
+ }
+
+ ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
+ if (ret) {
+ nouveau_display_vblank_fini(dev);
+ return ret;
+ }
+
+ return 0;
+}
static void
nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
@@ -39,7 +202,7 @@ nouveau_user_framebuffer_destroy(struct drm_framebuffer *drm_fb)
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
if (fb->nvbo)
- drm_gem_object_unreference_unlocked(fb->nvbo->gem);
+ drm_gem_object_unreference_unlocked(&fb->nvbo->gem);
drm_framebuffer_cleanup(drm_fb);
kfree(fb);
@@ -52,7 +215,7 @@ nouveau_user_framebuffer_create_handle(struct drm_framebuffer *drm_fb,
{
struct nouveau_framebuffer *fb = nouveau_framebuffer(drm_fb);
- return drm_gem_handle_create(file_priv, fb->nvbo->gem, handle);
+ return drm_gem_handle_create(file_priv, &fb->nvbo->gem, handle);
}
static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
@@ -61,118 +224,421 @@ static const struct drm_framebuffer_funcs nouveau_framebuffer_funcs = {
};
int
-nouveau_framebuffer_init(struct drm_device *dev, struct nouveau_framebuffer *nouveau_fb,
- struct drm_mode_fb_cmd *mode_cmd, struct nouveau_bo *nvbo)
+nouveau_framebuffer_init(struct drm_device *dev,
+ struct nouveau_framebuffer *nv_fb,
+ struct drm_mode_fb_cmd2 *mode_cmd,
+ struct nouveau_bo *nvbo)
{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_framebuffer *fb = &nv_fb->base;
int ret;
- ret = drm_framebuffer_init(dev, &nouveau_fb->base, &nouveau_framebuffer_funcs);
+ drm_helper_mode_fill_fb_struct(fb, mode_cmd);
+ nv_fb->nvbo = nvbo;
+
+ if (nv_device(drm->device)->card_type >= NV_50) {
+ u32 tile_flags = nouveau_bo_tile_layout(nvbo);
+ if (tile_flags == 0x7a00 ||
+ tile_flags == 0xfe00)
+ nv_fb->r_dma = NvEvoFB32;
+ else
+ if (tile_flags == 0x7000)
+ nv_fb->r_dma = NvEvoFB16;
+ else
+ nv_fb->r_dma = NvEvoVRAM_LP;
+
+ switch (fb->depth) {
+ case 8: nv_fb->r_format = 0x1e00; break;
+ case 15: nv_fb->r_format = 0xe900; break;
+ case 16: nv_fb->r_format = 0xe800; break;
+ case 24:
+ case 32: nv_fb->r_format = 0xcf00; break;
+ case 30: nv_fb->r_format = 0xd100; break;
+ default:
+ NV_ERROR(drm, "unknown depth %d\n", fb->depth);
+ return -EINVAL;
+ }
+
+ if (nvbo->tile_flags & NOUVEAU_GEM_TILE_NONCONTIG) {
+ NV_ERROR(drm, "framebuffer requires contiguous bo\n");
+ return -EINVAL;
+ }
+
+ if (nv_device(drm->device)->chipset == 0x50)
+ nv_fb->r_format |= (tile_flags << 8);
+
+ if (!tile_flags) {
+ if (nv_device(drm->device)->card_type < NV_D0)
+ nv_fb->r_pitch = 0x00100000 | fb->pitches[0];
+ else
+ nv_fb->r_pitch = 0x01000000 | fb->pitches[0];
+ } else {
+ u32 mode = nvbo->tile_mode;
+ if (nv_device(drm->device)->card_type >= NV_C0)
+ mode >>= 4;
+ nv_fb->r_pitch = ((fb->pitches[0] / 4) << 4) | mode;
+ }
+ }
+
+ ret = drm_framebuffer_init(dev, fb, &nouveau_framebuffer_funcs);
if (ret) {
return ret;
}
- drm_helper_mode_fill_fb_struct(&nouveau_fb->base, mode_cmd);
- nouveau_fb->nvbo = nvbo;
return 0;
}
static struct drm_framebuffer *
nouveau_user_framebuffer_create(struct drm_device *dev,
struct drm_file *file_priv,
- struct drm_mode_fb_cmd *mode_cmd)
+ struct drm_mode_fb_cmd2 *mode_cmd)
{
struct nouveau_framebuffer *nouveau_fb;
struct drm_gem_object *gem;
- int ret;
+ int ret = -ENOMEM;
- gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handle);
+ gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]);
if (!gem)
return ERR_PTR(-ENOENT);
nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL);
if (!nouveau_fb)
- return ERR_PTR(-ENOMEM);
+ goto err_unref;
ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem));
- if (ret) {
- drm_gem_object_unreference(gem);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto err;
return &nouveau_fb->base;
+
+err:
+ kfree(nouveau_fb);
+err_unref:
+ drm_gem_object_unreference(gem);
+ return ERR_PTR(ret);
}
-const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
+static const struct drm_mode_config_funcs nouveau_mode_config_funcs = {
.fb_create = nouveau_user_framebuffer_create,
.output_poll_changed = nouveau_fbcon_output_poll_changed,
};
+
+struct nouveau_drm_prop_enum_list {
+ u8 gen_mask;
+ int type;
+ char *name;
+};
+
+static struct nouveau_drm_prop_enum_list underscan[] = {
+ { 6, UNDERSCAN_AUTO, "auto" },
+ { 6, UNDERSCAN_OFF, "off" },
+ { 6, UNDERSCAN_ON, "on" },
+ {}
+};
+
+static struct nouveau_drm_prop_enum_list dither_mode[] = {
+ { 7, DITHERING_MODE_AUTO, "auto" },
+ { 7, DITHERING_MODE_OFF, "off" },
+ { 1, DITHERING_MODE_ON, "on" },
+ { 6, DITHERING_MODE_STATIC2X2, "static 2x2" },
+ { 6, DITHERING_MODE_DYNAMIC2X2, "dynamic 2x2" },
+ { 4, DITHERING_MODE_TEMPORAL, "temporal" },
+ {}
+};
+
+static struct nouveau_drm_prop_enum_list dither_depth[] = {
+ { 6, DITHERING_DEPTH_AUTO, "auto" },
+ { 6, DITHERING_DEPTH_6BPC, "6 bpc" },
+ { 6, DITHERING_DEPTH_8BPC, "8 bpc" },
+ {}
+};
+
+#define PROP_ENUM(p,gen,n,list) do { \
+ struct nouveau_drm_prop_enum_list *l = (list); \
+ int c = 0; \
+ while (l->gen_mask) { \
+ if (l->gen_mask & (1 << (gen))) \
+ c++; \
+ l++; \
+ } \
+ if (c) { \
+ p = drm_property_create(dev, DRM_MODE_PROP_ENUM, n, c); \
+ l = (list); \
+ c = 0; \
+ while (p && l->gen_mask) { \
+ if (l->gen_mask & (1 << (gen))) { \
+ drm_property_add_enum(p, c, l->type, l->name); \
+ c++; \
+ } \
+ l++; \
+ } \
+ } \
+} while(0)
+
int
-nouveau_vblank_enable(struct drm_device *dev, int crtc)
+nouveau_display_init(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display *disp = nouveau_display(dev);
+ struct drm_connector *connector;
+ int ret;
- if (dev_priv->card_type >= NV_50)
- nv_mask(dev, NV50_PDISPLAY_INTR_EN_1, 0,
- NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc));
- else
- NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0,
- NV_PCRTC_INTR_0_VBLANK);
+ ret = disp->init(dev);
+ if (ret)
+ return ret;
- return 0;
+ /* enable polling for external displays */
+ drm_kms_helper_poll_enable(dev);
+
+ /* enable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ if (conn->hpd) nouveau_event_get(conn->hpd);
+ }
+
+ return ret;
}
void
-nouveau_vblank_disable(struct drm_device *dev, int crtc)
+nouveau_display_fini(struct drm_device *dev)
{
- struct drm_nouveau_private *dev_priv = dev->dev_private;
+ struct nouveau_display *disp = nouveau_display(dev);
+ struct drm_connector *connector;
- if (dev_priv->card_type >= NV_50)
- nv_mask(dev, NV50_PDISPLAY_INTR_EN_1,
- NV50_PDISPLAY_INTR_EN_1_VBLANK_CRTC_(crtc), 0);
- else
- NVWriteCRTC(dev, crtc, NV_PCRTC_INTR_EN_0, 0);
+ /* disable hotplug interrupts */
+ list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ struct nouveau_connector *conn = nouveau_connector(connector);
+ if (conn->hpd) nouveau_event_put(conn->hpd);
+ }
+
+ drm_kms_helper_poll_disable(dev);
+ disp->fini(dev);
}
-static int
-nouveau_page_flip_reserve(struct nouveau_bo *old_bo,
- struct nouveau_bo *new_bo)
+int
+nouveau_display_create(struct drm_device *dev)
{
- int ret;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_device *device = nouveau_dev(dev);
+ struct nouveau_display *disp;
+ int ret, gen;
- ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
- if (ret)
- return ret;
+ disp = drm->display = kzalloc(sizeof(*disp), GFP_KERNEL);
+ if (!disp)
+ return -ENOMEM;
- ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0);
- if (ret)
- goto fail;
+ drm_mode_config_init(dev);
+ drm_mode_create_scaling_mode_property(dev);
+ drm_mode_create_dvi_i_properties(dev);
+
+ if (nv_device(drm->device)->card_type < NV_50)
+ gen = 0;
+ else
+ if (nv_device(drm->device)->card_type < NV_D0)
+ gen = 1;
+ else
+ gen = 2;
+
+ PROP_ENUM(disp->dithering_mode, gen, "dithering mode", dither_mode);
+ PROP_ENUM(disp->dithering_depth, gen, "dithering depth", dither_depth);
+ PROP_ENUM(disp->underscan_property, gen, "underscan", underscan);
+
+ disp->underscan_hborder_property =
+ drm_property_create_range(dev, 0, "underscan hborder", 0, 128);
+
+ disp->underscan_vborder_property =
+ drm_property_create_range(dev, 0, "underscan vborder", 0, 128);
+
+ if (gen >= 1) {
+ /* -90..+90 */
+ disp->vibrant_hue_property =
+ drm_property_create_range(dev, 0, "vibrant hue", 0, 180);
+
+ /* -100..+100 */
+ disp->color_vibrance_property =
+ drm_property_create_range(dev, 0, "color vibrance", 0, 200);
+ }
+
+ dev->mode_config.funcs = &nouveau_mode_config_funcs;
+ dev->mode_config.fb_base = nv_device_resource_start(device, 1);
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+ if (nv_device(drm->device)->card_type < NV_10) {
+ dev->mode_config.max_width = 2048;
+ dev->mode_config.max_height = 2048;
+ } else
+ if (nv_device(drm->device)->card_type < NV_50) {
+ dev->mode_config.max_width = 4096;
+ dev->mode_config.max_height = 4096;
+ } else {
+ dev->mode_config.max_width = 8192;
+ dev->mode_config.max_height = 8192;
+ }
+
+ dev->mode_config.preferred_depth = 24;
+ dev->mode_config.prefer_shadow = 1;
+
+ if (nv_device(drm->device)->chipset < 0x11)
+ dev->mode_config.async_page_flip = false;
+ else
+ dev->mode_config.async_page_flip = true;
+
+ drm_kms_helper_poll_init(dev);
+ drm_kms_helper_poll_disable(dev);
+
+ if (drm->vbios.dcb.entries) {
+ static const u16 oclass[] = {
+ GM107_DISP_CLASS,
+ NVF0_DISP_CLASS,
+ NVE0_DISP_CLASS,
+ NVD0_DISP_CLASS,
+ NVA3_DISP_CLASS,
+ NV94_DISP_CLASS,
+ NVA0_DISP_CLASS,
+ NV84_DISP_CLASS,
+ NV50_DISP_CLASS,
+ NV04_DISP_CLASS,
+ };
+ int i;
+
+ for (i = 0, ret = -ENODEV; ret && i < ARRAY_SIZE(oclass); i++) {
+ ret = nouveau_object_new(nv_object(drm), NVDRM_DEVICE,
+ NVDRM_DISPLAY, oclass[i],
+ NULL, 0, &disp->core);
+ }
+
+ if (ret == 0) {
+ if (nv_mclass(disp->core) < NV50_DISP_CLASS)
+ ret = nv04_display_create(dev);
+ else
+ ret = nv50_display_create(dev);
+ }
+ } else {
+ ret = 0;
+ }
- ret = ttm_bo_reserve(&old_bo->bo, false, false, false, 0);
if (ret)
- goto fail_unreserve;
+ goto disp_create_err;
+ if (dev->mode_config.num_crtc) {
+ ret = nouveau_display_vblank_init(dev);
+ if (ret)
+ goto vblank_err;
+ }
+
+ nouveau_backlight_init(dev);
return 0;
-fail_unreserve:
- ttm_bo_unreserve(&new_bo->bo);
-fail:
- nouveau_bo_unpin(new_bo);
+vblank_err:
+ disp->dtor(dev);
+disp_create_err:
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
return ret;
}
-static void
-nouveau_page_flip_unreserve(struct nouveau_bo *old_bo,
- struct nouveau_bo *new_bo,
- struct nouveau_fence *fence)
+void
+nouveau_display_destroy(struct drm_device *dev)
{
- nouveau_bo_fence(new_bo, fence);
- ttm_bo_unreserve(&new_bo->bo);
+ struct nouveau_display *disp = nouveau_display(dev);
+ struct nouveau_drm *drm = nouveau_drm(dev);
- nouveau_bo_fence(old_bo, fence);
- ttm_bo_unreserve(&old_bo->bo);
+ nouveau_backlight_exit(dev);
+ nouveau_display_vblank_fini(dev);
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ if (disp->dtor)
+ disp->dtor(dev);
+
+ nouveau_object_del(nv_object(drm), NVDRM_DEVICE, NVDRM_DISPLAY);
+
+ nouveau_drm(dev)->display = NULL;
+ kfree(disp);
+}
+
+int
+nouveau_display_suspend(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_crtc *crtc;
+
+ nouveau_display_fini(dev);
+
+ NV_INFO(drm, "unpinning framebuffer(s)...\n");
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_framebuffer *nouveau_fb;
+
+ nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
+ if (!nouveau_fb || !nouveau_fb->nvbo)
+ continue;
+
+ nouveau_bo_unpin(nouveau_fb->nvbo);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nouveau_bo_unmap(nv_crtc->cursor.nvbo);
+ nouveau_bo_unpin(nv_crtc->cursor.nvbo);
+ }
+
+ return 0;
+}
+
+void
+nouveau_display_repin(struct drm_device *dev)
+{
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct drm_crtc *crtc;
+ int ret;
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_framebuffer *nouveau_fb;
+
+ nouveau_fb = nouveau_framebuffer(crtc->primary->fb);
+ if (!nouveau_fb || !nouveau_fb->nvbo)
+ continue;
+
+ nouveau_bo_pin(nouveau_fb->nvbo, TTM_PL_FLAG_VRAM);
+ }
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ ret = nouveau_bo_pin(nv_crtc->cursor.nvbo, TTM_PL_FLAG_VRAM);
+ if (!ret)
+ ret = nouveau_bo_map(nv_crtc->cursor.nvbo);
+ if (ret)
+ NV_ERROR(drm, "Could not pin/map cursor.\n");
+ }
+}
+
+void
+nouveau_display_resume(struct drm_device *dev)
+{
+ struct drm_crtc *crtc;
+ nouveau_display_init(dev);
+
+ /* Force CLUT to get re-loaded during modeset */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+
+ nv_crtc->lut.depth = 0;
+ }
- nouveau_bo_unpin(old_bo);
+ drm_helper_resume_force_mode(dev);
+
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
+ u32 offset = nv_crtc->cursor.nvbo->bo.offset;
+
+ nv_crtc->cursor.set_offset(nv_crtc, offset);
+ nv_crtc->cursor.set_pos(nv_crtc, nv_crtc->cursor_saved_x,
+ nv_crtc->cursor_saved_y);
+ }
}
static int
@@ -182,13 +648,15 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
struct nouveau_page_flip_state *s,
struct nouveau_fence **pfence)
{
- struct drm_device *dev = chan->dev;
+ struct nouveau_fence_chan *fctx = chan->fence;
+ struct nouveau_drm *drm = chan->drm;
+ struct drm_device *dev = drm->dev;
unsigned long flags;
int ret;
/* Queue it to the pending list */
spin_lock_irqsave(&dev->event_lock, flags);
- list_add_tail(&s->head, &chan->nvsw.flip);
+ list_add_tail(&s->head, &fctx->flip);
spin_unlock_irqrestore(&dev->event_lock, flags);
/* Synchronize with the old framebuffer */
@@ -201,11 +669,14 @@ nouveau_page_flip_emit(struct nouveau_channel *chan,
if (ret)
goto fail;
- BEGIN_RING(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
- OUT_RING(chan, 0);
- FIRE_RING(chan);
+ if (nv_device(drm->device)->card_type < NV_C0)
+ BEGIN_NV04(chan, NvSubSw, NV_SW_PAGE_FLIP, 1);
+ else
+ BEGIN_NVC0(chan, FermiSw, NV_SW_PAGE_FLIP, 1);
+ OUT_RING (chan, 0x00000000);
+ FIRE_RING (chan);
- ret = nouveau_fence_new(chan, pfence, true);
+ ret = nouveau_fence_new(chan, false, pfence);
if (ret)
goto fail;
@@ -219,56 +690,104 @@ fail:
int
nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
- struct drm_pending_vblank_event *event)
+ struct drm_pending_vblank_event *event, u32 flags)
{
+ const int swap_interval = (flags & DRM_MODE_PAGE_FLIP_ASYNC) ? 0 : 1;
struct drm_device *dev = crtc->dev;
- struct drm_nouveau_private *dev_priv = dev->dev_private;
- struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->fb)->nvbo;
+ struct nouveau_drm *drm = nouveau_drm(dev);
+ struct nouveau_bo *old_bo = nouveau_framebuffer(crtc->primary->fb)->nvbo;
struct nouveau_bo *new_bo = nouveau_framebuffer(fb)->nvbo;
struct nouveau_page_flip_state *s;
- struct nouveau_channel *chan;
+ struct nouveau_channel *chan = drm->channel;
struct nouveau_fence *fence;
int ret;
- if (dev_priv->engine.graph.accel_blocked)
+ if (!drm->channel)
return -ENODEV;
s = kzalloc(sizeof(*s), GFP_KERNEL);
if (!s)
return -ENOMEM;
- /* Don't let the buffers go away while we flip */
- ret = nouveau_page_flip_reserve(old_bo, new_bo);
+ if (new_bo != old_bo) {
+ ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM);
+ if (ret)
+ goto fail_free;
+ }
+
+ mutex_lock(&chan->cli->mutex);
+
+ /* synchronise rendering channel with the kernel's channel */
+ spin_lock(&new_bo->bo.bdev->fence_lock);
+ fence = nouveau_fence_ref(new_bo->bo.sync_obj);
+ spin_unlock(&new_bo->bo.bdev->fence_lock);
+ ret = nouveau_fence_sync(fence, chan);
+ nouveau_fence_unref(&fence);
+ if (ret)
+ goto fail_unpin;
+
+ ret = ttm_bo_reserve(&old_bo->bo, true, false, false, NULL);
if (ret)
- goto fail_free;
+ goto fail_unpin;
/* Initialize a page flip struct */
*s = (struct nouveau_page_flip_state)
- { { }, s->event, nouveau_crtc(crtc)->index,
- fb->bits_per_pixel, fb->pitch, crtc->x, crtc->y,
+ { { }, event, nouveau_crtc(crtc)->index,
+ fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
new_bo->bo.offset };
- /* Choose the channel the flip will be handled in */
- chan = nouveau_fence_channel(new_bo->bo.sync_obj);
- if (!chan)
- chan = nouveau_channel_get_unlocked(dev_priv->channel);
- mutex_lock(&chan->mutex);
+ /* Keep vblanks on during flip, for the target crtc of this flip */
+ drm_vblank_get(dev, nouveau_crtc(crtc)->index);
/* Emit a page flip */
+ if (nv_device(drm->device)->card_type >= NV_50) {
+ ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
+ if (ret)
+ goto fail_unreserve;
+ } else {
+ struct nv04_display *dispnv04 = nv04_display(dev);
+ int head = nouveau_crtc(crtc)->index;
+
+ if (swap_interval) {
+ ret = RING_SPACE(chan, 8);
+ if (ret)
+ goto fail_unreserve;
+
+ BEGIN_NV04(chan, NvSubImageBlit, 0x012c, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0134, 1);
+ OUT_RING (chan, head);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0100, 1);
+ OUT_RING (chan, 0);
+ BEGIN_NV04(chan, NvSubImageBlit, 0x0130, 1);
+ OUT_RING (chan, 0);
+ }
+
+ nouveau_bo_ref(new_bo, &dispnv04->image[head]);
+ }
+
ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence);
- nouveau_channel_put(&chan);
if (ret)
goto fail_unreserve;
+ mutex_unlock(&chan->cli->mutex);
/* Update the crtc struct and cleanup */
- crtc->fb = fb;
+ crtc->primary->fb = fb;
- nouveau_page_flip_unreserve(old_bo, new_bo, fence);
+ nouveau_bo_fence(old_bo, fence);
+ ttm_bo_unreserve(&old_bo->bo);
+ if (old_bo != new_bo)
+ nouveau_bo_unpin(old_bo);
nouveau_fence_unref(&fence);
return 0;
fail_unreserve:
- nouveau_page_flip_unreserve(old_bo, new_bo, NULL);
+ drm_vblank_put(dev, nouveau_crtc(crtc)->index);
+ ttm_bo_unreserve(&old_bo->bo);
+fail_unpin:
+ mutex_unlock(&chan->cli->mutex);
+ if (old_bo != new_bo)
+ nouveau_bo_unpin(new_bo);
fail_free:
kfree(s);
return ret;
@@ -278,36 +797,94 @@ int
nouveau_finish_page_flip(struct nouveau_channel *chan,
struct nouveau_page_flip_state *ps)
{
- struct drm_device *dev = chan->dev;
+ struct nouveau_fence_chan *fctx = chan->fence;
+ struct nouveau_drm *drm = chan->drm;
+ struct drm_device *dev = drm->dev;
struct nouveau_page_flip_state *s;
unsigned long flags;
+ int crtcid = -1;
spin_lock_irqsave(&dev->event_lock, flags);
- if (list_empty(&chan->nvsw.flip)) {
- NV_ERROR(dev, "Unexpected pageflip in channel %d.\n", chan->id);
+ if (list_empty(&fctx->flip)) {
+ NV_ERROR(drm, "unexpected pageflip\n");
spin_unlock_irqrestore(&dev->event_lock, flags);
return -EINVAL;
}
- s = list_first_entry(&chan->nvsw.flip,
- struct nouveau_page_flip_state, head);
+ s = list_first_entry(&fctx->flip, struct nouveau_page_flip_state, head);
if (s->event) {
- struct drm_pending_vblank_event *e = s->event;
- struct timeval now;
-
- do_gettimeofday(&now);
- e->event.sequence = 0;
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
- list_add_tail(&e->base.link, &e->base.file_priv->event_list);
- wake_up_interruptible(&e->base.file_priv->event_wait);
+ /* Vblank timestamps/counts are only correct on >= NV-50 */
+ if (nv_device(drm->device)->card_type >= NV_50)
+ crtcid = s->crtc;
+
+ drm_send_vblank_event(dev, crtcid, s->event);
}
+ /* Give up ownership of vblank for page-flipped crtc */
+ drm_vblank_put(dev, s->crtc);
+
list_del(&s->head);
- *ps = *s;
+ if (ps)
+ *ps = *s;
kfree(s);
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
}
+
+int
+nouveau_flip_complete(void *data)
+{
+ struct nouveau_channel *chan = data;
+ struct nouveau_drm *drm = chan->drm;
+ struct nouveau_page_flip_state state;
+
+ if (!nouveau_finish_page_flip(chan, &state)) {
+ if (nv_device(drm->device)->card_type < NV_50) {
+ nv_set_crtc_base(drm->dev, state.crtc, state.offset +
+ state.y * state.pitch +
+ state.x * state.bpp / 8);
+ }
+ }
+
+ return 0;
+}
+
+int
+nouveau_display_dumb_create(struct drm_file *file_priv, struct drm_device *dev,
+ struct drm_mode_create_dumb *args)
+{
+ struct nouveau_bo *bo;
+ int ret;
+
+ args->pitch = roundup(args->width * (args->bpp / 8), 256);
+ args->size = args->pitch * args->height;
+ args->size = roundup(args->size, PAGE_SIZE);
+
+ ret = nouveau_gem_new(dev, args->size, 0, NOUVEAU_GEM_DOMAIN_VRAM, 0, 0, &bo);
+ if (ret)
+ return ret;
+
+ ret = drm_gem_handle_create(file_priv, &bo->gem, &args->handle);
+ drm_gem_object_unreference_unlocked(&bo->gem);
+ return ret;
+}
+
+int
+nouveau_display_dumb_map_offset(struct drm_file *file_priv,
+ struct drm_device *dev,
+ uint32_t handle, uint64_t *poffset)
+{
+ struct drm_gem_object *gem;
+
+ gem = drm_gem_object_lookup(dev, file_priv, handle);
+ if (gem) {
+ struct nouveau_bo *bo = nouveau_gem_object(gem);
+ *poffset = drm_vma_node_offset_addr(&bo->bo.vma_node);
+ drm_gem_object_unreference_unlocked(gem);
+ return 0;
+ }
+
+ return -ENOENT;
+}