Diffstat (limited to 'drivers/gpu')
118 files changed, 1990 insertions, 1132 deletions
diff --git a/drivers/gpu/drm/ast/ast_ttm.c b/drivers/gpu/drm/ast/ast_ttm.c
index 98d670825a1..6e8887fe6c1 100644
--- a/drivers/gpu/drm/ast/ast_ttm.c
+++ b/drivers/gpu/drm/ast/ast_ttm.c
@@ -323,6 +323,7 @@ int ast_bo_create(struct drm_device *dev, int size, int align,
     astbo->gem.driver_private = NULL;
     astbo->bo.bdev = &ast->ttm.bdev;
+    astbo->bo.bdev->dev_mapping = dev->dev_mapping;

     ast_ttm_placement(astbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 0047012045c..69fd8f1ac8d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -328,6 +328,7 @@ int cirrus_bo_create(struct drm_device *dev, int size, int align,
     cirrusbo->gem.driver_private = NULL;
     cirrusbo->bo.bdev = &cirrus->ttm.bdev;
+    cirrusbo->bo.bdev->dev_mapping = dev->dev_mapping;

     cirrus_ttm_placement(cirrusbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);

diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 738a4294d82..6a647493ca7 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -677,6 +677,11 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
             /* don't break so fail path works correct */
             fail = 1;
             break;
+
+            if (connector->dpms != DRM_MODE_DPMS_ON) {
+                DRM_DEBUG_KMS("connector dpms not on, full mode switch\n");
+                mode_changed = true;
+            }
         }
     }
@@ -754,6 +759,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
                 ret = -EINVAL;
                 goto fail;
             }
+            DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+            for (i = 0; i < set->num_connectors; i++) {
+                DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+                              drm_get_connector_name(set->connectors[i]));
+                set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
+            }
         }
         drm_helper_disable_unused_functions(dev);
     } else if (fb_changed) {
@@ -771,22 +782,6 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
         }
     }

-    /*
-     * crtc set_config helpers implicit set the crtc and all connected
-     * encoders to DPMS on for a full mode set. But for just an fb update it
-     * doesn't do that. To not confuse userspace, do an explicit DPMS_ON
-     * unconditionally. This will also ensure driver internal dpms state is
-     * consistent again.
-     */
-    if (set->crtc->enabled) {
-        DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
-        for (i = 0; i < set->num_connectors; i++) {
-            DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
-                          drm_get_connector_name(set->connectors[i]));
-            set->connectors[i]->funcs->dpms(set->connectors[i], DRM_MODE_DPMS_ON);
-        }
-    }
-
     kfree(save_connectors);
     kfree(save_encoders);
     kfree(save_crtcs);

diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 8bcce7866d3..f92da0a32f0 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -708,7 +708,10 @@ int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev, int crtc,
     /* Subtract time delta from raw timestamp to get final
      * vblank_time timestamp for end of vblank.
      */
-    etime = ktime_sub_ns(etime, delta_ns);
+    if (delta_ns < 0)
+        etime = ktime_add_ns(etime, -delta_ns);
+    else
+        etime = ktime_sub_ns(etime, delta_ns);
     *vblank_time = ktime_to_timeval(etime);

     DRM_DEBUG("crtc %d : v %d p(%d,%d)@ %ld.%ld -> %ld.%ld [e %d us, %d rep]\n",
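The drm_irq.c hunk above works around the fact that ktime_sub_ns() takes an unsigned nanosecond count: passing a negative delta would be interpreted as a huge positive offset. A minimal sketch of the pattern, with a hypothetical helper name:

#include <linux/ktime.h>

/* Fold a signed nanosecond delta into a ktime_t; the ktime helpers
 * only accept unsigned counts, so a negative delta becomes an add. */
static ktime_t apply_signed_delta(ktime_t t, s64 delta_ns)
{
    if (delta_ns < 0)
        return ktime_add_ns(t, -delta_ns);
    return ktime_sub_ns(t, delta_ns);
}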
diff --git a/drivers/gpu/drm/exynos/exynos_ddc.c b/drivers/gpu/drm/exynos/exynos_ddc.c
index 95c75edef01..30ef41bcd7b 100644
--- a/drivers/gpu/drm/exynos/exynos_ddc.c
+++ b/drivers/gpu/drm/exynos/exynos_ddc.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/i2c.h>
-#include <linux/module.h>

 #include "exynos_drm_drv.h"

diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 61b094f689a..6e047bd53e2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/mfd/syscon.h>
 #include <linux/regmap.h>

diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 3e106beca5b..1c263dac3c1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -14,7 +14,6 @@
 #include <drm/drmP.h>
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/of_device.h>
@@ -130,7 +129,6 @@ static const struct of_device_id fimd_driver_dt_match[] = {
       .data = &exynos5_fimd_driver_data },
     {},
 };
-MODULE_DEVICE_TABLE(of, fimd_driver_dt_match);
 #endif

 static inline struct fimd_driver_data *drm_fimd_get_driver_data(
@@ -1082,7 +1080,6 @@ static struct platform_device_id fimd_driver_ids[] = {
     },
     {},
 };
-MODULE_DEVICE_TABLE(platform, fimd_driver_ids);

 static const struct dev_pm_ops fimd_pm_ops = {
     SET_SYSTEM_SLEEP_PM_OPS(fimd_suspend, fimd_resume)

diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index 42a5a546607..eddea494148 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -8,7 +8,6 @@
  */

 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/clk.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
@@ -806,9 +805,20 @@ static void g2d_dma_start(struct g2d_data *g2d,
     struct g2d_cmdlist_node *node =
                 list_first_entry(&runqueue_node->run_cmdlist,
                         struct g2d_cmdlist_node, list);
+    int ret;
+
+    ret = pm_runtime_get_sync(g2d->dev);
+    if (ret < 0) {
+        dev_warn(g2d->dev, "failed pm power on.\n");
+        return;
+    }

-    pm_runtime_get_sync(g2d->dev);
-    clk_enable(g2d->gate_clk);
+    ret = clk_prepare_enable(g2d->gate_clk);
+    if (ret < 0) {
+        dev_warn(g2d->dev, "failed to enable clock.\n");
+        pm_runtime_put_sync(g2d->dev);
+        return;
+    }

     writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
     writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
@@ -861,7 +871,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
                         runqueue_work);

     mutex_lock(&g2d->runqueue_mutex);
-    clk_disable(g2d->gate_clk);
+    clk_disable_unprepare(g2d->gate_clk);
     pm_runtime_put_sync(g2d->dev);
     complete(&g2d->runqueue_node->complete);
@@ -1521,7 +1531,6 @@ static const struct of_device_id exynos_g2d_match[] = {
     { .compatible = "samsung,exynos5250-g2d" },
     {},
 };
-MODULE_DEVICE_TABLE(of, exynos_g2d_match);
 #endif

 struct platform_driver g2d_driver = {
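The g2d_dma_start() hunk above shows the standard acquire/enable ordering with unwind on failure: the runtime-PM reference is taken first, and if the clock cannot be prepared and enabled, that reference is dropped before bailing out. A condensed sketch of the pattern (hypothetical device, real clk/pm_runtime APIs):

#include <linux/clk.h>
#include <linux/pm_runtime.h>

static int power_up(struct device *dev, struct clk *gate)
{
    int ret;

    ret = pm_runtime_get_sync(dev);    /* 1. power domain up */
    if (ret < 0)
        return ret;

    ret = clk_prepare_enable(gate);    /* 2. clock on */
    if (ret < 0) {
        pm_runtime_put_sync(dev);      /* unwind step 1 */
        return ret;
    }
    return 0;
}

/* Tear down in reverse order: clk_disable_unprepare(gate), then
 * pm_runtime_put_sync(dev), exactly as g2d_runqueue_worker() does. */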
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 472e3b25e7f..90b8a1a5344 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
 #include <linux/pm_runtime.h>

diff --git a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
index aaa550d622f..8d3bc01d683 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_hdmi.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/wait.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>

diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index b1ef8e7ff9c..d2b6ab4def9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -12,7 +12,6 @@
  *
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/types.h>
 #include <linux/clk.h>
@@ -342,10 +341,10 @@ int exynos_drm_ipp_get_property(struct drm_device *drm_dev, void *data,
      */
     ippdrv = ipp_find_obj(&ctx->ipp_idr, &ctx->ipp_lock,
                     prop_list->ipp_id);
-    if (!ippdrv) {
+    if (IS_ERR(ippdrv)) {
         DRM_ERROR("not found ipp%d driver.\n",
                 prop_list->ipp_id);
-        return -EINVAL;
+        return PTR_ERR(ippdrv);
     }

     prop_list = ippdrv->prop_list;
@@ -970,9 +969,9 @@ int exynos_drm_ipp_queue_buf(struct drm_device *drm_dev, void *data,
     /* find command node */
     c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
         qbuf->prop_id);
-    if (!c_node) {
+    if (IS_ERR(c_node)) {
         DRM_ERROR("failed to get command node.\n");
-        return -EFAULT;
+        return PTR_ERR(c_node);
     }

     /* buffer control */
@@ -1106,9 +1105,9 @@ int exynos_drm_ipp_cmd_ctrl(struct drm_device *drm_dev, void *data,

     c_node = ipp_find_obj(&ctx->prop_idr, &ctx->prop_lock,
         cmd_ctrl->prop_id);
-    if (!c_node) {
+    if (IS_ERR(c_node)) {
         DRM_ERROR("invalid command node list.\n");
-        return -EINVAL;
+        return PTR_ERR(c_node);
     }

     if (!exynos_drm_ipp_check_valid(ippdrv->dev, cmd_ctrl->ctrl,

diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 427640aa514..49669aa24c4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -10,7 +10,6 @@
  */
 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/err.h>
 #include <linux/interrupt.h>
 #include <linux/io.h>

diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 41cc74d83e4..c57c56519ad 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -13,7 +13,6 @@
 #include <drm/drmP.h>

 #include <linux/kernel.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>

 #include <drm/exynos_drm.h>

diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 62ef5971ac3..2f5c6942c96 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -24,7 +24,6 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>
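The exynos_drm_ipp.c callers above switch from NULL checks to the ERR_PTR convention: ipp_find_obj() now encodes an errno in the returned pointer, so callers test with IS_ERR() and propagate PTR_ERR() instead of inventing their own error codes. A minimal sketch of the convention (hypothetical lookup; the <linux/err.h> helpers are real):

#include <linux/err.h>

struct thing { int id; };

static struct thing *thing_find(int id)
{
    if (id < 0)
        return ERR_PTR(-EINVAL);    /* encode the errno in the pointer */
    /* ... return the object on success ... */
    return ERR_PTR(-ENOENT);
}

static int thing_use(int id)
{
    struct thing *t = thing_find(id);

    if (IS_ERR(t))
        return PTR_ERR(t);          /* propagate the original errno */
    return 0;
}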
diff --git a/drivers/gpu/drm/exynos/exynos_hdmiphy.c b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
index ef04255076c..6e320ae9afe 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmiphy.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmiphy.c
@@ -15,7 +15,6 @@
 #include <linux/kernel.h>
 #include <linux/i2c.h>
-#include <linux/module.h>

 #include "exynos_drm_drv.h"
 #include "exynos_hdmi.h"

diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 42ffb71c63b..c9a137caea4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -23,7 +23,6 @@
 #include <linux/spinlock.h>
 #include <linux/wait.h>
 #include <linux/i2c.h>
-#include <linux/module.h>
 #include <linux/platform_device.h>
 #include <linux/interrupt.h>
 #include <linux/irq.h>

diff --git a/drivers/gpu/drm/gma500/psb_intel_sdvo.c b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
index 19e36603b23..3bc8414533c 100644
--- a/drivers/gpu/drm/gma500/psb_intel_sdvo.c
+++ b/drivers/gpu/drm/gma500/psb_intel_sdvo.c
@@ -500,7 +500,8 @@ static bool psb_intel_sdvo_read_response(struct psb_intel_sdvo *psb_intel_sdvo,
                   &status))
         goto log_fail;

-    while (status == SDVO_CMD_STATUS_PENDING && retry--) {
+    while ((status == SDVO_CMD_STATUS_PENDING ||
+        status == SDVO_CMD_STATUS_TARGET_NOT_SPECIFIED) && retry--) {
         udelay(15);
         if (!psb_intel_sdvo_read_byte(psb_intel_sdvo,
                           SDVO_I2C_CMD_STATUS,

diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index cf188ab7051..f4669802a0f 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1495,6 +1495,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     dev_priv->dev = dev;
     dev_priv->info = info;

+    spin_lock_init(&dev_priv->irq_lock);
+    spin_lock_init(&dev_priv->gpu_error.lock);
+    spin_lock_init(&dev_priv->rps.lock);
+    spin_lock_init(&dev_priv->gt_lock);
+    spin_lock_init(&dev_priv->backlight.lock);
+    mutex_init(&dev_priv->dpio_lock);
+    mutex_init(&dev_priv->rps.hw_lock);
+    mutex_init(&dev_priv->modeset_restore_lock);
+
     i915_dump_device_info(dev_priv);

     if (i915_get_bridge_dev(dev)) {
@@ -1585,6 +1594,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     intel_detect_pch(dev);

     intel_irq_init(dev);
+    intel_pm_init(dev);
+    intel_gt_sanitize(dev);
     intel_gt_init(dev);

     /* Try to make sure MCHBAR is enabled before poking at it */
@@ -1610,15 +1621,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     if (!IS_I945G(dev) && !IS_I945GM(dev))
         pci_enable_msi(dev->pdev);

-    spin_lock_init(&dev_priv->irq_lock);
-    spin_lock_init(&dev_priv->gpu_error.lock);
-    spin_lock_init(&dev_priv->rps.lock);
-    spin_lock_init(&dev_priv->backlight.lock);
-    mutex_init(&dev_priv->dpio_lock);
-
-    mutex_init(&dev_priv->rps.hw_lock);
-    mutex_init(&dev_priv->modeset_restore_lock);
-
     dev_priv->num_plane = 1;
     if (IS_VALLEYVIEW(dev))
         dev_priv->num_plane = 2;
@@ -1648,7 +1650,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
     if (INTEL_INFO(dev)->num_pipes) {
         /* Must be done after probing outputs */
         intel_opregion_init(dev);
-        acpi_video_register_with_quirks();
+        acpi_video_register();
     }

     if (IS_GEN5(dev))
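The i915_dma.c hunk moves all lock initialization ahead of intel_irq_init(): any lock that an interrupt or early-init path may take must already be initialized when that path first runs. A condensed sketch of the rule (hypothetical driver, not i915's actual code):

#include <linux/spinlock.h>
#include <linux/mutex.h>

struct foo_priv {
    spinlock_t irq_lock;        /* taken from the IRQ handler */
    struct mutex hw_lock;
};

static int foo_irq_init(struct foo_priv *priv);     /* may fire at once */

static int foo_load(struct foo_priv *priv)
{
    /* initialize every lock before anything can take it */
    spin_lock_init(&priv->irq_lock);
    mutex_init(&priv->hw_lock);

    return foo_irq_init(priv);
}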
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f4af1ca0fb6..45b3c030f48 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -706,7 +706,7 @@ static int i915_drm_thaw(struct drm_device *dev)
 {
     int error = 0;

-    intel_gt_reset(dev);
+    intel_gt_sanitize(dev);

     if (drm_core_check_feature(dev, DRIVER_MODESET)) {
         mutex_lock(&dev->struct_mutex);
@@ -732,7 +732,7 @@ int i915_resume(struct drm_device *dev)

     pci_set_master(dev->pdev);

-    intel_gt_reset(dev);
+    intel_gt_sanitize(dev);

     /*
      * Platforms with opregion should have sane BIOS, older ones (gen3 and
@@ -1253,21 +1253,21 @@ hsw_unclaimed_reg_check(struct drm_i915_private *dev_priv, u32 reg)

 #define __i915_read(x, y) \
 u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+    unsigned long irqflags; \
     u##x val = 0; \
+    spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
     if (IS_GEN5(dev_priv->dev)) \
         ilk_dummy_write(dev_priv); \
     if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
-        unsigned long irqflags; \
-        spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
         if (dev_priv->forcewake_count == 0) \
             dev_priv->gt.force_wake_get(dev_priv); \
         val = read##y(dev_priv->regs + reg); \
         if (dev_priv->forcewake_count == 0) \
             dev_priv->gt.force_wake_put(dev_priv); \
-        spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
     } else { \
         val = read##y(dev_priv->regs + reg); \
     } \
+    spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
     trace_i915_reg_rw(false, reg, val, sizeof(val)); \
     return val; \
 }
@@ -1280,8 +1280,10 @@ __i915_read(64, q)

 #define __i915_write(x, y) \
 void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+    unsigned long irqflags; \
     u32 __fifo_ret = 0; \
     trace_i915_reg_rw(true, reg, val, sizeof(val)); \
+    spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \
     if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \
         __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \
     } \
@@ -1293,6 +1295,7 @@ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
         gen6_gt_check_fifodbg(dev_priv); \
     } \
     hsw_unclaimed_reg_check(dev_priv, reg); \
+    spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \
 }
 __i915_write(8, b)
 __i915_write(16, w)

diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index a416645bcd2..1929bffc1c7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -555,6 +555,7 @@ enum intel_sbi_destination {
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_NO_PCH_PWM_ENABLE (1<<3)

 struct intel_fbdev;
 struct intel_fbc_work;
@@ -1581,9 +1582,10 @@ void i915_hangcheck_elapsed(unsigned long data);
 void i915_handle_error(struct drm_device *dev, bool wedged);

 extern void intel_irq_init(struct drm_device *dev);
+extern void intel_pm_init(struct drm_device *dev);
 extern void intel_hpd_init(struct drm_device *dev);
 extern void intel_gt_init(struct drm_device *dev);
-extern void intel_gt_reset(struct drm_device *dev);
+extern void intel_gt_sanitize(struct drm_device *dev);

 void i915_error_state_free(struct kref *error_ref);
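To see what the macro change buys, here is the 32-bit reader unrolled by hand (a readability sketch of the diff above, not a separate implementation): the entire read, including the forcewake get/put dance, now happens under gt_lock instead of only the forcewake window, closing the race between the forcewake_count check and the MMIO access.

static u32 i915_read32_sketch(struct drm_i915_private *dev_priv, u32 reg)
{
    unsigned long irqflags;
    u32 val;

    spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
    if (NEEDS_FORCE_WAKE(dev_priv, reg)) {
        if (dev_priv->forcewake_count == 0)
            dev_priv->gt.force_wake_get(dev_priv);
        val = readl(dev_priv->regs + reg);
        if (dev_priv->forcewake_count == 0)
            dev_priv->gt.force_wake_put(dev_priv);
    } else {
        val = readl(dev_priv->regs + reg);
    }
    spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
    return val;
}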
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 97afd2639fb..d9e2208cfe9 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2258,7 +2258,17 @@ void i915_gem_restore_fences(struct drm_device *dev)

     for (i = 0; i < dev_priv->num_fence_regs; i++) {
         struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
-        i915_gem_write_fence(dev, i, reg->obj);
+
+        /*
+         * Commit delayed tiling changes if we have an object still
+         * attached to the fence, otherwise just clear the fence.
+         */
+        if (reg->obj) {
+            i915_gem_object_update_fence(reg->obj, reg,
+                             reg->obj->tiling_mode);
+        } else {
+            i915_gem_write_fence(dev, i, NULL);
+        }
     }
 }
@@ -2795,6 +2805,10 @@ static void i915_gem_write_fence(struct drm_device *dev, int reg,
     if (i915_gem_object_needs_mb(dev_priv->fence_regs[reg].obj))
         mb();

+    WARN(obj && (!obj->stride || !obj->tiling_mode),
+         "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+         obj->stride, obj->tiling_mode);
+
     switch (INTEL_INFO(dev)->gen) {
     case 7:
     case 6:
@@ -2836,6 +2850,7 @@ static void i915_gem_object_update_fence(struct drm_i915_gem_object *obj,
         fence->obj = NULL;
         list_del_init(&fence->lru_list);
     }
+    obj->fence_dirty = false;
 }

 static int
@@ -2965,7 +2980,6 @@ i915_gem_object_get_fence(struct drm_i915_gem_object *obj)
         return 0;

     i915_gem_object_update_fence(obj, reg, enable);
-    obj->fence_dirty = false;

     return 0;
 }

diff --git a/drivers/gpu/drm/i915/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
index dc53a527126..9e657833080 100644
--- a/drivers/gpu/drm/i915/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/i915_gem_dmabuf.c
@@ -85,9 +85,17 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
                    struct sg_table *sg,
                    enum dma_data_direction dir)
 {
+    struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
+
+    mutex_lock(&obj->base.dev->struct_mutex);
+
     dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
     sg_free_table(sg);
     kfree(sg);
+
+    i915_gem_object_unpin_pages(obj);
+
+    mutex_unlock(&obj->base.dev->struct_mutex);
 }

 static void i915_gem_dmabuf_release(struct dma_buf *dma_buf)

diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f2326fc60ac..53cddd98540 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -752,6 +752,8 @@
                        will not assert AGPBUSY# and will only
                        be delivered when out of C3. */
 #define   INSTPM_FORCE_ORDERING    (1<<7) /* GEN6+ */
+#define   INSTPM_TLB_INVALIDATE    (1<<9)
+#define   INSTPM_SYNC_FLUSH        (1<<5)
 #define ACTHD   0x020c8
 #define FW_BLC  0x020d8
 #define FW_BLC2 0x020dc
@@ -1856,10 +1858,16 @@
 #define CRT_HOTPLUG_DETECT_VOLTAGE_475MV    (1 << 2)

 #define PORT_HOTPLUG_STAT   (dev_priv->info->display_mmio_offset + 0x61114)
-/* HDMI/DP bits are gen4+ */
-#define   PORTB_HOTPLUG_LIVE_STATUS     (1 << 29)
+/*
+ * HDMI/DP bits are gen4+
+ *
+ * WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
+ * Please check the detailed lore in the commit message for experimental
+ * evidence.
+ */
+#define   PORTD_HOTPLUG_LIVE_STATUS     (1 << 29)
 #define   PORTC_HOTPLUG_LIVE_STATUS     (1 << 28)
-#define   PORTD_HOTPLUG_LIVE_STATUS     (1 << 27)
+#define   PORTB_HOTPLUG_LIVE_STATUS     (1 << 27)
 #define   PORTD_HOTPLUG_INT_STATUS      (3 << 21)
 #define   PORTC_HOTPLUG_INT_STATUS      (3 << 19)
 #define   PORTB_HOTPLUG_INT_STATUS      (3 << 17)
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index 324211ac9c5..b042ee5c407 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -301,7 +301,7 @@ static void intel_ddi_mode_set(struct drm_encoder *encoder,
     struct intel_digital_port *intel_dig_port =
         enc_to_dig_port(encoder);

-    intel_dp->DP = intel_dig_port->port_reversal |
+    intel_dp->DP = intel_dig_port->saved_port_bits |
                DDI_BUF_CTL_ENABLE | DDI_BUF_EMP_400MV_0DB_HSW;
     intel_dp->DP |= DDI_PORT_WIDTH(intel_dp->lane_count);
@@ -1109,7 +1109,8 @@ static void intel_enable_ddi(struct intel_encoder *intel_encoder)
          * enabling the port.
          */
         I915_WRITE(DDI_BUF_CTL(port),
-               intel_dig_port->port_reversal | DDI_BUF_CTL_ENABLE);
+               intel_dig_port->saved_port_bits |
+               DDI_BUF_CTL_ENABLE);
     } else if (type == INTEL_OUTPUT_EDP) {
         struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
@@ -1347,8 +1348,9 @@ void intel_ddi_init(struct drm_device *dev, enum port port)
     intel_encoder->get_config = intel_ddi_get_config;

     intel_dig_port->port = port;
-    intel_dig_port->port_reversal = I915_READ(DDI_BUF_CTL(port)) &
-                    DDI_BUF_PORT_REVERSAL;
+    intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+                      (DDI_BUF_PORT_REVERSAL |
+                       DDI_A_4_LANES);
     intel_dig_port->dp.output_reg = DDI_BUF_CTL(port);

     intel_encoder->type = INTEL_OUTPUT_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 85f3eb74d2b..be79f477a38 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -4913,22 +4913,19 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
     uint32_t tmp;

     tmp = I915_READ(PFIT_CONTROL);
+    if (!(tmp & PFIT_ENABLE))
+        return;
+
     /* Check whether the pfit is attached to our pipe. */
     if (INTEL_INFO(dev)->gen < 4) {
         if (crtc->pipe != PIPE_B)
             return;
-
-        /* gen2/3 store dither state in pfit control, needs to match */
-        pipe_config->gmch_pfit.control = tmp & PANEL_8TO6_DITHER_ENABLE;
     } else {
         if ((tmp & PFIT_PIPE_MASK) != (crtc->pipe << PFIT_PIPE_SHIFT))
             return;
     }

-    if (!(tmp & PFIT_ENABLE))
-        return;
-
-    pipe_config->gmch_pfit.control = I915_READ(PFIT_CONTROL);
+    pipe_config->gmch_pfit.control = tmp;
     pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
     if (INTEL_INFO(dev)->gen < 5)
         pipe_config->gmch_pfit.lvds_border_bits =
@@ -8272,9 +8269,11 @@ check_crtc_state(struct drm_device *dev)

         list_for_each_entry(encoder, &dev->mode_config.encoder_list,
                     base.head) {
+            enum pipe pipe;
             if (encoder->base.crtc != &crtc->base)
                 continue;
-            if (encoder->get_config)
+            if (encoder->get_config &&
+                encoder->get_hw_state(encoder, &pipe))
                 encoder->get_config(encoder, &pipe_config);
         }
@@ -8317,6 +8316,8 @@ check_shared_dpll_state(struct drm_device *dev)
              pll->active, pll->refcount);
         WARN(pll->active && !pll->on,
              "pll in active use but not on in sw tracking\n");
+        WARN(pll->on && !pll->active,
+             "pll in on but not on in use in sw tracking\n");
         WARN(pll->on != active,
              "pll on state mismatch (expected %i, found %i)\n",
              pll->on, active);
@@ -8541,15 +8542,20 @@ static void intel_set_config_restore_state(struct drm_device *dev,
 }

 static bool
-is_crtc_connector_off(struct drm_crtc *crtc, struct drm_connector *connectors,
-              int num_connectors)
+is_crtc_connector_off(struct drm_mode_set *set)
 {
     int i;

-    for (i = 0; i < num_connectors; i++)
-        if (connectors[i].encoder &&
-            connectors[i].encoder->crtc == crtc &&
-            connectors[i].dpms != DRM_MODE_DPMS_ON)
+    if (set->num_connectors == 0)
+        return false;
+
+    if (WARN_ON(set->connectors == NULL))
+        return false;
+
+    for (i = 0; i < set->num_connectors; i++)
+        if (set->connectors[i]->encoder &&
+            set->connectors[i]->encoder->crtc == set->crtc &&
+            set->connectors[i]->dpms != DRM_MODE_DPMS_ON)
             return true;

     return false;
@@ -8562,10 +8568,8 @@ intel_set_config_compute_mode_changes(struct drm_mode_set *set,

     /* We should be able to check here if the fb has the same properties
      * and then just flip_or_move it */
-    if (set->connectors != NULL &&
-        is_crtc_connector_off(set->crtc, *set->connectors,
-                  set->num_connectors)) {
-        config->mode_changed = true;
+    if (is_crtc_connector_off(set)) {
+        config->mode_changed = true;
     } else if (set->crtc->fb != set->fb) {
         /* If we have no fb then treat it as a full mode set */
         if (set->crtc->fb == NULL) {
@@ -9398,6 +9402,17 @@ static void quirk_invert_brightness(struct drm_device *dev)
     DRM_INFO("applying inverted panel brightness quirk\n");
 }

+/*
+ * Some machines (Dell XPS13) suffer broken backlight controls if
+ * BLM_PCH_PWM_ENABLE is set.
+ */
+static void quirk_no_pcm_pwm_enable(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;
+    dev_priv->quirks |= QUIRK_NO_PCH_PWM_ENABLE;
+    DRM_INFO("applying no-PCH_PWM_ENABLE quirk\n");
+}
+
 struct intel_quirk {
     int device;
     int subsystem_vendor;
@@ -9467,6 +9482,11 @@ static struct intel_quirk intel_quirks[] = {

     /* Acer Aspire 4736Z */
     { 0x2a42, 0x1025, 0x0260, quirk_invert_brightness },
+
+    /* Dell XPS13 HD Sandy Bridge */
+    { 0x0116, 0x1028, 0x052e, quirk_no_pcm_pwm_enable },
+    /* Dell XPS13 HD and XPS13 FHD Ivy Bridge */
+    { 0x0166, 0x1028, 0x058b, quirk_no_pcm_pwm_enable },
 };

 static void intel_init_quirks(struct drm_device *dev)
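For context, such a quirk table is consumed by matching the PCI device id plus subsystem ids and invoking the hook. The intel_init_quirks() body is not part of this diff, so the following loop is an illustrative sketch rather than the driver's exact code:

static void init_quirks_sketch(struct drm_device *dev)
{
    struct pci_dev *d = dev->pdev;
    int i;

    for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
        struct intel_quirk *q = &intel_quirks[i];

        /* run the hook when device and subsystem ids match */
        if (d->device == q->device &&
            (d->subsystem_vendor == q->subsystem_vendor ||
             q->subsystem_vendor == PCI_ANY_ID) &&
            (d->subsystem_device == q->subsystem_device ||
             q->subsystem_device == PCI_ANY_ID))
            q->hook(dev);
    }
}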
@@ -9817,8 +9837,8 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
         }
         pll->refcount = pll->active;

-        DRM_DEBUG_KMS("%s hw state readout: refcount %i\n",
-                  pll->name, pll->refcount);
+        DRM_DEBUG_KMS("%s hw state readout: refcount %i, on %i\n",
+                  pll->name, pll->refcount, pll->on);
     }

     list_for_each_entry(encoder, &dev->mode_config.encoder_list,
@@ -9869,6 +9889,7 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
     struct drm_plane *plane;
     struct intel_crtc *crtc;
     struct intel_encoder *encoder;
+    int i;

     intel_modeset_readout_hw_state(dev);
@@ -9884,6 +9905,18 @@ void intel_modeset_setup_hw_state(struct drm_device *dev,
         intel_dump_pipe_config(crtc, &crtc->config, "[setup_hw_state]");
     }

+    for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+        struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+
+        if (!pll->on || pll->active)
+            continue;
+
+        DRM_DEBUG_KMS("%s enabled but not in use, disabling\n", pll->name);
+
+        pll->disable(dev_priv, pll);
+        pll->on = false;
+    }
+
     if (force_restore) {
         /*
          * We need to use raw interfaces for restoring state to avoid
@@ -10009,6 +10042,8 @@ struct intel_display_error_state {

     u32 power_well_driver;

+    int num_transcoders;
+
     struct intel_cursor_error_state {
         u32 control;
         u32 position;
@@ -10017,16 +10052,7 @@ struct intel_display_error_state {
     } cursor[I915_MAX_PIPES];

     struct intel_pipe_error_state {
-        enum transcoder cpu_transcoder;
-        u32 conf;
         u32 source;
-
-        u32 htotal;
-        u32 hblank;
-        u32 hsync;
-        u32 vtotal;
-        u32 vblank;
-        u32 vsync;
     } pipe[I915_MAX_PIPES];

     struct intel_plane_error_state {
@@ -10038,6 +10064,19 @@ struct intel_display_error_state {
         u32 surface;
         u32 tile_offset;
     } plane[I915_MAX_PIPES];
+
+    struct intel_transcoder_error_state {
+        enum transcoder cpu_transcoder;
+
+        u32 conf;
+
+        u32 htotal;
+        u32 hblank;
+        u32 hsync;
+        u32 vtotal;
+        u32 vblank;
+        u32 vsync;
+    } transcoder[4];
 };

 struct intel_display_error_state *
@@ -10045,9 +10084,17 @@ intel_display_capture_error_state(struct drm_device *dev)
 {
     drm_i915_private_t *dev_priv = dev->dev_private;
     struct intel_display_error_state *error;
-    enum transcoder cpu_transcoder;
+    int transcoders[] = {
+        TRANSCODER_A,
+        TRANSCODER_B,
+        TRANSCODER_C,
+        TRANSCODER_EDP,
+    };
     int i;

+    if (INTEL_INFO(dev)->num_pipes == 0)
+        return NULL;
+
     error = kmalloc(sizeof(*error), GFP_ATOMIC);
     if (error == NULL)
         return NULL;
@@ -10056,9 +10103,6 @@ intel_display_capture_error_state(struct drm_device *dev)
         error->power_well_driver = I915_READ(HSW_PWR_WELL_DRIVER);

     for_each_pipe(i) {
-        cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv, i);
-        error->pipe[i].cpu_transcoder = cpu_transcoder;
-
         if (INTEL_INFO(dev)->gen <= 6 || IS_VALLEYVIEW(dev)) {
             error->cursor[i].control = I915_READ(CURCNTR(i));
             error->cursor[i].position = I915_READ(CURPOS(i));
@@ -10082,14 +10126,25 @@ intel_display_capture_error_state(struct drm_device *dev)
             error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
         }

-        error->pipe[i].conf = I915_READ(PIPECONF(cpu_transcoder));
         error->pipe[i].source = I915_READ(PIPESRC(i));
-        error->pipe[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
-        error->pipe[i].hblank = I915_READ(HBLANK(cpu_transcoder));
-        error->pipe[i].hsync = I915_READ(HSYNC(cpu_transcoder));
-        error->pipe[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
-        error->pipe[i].vblank = I915_READ(VBLANK(cpu_transcoder));
-        error->pipe[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+    }
+
+    error->num_transcoders = INTEL_INFO(dev)->num_pipes;
+    if (HAS_DDI(dev_priv->dev))
+        error->num_transcoders++; /* Account for eDP. */
+
+    for (i = 0; i < error->num_transcoders; i++) {
+        enum transcoder cpu_transcoder = transcoders[i];
+
+        error->transcoder[i].cpu_transcoder = cpu_transcoder;
+
+        error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
+        error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
+        error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
+        error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
+        error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
+        error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
+        error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
     }

     /* In the code above we read the registers without checking if the power
@@ -10111,22 +10166,16 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
 {
     int i;

+    if (!error)
+        return;
+
     err_printf(m, "Num Pipes: %d\n", INTEL_INFO(dev)->num_pipes);
     if (HAS_POWER_WELL(dev))
         err_printf(m, "PWR_WELL_CTL2: %08x\n",
                error->power_well_driver);
     for_each_pipe(i) {
         err_printf(m, "Pipe [%d]:\n", i);
-        err_printf(m, "  CPU transcoder: %c\n",
-               transcoder_name(error->pipe[i].cpu_transcoder));
-        err_printf(m, "  CONF: %08x\n", error->pipe[i].conf);
         err_printf(m, "  SRC: %08x\n", error->pipe[i].source);
-        err_printf(m, "  HTOTAL: %08x\n", error->pipe[i].htotal);
-        err_printf(m, "  HBLANK: %08x\n", error->pipe[i].hblank);
-        err_printf(m, "  HSYNC: %08x\n", error->pipe[i].hsync);
-        err_printf(m, "  VTOTAL: %08x\n", error->pipe[i].vtotal);
-        err_printf(m, "  VBLANK: %08x\n", error->pipe[i].vblank);
-        err_printf(m, "  VSYNC: %08x\n", error->pipe[i].vsync);

         err_printf(m, "Plane [%d]:\n", i);
         err_printf(m, "  CNTR: %08x\n", error->plane[i].control);
@@ -10147,5 +10196,17 @@ intel_display_print_error_state(struct drm_i915_error_state_buf *m,
         err_printf(m, "  POS: %08x\n", error->cursor[i].position);
         err_printf(m, "  BASE: %08x\n", error->cursor[i].base);
     }
+
+    for (i = 0; i < error->num_transcoders; i++) {
+        err_printf(m, "  CPU transcoder: %c\n",
+               transcoder_name(error->transcoder[i].cpu_transcoder));
+        err_printf(m, "  CONF: %08x\n", error->transcoder[i].conf);
+        err_printf(m, "  HTOTAL: %08x\n", error->transcoder[i].htotal);
+        err_printf(m, "  HBLANK: %08x\n", error->transcoder[i].hblank);
+        err_printf(m, "  HSYNC: %08x\n", error->transcoder[i].hsync);
+        err_printf(m, "  VTOTAL: %08x\n", error->transcoder[i].vtotal);
+        err_printf(m, "  VBLANK: %08x\n", error->transcoder[i].vblank);
+        err_printf(m, "  VSYNC: %08x\n", error->transcoder[i].vsync);
+    }
 }
 #endif
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index c8c9b6f4823..b7d6e09456c 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -504,7 +504,7 @@ struct intel_dp {
 struct intel_digital_port {
     struct intel_encoder base;
     enum port port;
-    u32 port_reversal;
+    u32 saved_port_bits;
     struct intel_dp dp;
     struct intel_hdmi hdmi;
 };

diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 98df2a0c85b..2fd3fd5b943 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -785,10 +785,22 @@ static void intel_disable_hdmi(struct intel_encoder *encoder)
     }
 }

+static int hdmi_portclock_limit(struct intel_hdmi *hdmi)
+{
+    struct drm_device *dev = intel_hdmi_to_dev(hdmi);
+
+    if (IS_G4X(dev))
+        return 165000;
+    else if (IS_HASWELL(dev))
+        return 300000;
+    else
+        return 225000;
+}
+
 static int intel_hdmi_mode_valid(struct drm_connector *connector,
                  struct drm_display_mode *mode)
 {
-    if (mode->clock > 165000)
+    if (mode->clock > hdmi_portclock_limit(intel_attached_hdmi(connector)))
         return MODE_CLOCK_HIGH;
     if (mode->clock < 20000)
         return MODE_CLOCK_LOW;
@@ -806,6 +818,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
     struct drm_device *dev = encoder->base.dev;
     struct drm_display_mode *adjusted_mode = &pipe_config->adjusted_mode;
     int clock_12bpc = pipe_config->requested_mode.clock * 3 / 2;
+    int portclock_limit = hdmi_portclock_limit(intel_hdmi);
     int desired_bpp;

     if (intel_hdmi->color_range_auto) {
@@ -829,7 +842,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
      * outputs. We also need to check that the higher clock still fits
      * within limits.
      */
-    if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= 225000
+    if (pipe_config->pipe_bpp > 8*3 && clock_12bpc <= portclock_limit
         && HAS_PCH_SPLIT(dev)) {
         DRM_DEBUG_KMS("picking bpc to 12 for HDMI output\n");
         desired_bpp = 12*3;
@@ -846,7 +859,7 @@ bool intel_hdmi_compute_config(struct intel_encoder *encoder,
         pipe_config->pipe_bpp = desired_bpp;
     }

-    if (adjusted_mode->clock > 225000) {
+    if (adjusted_mode->clock > portclock_limit) {
         DRM_DEBUG_KMS("too high HDMI clock, rejecting mode\n");
         return false;
     }
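For scale: clock_12bpc above is the requested pixel clock times 3/2, since 12 bpc pixels carry half again as much data per pixel as 8 bpc. A 1080p mode at 148500 kHz therefore needs 222750 kHz when deepened to 12 bpc — still under the 225000 kHz limit of PCH platforms, but well over G4X's 165000 kHz, which is why the limit is now looked up per platform in hdmi_portclock_limit() instead of being hard-coded.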
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 021e8daa022..61348eae2f0 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -109,6 +109,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
         flags |= DRM_MODE_FLAG_PVSYNC;

     pipe_config->adjusted_mode.flags |= flags;
+
+    /* gen2/3 store dither state in pfit control, needs to match */
+    if (INTEL_INFO(dev)->gen < 4) {
+        tmp = I915_READ(PFIT_CONTROL);
+
+        pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+    }
 }

 /* The LVDS pin pair needs to be on before the DPLLs are enabled.
@@ -290,14 +297,11 @@ static bool intel_lvds_compute_config(struct intel_encoder *intel_encoder,
         intel_pch_panel_fitting(intel_crtc, pipe_config,
                     intel_connector->panel.fitting_mode);
-        return true;
     } else {
         intel_gmch_panel_fitting(intel_crtc, pipe_config,
                      intel_connector->panel.fitting_mode);
-    }
-    drm_mode_set_crtcinfo(adjusted_mode, 0);
-    pipe_config->timings_set = true;
+    }

     /*
      * XXX: It would be nice to support lower refresh rates on the

diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 80bea1d3209..5950888ae1d 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -194,6 +194,9 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
         adjusted_mode->vdisplay == mode->vdisplay)
         goto out;

+    drm_mode_set_crtcinfo(adjusted_mode, 0);
+    pipe_config->timings_set = true;
+
     switch (fitting_mode) {
     case DRM_MODE_SCALE_CENTER:
         /*
@@ -494,8 +497,11 @@ void intel_panel_set_backlight(struct drm_device *dev, u32 level, u32 max)
         goto out;
     }

-    /* scale to hardware */
-    level = level * freq / max;
+    /* scale to hardware, but be careful to not overflow */
+    if (freq < max)
+        level = level * freq / max;
+    else
+        level = freq / max * level;

     dev_priv->backlight.level = level;
     if (dev_priv->backlight.device)
@@ -512,6 +518,17 @@ void intel_panel_disable_backlight(struct drm_device *dev)
     struct drm_i915_private *dev_priv = dev->dev_private;
     unsigned long flags;

+    /*
+     * Do not disable backlight on the vgaswitcheroo path. When switching
+     * away from i915, the other client may depend on i915 to handle the
+     * backlight. This will leave the backlight on unnecessarily when
+     * another client is not activated.
+     */
+    if (dev->switch_power_state == DRM_SWITCH_POWER_CHANGING) {
+        DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+        return;
+    }
+
     spin_lock_irqsave(&dev_priv->backlight.lock, flags);

     dev_priv->backlight.enabled = false;
@@ -580,7 +597,8 @@ void intel_panel_enable_backlight(struct drm_device *dev,
         POSTING_READ(reg);
         I915_WRITE(reg, tmp | BLM_PWM_ENABLE);

-        if (HAS_PCH_SPLIT(dev)) {
+        if (HAS_PCH_SPLIT(dev) &&
+            !(dev_priv->quirks & QUIRK_NO_PCH_PWM_ENABLE)) {
             tmp = I915_READ(BLC_PWM_PCH_CTL1);
             tmp |= BLM_PCH_PWM_ENABLE;
             tmp &= ~BLM_PCH_OVERRIDE_ENABLE;
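The backlight hunk above avoids a 32-bit overflow in level * freq by pre-dividing when freq >= max, at some cost in precision. An alternative sketch (an assumption-flagged illustration, not the patch's approach) does the multiply in 64 bits instead, avoiding both the overflow and the precision loss; div_u64() is from <linux/math64.h>:

#include <linux/math64.h>

static u32 scale_backlight(u32 level, u32 freq, u32 max)
{
    /* widen to 64 bits so level * freq cannot wrap */
    return div_u64((u64)level * freq, max);
}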
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d10e6735771..b0e4a0bd131 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5063,8 +5063,26 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
         }
     } else {
         if (enable_requested) {
+            unsigned long irqflags;
+            enum pipe p;
+
             I915_WRITE(HSW_PWR_WELL_DRIVER, 0);
+            POSTING_READ(HSW_PWR_WELL_DRIVER);
             DRM_DEBUG_KMS("Requesting to disable the power well\n");
+
+            /*
+             * After this, the registers on the pipes that are part
+             * of the power well will become zero, so we have to
+             * adjust our counters according to that.
+             *
+             * FIXME: Should we do this in general in
+             * drm_vblank_post_modeset?
+             */
+            spin_lock_irqsave(&dev->vbl_lock, irqflags);
+            for_each_pipe(p)
+                if (p != PIPE_A)
+                    dev->last_vblank[p] = 0;
+            spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
         }
     }
 }
@@ -5476,7 +5494,7 @@ static void vlv_force_wake_put(struct drm_i915_private *dev_priv)
     gen6_gt_check_fifodbg(dev_priv);
 }

-void intel_gt_reset(struct drm_device *dev)
+void intel_gt_sanitize(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5487,16 +5505,16 @@ void intel_gt_sanitize(struct drm_device *dev)
         if (IS_IVYBRIDGE(dev) || IS_HASWELL(dev))
             __gen6_gt_force_wake_mt_reset(dev_priv);
     }
+
+    /* BIOS often leaves RC6 enabled, but disable it for hw init */
+    if (INTEL_INFO(dev)->gen >= 6)
+        intel_disable_gt_powersave(dev);
 }

 void intel_gt_init(struct drm_device *dev)
 {
     struct drm_i915_private *dev_priv = dev->dev_private;

-    spin_lock_init(&dev_priv->gt_lock);
-
-    intel_gt_reset(dev);
-
     if (IS_VALLEYVIEW(dev)) {
         dev_priv->gt.force_wake_get = vlv_force_wake_get;
         dev_priv->gt.force_wake_put = vlv_force_wake_put;
@@ -5536,6 +5554,12 @@ void intel_gt_init(struct drm_device *dev)
         dev_priv->gt.force_wake_get = __gen6_gt_force_wake_get;
         dev_priv->gt.force_wake_put = __gen6_gt_force_wake_put;
     }
+}
+
+void intel_pm_init(struct drm_device *dev)
+{
+    struct drm_i915_private *dev_priv = dev->dev_private;

     INIT_DELAYED_WORK(&dev_priv->rps.delayed_resume_work,
               intel_gen6_powersave_work);
 }

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 664118d8c1d..079ef0129e7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -968,6 +968,18 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)

     I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
     POSTING_READ(mmio);
+
+    /* Flush the TLB for this page */
+    if (INTEL_INFO(dev)->gen >= 6) {
+        u32 reg = RING_INSTPM(ring->mmio_base);
+        I915_WRITE(reg,
+               _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
+                          INSTPM_SYNC_FLUSH));
+        if (wait_for((I915_READ(reg) & INSTPM_SYNC_FLUSH) == 0,
+                 1000))
+            DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
+                  ring->name);
+    }
 }

 static int
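The INSTPM write above uses i915's masked-register convention: the top 16 bits of the written value select which of the low 16 bits the hardware will actually latch, so unrelated bits are left untouched without a read-modify-write. For context, the two helpers as they exist in i915_reg.h (reproduced here for illustration, not added by this patch):

#define _MASKED_BIT_ENABLE(a)   (((a) << 16) | (a))  /* set bit(s) a */
#define _MASKED_BIT_DISABLE(a)  ((a) << 16)          /* clear bit(s) a */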
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 251784aa222..503a414cbda 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -29,6 +29,7 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)
     struct mga_crtc *mga_crtc = to_mga_crtc(crtc);
     struct drm_device *dev = crtc->dev;
     struct mga_device *mdev = dev->dev_private;
+    struct drm_framebuffer *fb = crtc->fb;
     int i;

     if (!crtc->enabled)
@@ -36,6 +37,28 @@ static void mga_crtc_load_lut(struct drm_crtc *crtc)

     WREG8(DAC_INDEX + MGA1064_INDEX, 0);

+    if (fb && fb->bits_per_pixel == 16) {
+        int inc = (fb->depth == 15) ? 8 : 4;
+        u8 r, b;
+        for (i = 0; i < MGAG200_LUT_SIZE; i += inc) {
+            if (fb->depth == 16) {
+                if (i > (MGAG200_LUT_SIZE >> 1)) {
+                    r = b = 0;
+                } else {
+                    r = mga_crtc->lut_r[i << 1];
+                    b = mga_crtc->lut_b[i << 1];
+                }
+            } else {
+                r = mga_crtc->lut_r[i];
+                b = mga_crtc->lut_b[i];
+            }
+            /* VGA registers */
+            WREG8(DAC_INDEX + MGA1064_COL_PAL, r);
+            WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_g[i]);
+            WREG8(DAC_INDEX + MGA1064_COL_PAL, b);
+        }
+        return;
+    }
     for (i = 0; i < MGAG200_LUT_SIZE; i++) {
         /* VGA registers */
         WREG8(DAC_INDEX + MGA1064_COL_PAL, mga_crtc->lut_r[i]);
@@ -877,7 +900,7 @@ static int mga_crtc_mode_set(struct drm_crtc *crtc,

     pitch = crtc->fb->pitches[0] / (crtc->fb->bits_per_pixel / 8);
     if (crtc->fb->bits_per_pixel == 24)
-        pitch = pitch >> (4 - bppshift);
+        pitch = (pitch * 3) >> (4 - bppshift);
     else
         pitch = pitch >> (4 - bppshift);
@@ -1251,6 +1274,24 @@ static void mga_crtc_destroy(struct drm_crtc *crtc)
     kfree(mga_crtc);
 }

+static void mga_crtc_disable(struct drm_crtc *crtc)
+{
+    int ret;
+    DRM_DEBUG_KMS("\n");
+    mga_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+    if (crtc->fb) {
+        struct mga_framebuffer *mga_fb = to_mga_framebuffer(crtc->fb);
+        struct drm_gem_object *obj = mga_fb->obj;
+        struct mgag200_bo *bo = gem_to_mga_bo(obj);
+        ret = mgag200_bo_reserve(bo, false);
+        if (ret)
+            return;
+        mgag200_bo_push_sysram(bo);
+        mgag200_bo_unreserve(bo);
+    }
+    crtc->fb = NULL;
+}
+
 /* These provide the minimum set of functions required to handle a CRTC */
 static const struct drm_crtc_funcs mga_crtc_funcs = {
     .cursor_set = mga_crtc_cursor_set,
@@ -1261,6 +1302,7 @@ static const struct drm_crtc_funcs mga_crtc_funcs = {
 };

 static const struct drm_crtc_helper_funcs mga_helper_funcs = {
+    .disable = mga_crtc_disable,
     .dpms = mga_crtc_dpms,
     .mode_fixup = mga_crtc_mode_fixup,
     .mode_set = mga_crtc_mode_set,
@@ -1581,6 +1623,8 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)

     drm_connector_helper_add(connector, &mga_vga_connector_helper_funcs);

+    drm_sysfs_connector_add(connector);
+
     mga_connector->i2c = mgag200_i2c_create(dev);
     if (!mga_connector->i2c)
         DRM_ERROR("failed to add ddc bus\n");

diff --git a/drivers/gpu/drm/mgag200/mgag200_ttm.c b/drivers/gpu/drm/mgag200/mgag200_ttm.c
index 3acb2b044c7..d70e4a92773 100644
--- a/drivers/gpu/drm/mgag200/mgag200_ttm.c
+++ b/drivers/gpu/drm/mgag200/mgag200_ttm.c
@@ -323,6 +323,7 @@ int mgag200_bo_create(struct drm_device *dev, int size, int align,

     mgabo->gem.driver_private = NULL;
     mgabo->bo.bdev = &mdev->ttm.bdev;
+    mgabo->bo.bdev->dev_mapping = dev->dev_mapping;

     mgag200_ttm_placement(mgabo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
@@ -353,6 +354,7 @@ int mgag200_bo_pin(struct mgag200_bo *bo, u32 pl_flag, u64 *gpu_addr)
         bo->pin_count++;
         if (gpu_addr)
             *gpu_addr = mgag200_bo_gpu_offset(bo);
+        return 0;
     }

     mgag200_ttm_placement(bo, pl_flag);
diff --git a/drivers/gpu/drm/nouveau/core/core/mm.c b/drivers/gpu/drm/nouveau/core/core/mm.c
index d8291724dbd..7a4e0891c5f 100644
--- a/drivers/gpu/drm/nouveau/core/core/mm.c
+++ b/drivers/gpu/drm/nouveau/core/core/mm.c
@@ -98,6 +98,8 @@ nouveau_mm_head(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
     u32 splitoff;
     u32 s, e;

+    BUG_ON(!type);
+
     list_for_each_entry(this, &mm->free, fl_entry) {
         e = this->offset + this->length;
         s = this->offset;
@@ -162,6 +164,8 @@ nouveau_mm_tail(struct nouveau_mm *mm, u8 type, u32 size_max, u32 size_min,
     struct nouveau_mm_node *prev, *this, *next;
     u32 mask = align - 1;

+    BUG_ON(!type);
+
     list_for_each_entry_reverse(this, &mm->free, fl_entry) {
         u32 e = this->offset + this->length;
         u32 s = this->offset;

diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
index 262c9f5f5f6..ce860de43e6 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
         return ret;

     nv_subdev(priv)->unit = 0x00008000;
+    nv_subdev(priv)->intr = nouveau_falcon_intr;
     nv_engine(priv)->cclass = &nvc0_bsp_cclass;
     nv_engine(priv)->sclass = nvc0_bsp_sclass;
     return 0;

diff --git a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
index c46882c8398..ba6aeca0285 100644
--- a/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/bsp/nve0.c
@@ -90,6 +90,7 @@ nve0_bsp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
         return ret;

     nv_subdev(priv)->unit = 0x00008000;
+    nv_subdev(priv)->intr = nouveau_falcon_intr;
     nv_engine(priv)->cclass = &nve0_bsp_cclass;
     nv_engine(priv)->sclass = nve0_bsp_sclass;
     return 0;

diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
index 373dbcc523b..a19e7d79b84 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanva3.c
@@ -36,6 +36,8 @@ nva3_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
     if (data && data[0]) {
         for (i = 0; i < size; i++)
             nv_wr32(priv, 0x61c440 + soff, (i << 8) | data[i]);
+        for (; i < 0x60; i++)
+            nv_wr32(priv, 0x61c440 + soff, (i << 8));
         nv_mask(priv, 0x61c448 + soff, 0x80000003, 0x80000003);
     } else
     if (data) {

diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
index dc57e24fc1d..717639386ce 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/hdanvd0.c
@@ -41,6 +41,8 @@ nvd0_hda_eld(struct nv50_disp_priv *priv, int or, u8 *data, u32 size)
     if (data && data[0]) {
         for (i = 0; i < size; i++)
             nv_wr32(priv, 0x10ec00 + soff, (i << 8) | data[i]);
+        for (; i < 0x60; i++)
+            nv_wr32(priv, 0x10ec00 + soff, (i << 8));
         nv_mask(priv, 0x10ec10 + soff, 0x80000003, 0x80000003);
     } else
     if (data) {

diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index ab1e918469a..526b7524289 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -47,14 +47,8 @@ int
 nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
 {
     struct nv50_disp_priv *priv = (void *)object->engine;
-    struct nouveau_bios *bios = nouveau_bios(priv);
-    const u16 type = (mthd & NV50_DISP_SOR_MTHD_TYPE) >> 12;
     const u8 head = (mthd & NV50_DISP_SOR_MTHD_HEAD) >> 3;
-    const u8 link = (mthd & NV50_DISP_SOR_MTHD_LINK) >> 2;
     const u8 or = (mthd & NV50_DISP_SOR_MTHD_OR);
-    const u16 mask = (0x0100 << head) | (0x0040 << link) | (0x0001 << or);
-    struct dcb_output outp;
-    u8 ver, hdr;
     u32 data;
     int ret = -EINVAL;
@@ -62,8 +56,6 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
         return -EINVAL;
     data = *(u32 *)args;

-    if (type && !dcb_outp_match(bios, type, mask, &ver, &hdr, &outp))
-        return -ENODEV;

     switch (mthd & ~0x3f) {
     case NV50_DISP_SOR_PWR:
diff --git a/drivers/gpu/drm/nouveau/core/engine/falcon.c b/drivers/gpu/drm/nouveau/core/engine/falcon.c
index 3c7a31f7590..e03fc8e4dc1 100644
--- a/drivers/gpu/drm/nouveau/core/engine/falcon.c
+++ b/drivers/gpu/drm/nouveau/core/engine/falcon.c
@@ -23,6 +23,25 @@
 #include <engine/falcon.h>
 #include <subdev/timer.h>

+void
+nouveau_falcon_intr(struct nouveau_subdev *subdev)
+{
+    struct nouveau_falcon *falcon = (void *)subdev;
+    u32 dispatch = nv_ro32(falcon, 0x01c);
+    u32 intr = nv_ro32(falcon, 0x008) & dispatch & ~(dispatch >> 16);
+
+    if (intr & 0x00000010) {
+        nv_debug(falcon, "ucode halted\n");
+        nv_wo32(falcon, 0x004, 0x00000010);
+        intr &= ~0x00000010;
+    }
+
+    if (intr) {
+        nv_error(falcon, "unhandled intr 0x%08x\n", intr);
+        nv_wo32(falcon, 0x004, intr);
+    }
+}
+
 u32
 _nouveau_falcon_rd32(struct nouveau_object *object, u64 addr)
 {

diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
index 49ecbb859b2..c1900430130 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv31.c
@@ -265,8 +265,8 @@ nv31_mpeg_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
 int
 nv31_mpeg_init(struct nouveau_object *object)
 {
-    struct nouveau_engine *engine = nv_engine(object->engine);
-    struct nv31_mpeg_priv *priv = (void *)engine;
+    struct nouveau_engine *engine = nv_engine(object);
+    struct nv31_mpeg_priv *priv = (void *)object;
     struct nouveau_fb *pfb = nouveau_fb(object);
     int ret, i;
@@ -284,7 +284,10 @@ nv31_mpeg_init(struct nouveau_object *object)
     /* PMPEG init */
     nv_wr32(priv, 0x00b32c, 0x00000000);
     nv_wr32(priv, 0x00b314, 0x00000100);
-    nv_wr32(priv, 0x00b220, nv44_graph_class(priv) ? 0x00000044 : 0x00000031);
+    if (nv_device(priv)->chipset >= 0x40 && nv44_graph_class(priv))
+        nv_wr32(priv, 0x00b220, 0x00000044);
+    else
+        nv_wr32(priv, 0x00b220, 0x00000031);
     nv_wr32(priv, 0x00b300, 0x02001ec1);
     nv_mask(priv, 0x00b32c, 0x00000001, 0x00000001);

diff --git a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
index f7c581ad199..dd6196072e9 100644
--- a/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
+++ b/drivers/gpu/drm/nouveau/core/engine/mpeg/nv40.c
@@ -61,6 +61,7 @@ nv40_mpeg_context_ctor(struct nouveau_object *parent,
     if (ret)
         return ret;

+    nv_wo32(&chan->base.base, 0x78, 0x02001ec1);
     return 0;
 }

diff --git a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
index 98072c1ff36..73719aaa62d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/ppp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_ppp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
         return ret;

     nv_subdev(priv)->unit = 0x00000002;
+    nv_subdev(priv)->intr = nouveau_falcon_intr;
     nv_engine(priv)->cclass = &nvc0_ppp_cclass;
     nv_engine(priv)->sclass = nvc0_ppp_sclass;
     return 0;

diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
index 1879229b60e..ac1f62aace7 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nvc0.c
@@ -90,6 +90,7 @@ nvc0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
         return ret;

     nv_subdev(priv)->unit = 0x00020000;
+    nv_subdev(priv)->intr = nouveau_falcon_intr;
     nv_engine(priv)->cclass = &nvc0_vp_cclass;
     nv_engine(priv)->sclass = nvc0_vp_sclass;
     return 0;
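nouveau_falcon_intr() above follows the usual interrupt-handler discipline: read the pending bits masked by the dispatch enables, handle the sources you know, then acknowledge whatever is left so an unknown interrupt cannot fire forever. A generic sketch of the pattern (hypothetical read/ack helpers, not nouveau's API):

static u32 read_intr_pending(void);
static void ack_intr(u32 bits);
static void handle_halt(void);

#define HALT_BIT 0x00000010

static void intr_sketch(void)
{
    u32 pending = read_intr_pending();

    if (pending & HALT_BIT) {   /* known source: handle, then ack */
        handle_halt();
        ack_intr(HALT_BIT);
        pending &= ~HALT_BIT;
    }
    if (pending)                /* unknown sources: log and ack anyway */
        ack_intr(pending);
}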
diff --git a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
index d28ecbf7bc4..d4c3108479c 100644
--- a/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/vp/nve0.c
@@ -90,6 +90,7 @@ nve0_vp_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
         return ret;

     nv_subdev(priv)->unit = 0x00020000;
+    nv_subdev(priv)->intr = nouveau_falcon_intr;
     nv_engine(priv)->cclass = &nve0_vp_cclass;
     nv_engine(priv)->sclass = nve0_vp_sclass;
     return 0;

diff --git a/drivers/gpu/drm/nouveau/core/engine/xtensa.c b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
index 0639bc59d0a..5f6ede7c489 100644
--- a/drivers/gpu/drm/nouveau/core/engine/xtensa.c
+++ b/drivers/gpu/drm/nouveau/core/engine/xtensa.c
@@ -118,7 +118,13 @@ _nouveau_xtensa_init(struct nouveau_object *object)
             return ret;
     }

-    ret = nouveau_gpuobj_new(object, NULL, fw->size, 0x1000, 0,
+    if (fw->size > 0x40000) {
+        nv_warn(xtensa, "firmware %s too large\n", name);
+        release_firmware(fw);
+        return -EINVAL;
+    }
+
+    ret = nouveau_gpuobj_new(object, NULL, 0x40000, 0x1000, 0,
                  &xtensa->gpu_fw);
     if (ret) {
         release_firmware(fw);

diff --git a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
index 1edec386ab3..181aa7da524 100644
--- a/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
+++ b/drivers/gpu/drm/nouveau/core/include/engine/falcon.h
@@ -72,6 +72,8 @@ int nouveau_falcon_create_(struct nouveau_object *,
                struct nouveau_object *, struct nouveau_oclass *, u32,
                bool, const char *, const char *, int, void **);

+void nouveau_falcon_intr(struct nouveau_subdev *subdev);
+
 #define _nouveau_falcon_dtor _nouveau_engine_dtor
 int _nouveau_falcon_init(struct nouveau_object *);
 int _nouveau_falcon_fini(struct nouveau_object *, bool);

diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
index d5502267c30..9d2cd200625 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/mc.h
@@ -20,8 +20,8 @@ nouveau_mc(void *obj)
     return (void *)nv_device(obj)->subdev[NVDEV_SUBDEV_MC];
 }

-#define nouveau_mc_create(p,e,o,d) \
-    nouveau_mc_create_((p), (e), (o), sizeof(**d), (void **)d)
+#define nouveau_mc_create(p,e,o,m,d) \
+    nouveau_mc_create_((p), (e), (o), (m), sizeof(**d), (void **)d)
 #define nouveau_mc_destroy(p) ({ \
     struct nouveau_mc *pmc = (p); _nouveau_mc_dtor(nv_object(pmc)); \
 })
@@ -33,7 +33,8 @@ nouveau_mc(void *obj)
 })

 int nouveau_mc_create_(struct nouveau_object *, struct nouveau_object *,
-               struct nouveau_oclass *, int, void **);
+               struct nouveau_oclass *, const struct nouveau_mc_intr *,
+               int, void **);
 void _nouveau_mc_dtor(struct nouveau_object *);
 int _nouveau_mc_init(struct nouveau_object *);
 int _nouveau_mc_fini(struct nouveau_object *, bool);

diff --git a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
index f2e87b10566..fcf57fa309b 100644
--- a/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
+++ b/drivers/gpu/drm/nouveau/core/include/subdev/vm.h
@@ -55,7 +55,7 @@ struct nouveau_vma {
 struct nouveau_vm {
     struct nouveau_vmmgr *vmm;
     struct nouveau_mm mm;
-    int refcount;
+    struct kref refcount;

     struct list_head pgd_list;
     atomic_t engref[NVDEV_SUBDEV_NR];
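Moving from a bare int to struct kref, as in the vm.h hunk above, buys atomic reference counting and a single well-defined release path. A minimal self-contained sketch of the kref lifecycle (hypothetical object, not nouveau's actual code):

#include <linux/kref.h>
#include <linux/slab.h>

struct obj {
    struct kref refcount;
};

/* called automatically when the last reference is dropped */
static void obj_release(struct kref *kref)
{
    kfree(container_of(kref, struct obj, refcount));
}

/* kref_init(&o->refcount) on creation, kref_get(&o->refcount) to
 * share, kref_put(&o->refcount, obj_release) to drop a reference. */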
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
index 6c974dd83e8..db9d6ddde52 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/priv.h
@@ -81,7 +81,7 @@ void nv44_fb_tile_prog(struct nouveau_fb *, int, struct nouveau_fb_tile *);
 void nv46_fb_tile_init(struct nouveau_fb *, int i, u32 addr, u32 size,
                u32 pitch, u32 flags, struct nouveau_fb_tile *);

-void nv50_ram_put(struct nouveau_fb *, struct nouveau_mem **);
+void __nv50_ram_put(struct nouveau_fb *, struct nouveau_mem *);
 extern int nv50_fb_memtype[0x80];

 #endif

diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
index 19e3a9a63a0..ab7ef0ac9e3 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv49.c
@@ -40,15 +40,15 @@ nv49_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
         return ret;

     switch (pfb914 & 0x00000003) {
-    case 0x00000000: pfb->ram->type = NV_MEM_TYPE_DDR1; break;
-    case 0x00000001: pfb->ram->type = NV_MEM_TYPE_DDR2; break;
-    case 0x00000002: pfb->ram->type = NV_MEM_TYPE_GDDR3; break;
+    case 0x00000000: ram->type = NV_MEM_TYPE_DDR1; break;
+    case 0x00000001: ram->type = NV_MEM_TYPE_DDR2; break;
+    case 0x00000002: ram->type = NV_MEM_TYPE_GDDR3; break;
     case 0x00000003: break;
     }

-    pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-    pfb->ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
-    pfb->ram->tags = nv_rd32(pfb, 0x100320);
+    ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+    ram->parts = (nv_rd32(pfb, 0x100200) & 0x00000003) + 1;
+    ram->tags = nv_rd32(pfb, 0x100320);
     return 0;
 }

diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
index 7192aa6e557..63a6aab8602 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv4e.c
@@ -38,8 +38,8 @@ nv4e_ram_create(struct nouveau_object *parent, struct nouveau_object *engine,
     if (ret)
         return ret;

-    pfb->ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
-    pfb->ram->type = NV_MEM_TYPE_STOLEN;
+    ram->size = nv_rd32(pfb, 0x10020c) & 0xff000000;
+    ram->type = NV_MEM_TYPE_STOLEN;
     return 0;
 }

diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
index af5aa7ee8ad..903baff77fd 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnv50.c
@@ -27,17 +27,10 @@
 #include "priv.h"

 void
-nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+__nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem *mem)
 {
     struct nouveau_mm_node *this;
-    struct nouveau_mem *mem;

-    mem = *pmem;
-    *pmem = NULL;
-    if (unlikely(mem == NULL))
-        return;
-
-    mutex_lock(&pfb->base.mutex);
     while (!list_empty(&mem->regions)) {
         this = list_first_entry(&mem->regions, typeof(*this), rl_entry);
@@ -46,6 +39,19 @@ nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
     }

     nouveau_mm_free(&pfb->tags, &mem->tag);
+}
+
+void
+nv50_ram_put(struct nouveau_fb *pfb, struct nouveau_mem **pmem)
+{
+    struct nouveau_mem *mem = *pmem;
+
+    *pmem = NULL;
+    if (unlikely(mem == NULL))
+        return;
+
+    mutex_lock(&pfb->base.mutex);
+    __nv50_ram_put(pfb, mem);
     mutex_unlock(&pfb->base.mutex);

     kfree(mem);
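The ramnv50.c split above is the common locked/double-underscore convention: __nv50_ram_put() does the work and implicitly documents "caller holds the lock", while nv50_ram_put() remains the public entry point that takes the mutex. That lets nvc0_ram_put() in the next hunk reuse the body inside its own critical section, where it must also release compression tags before freeing. A generic sketch with hypothetical names:

/* __foo_put: caller must hold f->lock */
static void __foo_put(struct foo *f, struct foo_mem *mem)
{
    /* tear down only the state that f->lock protects */
}

void foo_put(struct foo *f, struct foo_mem **pmem)
{
    struct foo_mem *mem = *pmem;

    *pmem = NULL;
    if (!mem)
        return;

    mutex_lock(&f->lock);
    __foo_put(f, mem);
    mutex_unlock(&f->lock);
    kfree(mem);
}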
*pmem = NULL; + if (unlikely(mem == NULL)) + return; - nv50_ram_put(pfb, pmem); + mutex_lock(&pfb->base.mutex); + if (mem->tag) + ltcg->tags_free(ltcg, &mem->tag); + __nv50_ram_put(pfb, mem); + mutex_unlock(&pfb->base.mutex); + + kfree(mem); } int diff --git a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c index bf489dcf46e..c4c1d415e7f 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/gpio/nv50.c @@ -103,7 +103,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev) int i; intr0 = nv_rd32(priv, 0xe054) & nv_rd32(priv, 0xe050); - if (nv_device(priv)->chipset >= 0x90) + if (nv_device(priv)->chipset > 0x92) intr1 = nv_rd32(priv, 0xe074) & nv_rd32(priv, 0xe070); hi = (intr0 & 0x0000ffff) | (intr1 << 16); @@ -115,7 +115,7 @@ nv50_gpio_intr(struct nouveau_subdev *subdev) } nv_wr32(priv, 0xe054, intr0); - if (nv_device(priv)->chipset >= 0x90) + if (nv_device(priv)->chipset > 0x92) nv_wr32(priv, 0xe074, intr1); } @@ -146,7 +146,7 @@ nv50_gpio_ctor(struct nouveau_object *parent, struct nouveau_object *engine, int ret; ret = nouveau_gpio_create(parent, engine, oclass, - nv_device(parent)->chipset >= 0x90 ? 32 : 16, + nv_device(parent)->chipset > 0x92 ? 32 : 16, &priv); *pobject = nv_object(priv); if (ret) @@ -182,7 +182,7 @@ nv50_gpio_init(struct nouveau_object *object) /* disable, and ack any pending gpio interrupts */ nv_wr32(priv, 0xe050, 0x00000000); nv_wr32(priv, 0xe054, 0xffffffff); - if (nv_device(priv)->chipset >= 0x90) { + if (nv_device(priv)->chipset > 0x92) { nv_wr32(priv, 0xe070, 0x00000000); nv_wr32(priv, 0xe074, 0xffffffff); } @@ -195,7 +195,7 @@ nv50_gpio_fini(struct nouveau_object *object, bool suspend) { struct nv50_gpio_priv *priv = (void *)object; nv_wr32(priv, 0xe050, 0x00000000); - if (nv_device(priv)->chipset >= 0x90) + if (nv_device(priv)->chipset > 0x92) nv_wr32(priv, 0xe070, 0x00000000); return nouveau_gpio_fini(&priv->base, suspend); } diff --git a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c index bcca883018f..cce65cc5651 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/ltcg/nvc0.c @@ -30,8 +30,9 @@ struct nvc0_ltcg_priv { struct nouveau_ltcg base; u32 part_nr; u32 subp_nr; - struct nouveau_mm tags; u32 num_tags; + u32 tag_base; + struct nouveau_mm tags; struct nouveau_mm_node *tag_ram; }; @@ -117,10 +118,6 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) u32 tag_size, tag_margin, tag_align; int ret; - nv_wr32(priv, 0x17e8d8, priv->part_nr); - if (nv_device(pfb)->card_type >= NV_E0) - nv_wr32(priv, 0x17e000, priv->part_nr); - /* tags for 1/4 of VRAM should be enough (8192/4 per GiB of VRAM) */ priv->num_tags = (pfb->ram->size >> 17) / 4; if (priv->num_tags > (1 << 17)) @@ -142,7 +139,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) tag_size += tag_align; tag_size = (tag_size + 0xfff) >> 12; /* round up */ - ret = nouveau_mm_tail(&pfb->vram, 0, tag_size, tag_size, 1, + ret = nouveau_mm_tail(&pfb->vram, 1, tag_size, tag_size, 1, &priv->tag_ram); if (ret) { priv->num_tags = 0; @@ -152,7 +149,7 @@ nvc0_ltcg_init_tag_ram(struct nouveau_fb *pfb, struct nvc0_ltcg_priv *priv) tag_base += tag_align - 1; ret = do_div(tag_base, tag_align); - nv_wr32(priv, 0x17e8d4, tag_base); + priv->tag_base = tag_base; } ret = nouveau_mm_init(&priv->tags, 0, priv->num_tags, 1); @@ -182,8 +179,6 @@ nvc0_ltcg_ctor(struct nouveau_object 
*parent, struct nouveau_object *engine, } priv->subp_nr = nv_rd32(priv, 0x17e8dc) >> 28; - nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ - ret = nvc0_ltcg_init_tag_ram(pfb, priv); if (ret) return ret; @@ -209,13 +204,32 @@ nvc0_ltcg_dtor(struct nouveau_object *object) nouveau_ltcg_destroy(ltcg); } +static int +nvc0_ltcg_init(struct nouveau_object *object) +{ + struct nouveau_ltcg *ltcg = (struct nouveau_ltcg *)object; + struct nvc0_ltcg_priv *priv = (struct nvc0_ltcg_priv *)ltcg; + int ret; + + ret = nouveau_ltcg_init(ltcg); + if (ret) + return ret; + + nv_mask(priv, 0x17e820, 0x00100000, 0x00000000); /* INTR_EN &= ~0x10 */ + nv_wr32(priv, 0x17e8d8, priv->part_nr); + if (nv_device(ltcg)->card_type >= NV_E0) + nv_wr32(priv, 0x17e000, priv->part_nr); + nv_wr32(priv, 0x17e8d4, priv->tag_base); + return 0; +} + struct nouveau_oclass nvc0_ltcg_oclass = { .handle = NV_SUBDEV(LTCG, 0xc0), .ofuncs = &(struct nouveau_ofuncs) { .ctor = nvc0_ltcg_ctor, .dtor = nvc0_ltcg_dtor, - .init = _nouveau_ltcg_init, + .init = nvc0_ltcg_init, .fini = _nouveau_ltcg_fini, }, }; diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c index 1c0330b8c9a..ec9cd6f10f9 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/base.c @@ -80,7 +80,9 @@ _nouveau_mc_dtor(struct nouveau_object *object) int nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, - struct nouveau_oclass *oclass, int length, void **pobject) + struct nouveau_oclass *oclass, + const struct nouveau_mc_intr *intr_map, + int length, void **pobject) { struct nouveau_device *device = nv_device(parent); struct nouveau_mc *pmc; @@ -92,6 +94,8 @@ nouveau_mc_create_(struct nouveau_object *parent, struct nouveau_object *engine, if (ret) return ret; + pmc->intr_map = intr_map; + ret = request_irq(device->pdev->irq, nouveau_mc_intr, IRQF_SHARED, "nouveau", pmc); if (ret < 0) diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c index 8c769715227..64aa4edb0d9 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv04.c @@ -50,12 +50,11 @@ nv04_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv04_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nv04_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c index 51919371810..d9891782bf2 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv44.c @@ -36,12 +36,11 @@ nv44_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv44_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nv04_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nv04_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c index 0cb322a5e72..2b1afe225db 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv50.c @@ -41,7 +41,7 @@ nv50_mc_intr[] = { { 0x04000000, NVDEV_ENGINE_DISP }, { 0x10000000, 
NVDEV_SUBDEV_BUS }, { 0x80000000, NVDEV_ENGINE_SW }, - { 0x0000d101, NVDEV_SUBDEV_FB }, + { 0x0002d101, NVDEV_SUBDEV_FB }, {}, }; @@ -53,12 +53,11 @@ nv50_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv50_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nv50_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nv50_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c index e82fd21b504..0d57b4d3e00 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nv98.c @@ -54,12 +54,11 @@ nv98_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nv98_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nv98_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nv98_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c index c5da3babbc6..104175c5a2d 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c +++ b/drivers/gpu/drm/nouveau/core/subdev/mc/nvc0.c @@ -57,12 +57,11 @@ nvc0_mc_ctor(struct nouveau_object *parent, struct nouveau_object *engine, struct nvc0_mc_priv *priv; int ret; - ret = nouveau_mc_create(parent, engine, oclass, &priv); + ret = nouveau_mc_create(parent, engine, oclass, nvc0_mc_intr, &priv); *pobject = nv_object(priv); if (ret) return ret; - priv->base.intr_map = nvc0_mc_intr; return 0; } diff --git a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c index 67fcb6c852a..ef3133e7575 100644 --- a/drivers/gpu/drm/nouveau/core/subdev/vm/base.c +++ b/drivers/gpu/drm/nouveau/core/subdev/vm/base.c @@ -361,7 +361,7 @@ nouveau_vm_create(struct nouveau_vmmgr *vmm, u64 offset, u64 length, INIT_LIST_HEAD(&vm->pgd_list); vm->vmm = vmm; - vm->refcount = 1; + kref_init(&vm->refcount); vm->fpde = offset >> (vmm->pgt_bits + 12); vm->lpde = (offset + length - 1) >> (vmm->pgt_bits + 12); @@ -441,8 +441,9 @@ nouveau_vm_unlink(struct nouveau_vm *vm, struct nouveau_gpuobj *mpgd) } static void -nouveau_vm_del(struct nouveau_vm *vm) +nouveau_vm_del(struct kref *kref) { + struct nouveau_vm *vm = container_of(kref, typeof(*vm), refcount); struct nouveau_vm_pgd *vpgd, *tmp; list_for_each_entry_safe(vpgd, tmp, &vm->pgd_list, head) { @@ -458,27 +459,19 @@ int nouveau_vm_ref(struct nouveau_vm *ref, struct nouveau_vm **ptr, struct nouveau_gpuobj *pgd) { - struct nouveau_vm *vm; - int ret; - - vm = ref; - if (vm) { - ret = nouveau_vm_link(vm, pgd); + if (ref) { + int ret = nouveau_vm_link(ref, pgd); if (ret) return ret; - vm->refcount++; + kref_get(&ref->refcount); } - vm = *ptr; - *ptr = ref; - - if (vm) { - nouveau_vm_unlink(vm, pgd); - - if (--vm->refcount == 0) - nouveau_vm_del(vm); + if (*ptr) { + nouveau_vm_unlink(*ptr, pgd); + kref_put(&(*ptr)->refcount, nouveau_vm_del); } + *ptr = ref; return 0; } diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c index 0782bd2f1e0..6a13ffb53bd 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c +++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c @@ -606,6 +606,24 @@ nv_crtc_mode_set_regs(struct drm_crtc *crtc, struct drm_display_mode * mode) regp->ramdac_a34 = 0x1; } +static int +nv_crtc_swap_fbs(struct 
drm_crtc *crtc, struct drm_framebuffer *old_fb) +{ + struct nv04_display *disp = nv04_display(crtc->dev); + struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + int ret; + + ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); + if (ret == 0) { + if (disp->image[nv_crtc->index]) + nouveau_bo_unpin(disp->image[nv_crtc->index]); + nouveau_bo_ref(nvfb->nvbo, &disp->image[nv_crtc->index]); + } + + return ret; +} + /** * Sets up registers for the given mode/adjusted_mode pair. * @@ -622,10 +640,15 @@ nv_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_device *dev = crtc->dev; struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nouveau_drm *drm = nouveau_drm(dev); + int ret; NV_DEBUG(drm, "CTRC mode on CRTC %d:\n", nv_crtc->index); drm_mode_debug_printmodeline(adjusted_mode); + ret = nv_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; + /* unlock must come after turning off FP_TG_CONTROL in output_prepare */ nv_lock_vga_crtc_shadow(dev, nv_crtc->index, -1); @@ -722,6 +745,7 @@ static void nv_crtc_commit(struct drm_crtc *crtc) static void nv_crtc_destroy(struct drm_crtc *crtc) { + struct nv04_display *disp = nv04_display(crtc->dev); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); if (!nv_crtc) @@ -729,6 +753,10 @@ static void nv_crtc_destroy(struct drm_crtc *crtc) drm_crtc_cleanup(crtc); + if (disp->image[nv_crtc->index]) + nouveau_bo_unpin(disp->image[nv_crtc->index]); + nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); nouveau_bo_unpin(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); @@ -754,6 +782,16 @@ nv_crtc_gamma_load(struct drm_crtc *crtc) } static void +nv_crtc_disable(struct drm_crtc *crtc) +{ + struct nv04_display *disp = nv04_display(crtc->dev); + struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + if (disp->image[nv_crtc->index]) + nouveau_bo_unpin(disp->image[nv_crtc->index]); + nouveau_bo_ref(NULL, &disp->image[nv_crtc->index]); +} + +static void nv_crtc_gamma_set(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b, uint32_t start, uint32_t size) { @@ -791,7 +829,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, struct drm_framebuffer *drm_fb; struct nouveau_framebuffer *fb; int arb_burst, arb_lwm; - int ret; NV_DEBUG(drm, "index %d\n", nv_crtc->index); @@ -801,10 +838,8 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, return 0; } - /* If atomic, we want to switch to the fb we were passed, so - * now we update pointers to do that. (We don't pin; just - * assume we're already pinned and update the base address.) + * now we update pointers to do that. */ if (atomic) { drm_fb = passed_fb; @@ -812,17 +847,6 @@ nv04_crtc_do_mode_set_base(struct drm_crtc *crtc, } else { drm_fb = crtc->fb; fb = nouveau_framebuffer(crtc->fb); - /* If not atomic, we can go ahead and pin, and unpin the - * old fb we were passed. 
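
A note on the pinning pattern above: nv_crtc_swap_fbs() pins the incoming framebuffer before anything else is touched, and only when that pin succeeds does it unpin the previous scanout buffer and swap the per-CRTC reference, so a failed pin leaves the old, still-pinned framebuffer in place. A minimal sketch of that ordering, under stated assumptions (struct buf, buf_pin(), buf_unpin() and buf_ref() are illustrative stand-ins, not the nouveau API):

    #include <stddef.h>

    /* Illustrative stand-ins for struct nouveau_bo and its helpers. */
    struct buf { int pin_refcnt; int refcnt; };

    static int buf_pin(struct buf *b)
    {
        b->pin_refcnt++;    /* the real call can fail; this stub cannot */
        return 0;
    }

    static void buf_unpin(struct buf *b)
    {
        b->pin_refcnt--;
    }

    /* Take a reference on b (may be NULL) and drop the one in *slot. */
    static void buf_ref(struct buf *b, struct buf **slot)
    {
        if (b)
            b->refcnt++;
        if (*slot)
            (*slot)->refcnt--;
        *slot = b;
    }

    /* Pin first; only on success unpin the old buffer and swap refs. */
    static int swap_scanout(struct buf **slot, struct buf *incoming)
    {
        int ret = buf_pin(incoming);

        if (ret == 0) {
            if (*slot)
                buf_unpin(*slot);
            buf_ref(incoming, slot);
        }
        return ret;
    }

Keeping the unpin of the old buffer on the success path is what lets the .disable and destroy hooks added above assume that a cached image, when present, is pinned exactly once.
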
- */ - ret = nouveau_bo_pin(fb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; - - if (passed_fb) { - struct nouveau_framebuffer *ofb = nouveau_framebuffer(passed_fb); - nouveau_bo_unpin(ofb->nvbo); - } } nv_crtc->fb.offset = fb->nvbo->bo.offset; @@ -877,6 +901,9 @@ static int nv04_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y, struct drm_framebuffer *old_fb) { + int ret = nv_crtc_swap_fbs(crtc, old_fb); + if (ret) + return ret; return nv04_crtc_do_mode_set_base(crtc, old_fb, x, y, false); } @@ -1027,6 +1054,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = { .mode_set_base = nv04_crtc_mode_set_base, .mode_set_base_atomic = nv04_crtc_mode_set_base_atomic, .load_lut = nv_crtc_gamma_load, + .disable = nv_crtc_disable, }; int diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.h b/drivers/gpu/drm/nouveau/dispnv04/disp.h index a0a031dad13..9928187f0a7 100644 --- a/drivers/gpu/drm/nouveau/dispnv04/disp.h +++ b/drivers/gpu/drm/nouveau/dispnv04/disp.h @@ -81,6 +81,7 @@ struct nv04_display { uint32_t saved_vga_font[4][16384]; uint32_t dac_users[4]; struct nouveau_object *core; + struct nouveau_bo *image[2]; }; static inline struct nv04_display * diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4b1afb13138..af20fba3a1a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -148,6 +148,7 @@ nouveau_bo_del_ttm(struct ttm_buffer_object *bo) if (unlikely(nvbo->gem)) DRM_ERROR("bo %p still attached to GEM object\n", bo); + WARN_ON(nvbo->pin_refcnt > 0); nv10_bo_put_tile_region(dev, nvbo->tile, NULL); kfree(nvbo); } @@ -197,6 +198,17 @@ nouveau_bo_new(struct drm_device *dev, int size, int align, size_t acc_size; int ret; int type = ttm_bo_type_device; + int lpg_shift = 12; + int max_size; + + if (drm->client.base.vm) + lpg_shift = drm->client.base.vm->vmm->lpg_shift; + max_size = INT_MAX & ~((1 << lpg_shift) - 1); + + if (size <= 0 || size > max_size) { + nv_warn(drm, "skipped size %x\n", (u32)size); + return -EINVAL; + } if (sg) type = ttm_bo_type_sg; @@ -340,13 +352,15 @@ nouveau_bo_unpin(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_buffer_object *bo = &nvbo->bo; - int ret; + int ret, ref; ret = ttm_bo_reserve(bo, false, false, false, 0); if (ret) return ret; - if (--nvbo->pin_refcnt) + ref = --nvbo->pin_refcnt; + WARN_ON_ONCE(ref < 0); + if (ref) goto out; nouveau_bo_placement_set(nvbo, bo->mem.placement, 0); @@ -578,7 +592,7 @@ nve0_bo_move_init(struct nouveau_channel *chan, u32 handle) int ret = RING_SPACE(chan, 2); if (ret == 0) { BEGIN_NVC0(chan, NvSubCopy, 0x0000, 1); - OUT_RING (chan, handle); + OUT_RING (chan, handle & 0x0000ffff); FIRE_RING (chan); } return ret; @@ -973,7 +987,7 @@ nouveau_bo_move_m2mf(struct ttm_buffer_object *bo, int evict, bool intr, struct ttm_mem_reg *old_mem = &bo->mem; int ret; - mutex_lock(&chan->cli->mutex); + mutex_lock_nested(&chan->cli->mutex, SINGLE_DEPTH_NESTING); /* create temporary vmas for the transfer and attach them to the * old nouveau_mem node, these will get cleaned up after ttm has @@ -1014,7 +1028,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct ttm_mem_reg *, struct ttm_mem_reg *); int (*init)(struct nouveau_channel *, u32 handle); } _methods[] = { - { "COPY", 0, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, + { "COPY", 4, 0xa0b5, nve0_bo_move_copy, nve0_bo_move_init }, { "GRCE", 0, 0xa0b5, nve0_bo_move_copy, nvc0_bo_move_init }, { "COPY1", 5, 0x90b8, nvc0_bo_move_copy, 
nvc0_bo_move_init }, { "COPY0", 4, 0x90b5, nvc0_bo_move_copy, nvc0_bo_move_init }, @@ -1034,7 +1048,7 @@ nouveau_bo_move_init(struct nouveau_drm *drm) struct nouveau_channel *chan; u32 handle = (mthd->engine << 16) | mthd->oclass; - if (mthd->init == nve0_bo_move_init) + if (mthd->engine) chan = drm->cechan; else chan = drm->channel; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 708b2d1c003..a03e75deaca 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -138,7 +138,7 @@ nouveau_user_framebuffer_create(struct drm_device *dev, { struct nouveau_framebuffer *nouveau_fb; struct drm_gem_object *gem; - int ret; + int ret = -ENOMEM; gem = drm_gem_object_lookup(dev, file_priv, mode_cmd->handles[0]); if (!gem) @@ -146,15 +146,19 @@ nouveau_user_framebuffer_create(struct drm_device *dev, nouveau_fb = kzalloc(sizeof(struct nouveau_framebuffer), GFP_KERNEL); if (!nouveau_fb) - return ERR_PTR(-ENOMEM); + goto err_unref; ret = nouveau_framebuffer_init(dev, nouveau_fb, mode_cmd, nouveau_gem_object(gem)); - if (ret) { - drm_gem_object_unreference(gem); - return ERR_PTR(ret); - } + if (ret) + goto err; return &nouveau_fb->base; + +err: + kfree(nouveau_fb); +err_unref: + drm_gem_object_unreference(gem); + return ERR_PTR(ret); } static const struct drm_mode_config_funcs nouveau_mode_config_funcs = { @@ -524,9 +528,12 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, struct nouveau_page_flip_state *s; struct nouveau_channel *chan = NULL; struct nouveau_fence *fence; - struct list_head res; - struct ttm_validate_buffer res_val[2]; + struct ttm_validate_buffer resv[2] = { + { .bo = &old_bo->bo }, + { .bo = &new_bo->bo }, + }; struct ww_acquire_ctx ticket; + LIST_HEAD(res); int ret; if (!drm->channel) @@ -545,27 +552,19 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, chan = drm->channel; spin_unlock(&old_bo->bo.bdev->fence_lock); - mutex_lock(&chan->cli->mutex); - if (new_bo != old_bo) { ret = nouveau_bo_pin(new_bo, TTM_PL_FLAG_VRAM); - if (likely(!ret)) { - res_val[0].bo = &old_bo->bo; - res_val[1].bo = &new_bo->bo; - INIT_LIST_HEAD(&res); - list_add_tail(&res_val[0].head, &res); - list_add_tail(&res_val[1].head, &res); - ret = ttm_eu_reserve_buffers(&ticket, &res); - if (ret) - nouveau_bo_unpin(new_bo); - } - } else - ret = ttm_bo_reserve(&new_bo->bo, false, false, false, 0); + if (ret) + goto fail_free; - if (ret) { - mutex_unlock(&chan->cli->mutex); - goto fail_free; + list_add(&resv[1].head, &res); } + list_add(&resv[0].head, &res); + + mutex_lock(&chan->cli->mutex); + ret = ttm_eu_reserve_buffers(&ticket, &res); + if (ret) + goto fail_unpin; /* Initialize a page flip struct */ *s = (struct nouveau_page_flip_state) @@ -576,10 +575,11 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* Emit a page flip */ if (nv_device(drm->device)->card_type >= NV_50) { ret = nv50_display_flip_next(crtc, fb, chan, 0); - if (ret) { - mutex_unlock(&chan->cli->mutex); + if (ret) goto fail_unreserve; - } + } else { + struct nv04_display *dispnv04 = nv04_display(dev); + nouveau_bo_ref(new_bo, &dispnv04->image[nouveau_crtc(crtc)->index]); } ret = nouveau_page_flip_emit(chan, old_bo, new_bo, s, &fence); @@ -590,22 +590,18 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb, /* Update the crtc struct and cleanup */ crtc->fb = fb; - if (old_bo != new_bo) { - ttm_eu_fence_buffer_objects(&ticket, &res, fence); + 
ttm_eu_fence_buffer_objects(&ticket, &res, fence); + if (old_bo != new_bo) nouveau_bo_unpin(old_bo); - } else { - nouveau_bo_fence(new_bo, fence); - ttm_bo_unreserve(&new_bo->bo); - } nouveau_fence_unref(&fence); return 0; fail_unreserve: - if (old_bo != new_bo) { - ttm_eu_backoff_reservation(&ticket, &res); + ttm_eu_backoff_reservation(&ticket, &res); +fail_unpin: + mutex_unlock(&chan->cli->mutex); + if (old_bo != new_bo) nouveau_bo_unpin(new_bo); - } else - ttm_bo_unreserve(&new_bo->bo); fail_free: kfree(s); return ret; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 218a4b522fe..61972668fd0 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -192,6 +192,18 @@ nouveau_accel_init(struct nouveau_drm *drm) arg0 = NVE0_CHANNEL_IND_ENGINE_GR; arg1 = 1; + } else + if (device->chipset >= 0xa3 && + device->chipset != 0xaa && + device->chipset != 0xac) { + ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, + NVDRM_CHAN + 1, NvDmaFB, NvDmaTT, + &drm->cechan); + if (ret) + NV_ERROR(drm, "failed to create ce channel, %d\n", ret); + + arg0 = NvDmaFB; + arg1 = NvDmaTT; } else { arg0 = NvDmaFB; arg1 = NvDmaTT; @@ -284,8 +296,6 @@ static int nouveau_drm_probe(struct pci_dev *pdev, return 0; } -static struct lock_class_key drm_client_lock_class_key; - static int nouveau_drm_load(struct drm_device *dev, unsigned long flags) { @@ -297,7 +307,6 @@ nouveau_drm_load(struct drm_device *dev, unsigned long flags) ret = nouveau_cli_create(pdev, "DRM", sizeof(*drm), (void**)&drm); if (ret) return ret; - lockdep_set_class(&drm->client.mutex, &drm_client_lock_class_key); dev->dev_private = drm; drm->dev = dev; diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c index 9352010030e..8f6d63d7edd 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c +++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c @@ -385,6 +385,7 @@ out_unlock: mutex_unlock(&dev->struct_mutex); if (chan) nouveau_bo_vma_del(nvbo, &fbcon->nouveau_fb.vma); + nouveau_bo_unmap(nvbo); out_unpin: nouveau_bo_unpin(nvbo); out_unref: @@ -397,7 +398,8 @@ void nouveau_fbcon_output_poll_changed(struct drm_device *dev) { struct nouveau_drm *drm = nouveau_drm(dev); - drm_fb_helper_hotplug_event(&drm->fbcon->helper); + if (drm->fbcon) + drm_fb_helper_hotplug_event(&drm->fbcon->helper); } static int diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c index 1680d9187ba..be3149932c2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_fence.c +++ b/drivers/gpu/drm/nouveau/nouveau_fence.c @@ -143,7 +143,7 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan) int ret; fence->channel = chan; - fence->timeout = jiffies + (3 * DRM_HZ); + fence->timeout = jiffies + (15 * DRM_HZ); fence->sequence = ++fctx->sequence; ret = fctx->emit(fence); diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index e72d09c068a..830cb7bad92 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -50,12 +50,6 @@ nouveau_gem_object_del(struct drm_gem_object *gem) return; nvbo->gem = NULL; - /* Lockdep hates you for doing reserve with gem object lock held */ - if (WARN_ON_ONCE(nvbo->pin_refcnt)) { - nvbo->pin_refcnt = 1; - nouveau_bo_unpin(nvbo); - } - if (gem->import_attach) drm_prime_gem_destroy(gem, nvbo->bo.sg); diff --git a/drivers/gpu/drm/nouveau/nv17_fence.c b/drivers/gpu/drm/nouveau/nv17_fence.c index 
8e47a9bae8c..22aa9963ea6 100644 --- a/drivers/gpu/drm/nouveau/nv17_fence.c +++ b/drivers/gpu/drm/nouveau/nv17_fence.c @@ -76,7 +76,7 @@ nv17_fence_context_new(struct nouveau_channel *chan) struct ttm_mem_reg *mem = &priv->bo->bo.mem; struct nouveau_object *object; u32 start = mem->start * PAGE_SIZE; - u32 limit = mem->start + mem->size - 1; + u32 limit = start + mem->size - 1; int ret = 0; fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); diff --git a/drivers/gpu/drm/nouveau/nv40_pm.c b/drivers/gpu/drm/nouveau/nv40_pm.c index 3af5bcd0b20..625f80d53dc 100644 --- a/drivers/gpu/drm/nouveau/nv40_pm.c +++ b/drivers/gpu/drm/nouveau/nv40_pm.c @@ -131,7 +131,7 @@ nv40_calc_pll(struct drm_device *dev, u32 reg, struct nvbios_pll *pll, if (clk < pll->vco1.max_freq) pll->vco2.max_freq = 0; - pclk->pll_calc(pclk, pll, clk, &coef); + ret = pclk->pll_calc(pclk, pll, clk, &coef); if (ret == 0) return -ERANGE; diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c index 54dc6355b0c..8b40a36c1b5 100644 --- a/drivers/gpu/drm/nouveau/nv50_display.c +++ b/drivers/gpu/drm/nouveau/nv50_display.c @@ -355,6 +355,7 @@ struct nv50_oimm { struct nv50_head { struct nouveau_crtc base; + struct nouveau_bo *image; struct nv50_curs curs; struct nv50_sync sync; struct nv50_ovly ovly; @@ -517,9 +518,10 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, { struct nouveau_framebuffer *nv_fb = nouveau_framebuffer(fb); struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); + struct nv50_head *head = nv50_head(crtc); struct nv50_sync *sync = nv50_sync(crtc); - int head = nv_crtc->index, ret; u32 *push; + int ret; swap_interval <<= 4; if (swap_interval == 0) @@ -537,7 +539,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, return ret; BEGIN_NV04(chan, 0, NV11_SUBCHAN_DMA_SEMAPHORE, 2); - OUT_RING (chan, NvEvoSema0 + head); + OUT_RING (chan, NvEvoSema0 + nv_crtc->index); OUT_RING (chan, sync->addr ^ 0x10); BEGIN_NV04(chan, 0, NV11_SUBCHAN_SEMAPHORE_RELEASE, 1); OUT_RING (chan, sync->data + 1); @@ -546,7 +548,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, OUT_RING (chan, sync->data); } else if (chan && nv_mclass(chan->object) < NVC0_CHANNEL_IND_CLASS) { - u64 addr = nv84_fence_crtc(chan, head) + sync->addr; + u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; ret = RING_SPACE(chan, 12); if (ret) return ret; @@ -565,7 +567,7 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, OUT_RING (chan, NV84_SUBCHAN_SEMAPHORE_TRIGGER_ACQUIRE_EQUAL); } else if (chan) { - u64 addr = nv84_fence_crtc(chan, head) + sync->addr; + u64 addr = nv84_fence_crtc(chan, nv_crtc->index) + sync->addr; ret = RING_SPACE(chan, 10); if (ret) return ret; @@ -630,6 +632,8 @@ nv50_display_flip_next(struct drm_crtc *crtc, struct drm_framebuffer *fb, evo_mthd(push, 0x0080, 1); evo_data(push, 0x00000000); evo_kick(push, sync); + + nouveau_bo_ref(nv_fb->nvbo, &head->image); return 0; } @@ -1038,18 +1042,17 @@ static int nv50_crtc_swap_fbs(struct drm_crtc *crtc, struct drm_framebuffer *old_fb) { struct nouveau_framebuffer *nvfb = nouveau_framebuffer(crtc->fb); + struct nv50_head *head = nv50_head(crtc); int ret; ret = nouveau_bo_pin(nvfb->nvbo, TTM_PL_FLAG_VRAM); - if (ret) - return ret; - - if (old_fb) { - nvfb = nouveau_framebuffer(old_fb); - nouveau_bo_unpin(nvfb->nvbo); + if (ret == 0) { + if (head->image) + nouveau_bo_unpin(head->image); + nouveau_bo_ref(nvfb->nvbo, &head->image); } - return 0; + return 
ret; } static int @@ -1198,6 +1201,15 @@ nv50_crtc_lut_load(struct drm_crtc *crtc) } } +static void +nv50_crtc_disable(struct drm_crtc *crtc) +{ + struct nv50_head *head = nv50_head(crtc); + if (head->image) + nouveau_bo_unpin(head->image); + nouveau_bo_ref(NULL, &head->image); +} + static int nv50_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height) @@ -1271,18 +1283,29 @@ nv50_crtc_destroy(struct drm_crtc *crtc) struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc); struct nv50_disp *disp = nv50_disp(crtc->dev); struct nv50_head *head = nv50_head(crtc); + nv50_dmac_destroy(disp->core, &head->ovly.base); nv50_pioc_destroy(disp->core, &head->oimm.base); nv50_dmac_destroy(disp->core, &head->sync.base); nv50_pioc_destroy(disp->core, &head->curs.base); + + /*XXX: this shouldn't be necessary, but the core doesn't call + * disconnect() during the cleanup paths + */ + if (head->image) + nouveau_bo_unpin(head->image); + nouveau_bo_ref(NULL, &head->image); + nouveau_bo_unmap(nv_crtc->cursor.nvbo); if (nv_crtc->cursor.nvbo) nouveau_bo_unpin(nv_crtc->cursor.nvbo); nouveau_bo_ref(NULL, &nv_crtc->cursor.nvbo); + nouveau_bo_unmap(nv_crtc->lut.nvbo); if (nv_crtc->lut.nvbo) nouveau_bo_unpin(nv_crtc->lut.nvbo); nouveau_bo_ref(NULL, &nv_crtc->lut.nvbo); + drm_crtc_cleanup(crtc); kfree(crtc); } @@ -1296,6 +1319,7 @@ static const struct drm_crtc_helper_funcs nv50_crtc_hfunc = { .mode_set_base = nv50_crtc_mode_set_base, .mode_set_base_atomic = nv50_crtc_mode_set_base_atomic, .load_lut = nv50_crtc_lut_load, + .disable = nv50_crtc_disable, }; static const struct drm_crtc_funcs nv50_crtc_func = { diff --git a/drivers/gpu/drm/nouveau/nv50_fence.c b/drivers/gpu/drm/nouveau/nv50_fence.c index f9701e567db..0ee36384003 100644 --- a/drivers/gpu/drm/nouveau/nv50_fence.c +++ b/drivers/gpu/drm/nouveau/nv50_fence.c @@ -39,6 +39,8 @@ nv50_fence_context_new(struct nouveau_channel *chan) struct nv10_fence_chan *fctx; struct ttm_mem_reg *mem = &priv->bo->bo.mem; struct nouveau_object *object; + u32 start = mem->start * PAGE_SIZE; + u32 limit = start + mem->size - 1; int ret, i; fctx = chan->fence = kzalloc(sizeof(*fctx), GFP_KERNEL); @@ -51,26 +53,28 @@ nv50_fence_context_new(struct nouveau_channel *chan) fctx->base.sync = nv17_fence_sync; ret = nouveau_object_new(nv_object(chan->cli), chan->handle, - NvSema, 0x0002, + NvSema, 0x003d, &(struct nv_dma_class) { .flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR, - .start = mem->start * PAGE_SIZE, - .limit = mem->size - 1, + .start = start, + .limit = limit, }, sizeof(struct nv_dma_class), &object); /* dma objects for display sync channel semaphore blocks */ for (i = 0; !ret && i < dev->mode_config.num_crtc; i++) { struct nouveau_bo *bo = nv50_display_crtc_sema(dev, i); + u32 start = bo->bo.mem.start * PAGE_SIZE; + u32 limit = start + bo->bo.mem.size - 1; ret = nouveau_object_new(nv_object(chan->cli), chan->handle, NvEvoSema0 + i, 0x003d, &(struct nv_dma_class) { .flags = NV_DMA_TARGET_VRAM | NV_DMA_ACCESS_RDWR, - .start = bo->bo.offset, - .limit = bo->bo.offset + 0xfff, + .start = start, + .limit = limit, }, sizeof(struct nv_dma_class), &object); } diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c index 93c2f2cceb5..eb89653a7a1 100644 --- a/drivers/gpu/drm/qxl/qxl_cmd.c +++ b/drivers/gpu/drm/qxl/qxl_cmd.c @@ -179,9 +179,10 @@ qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *relea uint32_t type, bool interruptible) { struct qxl_command cmd; + struct qxl_bo_list *entry = 
list_first_entry(&release->bos, struct qxl_bo_list, tv.head); cmd.type = type; - cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); + cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); return qxl_ring_push(qdev->command_ring, &cmd, interruptible); } @@ -191,9 +192,10 @@ qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *releas uint32_t type, bool interruptible) { struct qxl_command cmd; + struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); cmd.type = type; - cmd.data = qxl_bo_physical_address(qdev, release->bos[0], release->release_offset); + cmd.data = qxl_bo_physical_address(qdev, to_qxl_bo(entry->tv.bo), release->release_offset); return qxl_ring_push(qdev->cursor_ring, &cmd, interruptible); } @@ -214,7 +216,6 @@ int qxl_garbage_collect(struct qxl_device *qdev) struct qxl_release *release; uint64_t id, next_id; int i = 0; - int ret; union qxl_release_info *info; while (qxl_ring_pop(qdev->release_ring, &id)) { @@ -224,17 +225,10 @@ int qxl_garbage_collect(struct qxl_device *qdev) if (release == NULL) break; - ret = qxl_release_reserve(qdev, release, false); - if (ret) { - qxl_io_log(qdev, "failed to reserve release on garbage collect %lld\n", id); - DRM_ERROR("failed to reserve release %lld\n", id); - } - info = qxl_release_map(qdev, release); next_id = info->next; qxl_release_unmap(qdev, release, info); - qxl_release_unreserve(qdev, release); QXL_INFO(qdev, "popped %lld, next %lld\n", id, next_id); @@ -259,27 +253,29 @@ int qxl_garbage_collect(struct qxl_device *qdev) return i; } -int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, +int qxl_alloc_bo_reserved(struct qxl_device *qdev, + struct qxl_release *release, + unsigned long size, struct qxl_bo **_bo) { struct qxl_bo *bo; int ret; ret = qxl_bo_create(qdev, size, false /* not kernel - device */, - QXL_GEM_DOMAIN_VRAM, NULL, &bo); + false, QXL_GEM_DOMAIN_VRAM, NULL, &bo); if (ret) { DRM_ERROR("failed to allocate VRAM BO\n"); return ret; } - ret = qxl_bo_reserve(bo, false); - if (unlikely(ret != 0)) + ret = qxl_release_list_add(release, bo); + if (ret) goto out_unref; *_bo = bo; return 0; out_unref: qxl_bo_unref(&bo); - return 0; + return ret; } static int wait_for_io_cmd_user(struct qxl_device *qdev, uint8_t val, long port, bool intr) @@ -503,6 +499,10 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, if (ret) return ret; + ret = qxl_release_reserve_list(release, true); + if (ret) + return ret; + cmd = (struct qxl_surface_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_SURFACE_CMD_CREATE; cmd->u.surface_create.format = surf->surf.format; @@ -524,14 +524,11 @@ int qxl_hw_surface_alloc(struct qxl_device *qdev, surf->surf_create = release; - /* no need to add a release to the fence for this bo, + /* no need to add a release to the fence for this surface bo, since it is only released when we ask to destroy the surface and it would never signal otherwise */ - qxl_fence_releaseable(qdev, release); - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); - - qxl_release_unreserve(qdev, release); + qxl_release_fence_buffer_objects(release); surf->hw_surf_alloc = true; spin_lock(&qdev->surf_id_idr_lock); @@ -573,12 +570,9 @@ int qxl_hw_surface_dealloc(struct qxl_device *qdev, cmd->surface_id = id; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_fence_releaseable(qdev, release); - qxl_push_command_ring_release(qdev, release, QXL_CMD_SURFACE, false); - 
qxl_release_unreserve(qdev, release); - + qxl_release_fence_buffer_objects(release); return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c index f76f5dd7bfc..835caba026d 100644 --- a/drivers/gpu/drm/qxl/qxl_display.c +++ b/drivers/gpu/drm/qxl/qxl_display.c @@ -179,7 +179,7 @@ static void qxl_crtc_destroy(struct drm_crtc *crtc) kfree(qxl_crtc); } -static void +static int qxl_hide_cursor(struct qxl_device *qdev) { struct qxl_release *release; @@ -188,14 +188,22 @@ qxl_hide_cursor(struct qxl_device *qdev) ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, &release, NULL); + if (ret) + return ret; + + ret = qxl_release_reserve_list(release, true); + if (ret) { + qxl_release_free(qdev, release); + return ret; + } cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_HIDE; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_fence_releaseable(qdev, release); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); - qxl_release_unreserve(qdev, release); + qxl_release_fence_buffer_objects(release); + return 0; } static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, @@ -216,10 +224,8 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, int size = 64*64*4; int ret = 0; - if (!handle) { - qxl_hide_cursor(qdev); - return 0; - } + if (!handle) + return qxl_hide_cursor(qdev); obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); if (!obj) { @@ -234,8 +240,9 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, goto out_unref; ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL); + qxl_bo_unreserve(user_bo); if (ret) - goto out_unreserve; + goto out_unref; ret = qxl_bo_kmap(user_bo, &user_ptr); if (ret) @@ -246,14 +253,20 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, &release, NULL); if (ret) goto out_kunmap; - ret = qxl_alloc_bo_reserved(qdev, sizeof(struct qxl_cursor) + size, - &cursor_bo); + + ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_cursor) + size, + &cursor_bo); if (ret) goto out_free_release; - ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); + + ret = qxl_release_reserve_list(release, false); if (ret) goto out_free_bo; + ret = qxl_bo_kmap(cursor_bo, (void **)&cursor); + if (ret) + goto out_backoff; + cursor->header.unique = 0; cursor->header.type = SPICE_CURSOR_TYPE_ALPHA; cursor->header.width = 64; @@ -269,11 +282,7 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, qxl_bo_kunmap(cursor_bo); - /* finish with the userspace bo */ qxl_bo_kunmap(user_bo); - qxl_bo_unpin(user_bo); - qxl_bo_unreserve(user_bo); - drm_gem_object_unreference_unlocked(obj); cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release); cmd->type = QXL_CURSOR_SET; @@ -281,30 +290,35 @@ static int qxl_crtc_cursor_set2(struct drm_crtc *crtc, cmd->u.set.position.y = qcrtc->cur_y; cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0); - qxl_release_add_res(qdev, release, cursor_bo); cmd->u.set.visible = 1; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_fence_releaseable(qdev, release); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); - qxl_release_unreserve(qdev, release); + qxl_release_fence_buffer_objects(release); + + /* finish with the userspace bo */ + ret = qxl_bo_reserve(user_bo, false); + if (!ret) { + qxl_bo_unpin(user_bo); + qxl_bo_unreserve(user_bo); + } + drm_gem_object_unreference_unlocked(obj); - qxl_bo_unreserve(cursor_bo); qxl_bo_unref(&cursor_bo); return ret; + +out_backoff: + 
qxl_release_backoff_reserve_list(release); out_free_bo: qxl_bo_unref(&cursor_bo); out_free_release: - qxl_release_unreserve(qdev, release); qxl_release_free(qdev, release); out_kunmap: qxl_bo_kunmap(user_bo); out_unpin: qxl_bo_unpin(user_bo); -out_unreserve: - qxl_bo_unreserve(user_bo); out_unref: drm_gem_object_unreference_unlocked(obj); return ret; @@ -322,6 +336,14 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd), QXL_RELEASE_CURSOR_CMD, &release, NULL); + if (ret) + return ret; + + ret = qxl_release_reserve_list(release, true); + if (ret) { + qxl_release_free(qdev, release); + return ret; + } qcrtc->cur_x = x; qcrtc->cur_y = y; @@ -332,9 +354,9 @@ static int qxl_crtc_cursor_move(struct drm_crtc *crtc, cmd->u.position.y = qcrtc->cur_y; qxl_release_unmap(qdev, release, &cmd->release_info); - qxl_fence_releaseable(qdev, release); qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false); - qxl_release_unreserve(qdev, release); + qxl_release_fence_buffer_objects(release); + return 0; } diff --git a/drivers/gpu/drm/qxl/qxl_draw.c b/drivers/gpu/drm/qxl/qxl_draw.c index 3c8c3dbf937..56e1d633875 100644 --- a/drivers/gpu/drm/qxl/qxl_draw.c +++ b/drivers/gpu/drm/qxl/qxl_draw.c @@ -23,25 +23,29 @@ #include "qxl_drv.h" #include "qxl_object.h" +static int alloc_clips(struct qxl_device *qdev, + struct qxl_release *release, + unsigned num_clips, + struct qxl_bo **clips_bo) +{ + int size = sizeof(struct qxl_clip_rects) + sizeof(struct qxl_rect) * num_clips; + + return qxl_alloc_bo_reserved(qdev, release, size, clips_bo); +} + /* returns a pointer to the already allocated qxl_rect array inside * the qxl_clip_rects. This is *not* the same as the memory allocated * on the device, it is offset to qxl_clip_rects.chunk.data */ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, struct qxl_drawable *drawable, unsigned num_clips, - struct qxl_bo **clips_bo, - struct qxl_release *release) + struct qxl_bo *clips_bo) { struct qxl_clip_rects *dev_clips; int ret; - int size = sizeof(*dev_clips) + sizeof(struct qxl_rect) * num_clips; - ret = qxl_alloc_bo_reserved(qdev, size, clips_bo); - if (ret) - return NULL; - ret = qxl_bo_kmap(*clips_bo, (void **)&dev_clips); + ret = qxl_bo_kmap(clips_bo, (void **)&dev_clips); if (ret) { - qxl_bo_unref(clips_bo); return NULL; } dev_clips->num_rects = num_clips; @@ -52,20 +56,34 @@ static struct qxl_rect *drawable_set_clipping(struct qxl_device *qdev, } static int +alloc_drawable(struct qxl_device *qdev, struct qxl_release **release) +{ + int ret; + ret = qxl_alloc_release_reserved(qdev, sizeof(struct qxl_drawable), + QXL_RELEASE_DRAWABLE, release, + NULL); + return ret; +} + +static void +free_drawable(struct qxl_device *qdev, struct qxl_release *release) +{ + qxl_release_free(qdev, release); +} + +/* release needs to be reserved at this point */ +static int make_drawable(struct qxl_device *qdev, int surface, uint8_t type, const struct qxl_rect *rect, - struct qxl_release **release) + struct qxl_release *release) { struct qxl_drawable *drawable; - int i, ret; + int i; - ret = qxl_alloc_release_reserved(qdev, sizeof(*drawable), - QXL_RELEASE_DRAWABLE, release, - NULL); - if (ret) - return ret; + drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); + if (!drawable) + return -ENOMEM; - drawable = (struct qxl_drawable *)qxl_release_map(qdev, *release); drawable->type = type; drawable->surface_id = surface; /* Only primary for now */ @@ -91,14 +109,23 @@ make_drawable(struct 
qxl_device *qdev, int surface, uint8_t type, drawable->bbox = *rect; drawable->mm_time = qdev->rom->mm_clock; - qxl_release_unmap(qdev, *release, &drawable->release_info); + qxl_release_unmap(qdev, release, &drawable->release_info); return 0; } -static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, +static int alloc_palette_object(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_bo **palette_bo) +{ + return qxl_alloc_bo_reserved(qdev, release, + sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, + palette_bo); +} + +static int qxl_palette_create_1bit(struct qxl_bo *palette_bo, + struct qxl_release *release, const struct qxl_fb_image *qxl_fb_image) { - struct qxl_device *qdev = qxl_fb_image->qdev; const struct fb_image *fb_image = &qxl_fb_image->fb_image; uint32_t visual = qxl_fb_image->visual; const uint32_t *pseudo_palette = qxl_fb_image->pseudo_palette; @@ -108,12 +135,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, static uint64_t unique; /* we make no attempt to actually set this * correctly globaly, since that would require * tracking all of our palettes. */ - - ret = qxl_alloc_bo_reserved(qdev, - sizeof(struct qxl_palette) + sizeof(uint32_t) * 2, - palette_bo); - - ret = qxl_bo_kmap(*palette_bo, (void **)&pal); + ret = qxl_bo_kmap(palette_bo, (void **)&pal); pal->num_ents = 2; pal->unique = unique++; if (visual == FB_VISUAL_TRUECOLOR || visual == FB_VISUAL_DIRECTCOLOR) { @@ -126,7 +148,7 @@ static int qxl_palette_create_1bit(struct qxl_bo **palette_bo, } pal->ents[0] = bgcolor; pal->ents[1] = fgcolor; - qxl_bo_kunmap(*palette_bo); + qxl_bo_kunmap(palette_bo); return 0; } @@ -144,44 +166,63 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, const char *src = fb_image->data; int depth = fb_image->depth; struct qxl_release *release; - struct qxl_bo *image_bo; struct qxl_image *image; int ret; - + struct qxl_drm_image *dimage; + struct qxl_bo *palette_bo = NULL; if (stride == 0) stride = depth * width / 8; + ret = alloc_drawable(qdev, &release); + if (ret) + return; + + ret = qxl_image_alloc_objects(qdev, release, + &dimage, + height, stride); + if (ret) + goto out_free_drawable; + + if (depth == 1) { + ret = alloc_palette_object(qdev, release, &palette_bo); + if (ret) + goto out_free_image; + } + + /* do a reservation run over all the objects we just allocated */ + ret = qxl_release_reserve_list(release, true); + if (ret) + goto out_free_palette; + rect.left = x; rect.right = x + width; rect.top = y; rect.bottom = y + height; - ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, &release); - if (ret) - return; + ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &rect, release); + if (ret) { + qxl_release_backoff_reserve_list(release); + goto out_free_palette; + } - ret = qxl_image_create(qdev, release, &image_bo, - (const uint8_t *)src, 0, 0, - width, height, depth, stride); + ret = qxl_image_init(qdev, release, dimage, + (const uint8_t *)src, 0, 0, + width, height, depth, stride); if (ret) { - qxl_release_unreserve(qdev, release); + qxl_release_backoff_reserve_list(release); qxl_release_free(qdev, release); return; } if (depth == 1) { - struct qxl_bo *palette_bo; void *ptr; - ret = qxl_palette_create_1bit(&palette_bo, qxl_fb_image); - qxl_release_add_res(qdev, release, palette_bo); + ret = qxl_palette_create_1bit(palette_bo, release, qxl_fb_image); - ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); + ptr = qxl_bo_kmap_atomic_page(qdev, dimage->bo, 0); image = ptr; image->u.bitmap.palette = qxl_bo_physical_address(qdev, 
palette_bo, 0); - qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); - qxl_bo_unreserve(palette_bo); - qxl_bo_unref(&palette_bo); + qxl_bo_kunmap_atomic_page(qdev, dimage->bo, ptr); } drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); @@ -199,16 +240,20 @@ void qxl_draw_opaque_fb(const struct qxl_fb_image *qxl_fb_image, drawable->u.copy.mask.bitmap = 0; drawable->u.copy.src_bitmap = - qxl_bo_physical_address(qdev, image_bo, 0); + qxl_bo_physical_address(qdev, dimage->bo, 0); qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_release_add_res(qdev, release, image_bo); - qxl_bo_unreserve(image_bo); - qxl_bo_unref(&image_bo); - - qxl_fence_releaseable(qdev, release); qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); - qxl_release_unreserve(qdev, release); + qxl_release_fence_buffer_objects(release); + +out_free_palette: + if (palette_bo) + qxl_bo_unref(&palette_bo); +out_free_image: + qxl_image_free_objects(qdev, dimage); +out_free_drawable: + if (ret) + free_drawable(qdev, release); } /* push a draw command using the given clipping rectangles as @@ -243,10 +288,14 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, int depth = qxl_fb->base.bits_per_pixel; uint8_t *surface_base; struct qxl_release *release; - struct qxl_bo *image_bo; struct qxl_bo *clips_bo; + struct qxl_drm_image *dimage; int ret; + ret = alloc_drawable(qdev, &release); + if (ret) + return; + left = clips->x1; right = clips->x2; top = clips->y1; @@ -263,36 +312,52 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, width = right - left; height = bottom - top; + + ret = alloc_clips(qdev, release, num_clips, &clips_bo); + if (ret) + goto out_free_drawable; + + ret = qxl_image_alloc_objects(qdev, release, + &dimage, + height, stride); + if (ret) + goto out_free_clips; + + /* do a reservation run over all the objects we just allocated */ + ret = qxl_release_reserve_list(release, true); + if (ret) + goto out_free_image; + drawable_rect.left = left; drawable_rect.right = right; drawable_rect.top = top; drawable_rect.bottom = bottom; + ret = make_drawable(qdev, 0, QXL_DRAW_COPY, &drawable_rect, - &release); + release); if (ret) - return; + goto out_release_backoff; ret = qxl_bo_kmap(bo, (void **)&surface_base); if (ret) - goto out_unref; + goto out_release_backoff; - ret = qxl_image_create(qdev, release, &image_bo, surface_base, - left, top, width, height, depth, stride); + + ret = qxl_image_init(qdev, release, dimage, surface_base, + left, top, width, height, depth, stride); qxl_bo_kunmap(bo); if (ret) - goto out_unref; + goto out_release_backoff; + + rects = drawable_set_clipping(qdev, drawable, num_clips, clips_bo); + if (!rects) + goto out_release_backoff; - rects = drawable_set_clipping(qdev, drawable, num_clips, &clips_bo, release); - if (!rects) { - qxl_bo_unref(&image_bo); - goto out_unref; - } drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable->clip.type = SPICE_CLIP_TYPE_RECTS; drawable->clip.data = qxl_bo_physical_address(qdev, clips_bo, 0); - qxl_release_add_res(qdev, release, clips_bo); drawable->u.copy.src_area.top = 0; drawable->u.copy.src_area.bottom = height; @@ -306,11 +371,9 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, drawable->u.copy.mask.pos.y = 0; drawable->u.copy.mask.bitmap = 0; - drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, image_bo, 0); + drawable->u.copy.src_bitmap = qxl_bo_physical_address(qdev, dimage->bo, 0); qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_release_add_res(qdev, release, image_bo); - 
qxl_bo_unreserve(image_bo); - qxl_bo_unref(&image_bo); + clips_ptr = clips; for (i = 0; i < num_clips; i++, clips_ptr += inc) { rects[i].left = clips_ptr->x1; @@ -319,17 +382,22 @@ void qxl_draw_dirty_fb(struct qxl_device *qdev, rects[i].bottom = clips_ptr->y2; } qxl_bo_kunmap(clips_bo); - qxl_bo_unreserve(clips_bo); - qxl_bo_unref(&clips_bo); - qxl_fence_releaseable(qdev, release); qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); - qxl_release_unreserve(qdev, release); - return; + qxl_release_fence_buffer_objects(release); + +out_release_backoff: + if (ret) + qxl_release_backoff_reserve_list(release); +out_free_image: + qxl_image_free_objects(qdev, dimage); +out_free_clips: + qxl_bo_unref(&clips_bo); +out_free_drawable: + /* only free drawable on error */ + if (ret) + free_drawable(qdev, release); -out_unref: - qxl_release_unreserve(qdev, release); - qxl_release_free(qdev, release); } void qxl_draw_copyarea(struct qxl_device *qdev, @@ -342,22 +410,36 @@ void qxl_draw_copyarea(struct qxl_device *qdev, struct qxl_release *release; int ret; + ret = alloc_drawable(qdev, &release); + if (ret) + return; + + /* do a reservation run over all the objects we just allocated */ + ret = qxl_release_reserve_list(release, true); + if (ret) + goto out_free_release; + rect.left = dx; rect.top = dy; rect.right = dx + width; rect.bottom = dy + height; - ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, &release); - if (ret) - return; + ret = make_drawable(qdev, 0, QXL_COPY_BITS, &rect, release); + if (ret) { + qxl_release_backoff_reserve_list(release); + goto out_free_release; + } drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable->u.copy_bits.src_pos.x = sx; drawable->u.copy_bits.src_pos.y = sy; - qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_fence_releaseable(qdev, release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); - qxl_release_unreserve(qdev, release); + qxl_release_fence_buffer_objects(release); + +out_free_release: + if (ret) + free_drawable(qdev, release); } void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) @@ -370,10 +452,21 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) struct qxl_release *release; int ret; - ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, &release); + ret = alloc_drawable(qdev, &release); if (ret) return; + /* do a reservation run over all the objects we just allocated */ + ret = qxl_release_reserve_list(release, true); + if (ret) + goto out_free_release; + + ret = make_drawable(qdev, 0, QXL_DRAW_FILL, &rect, release); + if (ret) { + qxl_release_backoff_reserve_list(release); + goto out_free_release; + } + drawable = (struct qxl_drawable *)qxl_release_map(qdev, release); drawable->u.fill.brush.type = SPICE_BRUSH_TYPE_SOLID; drawable->u.fill.brush.u.color = color; @@ -384,7 +477,11 @@ void qxl_draw_fill(struct qxl_draw_fill *qxl_draw_fill_rec) drawable->u.fill.mask.bitmap = 0; qxl_release_unmap(qdev, release, &drawable->release_info); - qxl_fence_releaseable(qdev, release); + qxl_push_command_ring_release(qdev, release, QXL_CMD_DRAW, false); - qxl_release_unreserve(qdev, release); + qxl_release_fence_buffer_objects(release); + +out_free_release: + if (ret) + free_drawable(qdev, release); } diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index aacb791464a..7e96f4f1173 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -42,6 +42,9 @@ #include <ttm/ttm_placement.h> #include <ttm/ttm_module.h> +/* just for 
ttm_validate_buffer */ +#include <ttm/ttm_execbuf_util.h> + #include <drm/qxl_drm.h> #include "qxl_dev.h" @@ -118,9 +121,9 @@ struct qxl_bo { uint32_t surface_id; struct qxl_fence fence; /* per bo fence - list of releases */ struct qxl_release *surf_create; - atomic_t reserve_count; }; #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, gem_base) +#define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo) struct qxl_gem { struct mutex mutex; @@ -128,12 +131,7 @@ struct qxl_gem { }; struct qxl_bo_list { - struct list_head lhead; - struct qxl_bo *bo; -}; - -struct qxl_reloc_list { - struct list_head bos; + struct ttm_validate_buffer tv; }; struct qxl_crtc { @@ -195,10 +193,20 @@ enum { struct qxl_release { int id; int type; - int bo_count; uint32_t release_offset; uint32_t surface_release_id; - struct qxl_bo *bos[QXL_MAX_RES]; + struct ww_acquire_ctx ticket; + struct list_head bos; +}; + +struct qxl_drm_chunk { + struct list_head head; + struct qxl_bo *bo; +}; + +struct qxl_drm_image { + struct qxl_bo *bo; + struct list_head chunk_list; }; struct qxl_fb_image { @@ -314,6 +322,7 @@ struct qxl_device { struct workqueue_struct *gc_queue; struct work_struct gc_work; + struct work_struct fb_work; }; /* forward declaration for QXL_INFO_IO */ @@ -433,12 +442,19 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma); /* qxl image */ -int qxl_image_create(struct qxl_device *qdev, - struct qxl_release *release, - struct qxl_bo **image_bo, - const uint8_t *data, - int x, int y, int width, int height, - int depth, int stride); +int qxl_image_init(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image *dimage, + const uint8_t *data, + int x, int y, int width, int height, + int depth, int stride); +int +qxl_image_alloc_objects(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image **image_ptr, + int height, int stride); +void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage); + void qxl_update_screen(struct qxl_device *qxl); /* qxl io operations (qxl_cmd.c) */ @@ -459,20 +475,15 @@ int qxl_ring_push(struct qxl_ring *ring, const void *new_elt, bool interruptible void qxl_io_flush_release(struct qxl_device *qdev); void qxl_io_flush_surfaces(struct qxl_device *qdev); -int qxl_release_reserve(struct qxl_device *qdev, - struct qxl_release *release, bool no_wait); -void qxl_release_unreserve(struct qxl_device *qdev, - struct qxl_release *release); union qxl_release_info *qxl_release_map(struct qxl_device *qdev, struct qxl_release *release); void qxl_release_unmap(struct qxl_device *qdev, struct qxl_release *release, union qxl_release_info *info); -/* - * qxl_bo_add_resource. 
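
The replacement prototypes just below trade the old one-object-at-a-time reserve/unreserve calls for a list-based contract: callers add every buffer to the release with qxl_release_list_add(), reserve the accumulated list in one call to qxl_release_reserve_list(), and end the reservation either through qxl_release_fence_buffer_objects() on the success path or qxl_release_backoff_reserve_list() on error. A hedged sketch of a caller under that contract, with stub declarations standing in for the real qxl types and functions:

    struct release;
    struct bo;

    /* Stubs mirroring the shape of the qxl_release_* API, not the API. */
    int  release_list_add(struct release *r, struct bo *b);
    int  release_reserve_list(struct release *r, int no_intr);
    void release_backoff_reserve_list(struct release *r);
    void release_fence_buffer_objects(struct release *r);
    int  push_command(struct release *r);

    static int submit(struct release *r, struct bo *b)
    {
        int ret;

        ret = release_list_add(r, b);        /* track the object first */
        if (ret)
            return ret;

        ret = release_reserve_list(r, 1);    /* one reservation pass */
        if (ret)
            return ret;

        ret = push_command(r);               /* do work while reserved */
        if (ret) {
            release_backoff_reserve_list(r); /* error: unreserve, no fence */
            return ret;
        }

        release_fence_buffer_objects(r);     /* success: fence + unreserve */
        return 0;
    }

This is the same reserve/backoff/fence shape that the qxl_draw_* and cursor paths elsewhere in this series are converted to.
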
- * - */ -void qxl_bo_add_resource(struct qxl_bo *main_bo, struct qxl_bo *resource); +int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo); +int qxl_release_reserve_list(struct qxl_release *release, bool no_intr); +void qxl_release_backoff_reserve_list(struct qxl_release *release); +void qxl_release_fence_buffer_objects(struct qxl_release *release); int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, enum qxl_surface_cmd_type surface_cmd_type, @@ -481,15 +492,16 @@ int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, int type, struct qxl_release **release, struct qxl_bo **rbo); -int qxl_fence_releaseable(struct qxl_device *qdev, - struct qxl_release *release); + int qxl_push_command_ring_release(struct qxl_device *qdev, struct qxl_release *release, uint32_t type, bool interruptible); int qxl_push_cursor_ring_release(struct qxl_device *qdev, struct qxl_release *release, uint32_t type, bool interruptible); -int qxl_alloc_bo_reserved(struct qxl_device *qdev, unsigned long size, +int qxl_alloc_bo_reserved(struct qxl_device *qdev, + struct qxl_release *release, + unsigned long size, struct qxl_bo **_bo); /* qxl drawing commands */ @@ -510,15 +522,9 @@ void qxl_draw_copyarea(struct qxl_device *qdev, u32 sx, u32 sy, u32 dx, u32 dy); -uint64_t -qxl_release_alloc(struct qxl_device *qdev, int type, - struct qxl_release **ret); - void qxl_release_free(struct qxl_device *qdev, struct qxl_release *release); -void qxl_release_add_res(struct qxl_device *qdev, - struct qxl_release *release, - struct qxl_bo *bo); + /* used by qxl_debugfs_release */ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, uint64_t id); @@ -561,7 +567,7 @@ void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freein int qxl_update_surface(struct qxl_device *qdev, struct qxl_bo *surf); /* qxl_fence.c */ -int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id); +void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id); int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id); int qxl_fence_init(struct qxl_device *qdev, struct qxl_fence *qfence); void qxl_fence_fini(struct qxl_fence *qfence); diff --git a/drivers/gpu/drm/qxl/qxl_fb.c b/drivers/gpu/drm/qxl/qxl_fb.c index 76f39d88d68..88722f23343 100644 --- a/drivers/gpu/drm/qxl/qxl_fb.c +++ b/drivers/gpu/drm/qxl/qxl_fb.c @@ -37,12 +37,29 @@ #define QXL_DIRTY_DELAY (HZ / 30) +#define QXL_FB_OP_FILLRECT 1 +#define QXL_FB_OP_COPYAREA 2 +#define QXL_FB_OP_IMAGEBLIT 3 + +struct qxl_fb_op { + struct list_head head; + int op_type; + union { + struct fb_fillrect fr; + struct fb_copyarea ca; + struct fb_image ib; + } op; + void *img_data; +}; + struct qxl_fbdev { struct drm_fb_helper helper; struct qxl_framebuffer qfb; struct list_head fbdev_list; struct qxl_device *qdev; + spinlock_t delayed_ops_lock; + struct list_head delayed_ops; void *shadow; int size; @@ -164,8 +181,69 @@ static struct fb_deferred_io qxl_defio = { .deferred_io = qxl_deferred_io, }; -static void qxl_fb_fillrect(struct fb_info *info, - const struct fb_fillrect *fb_rect) +static void qxl_fb_delayed_fillrect(struct qxl_fbdev *qfbdev, + const struct fb_fillrect *fb_rect) +{ + struct qxl_fb_op *op; + unsigned long flags; + + op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); + if (!op) + return; + + op->op.fr = *fb_rect; + op->img_data = NULL; + op->op_type = QXL_FB_OP_FILLRECT; + + 
spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); + list_add_tail(&op->head, &qfbdev->delayed_ops); + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); +} + +static void qxl_fb_delayed_copyarea(struct qxl_fbdev *qfbdev, + const struct fb_copyarea *fb_copy) +{ + struct qxl_fb_op *op; + unsigned long flags; + + op = kmalloc(sizeof(struct qxl_fb_op), GFP_ATOMIC | __GFP_NOWARN); + if (!op) + return; + + op->op.ca = *fb_copy; + op->img_data = NULL; + op->op_type = QXL_FB_OP_COPYAREA; + + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); + list_add_tail(&op->head, &qfbdev->delayed_ops); + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); +} + +static void qxl_fb_delayed_imageblit(struct qxl_fbdev *qfbdev, + const struct fb_image *fb_image) +{ + struct qxl_fb_op *op; + unsigned long flags; + uint32_t size = fb_image->width * fb_image->height * (fb_image->depth >= 8 ? fb_image->depth / 8 : 1); + + op = kmalloc(sizeof(struct qxl_fb_op) + size, GFP_ATOMIC | __GFP_NOWARN); + if (!op) + return; + + op->op.ib = *fb_image; + op->img_data = (void *)(op + 1); + op->op_type = QXL_FB_OP_IMAGEBLIT; + + memcpy(op->img_data, fb_image->data, size); + + op->op.ib.data = op->img_data; + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); + list_add_tail(&op->head, &qfbdev->delayed_ops); + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); +} + +static void qxl_fb_fillrect_internal(struct fb_info *info, + const struct fb_fillrect *fb_rect) { struct qxl_fbdev *qfbdev = info->par; struct qxl_device *qdev = qfbdev->qdev; @@ -203,17 +281,28 @@ static void qxl_fb_fillrect(struct fb_info *info, qxl_draw_fill_rec.rect = rect; qxl_draw_fill_rec.color = color; qxl_draw_fill_rec.rop = rop; + + qxl_draw_fill(&qxl_draw_fill_rec); +} + +static void qxl_fb_fillrect(struct fb_info *info, + const struct fb_fillrect *fb_rect) +{ + struct qxl_fbdev *qfbdev = info->par; + struct qxl_device *qdev = qfbdev->qdev; + if (!drm_can_sleep()) { - qxl_io_log(qdev, - "%s: TODO use RCU, mysterious locks with spin_lock\n", - __func__); + qxl_fb_delayed_fillrect(qfbdev, fb_rect); + schedule_work(&qdev->fb_work); return; } - qxl_draw_fill(&qxl_draw_fill_rec); + /* make sure any previous work is done */ + flush_work(&qdev->fb_work); + qxl_fb_fillrect_internal(info, fb_rect); } -static void qxl_fb_copyarea(struct fb_info *info, - const struct fb_copyarea *region) +static void qxl_fb_copyarea_internal(struct fb_info *info, + const struct fb_copyarea *region) { struct qxl_fbdev *qfbdev = info->par; @@ -223,37 +312,89 @@ static void qxl_fb_copyarea(struct fb_info *info, region->dx, region->dy); } +static void qxl_fb_copyarea(struct fb_info *info, + const struct fb_copyarea *region) +{ + struct qxl_fbdev *qfbdev = info->par; + struct qxl_device *qdev = qfbdev->qdev; + + if (!drm_can_sleep()) { + qxl_fb_delayed_copyarea(qfbdev, region); + schedule_work(&qdev->fb_work); + return; + } + /* make sure any previous work is done */ + flush_work(&qdev->fb_work); + qxl_fb_copyarea_internal(info, region); +} + static void qxl_fb_imageblit_safe(struct qxl_fb_image *qxl_fb_image) { qxl_draw_opaque_fb(qxl_fb_image, 0); } +static void qxl_fb_imageblit_internal(struct fb_info *info, + const struct fb_image *image) +{ + struct qxl_fbdev *qfbdev = info->par; + struct qxl_fb_image qxl_fb_image; + + /* ensure proper order rendering operations - TODO: must do this + * for everything. 
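All three qxl_fb_delayed_*() helpers above share one shape: a context that may not sleep snapshots the request with a GFP_ATOMIC | __GFP_NOWARN allocation, queues it under the spinlock, and silently drops it if the allocation fails, since fbcon output here is best-effort. Condensed sketch (illustrative; queue_op_sketch and its parameters are invented):

    static void queue_op_sketch(struct qxl_fbdev *qfbdev, int type,
                                const void *payload, size_t len)
    {
        struct qxl_fb_op *op;
        unsigned long flags;

        op = kmalloc(sizeof(*op) + len, GFP_ATOMIC | __GFP_NOWARN);
        if (!op)
            return;                         /* cannot sleep or wait: drop the op */

        op->op_type = type;
        op->img_data = len ? (void *)(op + 1) : NULL;
        if (len)
            memcpy(op->img_data, payload, len);  /* caller's buffer may go away */

        spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags);
        list_add_tail(&op->head, &qfbdev->delayed_ops);
        spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags);
    }

The caller then kicks schedule_work(&qdev->fb_work) so the op is replayed from process context.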
*/ + qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); + qxl_fb_imageblit_safe(&qxl_fb_image); +} + static void qxl_fb_imageblit(struct fb_info *info, const struct fb_image *image) { struct qxl_fbdev *qfbdev = info->par; struct qxl_device *qdev = qfbdev->qdev; - struct qxl_fb_image qxl_fb_image; if (!drm_can_sleep()) { - /* we cannot do any ttm_bo allocation since that will fail on - * ioremap_wc..__get_vm_area_node, so queue the work item - * instead This can happen from printk inside an interrupt - * context, i.e.: smp_apic_timer_interrupt..check_cpu_stall */ - qxl_io_log(qdev, - "%s: TODO use RCU, mysterious locks with spin_lock\n", - __func__); + qxl_fb_delayed_imageblit(qfbdev, image); + schedule_work(&qdev->fb_work); return; } + /* make sure any previous work is done */ + flush_work(&qdev->fb_work); + qxl_fb_imageblit_internal(info, image); +} - /* ensure proper order of rendering operations - TODO: must do this - * for everything. */ - qxl_fb_image_init(&qxl_fb_image, qfbdev->qdev, info, image); - qxl_fb_imageblit_safe(&qxl_fb_image); +static void qxl_fb_work(struct work_struct *work) +{ + struct qxl_device *qdev = container_of(work, struct qxl_device, fb_work); + unsigned long flags; + struct qxl_fb_op *entry, *tmp; + struct qxl_fbdev *qfbdev = qdev->mode_info.qfbdev; + + /* since the irq context just adds entries to the end of the + list dropping the lock should be fine, as entry isn't modified + in the operation code */ + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); + list_for_each_entry_safe(entry, tmp, &qfbdev->delayed_ops, head) { + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); + switch (entry->op_type) { + case QXL_FB_OP_FILLRECT: + qxl_fb_fillrect_internal(qfbdev->helper.fbdev, &entry->op.fr); + break; + case QXL_FB_OP_COPYAREA: + qxl_fb_copyarea_internal(qfbdev->helper.fbdev, &entry->op.ca); + break; + case QXL_FB_OP_IMAGEBLIT: + qxl_fb_imageblit_internal(qfbdev->helper.fbdev, &entry->op.ib); + break; + } + spin_lock_irqsave(&qfbdev->delayed_ops_lock, flags); + list_del(&entry->head); + kfree(entry); + } + spin_unlock_irqrestore(&qfbdev->delayed_ops_lock, flags); } int qxl_fb_init(struct qxl_device *qdev) { + INIT_WORK(&qdev->fb_work, qxl_fb_work); return 0; } @@ -536,7 +677,8 @@ int qxl_fbdev_init(struct qxl_device *qdev) qfbdev->qdev = qdev; qdev->mode_info.qfbdev = qfbdev; qfbdev->helper.funcs = &qxl_fb_helper_funcs; - + spin_lock_init(&qfbdev->delayed_ops_lock); + INIT_LIST_HEAD(&qfbdev->delayed_ops); ret = drm_fb_helper_init(qdev->ddev, &qfbdev->helper, qxl_num_crtc /* num_crtc - QXL supports just 1 */, QXLFB_CONN_LIMIT); diff --git a/drivers/gpu/drm/qxl/qxl_fence.c b/drivers/gpu/drm/qxl/qxl_fence.c index 63c6715ad38..ae59e91cfb9 100644 --- a/drivers/gpu/drm/qxl/qxl_fence.c +++ b/drivers/gpu/drm/qxl/qxl_fence.c @@ -49,17 +49,11 @@ For some reason every so often qxl hw fails to release, things go wrong. 
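A locking note before the fence changes below: the _locked suffix gained by qxl_fence_add_release() follows the usual kernel convention of shifting the locking burden to the caller, here the TTM device's fence_lock. A conforming caller looks like this (illustrative; the real one is qxl_release_fence_buffer_objects() in qxl_release.c):

    spin_lock(&bo->tbo.bdev->fence_lock);
    qxl_fence_add_release_locked(&bo->fence, release->id);
    spin_unlock(&bo->tbo.bdev->fence_lock);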
*/ - - -int qxl_fence_add_release(struct qxl_fence *qfence, uint32_t rel_id) +/* must be called with the fence lock held */ +void qxl_fence_add_release_locked(struct qxl_fence *qfence, uint32_t rel_id) { - struct qxl_bo *bo = container_of(qfence, struct qxl_bo, fence); - - spin_lock(&bo->tbo.bdev->fence_lock); radix_tree_insert(&qfence->tree, rel_id, qfence); qfence->num_active_releases++; - spin_unlock(&bo->tbo.bdev->fence_lock); - return 0; } int qxl_fence_remove_release(struct qxl_fence *qfence, uint32_t rel_id) diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c index a235693aabb..25e1777fb0a 100644 --- a/drivers/gpu/drm/qxl/qxl_gem.c +++ b/drivers/gpu/drm/qxl/qxl_gem.c @@ -55,7 +55,7 @@ int qxl_gem_object_create(struct qxl_device *qdev, int size, /* At least align on page size */ if (alignment < PAGE_SIZE) alignment = PAGE_SIZE; - r = qxl_bo_create(qdev, size, kernel, initial_domain, surf, &qbo); + r = qxl_bo_create(qdev, size, kernel, false, initial_domain, surf, &qbo); if (r) { if (r != -ERESTARTSYS) DRM_ERROR( diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c index cf856206996..7fbcc35e8ad 100644 --- a/drivers/gpu/drm/qxl/qxl_image.c +++ b/drivers/gpu/drm/qxl/qxl_image.c @@ -30,31 +30,100 @@ #include "qxl_object.h" static int -qxl_image_create_helper(struct qxl_device *qdev, +qxl_allocate_chunk(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image *image, + unsigned int chunk_size) +{ + struct qxl_drm_chunk *chunk; + int ret; + + chunk = kmalloc(sizeof(struct qxl_drm_chunk), GFP_KERNEL); + if (!chunk) + return -ENOMEM; + + ret = qxl_alloc_bo_reserved(qdev, release, chunk_size, &chunk->bo); + if (ret) { + kfree(chunk); + return ret; + } + + list_add_tail(&chunk->head, &image->chunk_list); + return 0; +} + +int +qxl_image_alloc_objects(struct qxl_device *qdev, struct qxl_release *release, - struct qxl_bo **image_bo, - const uint8_t *data, - int width, int height, - int depth, unsigned int hash, - int stride) + struct qxl_drm_image **image_ptr, + int height, int stride) +{ + struct qxl_drm_image *image; + int ret; + + image = kmalloc(sizeof(struct qxl_drm_image), GFP_KERNEL); + if (!image) + return -ENOMEM; + + INIT_LIST_HEAD(&image->chunk_list); + + ret = qxl_alloc_bo_reserved(qdev, release, sizeof(struct qxl_image), &image->bo); + if (ret) { + kfree(image); + return ret; + } + + ret = qxl_allocate_chunk(qdev, release, image, sizeof(struct qxl_data_chunk) + stride * height); + if (ret) { + qxl_bo_unref(&image->bo); + kfree(image); + return ret; + } + *image_ptr = image; + return 0; +} + +void qxl_image_free_objects(struct qxl_device *qdev, struct qxl_drm_image *dimage) { + struct qxl_drm_chunk *chunk, *tmp; + + list_for_each_entry_safe(chunk, tmp, &dimage->chunk_list, head) { + qxl_bo_unref(&chunk->bo); + kfree(chunk); + } + + qxl_bo_unref(&dimage->bo); + kfree(dimage); +} + +static int +qxl_image_init_helper(struct qxl_device *qdev, + struct qxl_release *release, + struct qxl_drm_image *dimage, + const uint8_t *data, + int width, int height, + int depth, unsigned int hash, + int stride) +{ + struct qxl_drm_chunk *drv_chunk; struct qxl_image *image; struct qxl_data_chunk *chunk; int i; int chunk_stride; int linesize = width * depth / 8; - struct qxl_bo *chunk_bo; - int ret; + struct qxl_bo *chunk_bo, *image_bo; void *ptr; /* Chunk */ /* FIXME: Check integer overflow */ /* TODO: variable number of chunks */ + + drv_chunk = list_first_entry(&dimage->chunk_list, struct qxl_drm_chunk, head); + + chunk_bo = 
drv_chunk->bo; chunk_stride = stride; /* TODO: should use linesize, but it renders wrong (check the bitmaps are sent correctly first) */ - ret = qxl_alloc_bo_reserved(qdev, sizeof(*chunk) + height * chunk_stride, - &chunk_bo); - + ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, 0); chunk = ptr; chunk->data_size = height * chunk_stride; @@ -102,7 +171,6 @@ qxl_image_create_helper(struct qxl_device *qdev, while (remain > 0) { page_base = out_offset & PAGE_MASK; page_offset = offset_in_page(out_offset); - size = min((int)(PAGE_SIZE - page_offset), remain); ptr = qxl_bo_kmap_atomic_page(qdev, chunk_bo, page_base); @@ -116,14 +184,10 @@ qxl_image_create_helper(struct qxl_device *qdev, } } } - - qxl_bo_kunmap(chunk_bo); - /* Image */ - ret = qxl_alloc_bo_reserved(qdev, sizeof(*image), image_bo); - - ptr = qxl_bo_kmap_atomic_page(qdev, *image_bo, 0); + image_bo = dimage->bo; + ptr = qxl_bo_kmap_atomic_page(qdev, image_bo, 0); image = ptr; image->descriptor.id = 0; @@ -154,23 +218,20 @@ qxl_image_create_helper(struct qxl_device *qdev, image->u.bitmap.stride = chunk_stride; image->u.bitmap.palette = 0; image->u.bitmap.data = qxl_bo_physical_address(qdev, chunk_bo, 0); - qxl_release_add_res(qdev, release, chunk_bo); - qxl_bo_unreserve(chunk_bo); - qxl_bo_unref(&chunk_bo); - qxl_bo_kunmap_atomic_page(qdev, *image_bo, ptr); + qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr); return 0; } -int qxl_image_create(struct qxl_device *qdev, +int qxl_image_init(struct qxl_device *qdev, struct qxl_release *release, - struct qxl_bo **image_bo, + struct qxl_drm_image *dimage, const uint8_t *data, int x, int y, int width, int height, int depth, int stride) { data += y * stride + x * (depth / 8); - return qxl_image_create_helper(qdev, release, image_bo, data, + return qxl_image_init_helper(qdev, release, dimage, data, width, height, depth, 0, stride); } diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 27f45e49250..6de33563d6f 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -68,55 +68,60 @@ static int qxl_map_ioctl(struct drm_device *dev, void *data, &qxl_map->offset); } +struct qxl_reloc_info { + int type; + struct qxl_bo *dst_bo; + uint32_t dst_offset; + struct qxl_bo *src_bo; + int src_offset; +}; + /* * dst must be validated, i.e. whole bo on vram/surfacesram (right now all bo's * are on vram). 
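apply_reloc() below patches a 64-bit GPU address into the command BO by atomically mapping only the page that contains the target offset. The page/offset split it relies on, as a standalone calculation (plain C, illustrative values):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096ULL
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    int main(void)
    {
        uint64_t dst_off   = 0x12345;               /* arbitrary offset into the BO */
        uint64_t page_base = dst_off & PAGE_MASK;   /* page to kmap:  0x12000 */
        uint64_t in_page   = dst_off & ~PAGE_MASK;  /* write offset:  0x345 */

        printf("map page %#llx, write at +%#llx\n",
               (unsigned long long)page_base, (unsigned long long)in_page);
        return 0;
    }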
* *(dst + dst_off) = qxl_bo_physical_address(src, src_off) */ static void -apply_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, - struct qxl_bo *src, uint64_t src_off) +apply_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) { void *reloc_page; - - reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); - *(uint64_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, - src, src_off); - qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); + reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); + *(uint64_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = qxl_bo_physical_address(qdev, + info->src_bo, + info->src_offset); + qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); } static void -apply_surf_reloc(struct qxl_device *qdev, struct qxl_bo *dst, uint64_t dst_off, - struct qxl_bo *src) +apply_surf_reloc(struct qxl_device *qdev, struct qxl_reloc_info *info) { uint32_t id = 0; void *reloc_page; - if (src && !src->is_primary) - id = src->surface_id; + if (info->src_bo && !info->src_bo->is_primary) + id = info->src_bo->surface_id; - reloc_page = qxl_bo_kmap_atomic_page(qdev, dst, dst_off & PAGE_MASK); - *(uint32_t *)(reloc_page + (dst_off & ~PAGE_MASK)) = id; - qxl_bo_kunmap_atomic_page(qdev, dst, reloc_page); + reloc_page = qxl_bo_kmap_atomic_page(qdev, info->dst_bo, info->dst_offset & PAGE_MASK); + *(uint32_t *)(reloc_page + (info->dst_offset & ~PAGE_MASK)) = id; + qxl_bo_kunmap_atomic_page(qdev, info->dst_bo, reloc_page); } /* return holding the reference to this object */ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, struct drm_file *file_priv, uint64_t handle, - struct qxl_reloc_list *reloc_list) + struct qxl_release *release) { struct drm_gem_object *gobj; struct qxl_bo *qobj; int ret; gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle); - if (!gobj) { - DRM_ERROR("bad bo handle %lld\n", handle); + if (!gobj) return NULL; - } + qobj = gem_to_qxl_bo(gobj); - ret = qxl_bo_list_add(reloc_list, qobj); + ret = qxl_release_list_add(release, qobj); if (ret) return NULL; @@ -129,151 +134,177 @@ static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev, * However, the command as passed from user space must *not* contain the initial * QXLReleaseInfo struct (first XXX bytes) */ -static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +static int qxl_process_single_command(struct qxl_device *qdev, + struct drm_qxl_command *cmd, + struct drm_file *file_priv) { - struct qxl_device *qdev = dev->dev_private; - struct drm_qxl_execbuffer *execbuffer = data; - struct drm_qxl_command user_cmd; - int cmd_num; - struct qxl_bo *reloc_src_bo; - struct qxl_bo *reloc_dst_bo; - struct drm_qxl_reloc reloc; + struct qxl_reloc_info *reloc_info; + int release_type; + struct qxl_release *release; + struct qxl_bo *cmd_bo; void *fb_cmd; - int i, ret; - struct qxl_reloc_list reloc_list; + int i, j, ret, num_relocs; int unwritten; - uint32_t reloc_dst_offset; - INIT_LIST_HEAD(&reloc_list.bos); - for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { - struct qxl_release *release; - struct qxl_bo *cmd_bo; - int release_type; - struct drm_qxl_command *commands = - (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; + switch (cmd->type) { + case QXL_CMD_DRAW: + release_type = QXL_RELEASE_DRAWABLE; + break; + case QXL_CMD_SURFACE: + case QXL_CMD_CURSOR: + default: + DRM_DEBUG("Only draw commands in execbuffers\n"); + return 
-EINVAL; + break; + } - if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], - sizeof(user_cmd))) - return -EFAULT; - switch (user_cmd.type) { - case QXL_CMD_DRAW: - release_type = QXL_RELEASE_DRAWABLE; - break; - case QXL_CMD_SURFACE: - case QXL_CMD_CURSOR: - default: - DRM_DEBUG("Only draw commands in execbuffers\n"); - return -EINVAL; - break; - } + if (cmd->command_size > PAGE_SIZE - sizeof(union qxl_release_info)) + return -EINVAL; - if (user_cmd.command_size > PAGE_SIZE - sizeof(union qxl_release_info)) - return -EINVAL; + if (!access_ok(VERIFY_READ, + (void *)(unsigned long)cmd->command, + cmd->command_size)) + return -EFAULT; - if (!access_ok(VERIFY_READ, - (void *)(unsigned long)user_cmd.command, - user_cmd.command_size)) - return -EFAULT; + reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL); + if (!reloc_info) + return -ENOMEM; - ret = qxl_alloc_release_reserved(qdev, - sizeof(union qxl_release_info) + - user_cmd.command_size, - release_type, - &release, - &cmd_bo); - if (ret) - return ret; + ret = qxl_alloc_release_reserved(qdev, + sizeof(union qxl_release_info) + + cmd->command_size, + release_type, + &release, + &cmd_bo); + if (ret) + goto out_free_reloc; - /* TODO copy slow path code from i915 */ - fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); - unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)user_cmd.command, user_cmd.command_size); + /* TODO copy slow path code from i915 */ + fb_cmd = qxl_bo_kmap_atomic_page(qdev, cmd_bo, (release->release_offset & PAGE_SIZE)); + unwritten = __copy_from_user_inatomic_nocache(fb_cmd + sizeof(union qxl_release_info) + (release->release_offset & ~PAGE_SIZE), (void *)(unsigned long)cmd->command, cmd->command_size); - { - struct qxl_drawable *draw = fb_cmd; + { + struct qxl_drawable *draw = fb_cmd; + draw->mm_time = qdev->rom->mm_clock; + } - draw->mm_time = qdev->rom->mm_clock; - } - qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); - if (unwritten) { - DRM_ERROR("got unwritten %d\n", unwritten); - qxl_release_unreserve(qdev, release); - qxl_release_free(qdev, release); - return -EFAULT; + qxl_bo_kunmap_atomic_page(qdev, cmd_bo, fb_cmd); + if (unwritten) { + DRM_ERROR("got unwritten %d\n", unwritten); + ret = -EFAULT; + goto out_free_release; + } + + /* fill out reloc info structs */ + num_relocs = 0; + for (i = 0; i < cmd->relocs_num; ++i) { + struct drm_qxl_reloc reloc; + + if (DRM_COPY_FROM_USER(&reloc, + &((struct drm_qxl_reloc *)(uintptr_t)cmd->relocs)[i], + sizeof(reloc))) { + ret = -EFAULT; + goto out_free_bos; } - for (i = 0 ; i < user_cmd.relocs_num; ++i) { - if (DRM_COPY_FROM_USER(&reloc, - &((struct drm_qxl_reloc *)(uintptr_t)user_cmd.relocs)[i], - sizeof(reloc))) { - qxl_bo_list_unreserve(&reloc_list, true); - qxl_release_unreserve(qdev, release); - qxl_release_free(qdev, release); - return -EFAULT; - } + /* add the bos to the list of bos to validate - + need to validate first then process relocs? */ + if (reloc.reloc_type != QXL_RELOC_TYPE_BO && reloc.reloc_type != QXL_RELOC_TYPE_SURF) { + DRM_DEBUG("unknown reloc type %d\n", reloc_info[i].type); - /* add the bos to the list of bos to validate - - need to validate first then process relocs? 
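The restructured ioctl answers that question by strictly separating the phases: first resolve handles and take references, then reserve and validate the whole list in a single ttm_eu transaction, and only then touch BO memory. In outline (flow sketch only; collect_relocs(), apply_all_relocs() and put_reloc_bos() are invented names for the inline code around this hunk):

    static int process_one_command_sketch(struct qxl_device *qdev,
                                          struct drm_qxl_command *cmd)
    {
        struct qxl_release *release;
        int ret;

        ret = collect_relocs(qdev, cmd, &release);      /* phase 1: refs only */
        if (ret)
            goto out_put;

        ret = qxl_release_reserve_list(release, false); /* phase 2: lock + validate */
        if (ret)
            goto out_put;

        apply_all_relocs(qdev, cmd);                    /* phase 3: BOs are resident */

        ret = qxl_push_command_ring_release(qdev, release, cmd->type, true);
        if (ret)
            qxl_release_backoff_reserve_list(release);  /* unlock, no fence */
        else
            qxl_release_fence_buffer_objects(release);  /* fence + unlock */
    out_put:
        put_reloc_bos(cmd);                             /* drop phase-1 references */
        return ret;
    }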
*/ - if (reloc.dst_handle) { - reloc_dst_bo = qxlhw_handle_to_bo(qdev, file_priv, - reloc.dst_handle, &reloc_list); - if (!reloc_dst_bo) { - qxl_bo_list_unreserve(&reloc_list, true); - qxl_release_unreserve(qdev, release); - qxl_release_free(qdev, release); - return -EINVAL; - } - reloc_dst_offset = 0; - } else { - reloc_dst_bo = cmd_bo; - reloc_dst_offset = release->release_offset; + ret = -EINVAL; + goto out_free_bos; + } + reloc_info[i].type = reloc.reloc_type; + + if (reloc.dst_handle) { + reloc_info[i].dst_bo = qxlhw_handle_to_bo(qdev, file_priv, + reloc.dst_handle, release); + if (!reloc_info[i].dst_bo) { + ret = -EINVAL; + reloc_info[i].src_bo = NULL; + goto out_free_bos; } - - /* reserve and validate the reloc dst bo */ - if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { - reloc_src_bo = - qxlhw_handle_to_bo(qdev, file_priv, - reloc.src_handle, &reloc_list); - if (!reloc_src_bo) { - if (reloc_dst_bo != cmd_bo) - drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); - qxl_bo_list_unreserve(&reloc_list, true); - qxl_release_unreserve(qdev, release); - qxl_release_free(qdev, release); - return -EINVAL; - } - } else - reloc_src_bo = NULL; - if (reloc.reloc_type == QXL_RELOC_TYPE_BO) { - apply_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, - reloc_src_bo, reloc.src_offset); - } else if (reloc.reloc_type == QXL_RELOC_TYPE_SURF) { - apply_surf_reloc(qdev, reloc_dst_bo, reloc_dst_offset + reloc.dst_offset, reloc_src_bo); - } else { - DRM_ERROR("unknown reloc type %d\n", reloc.reloc_type); - return -EINVAL; + reloc_info[i].dst_offset = reloc.dst_offset; + } else { + reloc_info[i].dst_bo = cmd_bo; + reloc_info[i].dst_offset = reloc.dst_offset + release->release_offset; + } + num_relocs++; + + /* reserve and validate the reloc dst bo */ + if (reloc.reloc_type == QXL_RELOC_TYPE_BO || reloc.src_handle > 0) { + reloc_info[i].src_bo = + qxlhw_handle_to_bo(qdev, file_priv, + reloc.src_handle, release); + if (!reloc_info[i].src_bo) { + if (reloc_info[i].dst_bo != cmd_bo) + drm_gem_object_unreference_unlocked(&reloc_info[i].dst_bo->gem_base); + ret = -EINVAL; + goto out_free_bos; } + reloc_info[i].src_offset = reloc.src_offset; + } else { + reloc_info[i].src_bo = NULL; + reloc_info[i].src_offset = 0; + } + } - if (reloc_src_bo && reloc_src_bo != cmd_bo) { - qxl_release_add_res(qdev, release, reloc_src_bo); - drm_gem_object_unreference_unlocked(&reloc_src_bo->gem_base); - } + /* validate all buffers */ + ret = qxl_release_reserve_list(release, false); + if (ret) + goto out_free_bos; - if (reloc_dst_bo != cmd_bo) - drm_gem_object_unreference_unlocked(&reloc_dst_bo->gem_base); - } - qxl_fence_releaseable(qdev, release); + for (i = 0; i < cmd->relocs_num; ++i) { + if (reloc_info[i].type == QXL_RELOC_TYPE_BO) + apply_reloc(qdev, &reloc_info[i]); + else if (reloc_info[i].type == QXL_RELOC_TYPE_SURF) + apply_surf_reloc(qdev, &reloc_info[i]); + } - ret = qxl_push_command_ring_release(qdev, release, user_cmd.type, true); - if (ret == -ERESTARTSYS) { - qxl_release_unreserve(qdev, release); - qxl_release_free(qdev, release); - qxl_bo_list_unreserve(&reloc_list, true); + ret = qxl_push_command_ring_release(qdev, release, cmd->type, true); + if (ret) + qxl_release_backoff_reserve_list(release); + else + qxl_release_fence_buffer_objects(release); + +out_free_bos: + for (j = 0; j < num_relocs; j++) { + if (reloc_info[j].dst_bo != cmd_bo) + drm_gem_object_unreference_unlocked(&reloc_info[j].dst_bo->gem_base); + if (reloc_info[j].src_bo && reloc_info[j].src_bo != cmd_bo) + 
drm_gem_object_unreference_unlocked(&reloc_info[j].src_bo->gem_base); + } +out_free_release: + if (ret) + qxl_release_free(qdev, release); +out_free_reloc: + kfree(reloc_info); + return ret; +} + +static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv) +{ + struct qxl_device *qdev = dev->dev_private; + struct drm_qxl_execbuffer *execbuffer = data; + struct drm_qxl_command user_cmd; + int cmd_num; + int ret; + + for (cmd_num = 0; cmd_num < execbuffer->commands_num; ++cmd_num) { + + struct drm_qxl_command *commands = + (struct drm_qxl_command *)(uintptr_t)execbuffer->commands; + + if (DRM_COPY_FROM_USER(&user_cmd, &commands[cmd_num], + sizeof(user_cmd))) + return -EFAULT; + + ret = qxl_process_single_command(qdev, &user_cmd, file_priv); + if (ret) return ret; - } - qxl_release_unreserve(qdev, release); } - qxl_bo_list_unreserve(&reloc_list, 0); return 0; } @@ -305,7 +336,7 @@ static int qxl_update_area_ioctl(struct drm_device *dev, void *data, goto out; if (!qobj->pin_count) { - qxl_ttm_placement_from_domain(qobj, qobj->type); + qxl_ttm_placement_from_domain(qobj, qobj->type, false); ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, true, false); if (unlikely(ret)) diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 1191fe7788c..aa161cddd87 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -51,20 +51,21 @@ bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo) return false; } -void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) +void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned) { u32 c = 0; + u32 pflag = pinned ? TTM_PL_FLAG_NO_EVICT : 0; qbo->placement.fpfn = 0; qbo->placement.lpfn = 0; qbo->placement.placement = qbo->placements; qbo->placement.busy_placement = qbo->placements; if (domain == QXL_GEM_DOMAIN_VRAM) - qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM; + qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_VRAM | pflag; if (domain == QXL_GEM_DOMAIN_SURFACE) - qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0; + qbo->placements[c++] = TTM_PL_FLAG_CACHED | TTM_PL_FLAG_PRIV0 | pflag; if (domain == QXL_GEM_DOMAIN_CPU) - qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; + qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM | pflag; if (!c) qbo->placements[c++] = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM; qbo->placement.num_placement = c; @@ -73,7 +74,7 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) int qxl_bo_create(struct qxl_device *qdev, - unsigned long size, bool kernel, u32 domain, + unsigned long size, bool kernel, bool pinned, u32 domain, struct qxl_surface *surf, struct qxl_bo **bo_ptr) { @@ -99,15 +100,15 @@ int qxl_bo_create(struct qxl_device *qdev, } bo->gem_base.driver_private = NULL; bo->type = domain; - bo->pin_count = 0; + bo->pin_count = pinned ? 
1 : 0; bo->surface_id = 0; qxl_fence_init(qdev, &bo->fence); INIT_LIST_HEAD(&bo->list); - atomic_set(&bo->reserve_count, 0); + if (surf) bo->surf = *surf; - qxl_ttm_placement_from_domain(bo, domain); + qxl_ttm_placement_from_domain(bo, domain, pinned); r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type, &bo->placement, 0, !kernel, NULL, size, @@ -228,7 +229,7 @@ struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo) int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) { struct qxl_device *qdev = (struct qxl_device *)bo->gem_base.dev->dev_private; - int r, i; + int r; if (bo->pin_count) { bo->pin_count++; @@ -236,9 +237,7 @@ int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr) *gpu_addr = qxl_bo_gpu_offset(bo); return 0; } - qxl_ttm_placement_from_domain(bo, domain); - for (i = 0; i < bo->placement.num_placement; i++) - bo->placements[i] |= TTM_PL_FLAG_NO_EVICT; + qxl_ttm_placement_from_domain(bo, domain, true); r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false); if (likely(r == 0)) { bo->pin_count = 1; @@ -317,53 +316,6 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) return 0; } -void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed) -{ - struct qxl_bo_list *entry, *sf; - - list_for_each_entry_safe(entry, sf, &reloc_list->bos, lhead) { - qxl_bo_unreserve(entry->bo); - list_del(&entry->lhead); - kfree(entry); - } -} - -int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo) -{ - struct qxl_bo_list *entry; - int ret; - - list_for_each_entry(entry, &reloc_list->bos, lhead) { - if (entry->bo == bo) - return 0; - } - - entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); - if (!entry) - return -ENOMEM; - - entry->bo = bo; - list_add(&entry->lhead, &reloc_list->bos); - - ret = qxl_bo_reserve(bo, false); - if (ret) - return ret; - - if (!bo->pin_count) { - qxl_ttm_placement_from_domain(bo, bo->type); - ret = ttm_bo_validate(&bo->tbo, &bo->placement, - true, false); - if (ret) - return ret; - } - - /* allocate a surface for reserved + validated buffers */ - ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); - if (ret) - return ret; - return 0; -} - int qxl_surf_evict(struct qxl_device *qdev) { return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV0); diff --git a/drivers/gpu/drm/qxl/qxl_object.h b/drivers/gpu/drm/qxl/qxl_object.h index ee7ad79ce78..8cb6167038e 100644 --- a/drivers/gpu/drm/qxl/qxl_object.h +++ b/drivers/gpu/drm/qxl/qxl_object.h @@ -88,7 +88,7 @@ static inline int qxl_bo_wait(struct qxl_bo *bo, u32 *mem_type, extern int qxl_bo_create(struct qxl_device *qdev, unsigned long size, - bool kernel, u32 domain, + bool kernel, bool pinned, u32 domain, struct qxl_surface *surf, struct qxl_bo **bo_ptr); extern int qxl_bo_kmap(struct qxl_bo *bo, void **ptr); @@ -99,9 +99,7 @@ extern struct qxl_bo *qxl_bo_ref(struct qxl_bo *bo); extern void qxl_bo_unref(struct qxl_bo **bo); extern int qxl_bo_pin(struct qxl_bo *bo, u32 domain, u64 *gpu_addr); extern int qxl_bo_unpin(struct qxl_bo *bo); -extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain); +extern void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain, bool pinned); extern bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo); -extern int qxl_bo_list_add(struct qxl_reloc_list *reloc_list, struct qxl_bo *bo); -extern void qxl_bo_list_unreserve(struct qxl_reloc_list *reloc_list, bool failed); #endif diff --git a/drivers/gpu/drm/qxl/qxl_release.c b/drivers/gpu/drm/qxl/qxl_release.c index b443d6751d5..b61449e52cd 100644 --- 
a/drivers/gpu/drm/qxl/qxl_release.c +++ b/drivers/gpu/drm/qxl/qxl_release.c @@ -38,7 +38,8 @@ static const int release_size_per_bo[] = { RELEASE_SIZE, SURFACE_RELEASE_SIZE, RELEASE_SIZE }; static const int releases_per_bo[] = { RELEASES_PER_BO, SURFACE_RELEASES_PER_BO, RELEASES_PER_BO }; -uint64_t + +static uint64_t qxl_release_alloc(struct qxl_device *qdev, int type, struct qxl_release **ret) { @@ -53,9 +54,9 @@ qxl_release_alloc(struct qxl_device *qdev, int type, return 0; } release->type = type; - release->bo_count = 0; release->release_offset = 0; release->surface_release_id = 0; + INIT_LIST_HEAD(&release->bos); idr_preload(GFP_KERNEL); spin_lock(&qdev->release_idr_lock); @@ -77,20 +78,20 @@ void qxl_release_free(struct qxl_device *qdev, struct qxl_release *release) { - int i; - - QXL_INFO(qdev, "release %d, type %d, %d bos\n", release->id, - release->type, release->bo_count); + struct qxl_bo_list *entry, *tmp; + QXL_INFO(qdev, "release %d, type %d\n", release->id, + release->type); if (release->surface_release_id) qxl_surface_id_dealloc(qdev, release->surface_release_id); - for (i = 0 ; i < release->bo_count; ++i) { + list_for_each_entry_safe(entry, tmp, &release->bos, tv.head) { + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); QXL_INFO(qdev, "release %llx\n", - release->bos[i]->tbo.addr_space_offset + entry->tv.bo->addr_space_offset - DRM_FILE_OFFSET); - qxl_fence_remove_release(&release->bos[i]->fence, release->id); - qxl_bo_unref(&release->bos[i]); + qxl_fence_remove_release(&bo->fence, release->id); + qxl_bo_unref(&bo); } spin_lock(&qdev->release_idr_lock); idr_remove(&qdev->release_idr, release->id); @@ -98,83 +99,117 @@ qxl_release_free(struct qxl_device *qdev, kfree(release); } -void -qxl_release_add_res(struct qxl_device *qdev, struct qxl_release *release, - struct qxl_bo *bo) -{ - int i; - for (i = 0; i < release->bo_count; i++) - if (release->bos[i] == bo) - return; - - if (release->bo_count >= QXL_MAX_RES) { - DRM_ERROR("exceeded max resource on a qxl_release item\n"); - return; - } - release->bos[release->bo_count++] = qxl_bo_ref(bo); -} - static int qxl_release_bo_alloc(struct qxl_device *qdev, struct qxl_bo **bo) { int ret; - ret = qxl_bo_create(qdev, PAGE_SIZE, false, QXL_GEM_DOMAIN_VRAM, NULL, + /* pin releases bo's they are too messy to evict */ + ret = qxl_bo_create(qdev, PAGE_SIZE, false, true, + QXL_GEM_DOMAIN_VRAM, NULL, bo); return ret; } -int qxl_release_reserve(struct qxl_device *qdev, - struct qxl_release *release, bool no_wait) +int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo) +{ + struct qxl_bo_list *entry; + + list_for_each_entry(entry, &release->bos, tv.head) { + if (entry->tv.bo == &bo->tbo) + return 0; + } + + entry = kmalloc(sizeof(struct qxl_bo_list), GFP_KERNEL); + if (!entry) + return -ENOMEM; + + qxl_bo_ref(bo); + entry->tv.bo = &bo->tbo; + list_add_tail(&entry->tv.head, &release->bos); + return 0; +} + +static int qxl_release_validate_bo(struct qxl_bo *bo) { int ret; - if (atomic_inc_return(&release->bos[0]->reserve_count) == 1) { - ret = qxl_bo_reserve(release->bos[0], no_wait); + + if (!bo->pin_count) { + qxl_ttm_placement_from_domain(bo, bo->type, false); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, + true, false); if (ret) return ret; } + + /* allocate a surface for reserved + validated buffers */ + ret = qxl_bo_check_id(bo->gem_base.dev->dev_private, bo); + if (ret) + return ret; + return 0; +} + +int qxl_release_reserve_list(struct qxl_release *release, bool no_intr) +{ + int ret; + struct qxl_bo_list *entry; + + /* 
if only one object on the release its the release itself + since these objects are pinned no need to reserve */ + if (list_is_singular(&release->bos)) + return 0; + + ret = ttm_eu_reserve_buffers(&release->ticket, &release->bos); + if (ret) + return ret; + + list_for_each_entry(entry, &release->bos, tv.head) { + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); + + ret = qxl_release_validate_bo(bo); + if (ret) { + ttm_eu_backoff_reservation(&release->ticket, &release->bos); + return ret; + } + } return 0; } -void qxl_release_unreserve(struct qxl_device *qdev, - struct qxl_release *release) +void qxl_release_backoff_reserve_list(struct qxl_release *release) { - if (atomic_dec_and_test(&release->bos[0]->reserve_count)) - qxl_bo_unreserve(release->bos[0]); + /* if only one object on the release its the release itself + since these objects are pinned no need to reserve */ + if (list_is_singular(&release->bos)) + return; + + ttm_eu_backoff_reservation(&release->ticket, &release->bos); } + int qxl_alloc_surface_release_reserved(struct qxl_device *qdev, enum qxl_surface_cmd_type surface_cmd_type, struct qxl_release *create_rel, struct qxl_release **release) { - int ret; - if (surface_cmd_type == QXL_SURFACE_CMD_DESTROY && create_rel) { int idr_ret; + struct qxl_bo_list *entry = list_first_entry(&create_rel->bos, struct qxl_bo_list, tv.head); struct qxl_bo *bo; union qxl_release_info *info; /* stash the release after the create command */ idr_ret = qxl_release_alloc(qdev, QXL_RELEASE_SURFACE_CMD, release); - bo = qxl_bo_ref(create_rel->bos[0]); + bo = qxl_bo_ref(to_qxl_bo(entry->tv.bo)); (*release)->release_offset = create_rel->release_offset + 64; - qxl_release_add_res(qdev, *release, bo); + qxl_release_list_add(*release, bo); - ret = qxl_release_reserve(qdev, *release, false); - if (ret) { - DRM_ERROR("release reserve failed\n"); - goto out_unref; - } info = qxl_release_map(qdev, *release); info->id = idr_ret; qxl_release_unmap(qdev, *release, info); - -out_unref: qxl_bo_unref(&bo); - return ret; + return 0; } return qxl_alloc_release_reserved(qdev, sizeof(struct qxl_surface_cmd), @@ -187,7 +222,7 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, { struct qxl_bo *bo; int idr_ret; - int ret; + int ret = 0; union qxl_release_info *info; int cur_idx; @@ -216,11 +251,6 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, mutex_unlock(&qdev->release_mutex); return ret; } - - /* pin releases bo's they are too messy to evict */ - ret = qxl_bo_reserve(qdev->current_release_bo[cur_idx], false); - qxl_bo_pin(qdev->current_release_bo[cur_idx], QXL_GEM_DOMAIN_VRAM, NULL); - qxl_bo_unreserve(qdev->current_release_bo[cur_idx]); } bo = qxl_bo_ref(qdev->current_release_bo[cur_idx]); @@ -231,36 +261,18 @@ int qxl_alloc_release_reserved(struct qxl_device *qdev, unsigned long size, if (rbo) *rbo = bo; - qxl_release_add_res(qdev, *release, bo); - - ret = qxl_release_reserve(qdev, *release, false); mutex_unlock(&qdev->release_mutex); - if (ret) - goto out_unref; + + qxl_release_list_add(*release, bo); info = qxl_release_map(qdev, *release); info->id = idr_ret; qxl_release_unmap(qdev, *release, info); -out_unref: qxl_bo_unref(&bo); return ret; } -int qxl_fence_releaseable(struct qxl_device *qdev, - struct qxl_release *release) -{ - int i, ret; - for (i = 0; i < release->bo_count; i++) { - if (!release->bos[i]->tbo.sync_obj) - release->bos[i]->tbo.sync_obj = &release->bos[i]->fence; - ret = qxl_fence_add_release(&release->bos[i]->fence, release->id); - if (ret) - 
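For reference, what the three new entry points do underneath (summary of the ttm_execbuf_util contract as used here, not a verbatim call chain):

    /*
     * qxl_release_reserve_list()
     *     ttm_eu_reserve_buffers(&release->ticket, &release->bos);
     *     - ww_mutex-locks every BO on the list with deadlock avoidance,
     *       then each BO is validated into a reachable placement
     *
     * qxl_release_backoff_reserve_list()           - error path
     *     ttm_eu_backoff_reservation(&release->ticket, &release->bos);
     *     - unlocks everything, publishes no fence
     *
     * qxl_release_fence_buffer_objects()           - success path
     *     - installs the qxl fence as each BO's sync object, re-adds the
     *       BOs to the LRU, unlocks them and finishes the ww ticket
     */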
return ret; - } - return 0; -} - struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, uint64_t id) { @@ -273,10 +285,7 @@ struct qxl_release *qxl_release_from_id_locked(struct qxl_device *qdev, DRM_ERROR("failed to find id in release_idr\n"); return NULL; } - if (release->bo_count < 1) { - DRM_ERROR("read a released resource with 0 bos\n"); - return NULL; - } + return release; } @@ -285,9 +294,12 @@ union qxl_release_info *qxl_release_map(struct qxl_device *qdev, { void *ptr; union qxl_release_info *info; - struct qxl_bo *bo = release->bos[0]; + struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); ptr = qxl_bo_kmap_atomic_page(qdev, bo, release->release_offset & PAGE_SIZE); + if (!ptr) + return NULL; info = ptr + (release->release_offset & ~PAGE_SIZE); return info; } @@ -296,9 +308,51 @@ void qxl_release_unmap(struct qxl_device *qdev, struct qxl_release *release, union qxl_release_info *info) { - struct qxl_bo *bo = release->bos[0]; + struct qxl_bo_list *entry = list_first_entry(&release->bos, struct qxl_bo_list, tv.head); + struct qxl_bo *bo = to_qxl_bo(entry->tv.bo); void *ptr; ptr = ((void *)info) - (release->release_offset & ~PAGE_SIZE); qxl_bo_kunmap_atomic_page(qdev, bo, ptr); } + +void qxl_release_fence_buffer_objects(struct qxl_release *release) +{ + struct ttm_validate_buffer *entry; + struct ttm_buffer_object *bo; + struct ttm_bo_global *glob; + struct ttm_bo_device *bdev; + struct ttm_bo_driver *driver; + struct qxl_bo *qbo; + + /* if only one object on the release its the release itself + since these objects are pinned no need to reserve */ + if (list_is_singular(&release->bos)) + return; + + bo = list_first_entry(&release->bos, struct ttm_validate_buffer, head)->bo; + bdev = bo->bdev; + driver = bdev->driver; + glob = bo->glob; + + spin_lock(&glob->lru_lock); + spin_lock(&bdev->fence_lock); + + list_for_each_entry(entry, &release->bos, head) { + bo = entry->bo; + qbo = to_qxl_bo(bo); + + if (!entry->bo->sync_obj) + entry->bo->sync_obj = &qbo->fence; + + qxl_fence_add_release_locked(&qbo->fence, release->id); + + ttm_bo_add_to_lru(bo); + ww_mutex_unlock(&bo->resv->lock); + entry->reserved = false; + } + spin_unlock(&bdev->fence_lock); + spin_unlock(&glob->lru_lock); + ww_acquire_fini(&release->ticket); +} + diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 489cb8cece4..1dfd84cda2a 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -206,7 +206,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, return; } qbo = container_of(bo, struct qxl_bo, tbo); - qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU); + qxl_ttm_placement_from_domain(qbo, QXL_GEM_DOMAIN_CPU, false); *placement = qbo->placement; } diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index fb441a790f3..15da7ef344a 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1222,12 +1222,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) int r; mutex_lock(&ctx->mutex); + /* reset data block */ + ctx->data_block = 0; /* reset reg block */ ctx->reg_block = 0; /* reset fb window */ ctx->fb_base = 0; /* reset io mode */ ctx->io_mode = ATOM_IO_MM; + /* reset divmul */ + ctx->divmul[0] = 0; + ctx->divmul[1] = 0; r = atom_execute_table_locked(ctx, index, params); mutex_unlock(&ctx->mutex); return r; diff --git a/drivers/gpu/drm/radeon/atombios_dp.c 
b/drivers/gpu/drm/radeon/atombios_dp.c index 064023bed48..32501f6ec99 100644 --- a/drivers/gpu/drm/radeon/atombios_dp.c +++ b/drivers/gpu/drm/radeon/atombios_dp.c @@ -44,6 +44,41 @@ static char *pre_emph_names[] = { }; /***** radeon AUX functions *****/ + +/* Atom needs data in little endian format + * so swap as appropriate when copying data to + * or from atom. Note that atom operates on + * dw units. + */ +static void radeon_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le) +{ +#ifdef __BIG_ENDIAN + u8 src_tmp[20], dst_tmp[20]; /* used for byteswapping */ + u32 *dst32, *src32; + int i; + + memcpy(src_tmp, src, num_bytes); + src32 = (u32 *)src_tmp; + dst32 = (u32 *)dst_tmp; + if (to_le) { + for (i = 0; i < ((num_bytes + 3) / 4); i++) + dst32[i] = cpu_to_le32(src32[i]); + memcpy(dst, dst_tmp, num_bytes); + } else { + u8 dws = num_bytes & ~3; + for (i = 0; i < ((num_bytes + 3) / 4); i++) + dst32[i] = le32_to_cpu(src32[i]); + memcpy(dst, dst_tmp, dws); + if (num_bytes % 4) { + for (i = 0; i < (num_bytes % 4); i++) + dst[dws+i] = dst_tmp[dws+i]; + } + } +#else + memcpy(dst, src, num_bytes); +#endif +} + union aux_channel_transaction { PROCESS_AUX_CHANNEL_TRANSACTION_PS_ALLOCATION v1; PROCESS_AUX_CHANNEL_TRANSACTION_PARAMETERS_V2 v2; @@ -65,10 +100,10 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); - memcpy(base, send, send_bytes); + radeon_copy_swap(base, send, send_bytes, true); - args.v1.lpAuxRequest = 0 + 4; - args.v1.lpDataOut = 16 + 4; + args.v1.lpAuxRequest = cpu_to_le16((u16)(0 + 4)); + args.v1.lpDataOut = cpu_to_le16((u16)(16 + 4)); args.v1.ucDataOutLen = 0; args.v1.ucChannelID = chan->rec.i2c_id; args.v1.ucDelay = delay / 10; @@ -102,7 +137,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan, recv_bytes = recv_size; if (recv && recv_size) - memcpy(recv, base + 16, recv_bytes); + radeon_copy_swap(recv, base + 16, recv_bytes, false); return recv_bytes; } diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c index 0bfd55e0882..9953e1fbc46 100644 --- a/drivers/gpu/drm/radeon/btc_dpm.c +++ b/drivers/gpu/drm/radeon/btc_dpm.c @@ -2548,9 +2548,6 @@ int btc_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - u16 data_offset, size; - u8 frev, crev; struct atom_clock_dividers dividers; int ret; @@ -2633,16 +2630,7 @@ int btc_dpm_init(struct radeon_device *rdev) eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -2659,8 +2647,7 @@ int btc_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 6dacec4e209..8928bd109c1 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -2587,9 +2587,11 @@ u32 cik_compute_ring_get_rptr(struct radeon_device 
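cik_compute_ring_get_rptr(), continuing below, is the first of several places the new srbm_mutex closes a race: SRBM-banked registers are select-then-access, so two contexts interleaving their cik_srbm_select() calls would read each other's banks. The guarded pattern (illustrative):

    mutex_lock(&rdev->srbm_mutex);
    cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); /* route bank */
    rptr = RREG32(CP_HQD_PQ_RPTR);                               /* banked access */
    cik_srbm_select(rdev, 0, 0, 0, 0);                           /* restore default */
    mutex_unlock(&rdev->srbm_mutex);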
*rdev, if (rdev->wb.enabled) { rptr = le32_to_cpu(rdev->wb.wb[ring->rptr_offs/4]); } else { + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); rptr = RREG32(CP_HQD_PQ_RPTR); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); } rptr = (rptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; @@ -2604,9 +2606,11 @@ u32 cik_compute_ring_get_wptr(struct radeon_device *rdev, if (rdev->wb.enabled) { wptr = le32_to_cpu(rdev->wb.wb[ring->wptr_offs/4]); } else { + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, ring->me, ring->pipe, ring->queue, 0); wptr = RREG32(CP_HQD_PQ_WPTR); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); } wptr = (wptr & ring->ptr_reg_mask) >> ring->ptr_reg_shift; @@ -2897,6 +2901,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_CPF_DEBUG, tmp); /* init the pipes */ + mutex_lock(&rdev->srbm_mutex); for (i = 0; i < (rdev->mec.num_pipe * rdev->mec.num_mec); i++) { int me = (i < 4) ? 1 : 2; int pipe = (i < 4) ? i : (i - 4); @@ -2919,6 +2924,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_HPD_EOP_CONTROL, tmp); } cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); /* init the queues. Just two for now. */ for (i = 0; i < 2; i++) { @@ -2972,6 +2978,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) mqd->static_thread_mgmt23[0] = 0xffffffff; mqd->static_thread_mgmt23[1] = 0xffffffff; + mutex_lock(&rdev->srbm_mutex); cik_srbm_select(rdev, rdev->ring[idx].me, rdev->ring[idx].pipe, rdev->ring[idx].queue, 0); @@ -3099,6 +3106,7 @@ static int cik_cp_compute_resume(struct radeon_device *rdev) WREG32(CP_HQD_ACTIVE, mqd->queue_state.cp_hqd_active); cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); radeon_bo_kunmap(rdev->ring[idx].mqd_obj); radeon_bo_unreserve(rdev->ring[idx].mqd_obj); @@ -4320,6 +4328,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) /* XXX SH_MEM regs */ /* where to put LDS, scratch, GPUVM in FSA64 space */ + mutex_lock(&rdev->srbm_mutex); for (i = 0; i < 16; i++) { cik_srbm_select(rdev, 0, 0, 0, i); /* CP and shaders */ @@ -4335,6 +4344,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev) /* XXX SDMA RLC - todo */ } cik_srbm_select(rdev, 0, 0, 0, 0); + mutex_unlock(&rdev->srbm_mutex); cik_pcie_gart_tlb_flush(rdev); DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", @@ -5954,6 +5964,8 @@ static int cik_startup(struct radeon_device *rdev) struct radeon_ring *ring; int r; + cik_mc_program(rdev); + if (rdev->flags & RADEON_IS_IGP) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw || !rdev->mec_fw || !rdev->sdma_fw || !rdev->rlc_fw) { @@ -5985,7 +5997,6 @@ static int cik_startup(struct radeon_device *rdev) if (r) return r; - cik_mc_program(rdev); r = cik_pcie_gart_enable(rdev); if (r) return r; @@ -6194,7 +6205,7 @@ int cik_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); cik_cp_enable(rdev, false); cik_sdma_enable(rdev, false); - r600_uvd_rbc_stop(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); cik_irq_suspend(rdev); radeon_wb_disable(rdev); @@ -6358,6 +6369,7 @@ void cik_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); cik_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); @@ -6978,7 +6990,7 @@ int cik_uvd_resume(struct radeon_device *rdev) /* programm the VCPU memory controller bits 0-27 */ addr = 
rdev->uvd.gpu_addr >> 3; - size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3; + size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3; WREG32(UVD_VCPU_CACHE_OFFSET0, addr); WREG32(UVD_VCPU_CACHE_SIZE0, size); diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c index 9bcdd174780..7e5d0b570a3 100644 --- a/drivers/gpu/drm/radeon/cypress_dpm.c +++ b/drivers/gpu/drm/radeon/cypress_dpm.c @@ -2038,9 +2038,6 @@ int cypress_dpm_init(struct radeon_device *rdev) { struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - uint16_t data_offset, size; - uint8_t frev, crev; struct atom_clock_dividers dividers; int ret; @@ -2092,16 +2089,7 @@ int cypress_dpm_init(struct radeon_device *rdev) eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -2122,8 +2110,7 @@ int cypress_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index 038dcac7670..d5b49e33315 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5106,6 +5106,8 @@ static int evergreen_startup(struct radeon_device *rdev) /* enable aspm */ evergreen_program_aspm(rdev); + evergreen_mc_program(rdev); + if (ASIC_IS_DCE5(rdev)) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw || !rdev->mc_fw) { r = ni_init_microcode(rdev); @@ -5133,7 +5135,6 @@ static int evergreen_startup(struct radeon_device *rdev) if (r) return r; - evergreen_mc_program(rdev); if (rdev->flags & RADEON_IS_AGP) { evergreen_agp_enable(rdev); } else { @@ -5291,10 +5292,10 @@ int evergreen_resume(struct radeon_device *rdev) int evergreen_suspend(struct radeon_device *rdev) { r600_audio_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); r700_cp_stop(rdev); r600_dma_stop(rdev); - r600_uvd_rbc_stop(rdev); evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); evergreen_pcie_gart_disable(rdev); @@ -5429,6 +5430,7 @@ void evergreen_fini(struct radeon_device *rdev) radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); evergreen_pcie_gart_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); r600_vram_scratch_fini(rdev); radeon_gem_fini(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c index b0d3fb34141..b0e280058b9 100644 --- a/drivers/gpu/drm/radeon/evergreen_hdmi.c +++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c @@ -148,18 +148,40 @@ static void evergreen_audio_set_dto(struct drm_encoder *encoder, u32 clock) struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc); u32 base_rate = 24000; + u32 max_ratio = clock / base_rate; + u32 dto_phase; + u32 dto_modulo = clock; + u32 wallclock_ratio; + u32 dto_cntl; if (!dig || !dig->afmt) return; + if (max_ratio >= 8) { + dto_phase = 192 * 1000; + wallclock_ratio = 3; + } 
else if (max_ratio >= 4) { + dto_phase = 96 * 1000; + wallclock_ratio = 2; + } else if (max_ratio >= 2) { + dto_phase = 48 * 1000; + wallclock_ratio = 1; + } else { + dto_phase = 24 * 1000; + wallclock_ratio = 0; + } + dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK; + dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio); + WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl); + /* XXX two dtos; generally use dto0 for hdmi */ /* Express [24MHz / target pixel clock] as an exact rational * number (coefficient of two integer numbers. DCCG_AUDIO_DTOx_PHASE * is the numerator, DCCG_AUDIO_DTOx_MODULE is the denominator */ - WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100); - WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100); WREG32(DCCG_AUDIO_DTO_SOURCE, DCCG_AUDIO_DTO0_SOURCE_SEL(radeon_crtc->crtc_id)); + WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase); + WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo); } diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index a7baf67aef6..0d582ac1dc3 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -497,6 +497,9 @@ #define DCCG_AUDIO_DTO0_MODULE 0x05b4 #define DCCG_AUDIO_DTO0_LOAD 0x05b8 #define DCCG_AUDIO_DTO0_CNTL 0x05bc +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0) +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7 +# define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0 #define DCCG_AUDIO_DTO1_PHASE 0x05c0 #define DCCG_AUDIO_DTO1_MODULE 0x05c4 diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 56bd4f3be4f..ccb4f8b5485 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -794,9 +794,13 @@ int ni_init_microcode(struct radeon_device *rdev) if ((rdev->family >= CHIP_BARTS) && (rdev->family <= CHIP_CAYMAN)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + } else if (rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "ni_mc: Bogus length %zu in firmware \"%s\"\n", rdev->mc_fw->size, fw_name); @@ -2079,6 +2083,8 @@ static int cayman_startup(struct radeon_device *rdev) /* enable aspm */ evergreen_program_aspm(rdev); + evergreen_mc_program(rdev); + if (rdev->flags & RADEON_IS_IGP) { if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) { r = ni_init_microcode(rdev); @@ -2107,7 +2113,6 @@ static int cayman_startup(struct radeon_device *rdev) if (r) return r; - evergreen_mc_program(rdev); r = cayman_pcie_gart_enable(rdev); if (r) return r; @@ -2286,7 +2291,7 @@ int cayman_suspend(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); - r600_uvd_rbc_stop(rdev); + r600_uvd_stop(rdev); radeon_uvd_suspend(rdev); evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); @@ -2418,6 +2423,7 @@ void cayman_fini(struct radeon_device *rdev) radeon_vm_manager_fini(rdev); radeon_ib_pool_fini(rdev); radeon_irq_kms_fini(rdev); + r600_uvd_stop(rdev); radeon_uvd_fini(rdev); cayman_pcie_gart_fini(rdev); r600_vram_scratch_fini(rdev); diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c index 559cf24d51a..f0f5f748938 100644 --- a/drivers/gpu/drm/radeon/ni_dpm.c +++ b/drivers/gpu/drm/radeon/ni_dpm.c @@ -1054,10 +1054,6 @@ static int ni_restrict_performance_levels_before_switch(struct 
radeon_device *rd int ni_dpm_force_performance_level(struct radeon_device *rdev, enum radeon_dpm_forced_level level) { - struct radeon_ps *rps = rdev->pm.dpm.current_ps; - struct ni_ps *ps = ni_get_ps(rps); - u32 levels; - if (level == RADEON_DPM_FORCED_LEVEL_HIGH) { if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK) return -EINVAL; @@ -1068,8 +1064,7 @@ int ni_dpm_force_performance_level(struct radeon_device *rdev, if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) return -EINVAL; - levels = ps->performance_level_count - 1; - if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) + if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) return -EINVAL; } else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) { if (ni_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) @@ -4072,9 +4067,6 @@ int ni_dpm_init(struct radeon_device *rdev) struct rv7xx_power_info *pi; struct evergreen_power_info *eg_pi; struct ni_power_info *ni_pi; - int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info); - u16 data_offset, size; - u8 frev, crev; struct atom_clock_dividers dividers; int ret; @@ -4167,16 +4159,7 @@ int ni_dpm_init(struct radeon_device *rdev) eg_pi->vddci_control = radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDCI, 0); - if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size, - &frev, &crev, &data_offset)) { - pi->sclk_ss = true; - pi->mclk_ss = true; - pi->dynamic_ss = true; - } else { - pi->sclk_ss = false; - pi->mclk_ss = false; - pi->dynamic_ss = true; - } + rv770_get_engine_memory_ss(rdev); pi->asi = RV770_ASI_DFLT; pi->pasi = CYPRESS_HASI_DFLT; @@ -4193,8 +4176,7 @@ int ni_dpm_init(struct radeon_device *rdev) pi->dynamic_pcie_gen2 = true; - if (pi->gfx_clock_gating && - (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)) + if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE) pi->thermal_protection = true; else pi->thermal_protection = false; diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index 393880a0941..e66e7207735 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -2299,9 +2299,13 @@ int r600_init_microcode(struct radeon_device *rdev) if ((rdev->family >= CHIP_RV770) && (rdev->family <= CHIP_HEMLOCK)) { snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", smc_chip_name); err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev); - if (err) - goto out; - if (rdev->smc_fw->size != smc_req_size) { + if (err) { + printk(KERN_ERR + "smc: error loading firmware \"%s\"\n", + fw_name); + release_firmware(rdev->smc_fw); + rdev->smc_fw = NULL; + } else if (rdev->smc_fw->size != smc_req_size) { printk(KERN_ERR "smc: Bogus length %zu in firmware \"%s\"\n", rdev->smc_fw->size, fw_name); @@ -2697,12 +2701,29 @@ int r600_uvd_rbc_start(struct radeon_device *rdev) return 0; } -void r600_uvd_rbc_stop(struct radeon_device *rdev) +void r600_uvd_stop(struct radeon_device *rdev) { struct radeon_ring *ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX]; /* force RBC into idle state */ WREG32(UVD_RBC_RB_CNTL, 0x11010101); + + /* Stall UMC and register bus before resetting VCPU */ + WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)); + WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); + mdelay(1); + + /* put VCPU into reset */ + WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET); + mdelay(5); + + /* disable VCPU clock */ + WREG32(UVD_VCPU_CNTL, 0x0); + 
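r600_uvd_stop(), whose final unstall writes follow just below, encodes a strict ordering: VCPU-originated traffic is stalled before the block is reset and clock-gated, and the stalls are released only afterwards. Condensed (illustrative; register and bit names as in the surrounding code):

    WREG32(UVD_RBC_RB_CNTL, 0x11010101);          /* 1. idle the ring */
    WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));   /* 2. stall UMC ... */
    WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3)); /*    ... and register bus */
    mdelay(1);
    WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);      /* 3. hold VCPU in reset */
    mdelay(5);
    WREG32(UVD_VCPU_CNTL, 0x0);                   /* 4. gate the VCPU clock */
    WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));        /* 5. release the stalls last */
    WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));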
+	/* Unstall UMC and register bus */
+	WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 0, ~(1 << 3));
+
 	ring->ready = false;
 }
@@ -2722,6 +2743,11 @@ int r600_uvd_init(struct radeon_device *rdev)
 	/* disable interrupt */
 	WREG32_P(UVD_MASTINT_EN, 0, ~(1 << 1));

+	/* Stall UMC and register bus before resetting VCPU */
+	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
+	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
+	mdelay(1);
+
 	/* put LMI, VCPU, RBC etc... into reset */
 	WREG32(UVD_SOFT_RESET, LMI_SOFT_RESET | VCPU_SOFT_RESET |
 	       LBSI_SOFT_RESET | RBC_SOFT_RESET | CSM_SOFT_RESET |
@@ -2751,10 +2777,6 @@ int r600_uvd_init(struct radeon_device *rdev)
 	WREG32(UVD_MPC_SET_ALU, 0);
 	WREG32(UVD_MPC_SET_MUX, 0x88);

-	/* Stall UMC */
-	WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8));
-	WREG32_P(UVD_RB_ARB_CTRL, 1 << 3, ~(1 << 3));
-
 	/* take all subblocks out of reset, except VCPU */
 	WREG32(UVD_SOFT_RESET, VCPU_SOFT_RESET);
 	mdelay(5);
@@ -3166,7 +3188,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
 	num_loops = DIV_ROUND_UP(size_in_bytes, 0x1fffff);
-	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 21);
+	r = radeon_ring_lock(rdev, ring, num_loops * 6 + 24);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
 		radeon_semaphore_free(rdev, &sem, NULL);
@@ -3181,6 +3203,9 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 		radeon_semaphore_free(rdev, &sem, NULL);
 	}

+	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
+	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
+	radeon_ring_write(ring, WAIT_3D_IDLE_bit);
 	for (i = 0; i < num_loops; i++) {
 		cur_size_in_bytes = size_in_bytes;
 		if (cur_size_in_bytes > 0x1fffff)
@@ -3309,6 +3334,8 @@ static int r600_startup(struct radeon_device *rdev)
 	/* enable pcie gen2 link */
 	r600_pcie_gen2_enable(rdev);

+	r600_mc_program(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -3321,7 +3348,6 @@ static int r600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;

-	r600_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		r600_agp_enable(rdev);
 	} else {
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index b88f54b134a..e5c860f4ccb 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -278,9 +278,9 @@ bool r600_dynamicpm_enabled(struct radeon_device *rdev)
 void r600_enable_sclk_control(struct radeon_device *rdev, bool enable)
 {
 	if (enable)
-		WREG32_P(GENERAL_PWRMGT, 0, ~SCLK_PWRMGT_OFF);
+		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
 	else
-		WREG32_P(GENERAL_PWRMGT, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
+		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
 }

 void r600_enable_mclk_control(struct radeon_device *rdev, bool enable)
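Many of these hunks (the UVD stall/unstall writes, the SCLK_PWRMGT_CNTL fix) lean on radeon's WREG32_P read-modify-write helper. A minimal sketch of the pattern it implements, with rreg32()/wreg32() as hypothetical stand-ins for the driver's MMIO accessors:

/* Read-modify-write: bits cleared in `mask` are replaced by `val`,
 * every other bit is preserved. */
static void wreg32_p(u32 reg, u32 val, u32 mask)
{
	u32 tmp = rreg32(reg);	/* read current register value */
	tmp &= mask;		/* keep only the bits to be preserved */
	tmp |= val;		/* merge in the new field value */
	wreg32(reg, tmp);
}

So WREG32_P(UVD_LMI_CTRL2, 1 << 8, ~(1 << 8)) sets only bit 8, and WREG32_P(UVD_LMI_CTRL2, 0, ~(1 << 8)) clears it again, which is exactly how the stall/unstall pairs above are written.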
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index f48240bb8c5..f264df5470f 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -226,10 +226,29 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 	struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
 	struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
 	u32 base_rate = 24000;
+	u32 max_ratio = clock / base_rate;
+	u32 dto_phase;
+	u32 dto_modulo = clock;
+	u32 wallclock_ratio;
+	u32 dto_cntl;

 	if (!dig || !dig->afmt)
 		return;

+	if (max_ratio >= 8) {
+		dto_phase = 192 * 1000;
+		wallclock_ratio = 3;
+	} else if (max_ratio >= 4) {
+		dto_phase = 96 * 1000;
+		wallclock_ratio = 2;
+	} else if (max_ratio >= 2) {
+		dto_phase = 48 * 1000;
+		wallclock_ratio = 1;
+	} else {
+		dto_phase = 24 * 1000;
+		wallclock_ratio = 0;
+	}
+
 	/* there are two DTOs selected by DCCG_AUDIO_DTO_SELECT.
 	 * doesn't matter which one you use. Just use the first one.
 	 */
@@ -242,9 +261,21 @@ void r600_audio_set_dto(struct drm_encoder *encoder, u32 clock)
 	/* according to the reg specs, this should be DCE3.2 only, but in
 	 * practice it seems to cover DCE3.0 as well.
 	 */
-	WREG32(DCCG_AUDIO_DTO0_PHASE, base_rate * 100);
-	WREG32(DCCG_AUDIO_DTO0_MODULE, clock * 100);
-	WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+	if (dig->dig_encoder == 0) {
+		dto_cntl = RREG32(DCCG_AUDIO_DTO0_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+		dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+		WREG32(DCCG_AUDIO_DTO0_CNTL, dto_cntl);
+		WREG32(DCCG_AUDIO_DTO0_PHASE, dto_phase);
+		WREG32(DCCG_AUDIO_DTO0_MODULE, dto_modulo);
+		WREG32(DCCG_AUDIO_DTO_SELECT, 0); /* select DTO0 */
+	} else {
+		dto_cntl = RREG32(DCCG_AUDIO_DTO1_CNTL) & ~DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK;
+		dto_cntl |= DCCG_AUDIO_DTO_WALLCLOCK_RATIO(wallclock_ratio);
+		WREG32(DCCG_AUDIO_DTO1_CNTL, dto_cntl);
+		WREG32(DCCG_AUDIO_DTO1_PHASE, dto_phase);
+		WREG32(DCCG_AUDIO_DTO1_MODULE, dto_modulo);
+		WREG32(DCCG_AUDIO_DTO_SELECT, 1); /* select DTO1 */
+	}
 	} else {
 		/* according to the reg specs, this should be DCE2.0 and DCE3.0 */
 		WREG32(AUDIO_DTO, AUDIO_DTO_PHASE(base_rate / 10) |
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 8e3fe815eda..7c780839a7f 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -933,6 +933,9 @@
 #define DCCG_AUDIO_DTO0_LOAD              0x051c
 #       define DTO_LOAD                   (1 << 31)
 #define DCCG_AUDIO_DTO0_CNTL              0x0520
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO(x) (((x) & 7) << 0)
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_MASK 7
+#       define DCCG_AUDIO_DTO_WALLCLOCK_RATIO_SHIFT 0

 #define DCCG_AUDIO_DTO1_PHASE             0x0524
 #define DCCG_AUDIO_DTO1_MODULE            0x0528
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2f08219c39b..9f19259667d 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1468,7 +1468,6 @@ struct radeon_uvd {
 	void			*cpu_addr;
 	uint64_t		gpu_addr;
 	void			*saved_bo;
-	unsigned		fw_size;
 	atomic_t		handles[RADEON_MAX_UVD_HANDLES];
 	struct drm_file		*filp[RADEON_MAX_UVD_HANDLES];
 	struct delayed_work	idle_work;
@@ -2066,6 +2065,7 @@ struct radeon_device {
 	const struct firmware *mec_fw;	/* CIK MEC firmware */
 	const struct firmware *sdma_fw;	/* CIK SDMA firmware */
 	const struct firmware *smc_fw;	/* SMC firmware */
+	const struct firmware *uvd_fw;	/* UVD firmware */
 	struct r600_blit r600_blit;
 	struct r600_vram_scratch vram_scratch;
 	int msi_enabled; /* msi enabled */
@@ -2095,6 +2095,8 @@ struct radeon_device {
 	/* ACPI interface */
 	struct radeon_atif atif;
 	struct radeon_atcs atcs;
+	/* srbm instance registers */
+	struct mutex srbm_mutex;
 };

 int radeon_device_init(struct radeon_device *rdev,
@@ -2161,7 +2163,7 @@ void cik_mm_wdoorbell(struct radeon_device *rdev, u32 offset, u32 v);
 		WREG32(reg, tmp_);				\
 	} while (0)
 #define WREG32_AND(reg, and) WREG32_P(reg, 0, and)
-#define WREG32_OR(reg, or) WREG32_P(reg, or, ~or)
+#define WREG32_OR(reg, or) WREG32_P(reg, or, ~(or))
 #define WREG32_PLL_P(reg, val, mask)				\
 	do {							\
 		uint32_t tmp_ = RREG32_PLL(reg);		\
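The WREG32_OR change just above is the classic unparenthesized-macro-argument bug: without the parentheses, the complement binds only to the first operand of a compound argument. A standalone demonstration in plain C (toy macros and values, not driver code):

#include <stdio.h>
#include <stdint.h>

/* MASK_BAD(a | b) expands to (~a | b); MASK_GOOD(a | b) to ~(a | b). */
#define MASK_BAD(or)  (~or)
#define MASK_GOOD(or) (~(or))

int main(void)
{
	uint32_t a = 0x1, b = 0x2;
	printf("bad:  0x%08x\n", MASK_BAD(a | b));  /* 0xfffffffe: b leaks through */
	printf("good: 0x%08x\n", MASK_GOOD(a | b)); /* 0xfffffffc: both bits masked */
	return 0;
}

With the old definition, WREG32_OR(reg, A | B) would preserve bit A instead of forcing it on whenever B happened to overlap the mask.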
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
index 78bec1a58ed..f8f8b3113dd 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.c
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -1161,6 +1161,7 @@ static struct radeon_asic rv6xx_asic = {
 		.get_mclk = &rv6xx_dpm_get_mclk,
 		.print_power_state = &rv6xx_dpm_print_power_state,
 		.debugfs_print_current_performance_level = &rv6xx_dpm_debugfs_print_current_performance_level,
+		.force_performance_level = &rv6xx_dpm_force_performance_level,
 	},
 	.pflip = {
 		.pre_page_flip = &rs600_pre_page_flip,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index ca189570990..3d61d5aac18 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -421,6 +421,8 @@ void rv6xx_dpm_print_power_state(struct radeon_device *rdev,
 				 struct radeon_ps *ps);
 void rv6xx_dpm_debugfs_print_current_performance_level(struct radeon_device *rdev,
 						       struct seq_file *m);
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level);
 /* rs780 dpm */
 int rs780_dpm_init(struct radeon_device *rdev);
 int rs780_dpm_enable(struct radeon_device *rdev);
@@ -439,7 +441,7 @@ void rs780_dpm_debugfs_print_current_performance_level(struct radeon_device *rde
 /* uvd */
 int r600_uvd_init(struct radeon_device *rdev);
 int r600_uvd_rbc_start(struct radeon_device *rdev);
-void r600_uvd_rbc_stop(struct radeon_device *rdev);
+void r600_uvd_stop(struct radeon_device *rdev);
 int r600_uvd_ib_test(struct radeon_device *rdev, struct radeon_ring *ring);
 void r600_uvd_fence_emit(struct radeon_device *rdev,
 			 struct radeon_fence *fence);
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index e3f3e884178..4ccd61f60eb 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -2782,7 +2782,7 @@ int radeon_atom_get_clock_dividers(struct radeon_device *rdev,
 			ATOM_PLL_CNTL_FLAG_PLL_POST_DIV_EN) ? true : false;
 		dividers->enable_dithen = (args.v3.ucCntlFlag &
 			ATOM_PLL_CNTL_FLAG_FRACTION_DISABLE) ? false : true;
-		dividers->fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
+		dividers->whole_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDiv);
 		dividers->frac_fb_div = le16_to_cpu(args.v3.ulFbDiv.usFbDivFrac);
 		dividers->ref_div = args.v3.ucRefDiv;
 		dividers->vco_mode = (args.v3.ucCntlFlag &
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 78edadc9e86..68ce3605601 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
 					 enum radeon_combios_table_offset table)
 {
 	struct radeon_device *rdev = dev->dev_private;
-	int rev;
+	int rev, size;
 	uint16_t offset = 0, check_offset;

 	if (!rdev->bios)
@@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
 	switch (table) {
 		/* absolute offset tables */
 	case COMBIOS_ASIC_INIT_1_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0xc);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0xc;
 		break;
 	case COMBIOS_BIOS_SUPPORT_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x14);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x14;
 		break;
 	case COMBIOS_DAC_PROGRAMMING_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x2a);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x2a;
 		break;
 	case COMBIOS_MAX_COLOR_DEPTH_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x2c);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x2c;
 		break;
 	case COMBIOS_CRTC_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x2e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x2e;
 		break;
 	case COMBIOS_PLL_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x30);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x30;
 		break;
 	case COMBIOS_TV_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x32);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x32;
 		break;
 	case COMBIOS_DFP_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x34);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x34;
 		break;
 	case COMBIOS_HW_CONFIG_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x36);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x36;
 		break;
 	case COMBIOS_MULTIMEDIA_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x38);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x38;
 		break;
 	case COMBIOS_TV_STD_PATCH_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x3e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x3e;
 		break;
 	case COMBIOS_LCD_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x40);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x40;
 		break;
 	case COMBIOS_MOBILE_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x42);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x42;
 		break;
 	case COMBIOS_PLL_INIT_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x46);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x46;
 		break;
 	case COMBIOS_MEM_CONFIG_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x48);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x48;
 		break;
 	case COMBIOS_SAVE_MASK_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x4a);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x4a;
 		break;
 	case COMBIOS_HARDCODED_EDID_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x4c);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x4c;
 		break;
 	case COMBIOS_ASIC_INIT_2_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x4e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x4e;
 		break;
 	case COMBIOS_CONNECTOR_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x50);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x50;
 		break;
 	case COMBIOS_DYN_CLK_1_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x52);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x52;
 		break;
 	case COMBIOS_RESERVED_MEM_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x54);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x54;
 		break;
 	case COMBIOS_EXT_TMDS_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x58);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x58;
 		break;
 	case COMBIOS_MEM_CLK_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x5a);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x5a;
 		break;
 	case COMBIOS_EXT_DAC_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x5c);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x5c;
 		break;
 	case COMBIOS_MISC_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x5e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x5e;
 		break;
 	case COMBIOS_CRT_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x60);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x60;
 		break;
 	case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x62);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x62;
 		break;
 	case COMBIOS_COMPONENT_VIDEO_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x64);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x64;
 		break;
 	case COMBIOS_FAN_SPEED_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x66);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x66;
 		break;
 	case COMBIOS_OVERDRIVE_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x68);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x68;
 		break;
 	case COMBIOS_OEM_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x6a);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x6a;
 		break;
 	case COMBIOS_DYN_CLK_2_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x6c);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x6c;
 		break;
 	case COMBIOS_POWER_CONNECTOR_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x6e);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x6e;
 		break;
 	case COMBIOS_I2C_INFO_TABLE:
-		check_offset = RBIOS16(rdev->bios_header_start + 0x70);
-		if (check_offset)
-			offset = check_offset;
+		check_offset = 0x70;
 		break;
 		/* relative offset tables */
 	case COMBIOS_ASIC_INIT_3_TABLE:	/* offset from misc info */
@@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev,
 		}
 		break;
 	default:
+		check_offset = 0;
 		break;
 	}

-	return offset;
+	size = RBIOS8(rdev->bios_header_start + 0x6);
+	/* check absolute offset tables */
+	if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size)
+		offset = RBIOS16(rdev->bios_header_start + check_offset);

+	return offset;
 }

 bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev)
@@ -965,16 +902,22 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct
 		dac = RBIOS8(dac_info + 0x3) & 0xf;
 		p_dac->ps2_pdac_adj = (bg << 8) | (dac);
 	}
-	/* if the values are all zeros, use the table */
-	if (p_dac->ps2_pdac_adj)
+	/* if the values are zeros, use the table */
+	if ((dac == 0) || (bg == 0))
+		found = 0;
+	else
 		found = 1;

 	/* quirks */
+	/* Radeon 7000 (RV100) */
+	if (((dev->pdev->device == 0x5159) &&
+	     (dev->pdev->subsystem_vendor == 0x174B) &&
+	     (dev->pdev->subsystem_device == 0x7c28)) ||
 	/* Radeon 9100 (R200) */
-	if ((dev->pdev->device == 0x514D) &&
+	    ((dev->pdev->device == 0x514D) &&
 	     (dev->pdev->subsystem_vendor == 0x174B) &&
-	     (dev->pdev->subsystem_device == 0x7149)) {
+	     (dev->pdev->subsystem_device == 0x7149))) {
 		/* vbios value is bad, use the default */
 		found = 0;
 	}
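The combios refactor above collapses thirty-odd copy-pasted RBIOS16 reads into one table-driven lookup that is also range-checked against the BIOS header length, so a truncated header can no longer yield a bogus table pointer. A minimal sketch of that pattern, with read8()/read16() and bios_header_start as hypothetical stand-ins for the RBIOS accessors:

/* Each case now only records where the 16-bit table pointer lives in
 * the header; one shared tail validates the slot before dereferencing. */
static uint16_t table_offset(uint16_t hdr_slot)
{
	uint8_t size = read8(bios_header_start + 0x6);	/* header length */

	if (hdr_slot == 0 || hdr_slot >= size)
		return 0;	/* slot outside the header: treat as absent */
	return read16(bios_header_start + hdr_slot);
}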
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 82335e38ec4..63398ae1dbf 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1163,6 +1163,7 @@ int radeon_device_init(struct radeon_device *rdev,
 	mutex_init(&rdev->gem.mutex);
 	mutex_init(&rdev->pm.mutex);
 	mutex_init(&rdev->gpu_clock_mutex);
+	mutex_init(&rdev->srbm_mutex);
 	init_rwsem(&rdev->pm.mclk_lock);
 	init_rwsem(&rdev->exclusive_lock);
 	init_waitqueue_head(&rdev->irq.vblank_queue);
@@ -1519,6 +1520,7 @@ int radeon_gpu_reset(struct radeon_device *rdev)
 	radeon_save_bios_scratch_regs(rdev);
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&rdev->mman.bdev);
+	radeon_pm_suspend(rdev);
 	radeon_suspend(rdev);

 	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
@@ -1564,6 +1566,7 @@ retry:
 		}
 	}

+	radeon_pm_resume(rdev);
 	drm_helper_resume_force_mode(rdev->ddev);

 	ttm_bo_unlock_delayed_workqueue(&rdev->mman.bdev, resched);
diff --git a/drivers/gpu/drm/radeon/radeon_fence.c b/drivers/gpu/drm/radeon/radeon_fence.c
index 7ddb0efe240..ddb8f8e04eb 100644
--- a/drivers/gpu/drm/radeon/radeon_fence.c
+++ b/drivers/gpu/drm/radeon/radeon_fence.c
@@ -782,7 +782,7 @@ int radeon_fence_driver_start_ring(struct radeon_device *rdev, int ring)
 		} else {
 			/* put fence directly behind firmware */
-			index = ALIGN(rdev->uvd.fw_size, 8);
+			index = ALIGN(rdev->uvd_fw->size, 8);
 			rdev->fence_drv[ring].cpu_addr = rdev->uvd.cpu_addr + index;
 			rdev->fence_drv[ring].gpu_addr = rdev->uvd.gpu_addr + index;
 		}
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index d9d31a38327..b990b1a2bd5 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -207,7 +207,6 @@ void radeon_gart_table_vram_free(struct radeon_device *rdev)
 	if (rdev->gart.robj == NULL) {
 		return;
 	}
-	radeon_gart_table_vram_unpin(rdev);
 	radeon_bo_unref(&rdev->gart.robj);
 }
@@ -466,7 +465,7 @@ int radeon_vm_manager_init(struct radeon_device *rdev)
 		size += rdev->vm_manager.max_pfn * 8;
 		size *= 2;
 		r = radeon_sa_bo_manager_init(rdev, &rdev->vm_manager.sa_manager,
-					      RADEON_VM_PTB_ALIGN(size),
+					      RADEON_GPU_PAGE_ALIGN(size),
 					      RADEON_VM_PTB_ALIGN_SIZE,
 					      RADEON_GEM_DOMAIN_VRAM);
 		if (r) {
@@ -621,7 +620,7 @@ int radeon_vm_alloc_pt(struct radeon_device *rdev, struct radeon_vm *vm)
 	}

retry:
-	pd_size = RADEON_VM_PTB_ALIGN(radeon_vm_directory_size(rdev));
+	pd_size = radeon_vm_directory_size(rdev);
 	r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
 			     &vm->page_directory, pd_size,
 			     RADEON_VM_PTB_ALIGN_SIZE, false);
@@ -953,8 +952,8 @@ static int radeon_vm_update_pdes(struct radeon_device *rdev,
retry:
			r = radeon_sa_bo_new(rdev, &rdev->vm_manager.sa_manager,
					     &vm->page_tables[pt_idx],
-					     RADEON_VM_PTB_ALIGN(RADEON_VM_PTE_COUNT * 8),
-					     RADEON_VM_PTB_ALIGN_SIZE, false);
+					     RADEON_VM_PTE_COUNT * 8,
+					     RADEON_GPU_PAGE_SIZE, false);

 			if (r == -ENOMEM) {
 				r = radeon_vm_evict(rdev, vm);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index f374c467aac..c557850cd34 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1176,7 +1176,14 @@ int radeon_pm_init(struct radeon_device *rdev)
 	case CHIP_VERDE:
 	case CHIP_OLAND:
 	case CHIP_HAINAN:
-		if (radeon_dpm == 1)
+		/* DPM requires the RLC, RV770+ dGPU requires SMC */
+		if (!rdev->rlc_fw)
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if ((rdev->family >= CHIP_RV770) &&
+			 (!(rdev->flags & RADEON_IS_IGP)) &&
+			 (!rdev->smc_fw))
+			rdev->pm.pm_method = PM_METHOD_PROFILE;
+		else if (radeon_dpm == 1)
 			rdev->pm.pm_method = PM_METHOD_DPM;
 		else
 			rdev->pm.pm_method = PM_METHOD_PROFILE;
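The radeon_pm.c hunk above encodes a fallback ladder: DPM is only selected when every firmware piece it depends on actually loaded, which pairs with the earlier change making the SMC firmware optional at load time. A compact restatement of the decision as a predicate (sketch, hypothetical helper, not driver code):

/* DPM needs the RLC microcode everywhere, plus SMC firmware on
 * RV770-and-later discrete parts; otherwise fall back to profile PM. */
static bool can_use_dpm(bool has_rlc_fw, bool has_smc_fw,
			bool is_rv770_or_later, bool is_igp)
{
	if (!has_rlc_fw)
		return false;		/* no RLC: no DPM at all */
	if (is_rv770_or_later && !is_igp && !has_smc_fw)
		return false;		/* dGPU without SMC: fall back */
	return true;
}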
diff --git a/drivers/gpu/drm/radeon/radeon_uvd.c b/drivers/gpu/drm/radeon/radeon_uvd.c
index 414fd145d20..b79f4f5cdd6 100644
--- a/drivers/gpu/drm/radeon/radeon_uvd.c
+++ b/drivers/gpu/drm/radeon/radeon_uvd.c
@@ -56,7 +56,6 @@ static void radeon_uvd_idle_work_handler(struct work_struct *work);

 int radeon_uvd_init(struct radeon_device *rdev)
 {
-	const struct firmware *fw;
 	unsigned long bo_size;
 	const char *fw_name;
 	int i, r;
@@ -105,14 +104,14 @@ int radeon_uvd_init(struct radeon_device *rdev)
 		return -EINVAL;
 	}

-	r = request_firmware(&fw, fw_name, rdev->dev);
+	r = request_firmware(&rdev->uvd_fw, fw_name, rdev->dev);
 	if (r) {
 		dev_err(rdev->dev, "radeon_uvd: Can't load firmware \"%s\"\n",
 			fw_name);
 		return r;
 	}

-	bo_size = RADEON_GPU_PAGE_ALIGN(fw->size + 8) +
+	bo_size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 8) +
 		  RADEON_UVD_STACK_SIZE + RADEON_UVD_HEAP_SIZE;
 	r = radeon_bo_create(rdev, bo_size, PAGE_SIZE, true,
 			     RADEON_GEM_DOMAIN_VRAM, NULL, &rdev->uvd.vcpu_bo);
@@ -145,12 +144,6 @@ int radeon_uvd_init(struct radeon_device *rdev)

 	radeon_bo_unreserve(rdev->uvd.vcpu_bo);

-	rdev->uvd.fw_size = fw->size;
-	memset(rdev->uvd.cpu_addr, 0, bo_size);
-	memcpy(rdev->uvd.cpu_addr, fw->data, fw->size);
-
-	release_firmware(fw);
-
 	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
 		atomic_set(&rdev->uvd.handles[i], 0);
 		rdev->uvd.filp[i] = NULL;
@@ -174,33 +167,60 @@ void radeon_uvd_fini(struct radeon_device *rdev)
 	}

 	radeon_bo_unref(&rdev->uvd.vcpu_bo);
+
+	release_firmware(rdev->uvd_fw);
 }

 int radeon_uvd_suspend(struct radeon_device *rdev)
 {
 	unsigned size;
+	void *ptr;
+	int i;

 	if (rdev->uvd.vcpu_bo == NULL)
 		return 0;

+	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
+		if (atomic_read(&rdev->uvd.handles[i]))
+			break;
+
+	if (i == RADEON_MAX_UVD_HANDLES)
+		return 0;
+
 	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
 	rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-	memcpy(rdev->uvd.saved_bo, rdev->uvd.cpu_addr, size);
+	memcpy(rdev->uvd.saved_bo, ptr, size);

 	return 0;
 }

 int radeon_uvd_resume(struct radeon_device *rdev)
 {
+	unsigned size;
+	void *ptr;
+
 	if (rdev->uvd.vcpu_bo == NULL)
 		return -EINVAL;

+	memcpy(rdev->uvd.cpu_addr, rdev->uvd_fw->data, rdev->uvd_fw->size);
+
+	size = radeon_bo_size(rdev->uvd.vcpu_bo);
+	size -= rdev->uvd_fw->size;
+
+	ptr = rdev->uvd.cpu_addr;
+	ptr += rdev->uvd_fw->size;
+
 	if (rdev->uvd.saved_bo != NULL) {
-		unsigned size = radeon_bo_size(rdev->uvd.vcpu_bo);
-		memcpy(rdev->uvd.cpu_addr, rdev->uvd.saved_bo, size);
+		memcpy(ptr, rdev->uvd.saved_bo, size);
 		kfree(rdev->uvd.saved_bo);
 		rdev->uvd.saved_bo = NULL;
-	}
+	} else
+		memset(ptr, 0, size);

 	return 0;
 }
@@ -215,8 +235,8 @@ void radeon_uvd_free_handles(struct radeon_device *rdev, struct drm_file *filp)
 {
 	int i, r;
 	for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-		if (rdev->uvd.filp[i] == filp) {
-			uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+		if (handle != 0 && rdev->uvd.filp[i] == filp) {
 			struct radeon_fence *fence;

 			r = radeon_uvd_get_destroy_msg(rdev,
@@ -336,9 +356,19 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		return -EINVAL;
 	}

+	if (bo->tbo.sync_obj) {
+		r = radeon_fence_wait(bo->tbo.sync_obj, false);
+		if (r) {
+			DRM_ERROR("Failed waiting for UVD message (%d)!\n", r);
+			return r;
+		}
+	}
+
 	r = radeon_bo_kmap(bo, &ptr);
-	if (r)
+	if (r) {
+		DRM_ERROR("Failed mapping the UVD message (%d)!\n", r);
 		return r;
+	}

 	msg = ptr + offset;
@@ -364,8 +394,14 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
 		radeon_bo_kunmap(bo);
 		return 0;
 	} else {
-		/* it's a create msg, no special handling needed */
 		radeon_bo_kunmap(bo);
+
+		if (msg_type != 0) {
+			DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+			return -EINVAL;
+		}
+
+		/* it's a create msg, no special handling needed */
 	}

 	/* create or decode, validate the handle */
@@ -388,7 +424,7 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,

 static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 			       int data0, int data1,
-			       unsigned buf_sizes[])
+			       unsigned buf_sizes[], bool *has_msg_cmd)
 {
 	struct radeon_cs_chunk *relocs_chunk;
 	struct radeon_cs_reloc *reloc;
@@ -417,7 +453,7 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,

 	if (cmd < 0x4) {
 		if ((end - start) < buf_sizes[cmd]) {
-			DRM_ERROR("buffer too small (%d / %d)!\n",
+			DRM_ERROR("buffer (%d) too small (%d / %d)!\n",
 				  cmd, (unsigned)(end - start), buf_sizes[cmd]);
 			return -EINVAL;
 		}
@@ -442,9 +478,17 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 	}

 	if (cmd == 0) {
+		if (*has_msg_cmd) {
+			DRM_ERROR("More than one message in a UVD-IB!\n");
+			return -EINVAL;
+		}
+		*has_msg_cmd = true;
 		r = radeon_uvd_cs_msg(p, reloc->robj, offset, buf_sizes);
 		if (r)
 			return r;
+	} else if (!*has_msg_cmd) {
+		DRM_ERROR("Message needed before other commands are sent!\n");
+		return -EINVAL;
 	}

 	return 0;
@@ -453,7 +497,8 @@ static int radeon_uvd_cs_reloc(struct radeon_cs_parser *p,
 static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
 			     struct radeon_cs_packet *pkt,
 			     int *data0, int *data1,
-			     unsigned buf_sizes[])
+			     unsigned buf_sizes[],
+			     bool *has_msg_cmd)
 {
 	int i, r;
@@ -467,7 +512,8 @@ static int radeon_uvd_cs_reg(struct radeon_cs_parser *p,
 		*data1 = p->idx;
 		break;
 	case UVD_GPCOM_VCPU_CMD:
-		r = radeon_uvd_cs_reloc(p, *data0, *data1, buf_sizes);
+		r = radeon_uvd_cs_reloc(p, *data0, *data1,
+					buf_sizes, has_msg_cmd);
 		if (r)
 			return r;
 		break;
@@ -488,6 +534,9 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 	struct radeon_cs_packet pkt;
 	int r, data0 = 0, data1 = 0;

+	/* does the IB have a msg command */
+	bool has_msg_cmd = false;
+
 	/* minimum buffer sizes */
 	unsigned buf_sizes[] = {
 		[0x00000000] = 2048,
@@ -514,8 +563,8 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 			return r;
 		switch (pkt.type) {
 		case RADEON_PACKET_TYPE0:
-			r = radeon_uvd_cs_reg(p, &pkt, &data0,
-					      &data1, buf_sizes);
+			r = radeon_uvd_cs_reg(p, &pkt, &data0, &data1,
					      buf_sizes, &has_msg_cmd);
 			if (r)
 				return r;
 			break;
@@ -527,6 +576,12 @@ int radeon_uvd_cs_parse(struct radeon_cs_parser *p)
 			return -EINVAL;
 		}
 	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
+
+	if (!has_msg_cmd) {
+		DRM_ERROR("UVD-IBs need a msg command!\n");
+		return -EINVAL;
+	}
+
 	return 0;
 }
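The has_msg_cmd flag threaded through the parser enforces "exactly one message command, and it comes before everything else" per UVD IB. Stripped of the packet plumbing, the rule reduces to a small scan (sketch in kernel-flavoured C; the flat command array is a hypothetical simplification of the real packet stream):

/* Command 0 carries the UVD message; it must appear exactly once and
 * precede every other command in the IB. */
static int check_uvd_ib(const unsigned *cmds, unsigned count)
{
	bool has_msg = false;
	unsigned i;

	for (i = 0; i < count; i++) {
		if (cmds[i] == 0) {		/* message command */
			if (has_msg)
				return -EINVAL;	/* second message: reject */
			has_msg = true;
		} else if (!has_msg) {
			return -EINVAL;		/* payload before the message */
		}
	}
	return has_msg ? 0 : -EINVAL;		/* IB without a message: reject */
}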
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index 65e33f38734..bdd888b4db2 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -819,7 +819,7 @@ static void rv6xx_program_memory_timing_parameters(struct radeon_device *rdev)
 		 POWERMODE1(calculate_memory_refresh_rate(rdev,
							   pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
		 POWERMODE2(calculate_memory_refresh_rate(rdev,
-							   pi->hw.sclks[R600_POWER_LEVEL_MEDIUM])) |
+							   pi->hw.sclks[R600_POWER_LEVEL_HIGH])) |
		 POWERMODE3(calculate_memory_refresh_rate(rdev,
							   pi->hw.sclks[R600_POWER_LEVEL_HIGH])));
 	WREG32(ARB_RFSH_RATE, arb_refresh_rate);
@@ -1182,10 +1182,10 @@ static void rv6xx_program_display_gap(struct radeon_device *rdev)
 	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);

 	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
-	if (RREG32(AVIVO_D1CRTC_CONTROL) & AVIVO_CRTC_EN) {
+	if (rdev->pm.dpm.new_active_crtcs & 1) {
 		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
 		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
-	} else if (RREG32(AVIVO_D2CRTC_CONTROL) & AVIVO_CRTC_EN) {
+	} else if (rdev->pm.dpm.new_active_crtcs & 2) {
 		tmp |= DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE);
 		tmp |= DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK);
 	} else {
@@ -1670,6 +1670,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)
 	struct radeon_ps *old_ps = rdev->pm.dpm.current_ps;
 	int ret;

+	pi->restricted_levels = 0;
+
 	rv6xx_set_uvd_clock_before_set_eng_clock(rdev, new_ps, old_ps);

 	rv6xx_clear_vc(rdev);
@@ -1756,6 +1758,8 @@ int rv6xx_dpm_set_power_state(struct radeon_device *rdev)

 	rv6xx_set_uvd_clock_after_set_eng_clock(rdev, new_ps, old_ps);

+	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
+
 	return 0;
 }
@@ -1940,9 +1944,7 @@ static int rv6xx_parse_power_table(struct radeon_device *rdev)

 int rv6xx_dpm_init(struct radeon_device *rdev)
 {
-	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-	uint16_t data_offset, size;
-	uint8_t frev, crev;
+	struct radeon_atom_ss ss;
 	struct atom_clock_dividers dividers;
 	struct rv6xx_power_info *pi;
 	int ret;
@@ -1985,16 +1987,18 @@ int rv6xx_dpm_init(struct radeon_device *rdev)

 	pi->gfx_clock_gating = true;

-	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-				   &frev, &crev, &data_offset)) {
-		pi->sclk_ss = true;
-		pi->mclk_ss = true;
+	pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
						       ASIC_INTERNAL_ENGINE_SS, 0);
+	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
						       ASIC_INTERNAL_MEMORY_SS, 0);
+
+	/* Disable sclk ss, causes hangs on a lot of systems */
+	pi->sclk_ss = false;
+
+	if (pi->sclk_ss || pi->mclk_ss)
 		pi->dynamic_ss = true;
-	} else {
-		pi->sclk_ss = false;
-		pi->mclk_ss = false;
+	else
 		pi->dynamic_ss = false;
-	}

 	pi->dynamic_pcie_gen2 = true;
@@ -2085,3 +2089,34 @@ u32 rv6xx_dpm_get_mclk(struct radeon_device *rdev, bool low)
 	else
 		return requested_state->high.mclk;
 }
+
+int rv6xx_dpm_force_performance_level(struct radeon_device *rdev,
+				      enum radeon_dpm_forced_level level)
+{
+	struct rv6xx_power_info *pi = rv6xx_get_pi(rdev);
+
+	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
+		pi->restricted_levels = 3;
+	} else if (level == RADEON_DPM_FORCED_LEVEL_LOW) {
+		pi->restricted_levels = 2;
+	} else {
+		pi->restricted_levels = 0;
+	}
+
+	rv6xx_clear_vc(rdev);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, true);
+	r600_set_at(rdev, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF);
+	r600_wait_for_power_level(rdev, R600_POWER_LEVEL_LOW);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_HIGH, false);
+	r600_power_level_enable(rdev, R600_POWER_LEVEL_MEDIUM, false);
+	rv6xx_enable_medium(rdev);
+	rv6xx_enable_high(rdev);
+	if (pi->restricted_levels == 3)
+		r600_power_level_enable(rdev, R600_POWER_LEVEL_LOW, false);
+	rv6xx_program_vc(rdev);
+	rv6xx_program_at(rdev);
+
+	rdev->pm.dpm.forced_level = level;
+
+	return 0;
+}
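The rv770.c hunks just below fix a family of copy-paste bugs where a rv730/rv710/rv740 table was registered with the ARRAY_SIZE of a different table. Binding the table and its length in one macro makes that mistake impossible to write; a sketch (PROGRAM_REG_TABLE and program_regs are invented names, and the kernel already provides ARRAY_SIZE in kernel.h):

/* Derive the length from the same identifier as the table itself, so a
 * rv730 table can never again be paired with ARRAY_SIZE(rv770_...). */
#define PROGRAM_REG_TABLE(rdev, tbl) \
	program_regs((rdev), (tbl), (const u32)ARRAY_SIZE(tbl))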
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 30ea14e8854..f5e92cfcc14 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -744,10 +744,10 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
 		radeon_program_register_sequence(rdev,
						 rv730_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv730_golden_registers));
 		radeon_program_register_sequence(rdev,
						 rv730_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv730_mgcg_init));
 		break;
 	case CHIP_RV710:
 		radeon_program_register_sequence(rdev,
@@ -758,18 +758,18 @@ static void rv770_init_golden_registers(struct radeon_device *rdev)
						 (const u32)ARRAY_SIZE(r7xx_golden_dyn_gpr_registers));
 		radeon_program_register_sequence(rdev,
						 rv710_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv710_golden_registers));
 		radeon_program_register_sequence(rdev,
						 rv710_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv710_mgcg_init));
 		break;
 	case CHIP_RV740:
 		radeon_program_register_sequence(rdev,
						 rv740_golden_registers,
-						 (const u32)ARRAY_SIZE(rv770_golden_registers));
+						 (const u32)ARRAY_SIZE(rv740_golden_registers));
 		radeon_program_register_sequence(rdev,
						 rv740_mgcg_init,
-						 (const u32)ARRAY_SIZE(rv770_mgcg_init));
+						 (const u32)ARRAY_SIZE(rv740_mgcg_init));
 		break;
 	default:
 		break;
@@ -813,7 +813,7 @@ int rv770_uvd_resume(struct radeon_device *rdev)

 	/* program the VCPU memory controller bits 0-27 */
 	addr = rdev->uvd.gpu_addr >> 3;
-	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd.fw_size + 4) >> 3;
+	size = RADEON_GPU_PAGE_ALIGN(rdev->uvd_fw->size + 4) >> 3;
 	WREG32(UVD_VCPU_CACHE_OFFSET0, addr);
 	WREG32(UVD_VCPU_CACHE_SIZE0, size);
@@ -1829,6 +1829,8 @@ static int rv770_startup(struct radeon_device *rdev)
 	/* enable pcie gen2 link */
 	rv770_pcie_gen2_enable(rdev);

+	rv770_mc_program(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->rlc_fw) {
 		r = r600_init_microcode(rdev);
 		if (r) {
@@ -1841,7 +1843,6 @@ static int rv770_startup(struct radeon_device *rdev)
 	if (r)
 		return r;

-	rv770_mc_program(rdev);
 	if (rdev->flags & RADEON_IS_AGP) {
 		rv770_agp_enable(rdev);
 	} else {
@@ -1983,6 +1984,7 @@ int rv770_resume(struct radeon_device *rdev)
 int rv770_suspend(struct radeon_device *rdev)
 {
 	r600_audio_fini(rdev);
+	r600_uvd_stop(rdev);
 	radeon_uvd_suspend(rdev);
 	r700_cp_stop(rdev);
 	r600_dma_stop(rdev);
@@ -2098,6 +2100,7 @@ void rv770_fini(struct radeon_device *rdev)
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
 	rv770_pcie_gart_fini(rdev);
+	r600_uvd_stop(rdev);
 	radeon_uvd_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 2d347925f77..094c67a29d0 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2319,12 +2319,25 @@ int rv7xx_parse_power_table(struct radeon_device *rdev)
 	return 0;
 }

+void rv770_get_engine_memory_ss(struct radeon_device *rdev)
+{
+	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
+	struct radeon_atom_ss ss;
+
+	pi->sclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
						       ASIC_INTERNAL_ENGINE_SS, 0);
+	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
						       ASIC_INTERNAL_MEMORY_SS, 0);
+
+	if (pi->sclk_ss || pi->mclk_ss)
+		pi->dynamic_ss = true;
+	else
+		pi->dynamic_ss = false;
+}
+
 int rv770_dpm_init(struct radeon_device *rdev)
 {
 	struct rv7xx_power_info *pi;
-	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-	uint16_t data_offset, size;
-	uint8_t frev, crev;
 	struct atom_clock_dividers dividers;
 	int ret;
@@ -2369,16 +2382,7 @@ int rv770_dpm_init(struct radeon_device *rdev)
 	pi->mvdd_control =
 		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_MVDDC, 0);

-	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-				   &frev, &crev, &data_offset)) {
-		pi->sclk_ss = true;
-		pi->mclk_ss = true;
-		pi->dynamic_ss = true;
-	} else {
-		pi->sclk_ss = false;
-		pi->mclk_ss = false;
-		pi->dynamic_ss = false;
-	}
+	rv770_get_engine_memory_ss(rdev);

 	pi->asi = RV770_ASI_DFLT;
 	pi->pasi = RV770_HASI_DFLT;
@@ -2393,8 +2397,7 @@ int rv770_dpm_init(struct radeon_device *rdev)

 	pi->dynamic_pcie_gen2 = true;

-	if (pi->gfx_clock_gating &&
-	    (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
 		pi->thermal_protection = true;
 	else
 		pi->thermal_protection = false;
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.h b/drivers/gpu/drm/radeon/rv770_dpm.h
index 96b1b2a62a8..9244effc6b5 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.h
+++ b/drivers/gpu/drm/radeon/rv770_dpm.h
@@ -275,6 +275,7 @@ void rv770_set_uvd_clock_before_set_eng_clock(struct radeon_device *rdev,
 void rv770_set_uvd_clock_after_set_eng_clock(struct radeon_device *rdev,
					     struct radeon_ps *new_ps,
					     struct radeon_ps *old_ps);
+void rv770_get_engine_memory_ss(struct radeon_device *rdev);

 /* smc */
 int rv770_read_smc_soft_register(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index d325280e2f9..daa8d2df8ec 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -1663,9 +1663,13 @@ static int si_init_microcode(struct radeon_device *rdev)
 	snprintf(fw_name, sizeof(fw_name), "radeon/%s_smc.bin", chip_name);
 	err = request_firmware(&rdev->smc_fw, fw_name, rdev->dev);
-	if (err)
-		goto out;
-	if (rdev->smc_fw->size != smc_req_size) {
+	if (err) {
+		printk(KERN_ERR
+		       "smc: error loading firmware \"%s\"\n",
+		       fw_name);
+		release_firmware(rdev->smc_fw);
+		rdev->smc_fw = NULL;
+	} else if (rdev->smc_fw->size != smc_req_size) {
 		printk(KERN_ERR
 		       "si_smc: Bogus length %zu in firmware \"%s\"\n",
 		       rdev->smc_fw->size, fw_name);
@@ -5215,14 +5219,12 @@ static void si_enable_mc_ls(struct radeon_device *rdev,

 static void si_init_cg(struct radeon_device *rdev)
 {
-	bool has_uvd = true;
-
 	si_enable_mgcg(rdev, true);
-	si_enable_cgcg(rdev, true);
+	si_enable_cgcg(rdev, false);
 	/* disable MC LS on Tahiti */
 	if (rdev->family == CHIP_TAHITI)
 		si_enable_mc_ls(rdev, false);
-	if (has_uvd) {
+	if (rdev->has_uvd) {
 		si_enable_uvd_mgcg(rdev, true);
 		si_init_uvd_internal_cg(rdev);
 	}
@@ -5230,9 +5232,7 @@ static void si_init_cg(struct radeon_device *rdev)

 static void si_fini_cg(struct radeon_device *rdev)
 {
-	bool has_uvd = true;
-
-	if (has_uvd)
+	if (rdev->has_uvd)
 		si_enable_uvd_mgcg(rdev, false);
 	si_enable_cgcg(rdev, false);
 	si_enable_mgcg(rdev, false);
@@ -5241,11 +5241,11 @@ static void si_fini_cg(struct radeon_device *rdev)
 static void si_init_pg(struct radeon_device *rdev)
 {
 	bool has_pg = false;
-
+#if 0
 	/* only cape verde supports PG */
 	if (rdev->family == CHIP_VERDE)
 		has_pg = true;
-
+#endif
 	if (has_pg) {
 		si_init_ao_cu_mask(rdev);
 		si_init_dma_pg(rdev);
@@ -6422,6 +6422,8 @@ static int si_startup(struct radeon_device *rdev)
 	/* enable aspm */
 	si_program_aspm(rdev);

+	si_mc_program(rdev);
+
 	if (!rdev->me_fw || !rdev->pfp_fw || !rdev->ce_fw ||
 	    !rdev->rlc_fw || !rdev->mc_fw) {
 		r = si_init_microcode(rdev);
@@ -6441,7 +6443,6 @@ static int si_startup(struct radeon_device *rdev)
 	if (r)
 		return r;

-	si_mc_program(rdev);
 	r = si_pcie_gart_enable(rdev);
 	if (r)
 		return r;
@@ -6625,7 +6626,7 @@ int si_suspend(struct radeon_device *rdev)
 	si_cp_enable(rdev, false);
 	cayman_dma_stop(rdev);
 	if (rdev->has_uvd) {
-		r600_uvd_rbc_stop(rdev);
+		r600_uvd_stop(rdev);
 		radeon_uvd_suspend(rdev);
 	}
 	si_irq_suspend(rdev);
@@ -6767,8 +6768,10 @@ void si_fini(struct radeon_device *rdev)
 	radeon_vm_manager_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
-	if (rdev->has_uvd)
+	if (rdev->has_uvd) {
+		r600_uvd_stop(rdev);
 		radeon_uvd_fini(rdev);
+	}
 	si_pcie_gart_fini(rdev);
 	r600_vram_scratch_fini(rdev);
 	radeon_gem_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 73aaa2e4c31..88699e3cd86 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -37,8 +37,6 @@

 #define SMC_RAM_END                 0x20000

-#define DDR3_DRAM_ROWS              0x2000
-
 #define SCLK_MIN_DEEPSLEEP_FREQ     1350

 static const struct si_cac_config_reg cac_weights_tahiti[] =
@@ -1767,8 +1765,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
 {
 	s64 kt, kv, leakage_w, i_leakage, vddc;
 	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
+	s64 tmp;

-	i_leakage = drm_int2fixp(ileakage / 100);
+	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);

 	vddc = div64_s64(drm_int2fixp(v), 1000);
 	temperature = div64_s64(drm_int2fixp(t), 1000);
@@ -1778,8 +1777,9 @@ static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coe
 	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
 	t_ref = drm_int2fixp(coeff->t_ref);

-	kt = drm_fixp_div(drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, temperature)),
-			  drm_fixp_exp(drm_fixp_mul(drm_fixp_mul(t_slope, vddc) + t_intercept, t_ref)));
+	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
+	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
+	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));

 	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));

 	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
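The i_leakage change above is a divide-before-scale precision bug: in 32.32 fixed point, drm_int2fixp(ileakage / 100) truncates to a whole number before scaling, while dividing the already-scaled fixed-point value keeps the fraction. A standalone illustration in plain C, mimicking the 32.32 format (not the DRM helpers themselves):

#include <stdio.h>
#include <stdint.h>

#define FIX_SHIFT 32	/* 32.32 fixed point, like drm_fixed.h */

static int64_t int2fixp(int64_t a) { return a << FIX_SHIFT; }

int main(void)
{
	int64_t ileakage = 150;	/* example value in hundredths */

	int64_t bad  = int2fixp(ileakage / 100);	/* 150/100 -> 1, fraction lost */
	int64_t good = int2fixp(ileakage) / 100;	/* keeps the 0.5 */

	printf("bad  = %f\n", bad  / (double)(1LL << FIX_SHIFT));	/* 1.000000 */
	printf("good = %f\n", good / (double)(1LL << FIX_SHIFT));	/* 1.500000 */
	return 0;
}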
@@ -1931,6 +1931,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
 			si_pi->cac_override = cac_override_pitcairn;
 			si_pi->powertune_data = &powertune_data_pitcairn;
 			si_pi->dte_data = dte_data_pitcairn;
+			break;
 		}
 	} else if (rdev->family == CHIP_VERDE) {
 		si_pi->lcac_config = lcac_cape_verde;
@@ -1941,6 +1942,7 @@ static void si_initialize_powertune_defaults(struct radeon_device *rdev)
 		case 0x683B:
 		case 0x683F:
 		case 0x6829:
+		case 0x6835:
 			si_pi->cac_weights = cac_weights_cape_verde_pro;
 			si_pi->dte_data = dte_data_cape_verde;
 			break;
@@ -2901,7 +2903,8 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 {
 	struct ni_ps *ps = ni_get_ps(rps);
 	struct radeon_clock_and_voltage_limits *max_limits;
-	bool disable_mclk_switching;
+	bool disable_mclk_switching = false;
+	bool disable_sclk_switching = false;
 	u32 mclk, sclk;
 	u16 vddc, vddci;
 	int i;
@@ -2909,8 +2912,11 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 	if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
 	    ni_dpm_vblank_too_short(rdev))
 		disable_mclk_switching = true;
-	else
-		disable_mclk_switching = false;
+
+	if (rps->vclk || rps->dclk) {
+		disable_mclk_switching = true;
+		disable_sclk_switching = true;
+	}

 	if (rdev->pm.dpm.ac_power)
 		max_limits = &rdev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
@@ -2938,27 +2944,43 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
 	if (disable_mclk_switching) {
 		mclk = ps->performance_levels[ps->performance_level_count - 1].mclk;
-		sclk = ps->performance_levels[0].sclk;
-		vddc = ps->performance_levels[0].vddc;
 		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
 	} else {
-		sclk = ps->performance_levels[0].sclk;
 		mclk = ps->performance_levels[0].mclk;
-		vddc = ps->performance_levels[0].vddc;
 		vddci = ps->performance_levels[0].vddci;
 	}

+	if (disable_sclk_switching) {
+		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
+		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
+	} else {
+		sclk = ps->performance_levels[0].sclk;
+		vddc = ps->performance_levels[0].vddc;
+	}
+
 	/* adjusted low state */
 	ps->performance_levels[0].sclk = sclk;
 	ps->performance_levels[0].mclk = mclk;
 	ps->performance_levels[0].vddc = vddc;
 	ps->performance_levels[0].vddci = vddci;

-	for (i = 1; i < ps->performance_level_count; i++) {
-		if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
-			ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
-		if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
-			ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+	if (disable_sclk_switching) {
+		sclk = ps->performance_levels[0].sclk;
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (sclk < ps->performance_levels[i].sclk)
+				sclk = ps->performance_levels[i].sclk;
+		}
+		for (i = 0; i < ps->performance_level_count; i++) {
+			ps->performance_levels[i].sclk = sclk;
+			ps->performance_levels[i].vddc = vddc;
+		}
+	} else {
+		for (i = 1; i < ps->performance_level_count; i++) {
+			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
+				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
+			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
+				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
+		}
 	}

 	if (disable_mclk_switching) {
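The policy the hunk above arrives at: memory reclocking is unsafe with multiple active displays or a too-short vblank, and UVD playback (a state requesting vclk/dclk) pins both the memory and engine clock domains. Reduced to a predicate (sketch, hypothetical helper, not the driver's function):

/* Decide which clock domains must stay fixed for this power state. */
static void pick_switching(unsigned active_crtcs, bool vblank_too_short,
			   bool uvd_active,
			   bool *disable_mclk, bool *disable_sclk)
{
	*disable_mclk = (active_crtcs > 1) || vblank_too_short;
	*disable_sclk = false;
	if (uvd_active) {		/* pin both clocks while decoding */
		*disable_mclk = true;
		*disable_sclk = true;
	}
}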
@@ -3237,10 +3259,10 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
 {
 	struct radeon_ps *rps = rdev->pm.dpm.current_ps;
 	struct ni_ps *ps = ni_get_ps(rps);
-	u32 levels;
+	u32 levels = ps->performance_level_count;

 	if (level == RADEON_DPM_FORCED_LEVEL_HIGH) {
-		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
 			return -EINVAL;

 		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
@@ -3249,14 +3271,13 @@ int si_dpm_force_performance_level(struct radeon_device *rdev,
 		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
 			return -EINVAL;

-		levels = ps->performance_level_count - 1;
-		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
 			return -EINVAL;
 	} else if (level == RADEON_DPM_FORCED_LEVEL_AUTO) {
 		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
 			return -EINVAL;

-		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, 0) != PPSMC_Result_OK)
+		if (si_send_msg_to_smc_with_parameter(rdev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
 			return -EINVAL;
 	}
@@ -3620,8 +3641,12 @@ static void si_enable_display_gap(struct radeon_device *rdev)
 {
 	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);

+	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
+	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
+		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
+
 	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
-	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE) |
+	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
 	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
 }
@@ -4036,16 +4061,15 @@ static int si_force_switch_to_arb_f0(struct radeon_device *rdev)
 static u32 si_calculate_memory_refresh_rate(struct radeon_device *rdev,
					    u32 engine_clock)
 {
-	struct rv7xx_power_info *pi = rv770_get_pi(rdev);
 	u32 dram_rows;
 	u32 dram_refresh_rate;
 	u32 mc_arb_rfsh_rate;
 	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;

-	if (pi->mem_gddr5)
-		dram_rows = 1 << (tmp + 10);
+	if (tmp >= 4)
+		dram_rows = 16384;
 	else
-		dram_rows = DDR3_DRAM_ROWS;
+		dram_rows = 1 << (tmp + 10);

 	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
 	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
@@ -6013,16 +6037,11 @@ int si_dpm_set_power_state(struct radeon_device *rdev)
 		return ret;
 	}

-#if 0
-	/* XXX */
 	ret = si_dpm_force_performance_level(rdev, RADEON_DPM_FORCED_LEVEL_AUTO);
 	if (ret) {
 		DRM_ERROR("si_dpm_force_performance_level failed\n");
 		return ret;
 	}
-#else
-	rdev->pm.dpm.forced_level = RADEON_DPM_FORCED_LEVEL_AUTO;
-#endif

 	return 0;
 }
@@ -6254,9 +6273,6 @@ int si_dpm_init(struct radeon_device *rdev)
 	struct evergreen_power_info *eg_pi;
 	struct ni_power_info *ni_pi;
 	struct si_power_info *si_pi;
-	int index = GetIndexIntoMasterTable(DATA, ASIC_InternalSS_Info);
-	u16 data_offset, size;
-	u8 frev, crev;
 	struct atom_clock_dividers dividers;
 	int ret;
 	u32 mask;
@@ -6347,16 +6363,7 @@ int si_dpm_init(struct radeon_device *rdev)
 	si_pi->vddc_phase_shed_control =
 		radeon_atom_is_voltage_gpio(rdev, SET_VOLTAGE_TYPE_ASIC_VDDC, VOLTAGE_OBJ_PHASE_LUT);

-	if (atom_parse_data_header(rdev->mode_info.atom_context, index, &size,
-				   &frev, &crev, &data_offset)) {
-		pi->sclk_ss = true;
-		pi->mclk_ss = true;
-		pi->dynamic_ss = true;
-	} else {
-		pi->sclk_ss = false;
-		pi->mclk_ss = false;
-		pi->dynamic_ss = true;
-	}
+	rv770_get_engine_memory_ss(rdev);

 	pi->asi = RV770_ASI_DFLT;
 	pi->pasi = CYPRESS_HASI_DFLT;
@@ -6367,8 +6374,7 @@ int si_dpm_init(struct radeon_device *rdev)
 	eg_pi->sclk_deep_sleep = true;
 	si_pi->sclk_deep_sleep_above_low = false;

-	if (pi->gfx_clock_gating &&
-	    (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE))
+	if (rdev->pm.int_thermal_type != THERMAL_TYPE_NONE)
 		pi->thermal_protection = true;
 	else
 		pi->thermal_protection = false;
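For reference, the arbiter refresh-rate arithmetic in si_calculate_memory_refresh_rate above can be exercised standalone; a sketch with made-up register field values (not real hardware readings, and sclk is assumed to be in 10 kHz units as elsewhere in the driver):

#include <stdio.h>
#include <stdint.h>

/* Mirrors the math above: NOOFROWS encodes 2^(n+10) rows capped at
 * 16384, and MC_SEQ_MISC0 bits 1:0 give a refresh interval of
 * 1 << (bits + 3). */
int main(void)
{
	uint32_t engine_clock = 60000;	/* 600 MHz in 10 kHz units (example) */
	uint32_t noofrows = 3;		/* example NOOFROWS field */
	uint32_t misc0_bits = 2;	/* example MC_SEQ_MISC0 & 0x3 */

	uint32_t dram_rows = (noofrows >= 4) ? 16384 : 1u << (noofrows + 10);
	uint32_t dram_refresh_rate = 1u << (misc0_bits + 3);
	uint32_t mc_arb_rfsh_rate =
		((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;

	printf("rows=%u refresh=%u arb_rfsh=%u\n",
	       dram_rows, dram_refresh_rate, mc_arb_rfsh_rate);
	return 0;
}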