Diffstat (limited to 'drivers/gpu/drm/i915')
 drivers/gpu/drm/i915/i915_dma.c         |   3
 drivers/gpu/drm/i915/i915_drv.c         |   5
 drivers/gpu/drm/i915/i915_drv.h         |   3
 drivers/gpu/drm/i915/i915_gem.c         | 720
 drivers/gpu/drm/i915/i915_gem_evict.c   |   8
 drivers/gpu/drm/i915/i915_reg.h         |   1
 drivers/gpu/drm/i915/i915_suspend.c     |  44
 drivers/gpu/drm/i915/intel_acpi.c       |  34
 drivers/gpu/drm/i915/intel_crt.c        | 159
 drivers/gpu/drm/i915/intel_display.c    | 165
 drivers/gpu/drm/i915/intel_dp.c         | 173
 drivers/gpu/drm/i915/intel_drv.h        |   3
 drivers/gpu/drm/i915/intel_i2c.c        |  11
 drivers/gpu/drm/i915/intel_lvds.c       | 133
 drivers/gpu/drm/i915/intel_opregion.c   |   2
 drivers/gpu/drm/i915/intel_overlay.c    |   4
 drivers/gpu/drm/i915/intel_ringbuffer.c | 159
 drivers/gpu/drm/i915/intel_ringbuffer.h |   3
 drivers/gpu/drm/i915/intel_sdvo.c       |  84
 19 files changed, 1032 insertions(+), 682 deletions(-)
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21a..e6800819bca 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -767,6 +767,9 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev);
break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
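
The hunk above only advertises the capability; userspace discovers it through the long-standing GETPARAM ioctl. A minimal probe might look like this sketch (assuming libdrm's drmIoctl() and an i915_drm.h new enough to define I915_PARAM_HAS_COHERENT_RINGS; older kernels fail the ioctl with EINVAL, which callers should read as "not supported"):

#include <xf86drm.h>
#include <i915_drm.h>

static int has_coherent_rings(int fd)
{
	drm_i915_getparam_t gp;
	int value = 0;

	gp.param = I915_PARAM_HAS_COHERENT_RINGS;
	gp.value = &value;

	/* Unknown parameters make the ioctl fail, so a zero here
	 * covers both "old kernel" and "explicitly no". */
	if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
		return 0;
	return value;
}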
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 3467dd42076..f737960712e 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,7 +44,7 @@ unsigned int i915_fbpercrtc = 0;
module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400);
unsigned int i915_powersave = 1;
-module_param_named(powersave, i915_powersave, int, 0400);
+module_param_named(powersave, i915_powersave, int, 0600);
unsigned int i915_lvds_downclock = 0;
module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400);
@@ -150,7 +150,8 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
- .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
};
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 2c2c19b6285..409826da309 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -1045,6 +1045,8 @@ void i915_gem_clflush_object(struct drm_gem_object *obj);
int i915_gem_object_set_domain(struct drm_gem_object *obj,
uint32_t read_domains,
uint32_t write_domain);
+int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible);
int i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
int i915_gem_do_init(struct drm_device *dev, unsigned long start,
@@ -1321,6 +1323,7 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
#define PRIMARY_RINGBUFFER_SIZE (128*1024)
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 8eb8453208b..275ec6ed43a 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -38,8 +38,7 @@
static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
- bool pipelined);
+static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
@@ -547,6 +546,19 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_object *obj_priv;
int ret = 0;
+ if (args->size == 0)
+ return 0;
+
+ if (!access_ok(VERIFY_WRITE,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size))
+ return -EFAULT;
+
+ ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
+ if (ret)
+ return -EFAULT;
+
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
@@ -564,23 +576,6 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
goto out;
}
- if (args->size == 0)
- goto out;
-
- if (!access_ok(VERIFY_WRITE,
- (char __user *)(uintptr_t)args->data_ptr,
- args->size)) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = fault_in_pages_writeable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
ret = i915_gem_object_get_pages_or_evict(obj);
if (ret)
goto out;
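
This hunk and the matching pwrite hunk below hoist the user-pointer checks ahead of i915_mutex_lock_interruptible(). The ordering is the point: fault_in_pages_writeable() can take page faults, and faulting while struct_mutex is held risks re-entering driver fault paths that also want the lock, besides complicating the error unwinding. The resulting shape, as a schematic (names and signature are illustrative, not the driver's literal code):

static int example_pread(struct drm_device *dev,
			 char __user *ptr, size_t size)
{
	int ret;

	if (size == 0)
		return 0;

	/* Validate and pre-fault the user buffer while unlocked... */
	if (!access_ok(VERIFY_WRITE, ptr, size))
		return -EFAULT;
	if (fault_in_pages_writeable(ptr, size))
		return -EFAULT;

	/* ...so the locked section only does object lookup + copy. */
	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ret;
	/* ... */
	mutex_unlock(&dev->struct_mutex);
	return 0;
}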
@@ -981,7 +976,20 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_pwrite *args = data;
struct drm_gem_object *obj;
struct drm_i915_gem_object *obj_priv;
- int ret = 0;
+ int ret;
+
+ if (args->size == 0)
+ return 0;
+
+ if (!access_ok(VERIFY_READ,
+ (char __user *)(uintptr_t)args->data_ptr,
+ args->size))
+ return -EFAULT;
+
+ ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
+ args->size);
+ if (ret)
+ return -EFAULT;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
@@ -994,30 +1002,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
}
obj_priv = to_intel_bo(obj);
-
/* Bounds check destination. */
if (args->offset > obj->size || args->size > obj->size - args->offset) {
ret = -EINVAL;
goto out;
}
- if (args->size == 0)
- goto out;
-
- if (!access_ok(VERIFY_READ,
- (char __user *)(uintptr_t)args->data_ptr,
- args->size)) {
- ret = -EFAULT;
- goto out;
- }
-
- ret = fault_in_pages_readable((char __user *)(uintptr_t)args->data_ptr,
- args->size);
- if (ret) {
- ret = -EFAULT;
- goto out;
- }
-
/* We can only do the GTT pwrite on untiled buffers, as otherwise
* it would end up going through the fenced access, and we'll get
* different detiling behavior between reading and writing.
@@ -2172,7 +2162,7 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- if (list_empty(&ring->gpu_write_list))
+ if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
i915_gem_flush_ring(dev, NULL, ring,
@@ -2190,9 +2180,7 @@ i915_gpu_idle(struct drm_device *dev)
int ret;
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- list_empty(&dev_priv->bsd_ring.active_list) &&
- list_empty(&dev_priv->blt_ring.active_list));
+ list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return 0;
@@ -2605,7 +2593,7 @@ i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
if (reg->gpu) {
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
@@ -2753,8 +2741,7 @@ i915_gem_clflush_object(struct drm_gem_object *obj)
/** Flushes any GPU write domain for the object if it's dirty. */
static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
- bool pipelined)
+i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
@@ -2773,10 +2760,7 @@ i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
obj->read_domains,
old_write_domain);
- if (pipelined)
- return 0;
-
- return i915_gem_object_wait_rendering(obj, true);
+ return 0;
}
/** Flushes the GTT write domain for the object if it's dirty. */
@@ -2837,18 +2821,15 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
i915_gem_object_flush_cpu_write_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
@@ -2886,7 +2867,7 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
if (obj_priv->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, true);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret)
return ret;
@@ -2909,6 +2890,20 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
return 0;
}
+int
+i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible)
+{
+ if (!obj->active)
+ return 0;
+
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+ 0, obj->base.write_domain);
+
+ return i915_gem_object_wait_rendering(&obj->base, interruptible);
+}
+
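
The new helper packages a flush-then-wait sequence callers otherwise open-code: if the object has a pending GPU write domain, flush it on the object's ring, then block until rendering completes. A hedged usage sketch (the pageflip path added to intel_display.c later in this patch is the first real caller):

/* Hypothetical wrapper; 'obj' would be the outgoing scanout buffer.
 * Passing interruptible=false asks for an uninterruptible wait so a
 * signal cannot abandon the display update halfway. */
static int quiesce_scanout(struct drm_i915_gem_object *obj)
{
	return i915_gem_object_flush_gpu(obj, false);
}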
/**
* Moves a single object to the CPU read, and possibly write domain.
*
@@ -2921,9 +2916,12 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
i915_gem_object_flush_gtt_write_domain(obj);
@@ -2932,12 +2930,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
old_write_domain = obj->write_domain;
old_read_domains = obj->read_domains;
@@ -3108,7 +3100,8 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* write domain
*/
if (obj->write_domain &&
- obj->write_domain != obj->pending_read_domains) {
+ (obj->write_domain != obj->pending_read_domains ||
+ obj_priv->ring != ring)) {
flush_domains |= obj->write_domain;
invalidate_domains |=
obj->pending_read_domains & ~obj->write_domain;
@@ -3201,9 +3194,13 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
if (offset == 0 && size == obj->size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
+ ret = i915_gem_object_flush_gpu_write_domain(obj);
if (ret != 0)
return ret;
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
+ return ret;
+
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
@@ -3250,192 +3247,230 @@ i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
return 0;
}
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry)
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *reloc)
{
struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- struct drm_gem_object *target_obj = NULL;
- uint32_t target_handle = 0;
- int i, ret = 0;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
- user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
- uint32_t target_offset;
+ target_obj = drm_gem_object_lookup(dev, file_priv,
+ reloc->target_handle);
+ if (target_obj == NULL)
+ return -ENOENT;
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc))) {
- ret = -EFAULT;
- break;
- }
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
- if (reloc.target_handle != target_handle) {
- drm_gem_object_unreference(target_obj);
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
- target_obj = drm_gem_object_lookup(dev, file_priv,
- reloc.target_handle);
- if (target_obj == NULL) {
- ret = -ENOENT;
- break;
- }
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (target_offset == 0) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ goto err;
+ }
- target_handle = reloc.target_handle;
- }
- target_offset = to_intel_bo(target_obj)->gtt_offset;
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (reloc->write_domain & (reloc->write_domain - 1)) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (reloc->write_domain & I915_GEM_DOMAIN_CPU ||
+ reloc->read_domains & I915_GEM_DOMAIN_CPU) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ goto err;
+ }
+ if (reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ goto err;
+ }
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc.offset,
- (int) reloc.target_handle,
- (int) reloc.read_domains,
- (int) reloc.write_domain,
- (int) target_offset,
- (int) reloc.presumed_offset,
- reloc.delta);
-#endif
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_offset == 0) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc.target_handle);
- ret = -EINVAL;
- break;
- }
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ goto out;
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc.write_domain & (reloc.write_domain - 1)) {
- DRM_ERROR("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- ret = -EINVAL;
- break;
- }
- if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
- reloc.read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- ret = -EINVAL;
- break;
- }
- if (reloc.write_domain && target_obj->pending_write_domain &&
- reloc.write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.write_domain,
- target_obj->pending_write_domain);
- ret = -EINVAL;
- break;
- }
+ /* Check that the relocation address is valid... */
+ if (reloc->offset > obj->base.size - 4) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ goto err;
+ }
+ if (reloc->offset & 3) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ goto err;
+ }
- target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain |= reloc.write_domain;
+ /* and points to somewhere within the target object. */
+ if (reloc->delta >= target_obj->size) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta,
+ (int) target_obj->size);
+ goto err;
+ }
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_offset == reloc.presumed_offset)
- continue;
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ char *vaddr;
- /* Check that the relocation address is valid... */
- if (reloc.offset > obj->base.size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset, (int) obj->base.size);
- ret = -EINVAL;
- break;
- }
- if (reloc.offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset);
- ret = -EINVAL;
- break;
- }
+ vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+ } else {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
- /* and points to somewhere within the target object. */
- if (reloc.delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.delta, (int) target_obj->size);
- ret = -EINVAL;
- break;
- }
+ ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
+ if (ret)
+ goto err;
- reloc.delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
- uint32_t page_offset = reloc.offset & ~PAGE_MASK;
- char *vaddr;
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc->offset & ~PAGE_MASK));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
- vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
- *(uint32_t *)(vaddr + page_offset) = reloc.delta;
- kunmap_atomic(vaddr);
- } else {
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
- if (ret)
- break;
+out:
+ ret = 0;
+err:
+ drm_gem_object_unreference(target_obj);
+ return ret;
+}
- /* Map the page containing the relocation we're going to perform. */
- reloc.offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc.offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc.offset & ~PAGE_MASK));
- iowrite32(reloc.delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry)
+{
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ int i, ret;
+
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
+
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc)))
+ return -EFAULT;
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &reloc);
+ if (ret)
+ return ret;
- /* and update the user's relocation entry */
- reloc.presumed_offset = target_offset;
if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset))) {
- ret = -EFAULT;
- break;
- }
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset)))
+ return -EFAULT;
}
- drm_gem_object_unreference(target_obj);
- return ret;
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, file_priv, entry, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
+{
+ int i, ret;
+
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object(obj, file,
+ &exec_list[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret, i, retry;
@@ -3497,6 +3532,133 @@ i915_gem_execbuffer_pin(struct drm_device *dev,
return 0;
}
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file,
+ struct drm_gem_object **object_list,
+ struct drm_i915_gem_exec_object2 *exec_list,
+ int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ int i, total, ret;
+
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->in_execbuffer = false;
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec_list[i].relocation_count;
+
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL) {
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec_list[i].relocs_ptr;
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec_list[i].relocation_count *
+ sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ total += exec_list[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ ret = i915_gem_execbuffer_reserve(dev, file,
+ object_list, exec_list,
+ count);
+ if (ret)
+ goto err;
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, file,
+ &exec_list[i],
+ reloc + total);
+ if (ret)
+ goto err;
+
+ total += exec_list[i].relocation_count;
+ }
+
+ /* Leave the user relocations as are, this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct drm_gem_object **objects,
+ int count)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret, i;
+
+ /* Zero the global flush/invalidate flags. These
+ * will be modified as new domains are computed
+ * for each object
+ */
+ dev->invalidate_domains = 0;
+ dev->flush_domains = 0;
+ dev_priv->mm.flush_rings = 0;
+ for (i = 0; i < count; i++)
+ i915_gem_object_set_to_gpu_domain(objects[i], ring);
+
+ if (dev->invalidate_domains | dev->flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ dev->invalidate_domains,
+ dev->flush_domains);
+#endif
+ i915_gem_flush(dev, file,
+ dev->invalidate_domains,
+ dev->flush_domains,
+ dev_priv->mm.flush_rings);
+ }
+
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
+ /* XXX replace with semaphores */
+ if (obj->ring && ring != obj->ring) {
+ ret = i915_gem_object_wait_rendering(&obj->base, true);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
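The relocate/relocate_slow split exists because the fast path runs under struct_mutex and therefore only uses the non-faulting __copy_from_user_inatomic(); an -EFAULT from it just means the relocation array was not resident. The slow path drops the lock, copies every relocation array into kernel memory with the faulting copy_from_user(), retakes the lock, re-reserves the buffers (the GTT may have changed while unlocked), and applies the buffered entries. Schematically, as i915_gem_do_execbuffer wires it up further below:

	/* Fast path first: atomic copies, lock held throughout. */
	ret = i915_gem_execbuffer_relocate(dev, file, object_list,
					   exec_list, count);
	if (ret == -EFAULT)
		/* User memory faulted under the lock: buffer the
		 * relocations in kernel memory and retry. */
		ret = i915_gem_execbuffer_relocate_slow(dev, file,
							object_list,
							exec_list, count);
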
/* Throttle our rendering by waiting until the ring has completed our requests
* emitted over 20 msec ago.
*
@@ -3580,8 +3742,15 @@ validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
for (i = 0; i < count; i++) {
char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
- size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
+ int length; /* limited by fault_in_pages_readable() */
+
+ /* First check for malicious input causing overflow */
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
if (!access_ok(VERIFY_READ, ptr, length))
return -EFAULT;
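
The overflow guard matters on 32-bit builds: struct drm_i915_gem_relocation_entry is 32 bytes in this ABI, so a hostile relocation_count can wrap the multiplication and let access_ok() approve a tiny (or zero) length while the relocation loop still iterates the full count. Illustrative arithmetic, assuming 32-bit size math:

	/* relocation_count = 0x08000000            (1 << 27)
	 * length = count * 32 = 1 << 32            wraps to 0
	 * access_ok(VERIFY_READ, ptr, 0)           trivially passes
	 *
	 * Rejecting counts above INT_MAX / sizeof(entry) up front
	 * keeps 'length' meaningful by the time access_ok() sees it.
	 */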
@@ -3724,18 +3893,24 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
}
/* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_pin(dev, file,
- object_list, exec_list,
- args->buffer_count);
+ ret = i915_gem_execbuffer_reserve(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
if (ret)
goto err;
/* The objects are in their final locations, apply the relocations. */
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
+ ret = i915_gem_execbuffer_relocate(dev, file,
+ object_list, exec_list,
+ args->buffer_count);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file,
+ object_list,
+ exec_list,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
if (ret)
goto err;
}
@@ -3757,33 +3932,10 @@ i915_gem_do_execbuffer(struct drm_device *dev, void *data,
goto err;
}
- /* Zero the global flush/invalidate flags. These
- * will be modified as new domains are computed
- * for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- dev_priv->mm.flush_rings = 0;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- /* Compute new gpu domains and update invalidate/flush */
- i915_gem_object_set_to_gpu_domain(obj, ring);
- }
-
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev, file,
- dev->invalidate_domains,
- dev->flush_domains,
- dev_priv->mm.flush_rings);
- }
+ ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
+ object_list, args->buffer_count);
+ if (ret)
+ goto err;
for (i = 0; i < args->buffer_count; i++) {
struct drm_gem_object *obj = object_list[i];
@@ -4043,8 +4195,7 @@ i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
alignment = i915_gem_get_gtt_alignment(obj);
if (obj_priv->gtt_offset & (alignment - 1)) {
WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment:"
- " offset=%x, req.alignment=%x\n",
+ "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
obj_priv->gtt_offset, alignment);
ret = i915_gem_object_unbind(obj);
if (ret)
@@ -4223,10 +4374,20 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain & I915_GEM_GPU_DOMAINS)
+ if (obj->write_domain & I915_GEM_GPU_DOMAINS) {
i915_gem_flush_ring(dev, file_priv,
obj_priv->ring,
0, obj->write_domain);
+ } else if (obj_priv->ring->outstanding_lazy_request) {
+ /* This ring is not being cleared by active usage,
+ * so emit a request to do so.
+ */
+ u32 seqno = i915_add_request(dev,
+ NULL, NULL,
+ obj_priv->ring);
+ if (seqno == 0)
+ ret = -ENOMEM;
+ }
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
@@ -4856,17 +5017,24 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
struct drm_file *file_priv)
{
struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- void *obj_addr;
- int ret;
- char __user *user_data;
+ void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
- user_data = (char __user *) (uintptr_t) args->data_ptr;
- obj_addr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
- DRM_DEBUG_DRIVER("obj_addr %p, %lld\n", obj_addr, args->size);
- ret = copy_from_user(obj_addr, user_data, args->size);
- if (ret)
- return -EFAULT;
+ if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
+ unsigned long unwritten;
+
+ /* The physical object once assigned is fixed for the lifetime
+ * of the obj, so we can safely drop the lock and continue
+ * to access vaddr.
+ */
+ mutex_unlock(&dev->struct_mutex);
+ unwritten = copy_from_user(vaddr, user_data, args->size);
+ mutex_lock(&dev->struct_mutex);
+ if (unwritten)
+ return -EFAULT;
+ }
drm_agp_chipset_flush(dev);
return 0;
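
The rewrite above is an optimistic-copy idiom: try the non-faulting __copy_from_user_inatomic_nocache() while struct_mutex is held, and only on failure drop the lock and redo the copy with the faulting copy_from_user(). That is safe here solely because, as the comment notes, the phys object's vaddr is fixed for the object's lifetime. The general shape, as a hypothetical helper (assumes 'dst' stays valid across the unlock):

static int copy_in_optimistic(struct mutex *lock, void *dst,
			      const void __user *src, size_t len)
{
	/* Fast path: no page faults allowed, lock stays held. */
	if (!__copy_from_user_inatomic_nocache(dst, src, len))
		return 0;

	/* Slow path: fault the pages in without the lock. */
	mutex_unlock(lock);
	len = copy_from_user(dst, src, len);
	mutex_lock(lock);
	return len ? -EFAULT : 0;
}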
@@ -4900,9 +5068,7 @@ i915_gpu_is_active(struct drm_device *dev)
int lists_empty;
lists_empty = list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- list_empty(&dev_priv->bsd_ring.active_list) &&
- list_empty(&dev_priv->blt_ring.active_list);
+ list_empty(&dev_priv->mm.active_list);
return !lists_empty;
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 43a4013f53f..d8ae7d1d0cc 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -165,9 +165,7 @@ i915_gem_evict_everything(struct drm_device *dev)
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- list_empty(&dev_priv->bsd_ring.active_list) &&
- list_empty(&dev_priv->blt_ring.active_list));
+ list_empty(&dev_priv->mm.active_list));
if (lists_empty)
return -ENOSPC;
@@ -184,9 +182,7 @@ i915_gem_evict_everything(struct drm_device *dev)
lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->render_ring.active_list) &&
- list_empty(&dev_priv->bsd_ring.active_list) &&
- list_empty(&dev_priv->blt_ring.active_list));
+ list_empty(&dev_priv->mm.active_list));
BUG_ON(!lists_empty);
return 0;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 25ed911a311..878fc766a12 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3033,6 +3033,7 @@
#define TRANS_DP_10BPC (1<<9)
#define TRANS_DP_6BPC (2<<9)
#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 989c19d2d95..42729d25da5 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -239,6 +239,16 @@ static void i915_save_modeset_reg(struct drm_device *dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Cursor state */
+ dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -529,6 +539,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ /* Cursor state */
+ I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
return;
}
@@ -543,16 +563,6 @@ void i915_save_display(struct drm_device *dev)
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
- /* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +667,6 @@ void i915_restore_display(struct drm_device *dev)
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
@@ -862,8 +862,10 @@ int i915_restore_state(struct drm_device *dev)
/* Clock gating state */
intel_init_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev))
+ if (HAS_PCH_SPLIT(dev)) {
ironlake_enable_drps(dev);
+ intel_init_emon(dev);
+ }
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 65c88f9ba12..2cb8e0b9f1e 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -190,37 +190,6 @@ out:
kfree(output.pointer);
}
-static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
-{
- return 0;
-}
-
-static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
- enum vga_switcheroo_state state)
-{
- return 0;
-}
-
-static int intel_dsm_init(void)
-{
- return 0;
-}
-
-static int intel_dsm_get_client_id(struct pci_dev *pdev)
-{
- if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
- return VGA_SWITCHEROO_IGD;
- else
- return VGA_SWITCHEROO_DIS;
-}
-
-static struct vga_switcheroo_handler intel_dsm_handler = {
- .switchto = intel_dsm_switchto,
- .power_state = intel_dsm_power_state,
- .init = intel_dsm_init,
- .get_client_id = intel_dsm_get_client_id,
-};
-
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle, intel_handle;
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void)
{
if (!intel_dsm_detect())
return;
-
- vga_switcheroo_register_handler(&intel_dsm_handler);
}
void intel_unregister_dsm_handler(void)
{
- vga_switcheroo_unregister_handler();
}
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index c55c7704335..8df57431606 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -34,6 +34,25 @@
#include "i915_drm.h"
#include "i915_drv.h"
+/* Here's the desired hotplug mode */
+#define ADPA_HOTPLUG_BITS (ADPA_CRT_HOTPLUG_PERIOD_128 | \
+ ADPA_CRT_HOTPLUG_WARMUP_10MS | \
+ ADPA_CRT_HOTPLUG_SAMPLE_4S | \
+ ADPA_CRT_HOTPLUG_VOLTAGE_50 | \
+ ADPA_CRT_HOTPLUG_VOLREF_325MV | \
+ ADPA_CRT_HOTPLUG_ENABLE)
+
+struct intel_crt {
+ struct intel_encoder base;
+ bool force_hotplug_required;
+};
+
+static struct intel_crt *intel_attached_crt(struct drm_connector *connector)
+{
+ return container_of(intel_attached_encoder(connector),
+ struct intel_crt, base);
+}
+
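intel_attached_crt() is the standard kernel subclassing idiom: struct intel_crt embeds struct intel_encoder as 'base', and container_of() subtracts the member offset to recover the enclosing object. In miniature (illustrative types, not from the patch):

struct child { int extra_state; struct base b; };

static struct child *to_child(struct base *ptr)
{
	/* Walks back from &child.b to the containing struct child. */
	return container_of(ptr, struct child, b);
}
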
static void intel_crt_dpms(struct drm_encoder *encoder, int mode)
{
struct drm_device *dev = encoder->dev;
@@ -129,7 +148,7 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
dpll_md & ~DPLL_MD_UDI_MULTIPLIER_MASK);
}
- adpa = 0;
+ adpa = ADPA_HOTPLUG_BITS;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
adpa |= ADPA_HSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
@@ -157,53 +176,44 @@ static void intel_crt_mode_set(struct drm_encoder *encoder,
static bool intel_ironlake_crt_detect_hotplug(struct drm_connector *connector)
{
struct drm_device *dev = connector->dev;
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 adpa, temp;
+ u32 adpa;
bool ret;
- bool turn_off_dac = false;
- temp = adpa = I915_READ(PCH_ADPA);
+ /* The first time through, trigger an explicit detection cycle */
+ if (crt->force_hotplug_required) {
+ bool turn_off_dac = HAS_PCH_SPLIT(dev);
+ u32 save_adpa;
- if (HAS_PCH_SPLIT(dev))
- turn_off_dac = true;
-
- adpa &= ~ADPA_CRT_HOTPLUG_MASK;
- if (turn_off_dac)
- adpa &= ~ADPA_DAC_ENABLE;
-
- /* disable HPD first */
- I915_WRITE(PCH_ADPA, adpa);
- (void)I915_READ(PCH_ADPA);
-
- adpa |= (ADPA_CRT_HOTPLUG_PERIOD_128 |
- ADPA_CRT_HOTPLUG_WARMUP_10MS |
- ADPA_CRT_HOTPLUG_SAMPLE_4S |
- ADPA_CRT_HOTPLUG_VOLTAGE_50 | /* default */
- ADPA_CRT_HOTPLUG_VOLREF_325MV |
- ADPA_CRT_HOTPLUG_ENABLE |
- ADPA_CRT_HOTPLUG_FORCE_TRIGGER);
-
- DRM_DEBUG_KMS("pch crt adpa 0x%x", adpa);
- I915_WRITE(PCH_ADPA, adpa);
-
- if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
- 1000))
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
-
- if (turn_off_dac) {
- /* Make sure hotplug is enabled */
- I915_WRITE(PCH_ADPA, temp | ADPA_CRT_HOTPLUG_ENABLE);
- (void)I915_READ(PCH_ADPA);
+ crt->force_hotplug_required = 0;
+
+ save_adpa = adpa = I915_READ(PCH_ADPA);
+ DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+
+ adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
+ if (turn_off_dac)
+ adpa &= ~ADPA_DAC_ENABLE;
+
+ I915_WRITE(PCH_ADPA, adpa);
+
+ if (wait_for((I915_READ(PCH_ADPA) & ADPA_CRT_HOTPLUG_FORCE_TRIGGER) == 0,
+ 1000))
+ DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+
+ if (turn_off_dac) {
+ I915_WRITE(PCH_ADPA, save_adpa);
+ POSTING_READ(PCH_ADPA);
+ }
}
/* Check the status to see if both blue and green are on now */
adpa = I915_READ(PCH_ADPA);
- adpa &= ADPA_CRT_HOTPLUG_MONITOR_MASK;
- if ((adpa == ADPA_CRT_HOTPLUG_MONITOR_COLOR) ||
- (adpa == ADPA_CRT_HOTPLUG_MONITOR_MONO))
+ if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
+ DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
return ret;
}
@@ -277,13 +287,12 @@ static bool intel_crt_ddc_probe(struct drm_i915_private *dev_priv, int ddc_bus)
return i2c_transfer(&dev_priv->gmbus[ddc_bus].adapter, msgs, 1) == 1;
}
-static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
+static bool intel_crt_detect_ddc(struct intel_crt *crt)
{
- struct intel_encoder *intel_encoder = to_intel_encoder(encoder);
- struct drm_i915_private *dev_priv = encoder->dev->dev_private;
+ struct drm_i915_private *dev_priv = crt->base.base.dev->dev_private;
/* CRT should always be at 0, but check anyway */
- if (intel_encoder->type != INTEL_OUTPUT_ANALOG)
+ if (crt->base.type != INTEL_OUTPUT_ANALOG)
return false;
if (intel_crt_ddc_probe(dev_priv, dev_priv->crt_ddc_pin)) {
@@ -291,7 +300,7 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
return true;
}
- if (intel_ddc_probe(intel_encoder, dev_priv->crt_ddc_pin)) {
+ if (intel_ddc_probe(&crt->base, dev_priv->crt_ddc_pin)) {
DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
return true;
}
@@ -300,9 +309,9 @@ static bool intel_crt_detect_ddc(struct drm_encoder *encoder)
}
static enum drm_connector_status
-intel_crt_load_detect(struct drm_crtc *crtc, struct intel_encoder *intel_encoder)
+intel_crt_load_detect(struct drm_crtc *crtc, struct intel_crt *crt)
{
- struct drm_encoder *encoder = &intel_encoder->base;
+ struct drm_encoder *encoder = &crt->base.base;
struct drm_device *dev = encoder->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
@@ -434,7 +443,7 @@ static enum drm_connector_status
intel_crt_detect(struct drm_connector *connector, bool force)
{
struct drm_device *dev = connector->dev;
- struct intel_encoder *encoder = intel_attached_encoder(connector);
+ struct intel_crt *crt = intel_attached_crt(connector);
struct drm_crtc *crtc;
int dpms_mode;
enum drm_connector_status status;
@@ -443,28 +452,31 @@ intel_crt_detect(struct drm_connector *connector, bool force)
if (intel_crt_detect_hotplug(connector)) {
DRM_DEBUG_KMS("CRT detected via hotplug\n");
return connector_status_connected;
- } else
+ } else {
+ DRM_DEBUG_KMS("CRT not detected via hotplug\n");
return connector_status_disconnected;
+ }
}
- if (intel_crt_detect_ddc(&encoder->base))
+ if (intel_crt_detect_ddc(crt))
return connector_status_connected;
if (!force)
return connector->status;
/* for pre-945g platforms use load detect */
- if (encoder->base.crtc && encoder->base.crtc->enabled) {
- status = intel_crt_load_detect(encoder->base.crtc, encoder);
+ crtc = crt->base.base.crtc;
+ if (crtc && crtc->enabled) {
+ status = intel_crt_load_detect(crtc, crt);
} else {
- crtc = intel_get_load_detect_pipe(encoder, connector,
+ crtc = intel_get_load_detect_pipe(&crt->base, connector,
NULL, &dpms_mode);
if (crtc) {
- if (intel_crt_detect_ddc(&encoder->base))
+ if (intel_crt_detect_ddc(crt))
status = connector_status_connected;
else
- status = intel_crt_load_detect(crtc, encoder);
- intel_release_load_detect_pipe(encoder,
+ status = intel_crt_load_detect(crtc, crt);
+ intel_release_load_detect_pipe(&crt->base,
connector, dpms_mode);
} else
status = connector_status_unknown;
@@ -536,17 +548,17 @@ static const struct drm_encoder_funcs intel_crt_enc_funcs = {
void intel_crt_init(struct drm_device *dev)
{
struct drm_connector *connector;
- struct intel_encoder *intel_encoder;
+ struct intel_crt *crt;
struct intel_connector *intel_connector;
struct drm_i915_private *dev_priv = dev->dev_private;
- intel_encoder = kzalloc(sizeof(struct intel_encoder), GFP_KERNEL);
- if (!intel_encoder)
+ crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
+ if (!crt)
return;
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
- kfree(intel_encoder);
+ kfree(crt);
return;
}
@@ -554,20 +566,20 @@ void intel_crt_init(struct drm_device *dev)
drm_connector_init(dev, &intel_connector->base,
&intel_crt_connector_funcs, DRM_MODE_CONNECTOR_VGA);
- drm_encoder_init(dev, &intel_encoder->base, &intel_crt_enc_funcs,
+ drm_encoder_init(dev, &crt->base.base, &intel_crt_enc_funcs,
DRM_MODE_ENCODER_DAC);
- intel_connector_attach_encoder(intel_connector, intel_encoder);
+ intel_connector_attach_encoder(intel_connector, &crt->base);
- intel_encoder->type = INTEL_OUTPUT_ANALOG;
- intel_encoder->clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
- (1 << INTEL_ANALOG_CLONE_BIT) |
- (1 << INTEL_SDVO_LVDS_CLONE_BIT);
- intel_encoder->crtc_mask = (1 << 0) | (1 << 1);
+ crt->base.type = INTEL_OUTPUT_ANALOG;
+ crt->base.clone_mask = (1 << INTEL_SDVO_NON_TV_CLONE_BIT |
+ 1 << INTEL_ANALOG_CLONE_BIT |
+ 1 << INTEL_SDVO_LVDS_CLONE_BIT);
+ crt->base.crtc_mask = (1 << 0) | (1 << 1);
connector->interlace_allowed = 1;
connector->doublescan_allowed = 0;
- drm_encoder_helper_add(&intel_encoder->base, &intel_crt_helper_funcs);
+ drm_encoder_helper_add(&crt->base.base, &intel_crt_helper_funcs);
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
drm_sysfs_connector_add(connector);
@@ -577,5 +589,22 @@ void intel_crt_init(struct drm_device *dev)
else
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ /*
+ * Configure the automatic hotplug detection stuff
+ */
+ crt->force_hotplug_required = 0;
+ if (HAS_PCH_SPLIT(dev)) {
+ u32 adpa;
+
+ adpa = I915_READ(PCH_ADPA);
+ adpa &= ~ADPA_CRT_HOTPLUG_MASK;
+ adpa |= ADPA_HOTPLUG_BITS;
+ I915_WRITE(PCH_ADPA, adpa);
+ POSTING_READ(PCH_ADPA);
+
+ DRM_DEBUG_KMS("pch crt adpa set to 0x%x\n", adpa);
+ crt->force_hotplug_required = 1;
+ }
+
dev_priv->hotplug_supported_mask |= CRT_HOTPLUG_INT_STATUS;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 990f065374b..d9b7092439e 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1611,6 +1611,18 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
wait_event(dev_priv->pending_flip_queue,
atomic_read(&obj_priv->pending_flip) == 0);
+
+ /* Big Hammer, we also need to ensure that any pending
+ * MI_WAIT_FOR_EVENT inside a user batch buffer on the
+ * current scanout is retired before unpinning the old
+ * framebuffer.
+ */
+ ret = i915_gem_object_flush_gpu(obj_priv, false);
+ if (ret) {
+ i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
+ mutex_unlock(&dev->struct_mutex);
+ return ret;
+ }
}
ret = intel_pipe_set_base_atomic(crtc, crtc->fb, x, y,
@@ -1681,6 +1693,37 @@ static void ironlake_set_pll_edp(struct drm_crtc *crtc, int clock)
udelay(500);
}
+static void intel_fdi_normal_train(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ int pipe = intel_crtc->pipe;
+ u32 reg, temp;
+
+ /* enable normal train */
+ reg = FDI_TX_CTL(pipe);
+ temp = I915_READ(reg);
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
+ I915_WRITE(reg, temp);
+
+ reg = FDI_RX_CTL(pipe);
+ temp = I915_READ(reg);
+ if (HAS_PCH_CPT(dev)) {
+ temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
+ temp |= FDI_LINK_TRAIN_NORMAL_CPT;
+ } else {
+ temp &= ~FDI_LINK_TRAIN_NONE;
+ temp |= FDI_LINK_TRAIN_NONE;
+ }
+ I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+
+ /* wait one idle pattern time */
+ POSTING_READ(reg);
+ udelay(1000);
+}
+
/* The FDI link training functions for ILK/Ibexpeak. */
static void ironlake_fdi_link_train(struct drm_crtc *crtc)
{
@@ -1767,27 +1810,6 @@ static void ironlake_fdi_link_train(struct drm_crtc *crtc)
DRM_DEBUG_KMS("FDI train done\n");
- /* enable normal train */
- reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
- I915_WRITE(reg, temp);
-
- reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- if (HAS_PCH_CPT(dev)) {
- temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
- temp |= FDI_LINK_TRAIN_NORMAL_CPT;
- } else {
- temp &= ~FDI_LINK_TRAIN_NONE;
- temp |= FDI_LINK_TRAIN_NONE;
- }
- I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
-
- /* wait one idle pattern time */
- POSTING_READ(reg);
- udelay(1000);
}
static const int const snb_b_fdi_train_param [] = {
@@ -2090,15 +2112,19 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
I915_WRITE(TRANS_VBLANK(pipe), I915_READ(VBLANK(pipe)));
I915_WRITE(TRANS_VSYNC(pipe), I915_READ(VSYNC(pipe)));
+ intel_fdi_normal_train(crtc);
+
/* For PCH DP, enable TRANS_DP_CTL */
if (HAS_PCH_CPT(dev) &&
intel_pipe_has_type(crtc, INTEL_OUTPUT_DISPLAYPORT)) {
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
+ temp |= TRANS_DP_8BPC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2200,9 +2226,10 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
- I915_WRITE(FDI_RX_CHICKEN(pipe),
- I915_READ(FDI_RX_CHICKEN(pipe) &
- ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
+ if (HAS_PCH_IBX(dev))
+ I915_WRITE(FDI_RX_CHICKEN(pipe),
+ I915_READ(FDI_RX_CHICKEN(pipe) &
+ ~FDI_RX_PHASE_SYNC_POINTER_ENABLE));
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
@@ -2687,27 +2714,19 @@ fdi_reduce_ratio(u32 *num, u32 *den)
}
}
-#define DATA_N 0x800000
-#define LINK_N 0x80000
-
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
int link_clock, struct fdi_m_n *m_n)
{
- u64 temp;
-
m_n->tu = 64; /* default size */
- temp = (u64) DATA_N * pixel_clock;
- temp = div_u64(temp, link_clock);
- m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
- m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
- m_n->gmch_n = DATA_N;
+ /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->gmch_m = bits_per_pixel * pixel_clock;
+ m_n->gmch_n = link_clock * nlanes * 8;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- temp = (u64) LINK_N * pixel_clock;
- m_n->link_m = div_u64(temp, link_clock);
- m_n->link_n = LINK_N;
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
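
The rewrite computes exact ratios instead of scaling against the fixed DATA_N/LINK_N constants: data M/N = (bpp * pixel_clock) / (nlanes * 8 * link_clock) and link M/N = pixel_clock / link_clock, with fdi_reduce_ratio() (not shown here) shrinking the pair only as far as needed to fit the hardware's M/N fields. Illustrative numbers, not taken from the patch:

	/* A 1080p-ish mode: 148500 kHz pixel clock, 24 bpp, 4 FDI
	 * lanes at a 270000 kHz link clock:
	 *
	 *   gmch_m = 24 * 148500     = 3564000
	 *   gmch_n = 270000 * 4 * 8  = 8640000
	 *   link_m = 148500,  link_n = 270000
	 *
	 * gmch_m/gmch_n = 0.4125, i.e. the data stream fills 41.25%
	 * of the link; both values already fit the M/N registers, so
	 * no further reduction is required. */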
@@ -3691,6 +3710,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
int lane = 0, link_bw, bpp;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -3774,6 +3794,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->fdi_lanes = lane;
+ if (pixel_multiplier > 1)
+ link_bw *= pixel_multiplier;
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
@@ -5211,6 +5233,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_sanitize_modesetting(struct drm_device *dev,
+ int pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ /* Who knows what state these registers were left in by the BIOS or
+ * grub?
+ *
+ * If we leave the registers in a conflicting state (e.g. with the
+ * display plane reading from the other pipe than the one we intend
+ * to use) then when we attempt to teardown the active mode, we will
+ * not disable the pipes and planes in the correct order -- leaving
+ * a plane reading from a disabled pipe and possibly leading to
+ * undefined behaviour.
+ */
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+ if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
+ return;
+
+ /* This display plane is active and attached to the other CPU pipe. */
+ pipe = !pipe;
+
+ /* Disable the plane and wait for it to stop reading from the pipe. */
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
+
+ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Switch off the pipe. */
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE) {
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev, pipe);
+ }
+}
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
@@ -5262,6 +5333,8 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
+
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
@@ -5311,9 +5384,14 @@ static void intel_setup_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
+ bool has_lvds = false;
if (IS_MOBILE(dev) && !IS_I830(dev))
- intel_lvds_init(dev);
+ has_lvds = intel_lvds_init(dev);
+ if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+ /* disable the panel fitter on everything but LVDS */
+ I915_WRITE(PFIT_CONTROL, 0);
+ }
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
@@ -5581,20 +5659,19 @@ void ironlake_enable_drps(struct drm_device *dev)
fmin = (rgvmodectl & MEMMODE_FMIN_MASK);
fstart = (rgvmodectl & MEMMODE_FSTART_MASK) >>
MEMMODE_FSTART_SHIFT;
- fstart = fmax;
vstart = (I915_READ(PXVFREQ_BASE + (fstart * 4)) & PXVFREQ_PX_MASK) >>
PXVFREQ_PX_SHIFT;
- dev_priv->fmax = fstart; /* IPS callback will increase this */
+ dev_priv->fmax = fmax; /* IPS callback will increase this */
dev_priv->fstart = fstart;
- dev_priv->max_delay = fmax;
+ dev_priv->max_delay = fstart;
dev_priv->min_delay = fmin;
dev_priv->cur_delay = fstart;
- DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n", fmax, fmin,
- fstart);
+ DRM_DEBUG_DRIVER("fmax: %d, fmin: %d, fstart: %d\n",
+ fmax, fmin, fstart);
I915_WRITE(MEMINTREN, MEMINT_CX_SUPR_EN | MEMINT_EVAL_CHG_EN);
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 891f4f1d63b..df648cb4c29 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -584,17 +584,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = dev_priv->panel_fixed_mode->clock;
}
- /* Just use VBT values for eDP */
- if (is_edp(intel_dp)) {
- intel_dp->lane_count = dev_priv->edp.lanes;
- intel_dp->link_bw = dev_priv->edp.rate;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
- return true;
- }
-
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -613,6 +602,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+ if (is_edp(intel_dp)) {
+ /* okay we failed just pick the highest */
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+
+ return true;
+ }
+
return false;
}
@@ -1087,21 +1089,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
}
static uint32_t
-intel_dp_signal_levels(struct intel_dp *intel_dp)
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t signal_levels = 0;
- u8 train_set = intel_dp->train_set[0];
- u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
- u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
+ uint32_t signal_levels = 0;
- if (is_edp(intel_dp)) {
- vswing = dev_priv->edp.vswing;
- preemphasis = dev_priv->edp.preemphasis;
- }
-
- switch (vswing) {
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
default:
signal_levels |= DP_VOLTAGE_0_4;
@@ -1116,7 +1108,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp)
signal_levels |= DP_VOLTAGE_1_2;
break;
}
- switch (preemphasis) {
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
default:
signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1203,18 +1195,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp)
}
static bool
-intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
- return false;
-
- return true;
-}
-
-static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
@@ -1226,9 +1206,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
- if (!intel_dp_aux_handshake_required(intel_dp))
- return true;
-
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
@@ -1261,11 +1238,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
- if (intel_dp_aux_handshake_required(intel_dp))
- /* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
@@ -1283,7 +1259,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1297,37 +1273,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
break;
/* Set training pattern 1 */
- udelay(500);
- if (intel_dp_aux_handshake_required(intel_dp)) {
+ udelay(100);
+ if (!intel_dp_get_link_status(intel_dp))
break;
- } else {
- if (!intel_dp_get_link_status(intel_dp))
- break;
- if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
- clock_recovery = true;
- break;
- }
+ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ clock_recovery = true;
+ break;
+ }
- /* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
- if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
- break;
- if (i == intel_dp->lane_count)
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
+ if (i == intel_dp->lane_count)
+ break;
- /* Check to see if we've tried the same voltage 5 times */
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
- break;
- } else
- tries = 0;
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
- }
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
}
intel_dp->DP = DP;
@@ -1354,7 +1326,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1368,28 +1340,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_TRAINING_PATTERN_2))
break;
- udelay(500);
-
- if (!intel_dp_aux_handshake_required(intel_dp)) {
+ udelay(400);
+ if (!intel_dp_get_link_status(intel_dp))
break;
- } else {
- if (!intel_dp_get_link_status(intel_dp))
- break;
- if (intel_channel_eq_ok(intel_dp)) {
- channel_eq = true;
- break;
- }
+ if (intel_channel_eq_ok(intel_dp)) {
+ channel_eq = true;
+ break;
+ }
- /* Try 5 times */
- if (tries > 5)
- break;
+ /* Give up after more than 5 tries */
+ if (tries > 5)
+ break;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
- ++tries;
- }
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
+ ++tries;
}
+
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
@@ -1408,6 +1376,9 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
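+ /* The port is already off, so there is no link to take down. */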
+ if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
DRM_DEBUG_KMS("\n");
if (is_edp(intel_dp)) {
@@ -1430,6 +1401,28 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (is_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
+
+ if (!HAS_PCH_CPT(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while the port is off prevents the
+ * corresponding HDMI output on transcoder A from working.
+ *
+ * Combine this with another hardware workaround: the
+ * transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+
+ /* Changes to enable or select take place the vblank
+ * after being written.
+ */
+ intel_wait_for_vblank(intel_dp->base.base.dev,
+ intel_crtc->pipe);
+ }
+
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
}
@@ -1517,7 +1510,7 @@ g4x_dp_detect(struct intel_dp *intel_dp)
status = connector_status_connected;
}
- return bit;
+ return status;
}
/**
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9af9f86a876..e52c6125bb1 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -237,7 +237,7 @@ extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
-extern void intel_lvds_init(struct drm_device *dev);
+extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -296,6 +296,7 @@ extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
extern void intel_init_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
struct drm_gem_object *obj,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 2be4f728ed0..3dba086e7ee 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -160,7 +160,7 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
};
struct intel_gpio *gpio;
- if (pin < 1 || pin > 7)
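+ /* Reject pins outside the map as well as its zero (reserved)
+ * entries.
+ */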
+ if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin])
return NULL;
gpio = kzalloc(sizeof(struct intel_gpio), GFP_KERNEL);
@@ -172,7 +172,8 @@ intel_gpio_create(struct drm_i915_private *dev_priv, u32 pin)
gpio->reg += PCH_GPIOA - GPIOA;
gpio->dev_priv = dev_priv;
- snprintf(gpio->adapter.name, I2C_NAME_SIZE, "GPIO%c", "?BACDEF?"[pin]);
+ snprintf(gpio->adapter.name, sizeof(gpio->adapter.name),
+ "i915 GPIO%c", "?BACDE?F"[pin]);
gpio->adapter.owner = THIS_MODULE;
gpio->adapter.algo_data = &gpio->algo;
gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev;
@@ -349,7 +350,7 @@ int intel_setup_gmbus(struct drm_device *dev)
"panel",
"dpc",
"dpb",
- "reserved"
+ "reserved",
"dpd",
};
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -366,8 +367,8 @@ int intel_setup_gmbus(struct drm_device *dev)
bus->adapter.owner = THIS_MODULE;
bus->adapter.class = I2C_CLASS_DDC;
snprintf(bus->adapter.name,
- I2C_NAME_SIZE,
- "gmbus %s",
+ sizeof(bus->adapter.name),
+ "i915 gmbus %s",
names[i]);
bus->adapter.dev.parent = &dev->pdev->dev;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index f1a649990ea..25bcedf386f 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
lvds_reg = LVDS;
}
- if (on) {
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
- intel_panel_set_backlight(dev, dev_priv->backlight_level);
- } else {
- dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
- intel_panel_set_backlight(dev, 0);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (intel_lvds->pfit_control) {
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
- I915_WRITE(PFIT_CONTROL, 0);
- intel_lvds->pfit_control = 0;
+ if (intel_lvds->pfit_dirty) {
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter may only be adjusted
+ * while the pipe is disabled, according to the register
+ * description and the PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
+ DRM_ERROR("timed out waiting for panel to power off\n");
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
intel_lvds->pfit_dirty = false;
}
+ }
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_reg);
+
+ intel_panel_set_backlight(dev, dev_priv->backlight_level);
+}
+
+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+{
+ struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ctl_reg, lvds_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
+ }
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ intel_panel_set_backlight(dev, 0);
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+
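+ /* The panel fitter may only be reprogrammed while the panel
+ * is off, so wait for the power sequencer before disabling it;
+ * it is left dirty so the next enable restores it.
+ */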
+ if (intel_lvds->pfit_control) {
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
+ I915_WRITE(PFIT_CONTROL, 0);
+ intel_lvds->pfit_dirty = true;
}
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
}
@@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(intel_lvds, true);
+ intel_lvds_enable(intel_lvds);
else
- intel_lvds_set_power(intel_lvds, false);
+ intel_lvds_disable(intel_lvds);
/* XXX: We never power down the LVDS pairs. */
}
@@ -411,43 +446,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
/* Always do a full power on as we do not know what state
* we were left in.
*/
- intel_lvds_set_power(intel_lvds, true);
+ intel_lvds_enable(intel_lvds);
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
/*
* The LVDS pin pair will already have been turned on in the
* intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
*/
-
- if (HAS_PCH_SPLIT(dev))
- return;
-
- if (!intel_lvds->pfit_dirty)
- return;
-
- /*
- * Enable automatic panel scaling so that non-native modes fill the
- * screen. Should be enabled before the pipe is enabled, according to
- * register description and PRM.
- */
- DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- intel_lvds->pfit_control,
- intel_lvds->pfit_pgm_ratios);
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
-
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
}
/**
@@ -481,11 +491,8 @@ static int intel_lvds_get_modes(struct drm_connector *connector)
struct drm_device *dev = connector->dev;
struct drm_display_mode *mode;
- if (intel_lvds->edid) {
- drm_mode_connector_update_edid_property(connector,
- intel_lvds->edid);
+ if (intel_lvds->edid)
return drm_add_edid_modes(connector, intel_lvds->edid);
- }
mode = drm_mode_duplicate(dev, intel_lvds->fixed_mode);
if (mode == 0)
@@ -840,7 +847,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_device *dev)
+bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds;
@@ -856,37 +863,37 @@ void intel_lvds_init(struct drm_device *dev)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
- return;
+ return false;
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
- return;
+ return false;
}
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
- return;
+ return false;
if (dev_priv->edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
+ return false;
}
}
if (!intel_lvds_ddc_probe(dev, pin)) {
DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
- return;
+ return false;
}
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
- return;
+ return false;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_lvds);
- return;
+ return false;
}
if (!HAS_PCH_SPLIT(dev)) {
@@ -939,7 +946,16 @@ void intel_lvds_init(struct drm_device *dev)
*/
intel_lvds->edid = drm_get_edid(connector,
&dev_priv->gmbus[pin].adapter);
-
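+ /* Keep the EDID only if it actually yields modes; otherwise
+ * discard it and fall back to the defaults below.
+ */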
+ if (intel_lvds->edid) {
+ if (drm_add_edid_modes(connector,
+ intel_lvds->edid)) {
+ drm_mode_connector_update_edid_property(connector,
+ intel_lvds->edid);
+ } else {
+ kfree(intel_lvds->edid);
+ intel_lvds->edid = NULL;
+ }
+ }
if (!intel_lvds->edid) {
/* Didn't get an EDID, so
* Set wide sync ranges so we get all modes
@@ -1020,7 +1036,7 @@ out:
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- return;
+ return true;
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1028,4 +1044,5 @@ failed:
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
kfree(intel_connector);
+ return false;
}
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 917c7dc3cd6..9b0d9a867ae 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -512,6 +512,6 @@ int intel_opregion_setup(struct drm_device *dev)
return 0;
err_out:
- iounmap(opregion->header);
+ iounmap(base);
return err;
}
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index afb96d25219..02ff0a481f4 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -946,7 +946,9 @@ static int check_overlay_src(struct drm_device *dev,
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
- u32 stride_mask, depth, tmp;
+ u32 stride_mask;
+ int depth;
+ u32 tmp;
/* check src dimensions */
if (IS_845G(dev) || IS_I830(dev)) {
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 09f2dc353ae..89a65be8a3f 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -156,28 +156,30 @@ static int init_ring_common(struct drm_device *dev,
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
- DRM_ERROR("%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
I915_WRITE_HEAD(ring, 0);
- DRM_ERROR("%s head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
}
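+ /* Enable 64K head auto-reporting so that intel_wait_ring_buffer()
+ * can poll the status page instead of reading the HEAD register.
+ */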
I915_WRITE_CTL(ring,
((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
- | RING_NO_REPORT | RING_VALID);
+ | RING_REPORT_64K | RING_VALID);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
@@ -654,6 +656,10 @@ void intel_cleanup_ring_buffer(struct drm_device *dev,
i915_gem_object_unpin(ring->gem_object);
drm_gem_object_unreference(ring->gem_object);
ring->gem_object = NULL;
+
+ if (ring->cleanup)
+ ring->cleanup(ring);
+
cleanup_status_page(dev, ring);
}
@@ -688,6 +694,17 @@ int intel_wait_ring_buffer(struct drm_device *dev,
{
unsigned long end;
drm_i915_private_t *dev_priv = dev->dev_private;
+ u32 head;
+
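+ /* First try the head that the hardware auto-reports into the
+ * status page; if it already shows enough space we can return
+ * without waiting.
+ */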
+ head = intel_read_status_page(ring, 4);
+ if (head) {
+ ring->head = head & HEAD_ADDR;
+ ring->space = ring->head - (ring->tail + 8);
+ if (ring->space < 0)
+ ring->space += ring->size;
+ if (ring->space >= n)
+ return 0;
+ }
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
@@ -854,19 +871,125 @@ blt_ring_put_user_irq(struct drm_device *dev,
/* do nothing */
}
+
+/* Workaround for some steppings of SNB: each time the BLT ring's
+ * tail pointer is moved, the first command parsed from the ring
+ * must be MI_BATCH_BUFFER_START.
+ */
+#define NEED_BLT_WORKAROUND(dev) \
+ (IS_GEN6(dev) && (dev->pdev->revision < 8))
+
+static inline struct drm_i915_gem_object *
+to_blt_workaround(struct intel_ring_buffer *ring)
+{
+ return ring->private;
+}
+
+static int blt_ring_init(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ if (NEED_BLT_WORKAROUND(dev)) {
+ struct drm_i915_gem_object *obj;
+ u32 *ptr;
+ int ret;
+
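+ /* Allocate and pin a page containing a single MI_BATCH_BUFFER_END
+ * batch; blt_ring_begin() points MI_BATCH_BUFFER_START at it
+ * before every real command sequence.
+ */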
+ obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+ if (obj == NULL)
+ return -ENOMEM;
+
+ ret = i915_gem_object_pin(&obj->base, 4096);
+ if (ret) {
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ptr = kmap(obj->pages[0]);
+ ptr[0] = MI_BATCH_BUFFER_END;
+ ptr[1] = MI_NOOP;
+ kunmap(obj->pages[0]);
+
+ ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+ if (ret) {
+ i915_gem_object_unpin(&obj->base);
+ drm_gem_object_unreference(&obj->base);
+ return ret;
+ }
+
+ ring->private = obj;
+ }
+
+ return init_ring_common(dev, ring);
+}
+
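+/* With the workaround active, every command sequence must begin with
+ * MI_BATCH_BUFFER_START referencing the dummy batch, so two extra
+ * dwords are reserved for it.
+ */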
+static void blt_ring_begin(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ int num_dwords)
+{
+ if (ring->private) {
+ intel_ring_begin(dev, ring, num_dwords+2);
+ intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
+ intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+ } else
+ intel_ring_begin(dev, ring, num_dwords);
+}
+
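+/* The BLT ring flushes with MI_FLUSH_DW; the trailing address and
+ * data dwords are unused here and left zero.
+ */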
+static void blt_ring_flush(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
+{
+ blt_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_FLUSH_DW);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_emit(dev, ring, 0);
+ intel_ring_advance(dev, ring);
+}
+
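+/* As on the other rings: write the new seqno into the hardware status
+ * page, then raise a user interrupt to wake any waiters.
+ */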
+static u32
+blt_ring_add_request(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ u32 flush_domains)
+{
+ u32 seqno = i915_gem_get_seqno(dev);
+
+ blt_ring_begin(dev, ring, 4);
+ intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(dev, ring,
+ I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(dev, ring, seqno);
+ intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
+ intel_ring_advance(dev, ring);
+
+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+ return seqno;
+}
+
+static void blt_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ i915_gem_object_unpin(ring->private);
+ drm_gem_object_unreference(ring->private);
+ ring->private = NULL;
+}
+
static const struct intel_ring_buffer gen6_blt_ring = {
.name = "blt ring",
.id = RING_BLT,
.mmio_base = BLT_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = init_ring_common,
+ .init = blt_ring_init,
.write_tail = ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = ring_add_request,
+ .flush = blt_ring_flush,
+ .add_request = blt_ring_add_request,
.get_seqno = ring_status_page_get_seqno,
.user_irq_get = blt_ring_get_user_irq,
.user_irq_put = blt_ring_put_user_irq,
.dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .cleanup = blt_ring_cleanup,
};
int intel_init_render_ring_buffer(struct drm_device *dev)
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index a05aff0e576..3126c268198 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -63,6 +63,7 @@ struct intel_ring_buffer {
struct drm_i915_gem_execbuffer2 *exec,
struct drm_clip_rect *cliprects,
uint64_t exec_offset);
+ void (*cleanup)(struct intel_ring_buffer *ring);
/**
* List of objects currently involved in rendering from the
@@ -98,6 +99,8 @@ struct intel_ring_buffer {
wait_queue_head_t irq_queue;
drm_local_map_t map;
+
+ void *private;
};
static inline u32
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de158b76bcd..d97e6cb52d3 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -107,7 +107,8 @@ struct intel_sdvo {
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
- bool has_audio;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
/**
* This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->is_hdmi &&
+ if (intel_sdvo->has_hdmi_monitor &&
!intel_sdvo_set_avi_infoframe(intel_sdvo))
return;
@@ -1063,7 +1064,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
- if (intel_sdvo->has_audio)
+ if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1295,55 +1296,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
return drm_get_edid(connector, &sdvo->ddc);
}
-static struct drm_connector *
-intel_find_analog_connector(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct intel_sdvo *encoder;
-
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- base.base.head) {
- if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (&encoder->base ==
- intel_attached_encoder(connector))
- return connector;
- }
- }
- }
-
- return NULL;
-}
-
-static int
-intel_analog_is_connected(struct drm_device *dev)
-{
- struct drm_connector *analog_connector;
-
- analog_connector = intel_find_analog_connector(dev);
- if (!analog_connector)
- return false;
-
- if (analog_connector->funcs->detect(analog_connector, false) ==
- connector_status_disconnected)
- return false;
-
- return true;
-}
-
/* Mac mini hack -- use the same DDC as the analog connector */
static struct edid *
intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- if (!intel_analog_is_connected(connector->dev))
- return NULL;
-
- return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
}
enum drm_connector_status
@@ -1388,8 +1348,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
/* DDC bus is shared, match EDID to connector type */
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
- intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
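+ /* is_hdmi tracks whether the encoder itself is in HDMI mode;
+ * only then is it worth checking the monitor's HDMI and audio
+ * capabilities.
+ */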
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
}
connector->display_info.raw_edid = NULL;
kfree(edid);
@@ -1398,7 +1360,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
if (intel_sdvo_connector->force_audio)
- intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+ intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
}
return status;
@@ -1415,10 +1377,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
return connector_status_unknown;
- if (intel_sdvo->is_tv) {
- /* add 30ms delay when the output type is SDVO-TV */
+
+ /* Add a 30ms delay when the output type might be TV. */
+ if (intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
mdelay(30);
- }
+
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
@@ -1472,8 +1436,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- drm_mode_connector_update_edid_property(connector, edid);
- drm_add_edid_modes(connector, edid);
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1713,12 +1679,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
intel_sdvo_connector->force_audio = val;
- if (val > 0 && intel_sdvo->has_audio)
+ if (val > 0 && intel_sdvo->has_hdmi_audio)
return 0;
- if (val < 0 && !intel_sdvo->has_audio)
+ if (val < 0 && !intel_sdvo->has_hdmi_audio)
return 0;
- intel_sdvo->has_audio = val > 0;
+ intel_sdvo->has_hdmi_audio = val > 0;
goto done;
}
@@ -2070,6 +2036,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -2077,8 +2045,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
- intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
-
return true;
}