author     Dave Airlie <airlied@redhat.com>  2010-12-22 09:48:54 +1000
committer  Dave Airlie <airlied@redhat.com>  2010-12-22 09:48:54 +1000
commit     ae09f09e94d755ed45c58b695675636c0ec53f9e (patch)
tree       77cb9bac7d81f5b1250b8638a007e10c17b600af /drivers/gpu/drm
parent     1d99e5c57255d188773fb437391df24fe8faf575 (diff)
parent     5909a77ac62cc042f94bd262016cf468a2f96022 (diff)
Merge remote branch 'intel/drm-intel-next' of /ssd/git/drm-next into drm-core-next
* 'intel/drm-intel-next' of /ssd/git/drm-next: (771 commits)
  drm/i915: Undo "Uncouple render/power ctx before suspending"
  drm/i915: Allow the application to choose the constant addressing mode
  drm/i915: dynamic render p-state support for Sandy Bridge
  drm/i915: Enable EI mode for RCx decision making on Sandybridge
  drm/i915/sdvo: Border and stall select became test bits in gen5
  drm/i915: Add Guess-o-matic for pageflip timestamping.
  drm/i915: Add support for precise vblank timestamping (v2)
  drm/i915: Add frame buffer compression on Sandybridge
  drm/i915: Add self-refresh support on Sandybridge
  drm/i915: Wait for vblank before unpinning old fb
  Revert "drm/i915: Avoid using PIPE_CONTROL on Ironlake"
  drm/i915: Pass clock limits down to PLL matcher
  drm/i915: Poll for seqno completion if IRQ is disabled
  drm/i915/ringbuffer: Make IRQ refcnting atomic
  agp/intel: Fix missed cached memory flags setting in i965_write_entry()
  drm/i915/sdvo: Only use the SDVO pin if it is in the valid range
  drm/i915: Enable RC6 autodownclocking on Sandybridge
  drm/i915: Terminate the FORCE WAKE after we have finished reading
  drm/i915/gtt: Clear the cachelines upon resume
  drm/i915: Restore GTT mapping first upon resume
  ...
Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/drm_agpsupport.c              |    6
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c             |   16
-rw-r--r--  drivers/gpu/drm/drm_irq.c                     |   19
-rw-r--r--  drivers/gpu/drm/drm_mm.c                      |   40
-rw-r--r--  drivers/gpu/drm/i915/Makefile                 |    2
-rw-r--r--  drivers/gpu/drm/i915/i915_debugfs.c           |  471
-rw-r--r--  drivers/gpu/drm/i915/i915_dma.c               |  792
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.c               |   68
-rw-r--r--  drivers/gpu/drm/i915/i915_drv.h               |  605
-rw-r--r--  drivers/gpu/drm/i915/i915_gem.c               | 3645
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_debug.c         |   23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_evict.c         |  125
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_execbuffer.c    | 1343
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_gtt.c           |   99
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_tiling.c        |  139
-rw-r--r--  drivers/gpu/drm/i915/i915_irq.c               |  724
-rw-r--r--  drivers/gpu/drm/i915/i915_reg.h               |  194
-rw-r--r--  drivers/gpu/drm/i915/i915_suspend.c           |  144
-rw-r--r--  drivers/gpu/drm/i915/i915_trace.h             |   91
-rw-r--r--  drivers/gpu/drm/i915/intel_acpi.c             |   34
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c          | 1082
-rw-r--r--  drivers/gpu/drm/i915/intel_dp.c               |  207
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h              |   24
-rw-r--r--  drivers/gpu/drm/i915/intel_fb.c               |   25
-rw-r--r--  drivers/gpu/drm/i915/intel_i2c.c              |   21
-rw-r--r--  drivers/gpu/drm/i915/intel_lvds.c             |  160
-rw-r--r--  drivers/gpu/drm/i915/intel_opregion.c         |    8
-rw-r--r--  drivers/gpu/drm/i915/intel_overlay.c          |  116
-rw-r--r--  drivers/gpu/drm/i915/intel_panel.c            |   52
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c       | 1056
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.h       |  141
-rw-r--r--  drivers/gpu/drm/i915/intel_sdvo.c             |  100
-rw-r--r--  drivers/gpu/drm/i915/intel_tv.c               |   14
-rw-r--r--  drivers/gpu/drm/radeon/atom.c                 |    1
-rw-r--r--  drivers/gpu/drm/radeon/r600.c                 |    6
-rw-r--r--  drivers/gpu/drm/radeon/r600_cs.c              |    2
-rw-r--r--  drivers/gpu/drm/radeon/r600_reg.h             |    1
-rw-r--r--  drivers/gpu/drm/radeon/radeon_atombios.c      |   16
-rw-r--r--  drivers/gpu/drm/radeon/radeon_bios.c          |   13
-rw-r--r--  drivers/gpu/drm/radeon/radeon_combios.c       |    2
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c    |   34
-rw-r--r--  drivers/gpu/drm/radeon/radeon_device.c        |    4
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c        |   13
43 files changed, 6870 insertions, 4808 deletions
diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c
index 252fdb98b73..0cb2ba50af5 100644
--- a/drivers/gpu/drm/drm_agpsupport.c
+++ b/drivers/gpu/drm/drm_agpsupport.c
@@ -466,10 +466,4 @@ drm_agp_bind_pages(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_agp_bind_pages);
-void drm_agp_chipset_flush(struct drm_device *dev)
-{
- agp_flush_chipset(dev->agp->bridge);
-}
-EXPORT_SYMBOL(drm_agp_chipset_flush);
-
#endif /* __OS_HAS_AGP */
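
The deleted drm_agp_chipset_flush() was a thin wrapper around agp_flush_chipset(); its only in-tree user, i915, now flushes chipset write buffers through the intel-gtt layer instead. A minimal sketch of the replacement call, assuming the intel_gtt_chipset_flush() helper exported by intel-gtt in this merge window (the caller below is hypothetical):

	/* Sketch: flushing chipset write buffers without the drm_agp
	 * wrapper.  example_flush_gtt_writes() is illustrative only;
	 * the real callers live in i915_gem.c. */
	static void example_flush_gtt_writes(struct drm_device *dev)
	{
		intel_gtt_chipset_flush();  /* replaces drm_agp_chipset_flush(dev) */
	}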
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 4c200931a6b..c6b2e27a446 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -241,7 +241,7 @@ void drm_helper_disable_unused_functions(struct drm_device *dev)
}
list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
- if (!drm_helper_encoder_in_use(encoder)) {
+ if (encoder->crtc && !drm_helper_encoder_in_use(encoder)) {
drm_encoder_disable(encoder);
/* disconnect encoder from any connector */
encoder->crtc = NULL;
@@ -482,6 +482,7 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
int count = 0, ro, fail = 0;
struct drm_crtc_helper_funcs *crtc_funcs;
int ret = 0;
+ int i;
DRM_DEBUG_KMS("\n");
@@ -677,6 +678,12 @@ int drm_crtc_helper_set_config(struct drm_mode_set *set)
if (ret != 0)
goto fail;
}
+ DRM_DEBUG_KMS("Setting connector DPMS state to on\n");
+ for (i = 0; i < set->num_connectors; i++) {
+ DRM_DEBUG_KMS("\t[CONNECTOR:%d:%s] set DPMS on\n", set->connectors[i]->base.id,
+ drm_get_connector_name(set->connectors[i]));
+ set->connectors[i]->dpms = DRM_MODE_DPMS_ON;
+ }
kfree(save_connectors);
kfree(save_encoders);
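
For context, the connectors forced to DRM_MODE_DPMS_ON above are the ones the caller handed in through struct drm_mode_set. A hedged sketch of how a KMS caller typically fills that structure before invoking the helper (crtc, fb, mode and connector are assumed to have been looked up elsewhere):

	/* Sketch: a minimal drm_mode_set for one CRTC/connector pair. */
	struct drm_connector *connectors[1] = { connector };
	struct drm_mode_set set = {
		.crtc = crtc,
		.fb = fb,
		.mode = mode,
		.x = 0,
		.y = 0,
		.connectors = connectors,
		.num_connectors = 1,
	};
	int ret = drm_crtc_helper_set_config(&set);
	/* on success the helper now leaves each connector's dpms field on */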
@@ -852,7 +859,7 @@ static void output_poll_execute(struct work_struct *work)
struct delayed_work *delayed_work = to_delayed_work(work);
struct drm_device *dev = container_of(delayed_work, struct drm_device, mode_config.output_poll_work);
struct drm_connector *connector;
- enum drm_connector_status old_status, status;
+ enum drm_connector_status old_status;
bool repoll = false, changed = false;
if (!drm_kms_helper_poll)
@@ -877,8 +884,9 @@ static void output_poll_execute(struct work_struct *work)
!(connector->polled & DRM_CONNECTOR_POLL_HPD))
continue;
- status = connector->funcs->detect(connector, false);
- if (old_status != status)
+ connector->status = connector->funcs->detect(connector, false);
+ DRM_DEBUG_KMS("connector status updated to %d\n", connector->status);
+ if (old_status != connector->status)
changed = true;
}
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 55160d7c38b..8304c42195f 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -1047,10 +1047,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
struct timeval now;
unsigned long flags;
unsigned int seq;
+ int ret;
e = kzalloc(sizeof *e, GFP_KERNEL);
- if (e == NULL)
- return -ENOMEM;
+ if (e == NULL) {
+ ret = -ENOMEM;
+ goto err_put;
+ }
e->pipe = pipe;
e->base.pid = current->pid;
@@ -1064,9 +1067,8 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_lock_irqsave(&dev->event_lock, flags);
if (file_priv->event_space < sizeof e->event) {
- spin_unlock_irqrestore(&dev->event_lock, flags);
- kfree(e);
- return -ENOMEM;
+ ret = -EBUSY;
+ goto err_unlock;
}
file_priv->event_space -= sizeof e->event;
@@ -1103,6 +1105,13 @@ static int drm_queue_vblank_event(struct drm_device *dev, int pipe,
spin_unlock_irqrestore(&dev->event_lock, flags);
return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ kfree(e);
+err_put:
+ drm_vblank_put(dev, e->pipe);
+ return ret;
}
/**
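
The rewritten error handling follows the usual kernel goto-ladder: acquire resources top-down, release them bottom-up, with one exit label per partially-acquired state. (Note that jumping to err_put when kzalloc() fails dereferences the NULL e via e->pipe; a cautious variant would release the vblank reference without touching e.) The pattern in isolation, with hypothetical helpers take_ref()/drop_ref()/still_valid():

	/* Sketch of the goto-unwind idiom used above (hypothetical helpers). */
	int example_acquire(struct device *dev)
	{
		void *buf;
		int ret;

		ret = take_ref(dev);			/* resource 1 */
		if (ret)
			return ret;

		buf = kzalloc(SZ_4K, GFP_KERNEL);	/* resource 2 */
		if (buf == NULL) {
			ret = -ENOMEM;
			goto err_ref;	/* release only what is held */
		}

		if (!still_valid(dev)) {
			ret = -EBUSY;
			goto err_free;
		}
		return 0;

	err_free:
		kfree(buf);
	err_ref:
		drop_ref(dev);
		return ret;
	}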
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index a6bfc302ed9..c59515ba7e6 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -392,10 +392,36 @@ void drm_mm_init_scan(struct drm_mm *mm, unsigned long size,
mm->scanned_blocks = 0;
mm->scan_hit_start = 0;
mm->scan_hit_size = 0;
+ mm->scan_check_range = 0;
}
EXPORT_SYMBOL(drm_mm_init_scan);
/**
+ * Initialize LRU scanning.
+ *
+ * This simply sets up the scanning routines with the parameters for the desired
+ * hole. This version is for range-restricted scans.
+ *
+ * Warning: As long as the scan list is non-empty, no other operations than
+ * adding/removing nodes to/from the scan list are allowed.
+ */
+void drm_mm_init_scan_with_range(struct drm_mm *mm, unsigned long size,
+ unsigned alignment,
+ unsigned long start,
+ unsigned long end)
+{
+ mm->scan_alignment = alignment;
+ mm->scan_size = size;
+ mm->scanned_blocks = 0;
+ mm->scan_hit_start = 0;
+ mm->scan_hit_size = 0;
+ mm->scan_start = start;
+ mm->scan_end = end;
+ mm->scan_check_range = 1;
+}
+EXPORT_SYMBOL(drm_mm_init_scan_with_range);
+
+/**
* Add a node to the scan list that might be freed to make space for the desired
* hole.
*
@@ -406,6 +432,8 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
struct drm_mm *mm = node->mm;
struct list_head *prev_free, *next_free;
struct drm_mm_node *prev_node, *next_node;
+ unsigned long adj_start;
+ unsigned long adj_end;
mm->scanned_blocks++;
@@ -452,7 +480,17 @@ int drm_mm_scan_add_block(struct drm_mm_node *node)
node->free_stack.prev = prev_free;
node->free_stack.next = next_free;
- if (check_free_hole(node->start, node->start + node->size,
+ if (mm->scan_check_range) {
+ adj_start = node->start < mm->scan_start ?
+ mm->scan_start : node->start;
+ adj_end = node->start + node->size > mm->scan_end ?
+ mm->scan_end : node->start + node->size;
+ } else {
+ adj_start = node->start;
+ adj_end = node->start + node->size;
+ }
+
+ if (check_free_hole(adj_start, adj_end,
mm->scan_size, mm->scan_alignment)) {
mm->scan_hit_start = node->start;
mm->scan_hit_size = node->size;
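
drm_mm_init_scan_with_range() mirrors drm_mm_init_scan() but clamps every candidate hole to [start, end), which is what lets eviction restrict itself to, say, the mappable aperture. A hedged usage sketch of the scanning protocol (the candidates list, the scan_list member and evict() are hypothetical; per the warning above, every node added to a scan must be removed again before any other drm_mm operation):

	/* Sketch: searching for an evictable hole inside a sub-range. */
	struct drm_mm_node *node, *next;
	LIST_HEAD(unwind);

	drm_mm_init_scan_with_range(mm, size, alignment, 0, mappable_end);

	list_for_each_entry(node, &candidates, node_list) {
		list_add(&node->scan_list, &unwind);	/* prepend: newest first */
		if (drm_mm_scan_add_block(node))
			break;				/* suitable hole found */
	}

	/* Nodes must leave the scan in reverse order of addition; walking
	 * the prepended list does exactly that.  A nonzero return marks a
	 * node that makes up the hole and therefore must be evicted. */
	list_for_each_entry_safe(node, next, &unwind, scan_list) {
		if (drm_mm_scan_remove_block(node))
			evict(node);
		list_del(&node->scan_list);
	}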
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index fdc833d5cc7..0ae6a7c5020 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -9,6 +9,8 @@ i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o \
i915_gem.o \
i915_gem_debug.o \
i915_gem_evict.o \
+ i915_gem_execbuffer.o \
+ i915_gem_gtt.o \
i915_gem_tiling.o \
i915_trace_points.o \
intel_display.o \
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 1f4f3ceb63c..92f75782c33 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -32,6 +32,7 @@
#include "drmP.h"
#include "drm.h"
#include "intel_drv.h"
+#include "intel_ringbuffer.h"
#include "i915_drm.h"
#include "i915_drv.h"
@@ -72,7 +73,6 @@ static int i915_capabilities(struct seq_file *m, void *data)
B(is_broadwater);
B(is_crestline);
B(has_fbc);
- B(has_rc6);
B(has_pipe_cxsr);
B(has_hotplug);
B(cursor_needs_physical);
@@ -86,19 +86,19 @@ static int i915_capabilities(struct seq_file *m, void *data)
return 0;
}
-static const char *get_pin_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_pin_flag(struct drm_i915_gem_object *obj)
{
- if (obj_priv->user_pin_count > 0)
+ if (obj->user_pin_count > 0)
return "P";
- else if (obj_priv->pin_count > 0)
+ else if (obj->pin_count > 0)
return "p";
else
return " ";
}
-static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
+static const char *get_tiling_flag(struct drm_i915_gem_object *obj)
{
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
default:
case I915_TILING_NONE: return " ";
case I915_TILING_X: return "X";
@@ -109,7 +109,7 @@ static const char *get_tiling_flag(struct drm_i915_gem_object *obj_priv)
static void
describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
- seq_printf(m, "%p: %s%s %8zd %08x %08x %d%s%s",
+ seq_printf(m, "%p: %s%s %8zd %04x %04x %d %d%s%s",
&obj->base,
get_pin_flag(obj),
get_tiling_flag(obj),
@@ -117,6 +117,7 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
obj->base.read_domains,
obj->base.write_domain,
obj->last_rendering_seqno,
+ obj->last_fenced_seqno,
obj->dirty ? " dirty" : "",
obj->madv == I915_MADV_DONTNEED ? " purgeable" : "");
if (obj->base.name)
@@ -124,7 +125,17 @@ describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
if (obj->fence_reg != I915_FENCE_REG_NONE)
seq_printf(m, " (fence: %d)", obj->fence_reg);
if (obj->gtt_space != NULL)
- seq_printf(m, " (gtt_offset: %08x)", obj->gtt_offset);
+ seq_printf(m, " (gtt offset: %08x, size: %08x)",
+ obj->gtt_offset, (unsigned int)obj->gtt_space->size);
+ if (obj->pin_mappable || obj->fault_mappable) {
+ char s[3], *t = s;
+ if (obj->pin_mappable)
+ *t++ = 'p';
+ if (obj->fault_mappable)
+ *t++ = 'f';
+ *t = '\0';
+ seq_printf(m, " (%s mappable)", s);
+ }
if (obj->ring != NULL)
seq_printf(m, " (%s)", obj->ring->name);
}
@@ -136,7 +147,7 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
struct list_head *head;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
size_t total_obj_size, total_gtt_size;
int count, ret;
@@ -171,12 +182,12 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
}
total_obj_size = total_gtt_size = count = 0;
- list_for_each_entry(obj_priv, head, mm_list) {
+ list_for_each_entry(obj, head, mm_list) {
seq_printf(m, " ");
- describe_obj(m, obj_priv);
+ describe_obj(m, obj);
seq_printf(m, "\n");
- total_obj_size += obj_priv->base.size;
- total_gtt_size += obj_priv->gtt_space->size;
+ total_obj_size += obj->base.size;
+ total_gtt_size += obj->gtt_space->size;
count++;
}
mutex_unlock(&dev->struct_mutex);
@@ -186,24 +197,79 @@ static int i915_gem_object_list_info(struct seq_file *m, void *data)
return 0;
}
+#define count_objects(list, member) do { \
+ list_for_each_entry(obj, list, member) { \
+ size += obj->gtt_space->size; \
+ ++count; \
+ if (obj->map_and_fenceable) { \
+ mappable_size += obj->gtt_space->size; \
+ ++mappable_count; \
+ } \
+ } \
+} while(0)
+
static int i915_gem_object_info(struct seq_file *m, void* data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 count, mappable_count;
+ size_t size, mappable_size;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "%u objects\n", dev_priv->mm.object_count);
- seq_printf(m, "%zu object bytes\n", dev_priv->mm.object_memory);
- seq_printf(m, "%u pinned\n", dev_priv->mm.pin_count);
- seq_printf(m, "%zu pin bytes\n", dev_priv->mm.pin_memory);
- seq_printf(m, "%u objects in gtt\n", dev_priv->mm.gtt_count);
- seq_printf(m, "%zu gtt bytes\n", dev_priv->mm.gtt_memory);
- seq_printf(m, "%zu gtt total\n", dev_priv->mm.gtt_total);
+ seq_printf(m, "%u objects, %zu bytes\n",
+ dev_priv->mm.object_count,
+ dev_priv->mm.object_memory);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.gtt_list, gtt_list);
+ seq_printf(m, "%u [%u] objects, %zu [%zu] bytes in gtt\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.active_list, mm_list);
+ count_objects(&dev_priv->mm.flushing_list, mm_list);
+ seq_printf(m, " %u [%u] active objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.pinned_list, mm_list);
+ seq_printf(m, " %u [%u] pinned objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.inactive_list, mm_list);
+ seq_printf(m, " %u [%u] inactive objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ count_objects(&dev_priv->mm.deferred_free_list, mm_list);
+ seq_printf(m, " %u [%u] freed objects, %zu [%zu] bytes\n",
+ count, mappable_count, size, mappable_size);
+
+ size = count = mappable_size = mappable_count = 0;
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ if (obj->fault_mappable) {
+ size += obj->gtt_space->size;
+ ++count;
+ }
+ if (obj->pin_mappable) {
+ mappable_size += obj->gtt_space->size;
+ ++mappable_count;
+ }
+ }
+ seq_printf(m, "%u pinned mappable objects, %zu bytes\n",
+ mappable_count, mappable_size);
+ seq_printf(m, "%u fault mappable objects, %zu bytes\n",
+ count, size);
+
+ seq_printf(m, "%zu [%zu] gtt total\n",
+ dev_priv->mm.gtt_total, dev_priv->mm.mappable_gtt_total);
mutex_unlock(&dev->struct_mutex);
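
count_objects() above is wrapped in do { ... } while(0) so that the multi-statement macro behaves as a single statement at every call site; without the wrapper, using it as the body of an un-braced if/else would splice the statements apart. A tiny illustration (the counters are hypothetical):

	/* Why do { ... } while(0): the macro stays one statement, so the
	 * trailing else still binds to the if as intended. */
	#define BUMP(x) do { (x)++; total++; } while(0)

	if (obj->pin_count)
		BUMP(pinned_count);
	else
		BUMP(normal_count);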
@@ -243,14 +309,14 @@ static int i915_gem_pageflip_info(struct seq_file *m, void *data)
seq_printf(m, "%d prepares\n", work->pending);
if (work->old_fb_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->old_fb_obj);
- if(obj_priv)
- seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ struct drm_i915_gem_object *obj = work->old_fb_obj;
+ if (obj)
+ seq_printf(m, "Old framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
if (work->pending_flip_obj) {
- struct drm_i915_gem_object *obj_priv = to_intel_bo(work->pending_flip_obj);
- if(obj_priv)
- seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj_priv->gtt_offset );
+ struct drm_i915_gem_object *obj = work->pending_flip_obj;
+ if (obj)
+ seq_printf(m, "New framebuffer gtt_offset 0x%08x\n", obj->gtt_offset);
}
}
spin_unlock_irqrestore(&dev->event_lock, flags);
@@ -265,44 +331,80 @@ static int i915_gem_request_info(struct seq_file *m, void *data)
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_gem_request *gem_request;
- int ret;
+ int ret, count;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- seq_printf(m, "Request:\n");
- list_for_each_entry(gem_request, &dev_priv->render_ring.request_list,
- list) {
- seq_printf(m, " %d @ %d\n",
- gem_request->seqno,
- (int) (jiffies - gem_request->emitted_jiffies));
+ count = 0;
+ if (!list_empty(&dev_priv->ring[RCS].request_list)) {
+ seq_printf(m, "Render requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[RCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->ring[VCS].request_list)) {
+ seq_printf(m, "BSD requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[VCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
+ }
+ if (!list_empty(&dev_priv->ring[BCS].request_list)) {
+ seq_printf(m, "BLT requests:\n");
+ list_for_each_entry(gem_request,
+ &dev_priv->ring[BCS].request_list,
+ list) {
+ seq_printf(m, " %d @ %d\n",
+ gem_request->seqno,
+ (int) (jiffies - gem_request->emitted_jiffies));
+ }
+ count++;
}
mutex_unlock(&dev->struct_mutex);
+ if (count == 0)
+ seq_printf(m, "No requests\n");
+
return 0;
}
+static void i915_ring_seqno_info(struct seq_file *m,
+ struct intel_ring_buffer *ring)
+{
+ if (ring->get_seqno) {
+ seq_printf(m, "Current sequence (%s): %d\n",
+ ring->name, ring->get_seqno(ring));
+ seq_printf(m, "Waiter sequence (%s): %d\n",
+ ring->name, ring->waiting_seqno);
+ seq_printf(m, "IRQ sequence (%s): %d\n",
+ ring->name, ring->irq_seqno);
+ }
+}
+
static int i915_gem_seqno_info(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- if (dev_priv->render_ring.status_page.page_addr != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
- }
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_ring_seqno_info(m, &dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
@@ -315,7 +417,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
@@ -354,16 +456,8 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
}
seq_printf(m, "Interrupts received: %d\n",
atomic_read(&dev_priv->irq_received));
- if (dev_priv->render_ring.status_page.page_addr != NULL) {
- seq_printf(m, "Current sequence: %d\n",
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring));
- } else {
- seq_printf(m, "Current sequence: hws uninitialized\n");
- }
- seq_printf(m, "Waiter sequence: %d\n",
- dev_priv->mm.waiting_gem_seqno);
- seq_printf(m, "IRQ sequence: %d\n",
- dev_priv->mm.irq_gem_seqno);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_ring_seqno_info(m, &dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -383,29 +477,17 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
seq_printf(m, "Reserved fences = %d\n", dev_priv->fence_reg_start);
seq_printf(m, "Total fences = %d\n", dev_priv->num_fence_regs);
for (i = 0; i < dev_priv->num_fence_regs; i++) {
- struct drm_gem_object *obj = dev_priv->fence_regs[i].obj;
+ struct drm_i915_gem_object *obj = dev_priv->fence_regs[i].obj;
- if (obj == NULL) {
- seq_printf(m, "Fenced object[%2d] = unused\n", i);
- } else {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(obj);
- seq_printf(m, "Fenced object[%2d] = %p: %s "
- "%08x %08zx %08x %s %08x %08x %d",
- i, obj, get_pin_flag(obj_priv),
- obj_priv->gtt_offset,
- obj->size, obj_priv->stride,
- get_tiling_flag(obj_priv),
- obj->read_domains, obj->write_domain,
- obj_priv->last_rendering_seqno);
- if (obj->name)
- seq_printf(m, " (name: %d)", obj->name);
- seq_printf(m, "\n");
- }
+ seq_printf(m, "Fenced object[%2d] = ", i);
+ if (obj == NULL)
+ seq_printf(m, "unused");
+ else
+ describe_obj(m, obj);
+ seq_printf(m, "\n");
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&dev->struct_mutex);
return 0;
}
@@ -414,10 +496,12 @@ static int i915_hws_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ struct intel_ring_buffer *ring;
volatile u32 *hws;
+ int i;
- hws = (volatile u32 *)dev_priv->render_ring.status_page.page_addr;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ hws = (volatile u32 *)ring->status_page.page_addr;
if (hws == NULL)
return 0;
@@ -431,14 +515,14 @@ static int i915_hws_info(struct seq_file *m, void *data)
static void i915_dump_object(struct seq_file *m,
struct io_mapping *mapping,
- struct drm_i915_gem_object *obj_priv)
+ struct drm_i915_gem_object *obj)
{
int page, page_count, i;
- page_count = obj_priv->base.size / PAGE_SIZE;
+ page_count = obj->base.size / PAGE_SIZE;
for (page = 0; page < page_count; page++) {
u32 *mem = io_mapping_map_wc(mapping,
- obj_priv->gtt_offset + page * PAGE_SIZE);
+ obj->gtt_offset + page * PAGE_SIZE);
for (i = 0; i < PAGE_SIZE; i += 4)
seq_printf(m, "%08x : %08x\n", i, mem[i / 4]);
io_mapping_unmap(mem);
@@ -450,25 +534,21 @@ static int i915_batchbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- obj = &obj_priv->base;
- if (obj->read_domains & I915_GEM_DOMAIN_COMMAND) {
- seq_printf(m, "--- gtt_offset = 0x%08x\n",
- obj_priv->gtt_offset);
- i915_dump_object(m, dev_priv->mm.gtt_mapping, obj_priv);
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_COMMAND) {
+ seq_printf(m, "--- gtt_offset = 0x%08x\n", obj->gtt_offset);
+ i915_dump_object(m, dev_priv->mm.gtt_mapping, obj);
}
}
mutex_unlock(&dev->struct_mutex);
-
return 0;
}
@@ -477,19 +557,21 @@ static int i915_ringbuffer_data(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
int ret;
ret = mutex_lock_interruptible(&dev->struct_mutex);
if (ret)
return ret;
- if (!dev_priv->render_ring.gem_object) {
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ if (!ring->obj) {
seq_printf(m, "No ringbuffer setup\n");
} else {
- u8 *virt = dev_priv->render_ring.virtual_start;
+ u8 *virt = ring->virtual_start;
uint32_t off;
- for (off = 0; off < dev_priv->render_ring.size; off += 4) {
+ for (off = 0; off < ring->size; off += 4) {
uint32_t *ptr = (uint32_t *)(virt + off);
seq_printf(m, "%08x : %08x\n", off, *ptr);
}
@@ -504,19 +586,38 @@ static int i915_ringbuffer_info(struct seq_file *m, void *data)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- unsigned int head, tail;
+ struct intel_ring_buffer *ring;
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring = &dev_priv->ring[(uintptr_t)node->info_ent->data];
+ if (ring->size == 0)
+ return 0;
- seq_printf(m, "RingHead : %08x\n", head);
- seq_printf(m, "RingTail : %08x\n", tail);
- seq_printf(m, "RingSize : %08lx\n", dev_priv->render_ring.size);
- seq_printf(m, "Acthd : %08x\n", I915_READ(INTEL_INFO(dev)->gen >= 4 ? ACTHD_I965 : ACTHD));
+ seq_printf(m, "Ring %s:\n", ring->name);
+ seq_printf(m, " Head : %08x\n", I915_READ_HEAD(ring) & HEAD_ADDR);
+ seq_printf(m, " Tail : %08x\n", I915_READ_TAIL(ring) & TAIL_ADDR);
+ seq_printf(m, " Size : %08x\n", ring->size);
+ seq_printf(m, " Active : %08x\n", intel_ring_get_active_head(ring));
+ seq_printf(m, " NOPID : %08x\n", I915_READ_NOPID(ring));
+ if (IS_GEN6(dev)) {
+ seq_printf(m, " Sync 0 : %08x\n", I915_READ_SYNC_0(ring));
+ seq_printf(m, " Sync 1 : %08x\n", I915_READ_SYNC_1(ring));
+ }
+ seq_printf(m, " Control : %08x\n", I915_READ_CTL(ring));
+ seq_printf(m, " Start : %08x\n", I915_READ_START(ring));
return 0;
}
+static const char *ring_str(int ring)
+{
+ switch (ring) {
+ case RING_RENDER: return " render";
+ case RING_BSD: return " bsd";
+ case RING_BLT: return " blt";
+ default: return "";
+ }
+}
+
static const char *pin_flag(int pinned)
{
if (pinned > 0)
@@ -547,6 +648,36 @@ static const char *purgeable_flag(int purgeable)
return purgeable ? " purgeable" : "";
}
+static void print_error_buffers(struct seq_file *m,
+ const char *name,
+ struct drm_i915_error_buffer *err,
+ int count)
+{
+ seq_printf(m, "%s [%d]:\n", name, count);
+
+ while (count--) {
+ seq_printf(m, " %08x %8zd %04x %04x %08x%s%s%s%s%s",
+ err->gtt_offset,
+ err->size,
+ err->read_domains,
+ err->write_domain,
+ err->seqno,
+ pin_flag(err->pinned),
+ tiling_flag(err->tiling),
+ dirty_flag(err->dirty),
+ purgeable_flag(err->purgeable),
+ ring_str(err->ring));
+
+ if (err->name)
+ seq_printf(m, " (name: %d)", err->name);
+ if (err->fence_reg != I915_FENCE_REG_NONE)
+ seq_printf(m, " (fence: %d)", err->fence_reg);
+
+ seq_printf(m, "\n");
+ err++;
+ }
+}
+
static int i915_error_state(struct seq_file *m, void *unused)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -568,41 +699,46 @@ static int i915_error_state(struct seq_file *m, void *unused)
error->time.tv_usec);
seq_printf(m, "PCI ID: 0x%04x\n", dev->pci_device);
seq_printf(m, "EIR: 0x%08x\n", error->eir);
- seq_printf(m, " PGTBL_ER: 0x%08x\n", error->pgtbl_er);
- seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, "PGTBL_ER: 0x%08x\n", error->pgtbl_er);
+ if (INTEL_INFO(dev)->gen >= 6) {
+ seq_printf(m, "ERROR: 0x%08x\n", error->error);
+ seq_printf(m, "Blitter command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->bcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->bcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->bcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->bcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->bcs_seqno);
+ seq_printf(m, "Video (BSD) command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->vcs_acthd);
+ seq_printf(m, " IPEIR: 0x%08x\n", error->vcs_ipeir);
+ seq_printf(m, " IPEHR: 0x%08x\n", error->vcs_ipehr);
+ seq_printf(m, " INSTDONE: 0x%08x\n", error->vcs_instdone);
+ seq_printf(m, " seqno: 0x%08x\n", error->vcs_seqno);
+ }
+ seq_printf(m, "Render command stream:\n");
+ seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
seq_printf(m, " IPEIR: 0x%08x\n", error->ipeir);
seq_printf(m, " IPEHR: 0x%08x\n", error->ipehr);
seq_printf(m, " INSTDONE: 0x%08x\n", error->instdone);
- seq_printf(m, " ACTHD: 0x%08x\n", error->acthd);
if (INTEL_INFO(dev)->gen >= 4) {
- seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
seq_printf(m, " INSTDONE1: 0x%08x\n", error->instdone1);
+ seq_printf(m, " INSTPS: 0x%08x\n", error->instps);
}
- seq_printf(m, "seqno: 0x%08x\n", error->seqno);
-
- if (error->active_bo_count) {
- seq_printf(m, "Buffers [%d]:\n", error->active_bo_count);
-
- for (i = 0; i < error->active_bo_count; i++) {
- seq_printf(m, " %08x %8zd %08x %08x %08x%s%s%s%s",
- error->active_bo[i].gtt_offset,
- error->active_bo[i].size,
- error->active_bo[i].read_domains,
- error->active_bo[i].write_domain,
- error->active_bo[i].seqno,
- pin_flag(error->active_bo[i].pinned),
- tiling_flag(error->active_bo[i].tiling),
- dirty_flag(error->active_bo[i].dirty),
- purgeable_flag(error->active_bo[i].purgeable));
-
- if (error->active_bo[i].name)
- seq_printf(m, " (name: %d)", error->active_bo[i].name);
- if (error->active_bo[i].fence_reg != I915_FENCE_REG_NONE)
- seq_printf(m, " (fence: %d)", error->active_bo[i].fence_reg);
-
- seq_printf(m, "\n");
- }
- }
+ seq_printf(m, " INSTPM: 0x%08x\n", error->instpm);
+ seq_printf(m, " seqno: 0x%08x\n", error->seqno);
+
+ for (i = 0; i < 16; i++)
+ seq_printf(m, " fence[%d] = %08llx\n", i, error->fence[i]);
+
+ if (error->active_bo)
+ print_error_buffers(m, "Active",
+ error->active_bo,
+ error->active_bo_count);
+
+ if (error->pinned_bo)
+ print_error_buffers(m, "Pinned",
+ error->pinned_bo,
+ error->pinned_bo_count);
for (i = 0; i < ARRAY_SIZE(error->batchbuffer); i++) {
if (error->batchbuffer[i]) {
@@ -635,6 +771,9 @@ static int i915_error_state(struct seq_file *m, void *unused)
if (error->overlay)
intel_overlay_print_error_state(m, error->overlay);
+ if (error->display)
+ intel_display_print_error_state(m, dev, error->display);
+
out:
spin_unlock_irqrestore(&dev_priv->error_lock, flags);
@@ -658,15 +797,51 @@ static int i915_cur_delayinfo(struct seq_file *m, void *unused)
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- u16 rgvswctl = I915_READ16(MEMSWCTL);
- u16 rgvstat = I915_READ16(MEMSTAT_ILK);
- seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
- seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
- seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
- MEMSTAT_VID_SHIFT);
- seq_printf(m, "Current P-state: %d\n",
- (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ if (IS_GEN5(dev)) {
+ u16 rgvswctl = I915_READ16(MEMSWCTL);
+ u16 rgvstat = I915_READ16(MEMSTAT_ILK);
+
+ seq_printf(m, "Requested P-state: %d\n", (rgvswctl >> 8) & 0xf);
+ seq_printf(m, "Requested VID: %d\n", rgvswctl & 0x3f);
+ seq_printf(m, "Current VID: %d\n", (rgvstat & MEMSTAT_VID_MASK) >>
+ MEMSTAT_VID_SHIFT);
+ seq_printf(m, "Current P-state: %d\n",
+ (rgvstat & MEMSTAT_PSTATE_MASK) >> MEMSTAT_PSTATE_SHIFT);
+ } else if (IS_GEN6(dev)) {
+ u32 gt_perf_status = I915_READ(GEN6_GT_PERF_STATUS);
+ u32 rp_state_limits = I915_READ(GEN6_RP_STATE_LIMITS);
+ u32 rp_state_cap = I915_READ(GEN6_RP_STATE_CAP);
+ int max_freq;
+
+ /* RPSTAT1 is in the GT power well */
+ __gen6_force_wake_get(dev_priv);
+
+ seq_printf(m, "GT_PERF_STATUS: 0x%08x\n", gt_perf_status);
+ seq_printf(m, "RPSTAT1: 0x%08x\n", I915_READ(GEN6_RPSTAT1));
+ seq_printf(m, "Render p-state ratio: %d\n",
+ (gt_perf_status & 0xff00) >> 8);
+ seq_printf(m, "Render p-state VID: %d\n",
+ gt_perf_status & 0xff);
+ seq_printf(m, "Render p-state limit: %d\n",
+ rp_state_limits & 0xff);
+
+ max_freq = (rp_state_cap & 0xff0000) >> 16;
+ seq_printf(m, "Lowest (RPN) frequency: %dMHz\n",
+ max_freq * 100);
+
+ max_freq = (rp_state_cap & 0xff00) >> 8;
+ seq_printf(m, "Nominal (RP1) frequency: %dMHz\n",
+ max_freq * 100);
+
+ max_freq = rp_state_cap & 0xff;
+ seq_printf(m, "Max non-overclocked (RP0) frequency: %dMHz\n",
+ max_freq * 100);
+
+ __gen6_force_wake_put(dev_priv);
+ } else {
+ seq_printf(m, "no P-state info available\n");
+ }
return 0;
}
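
RPSTAT1 and the other render p-state registers live in the GT power well on Sandybridge, which may be powered down between accesses; the __gen6_force_wake_get()/__gen6_force_wake_put() pair brackets the MMIO reads so the well stays up while they run. The pattern in miniature, mirroring the hunk above (example_read_rpstat() is illustrative):

	/* Sketch: reading a GT power-well register safely on gen6. */
	static u32 example_read_rpstat(struct drm_i915_private *dev_priv)
	{
		u32 val;

		__gen6_force_wake_get(dev_priv);	/* keep the GT well awake */
		val = I915_READ(GEN6_RPSTAT1);
		__gen6_force_wake_put(dev_priv);	/* let it sleep again */

		return val;
	}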
@@ -794,7 +969,7 @@ static int i915_sr_status(struct seq_file *m, void *unused)
drm_i915_private_t *dev_priv = dev->dev_private;
bool sr_enabled = false;
- if (IS_GEN5(dev))
+ if (HAS_PCH_SPLIT(dev))
sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
@@ -886,7 +1061,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
- describe_obj(m, to_intel_bo(fb->obj));
+ describe_obj(m, fb->obj);
seq_printf(m, "\n");
list_for_each_entry(fb, &dev->mode_config.fb_list, base.head) {
@@ -898,7 +1073,7 @@ static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
fb->base.height,
fb->base.depth,
fb->base.bits_per_pixel);
- describe_obj(m, to_intel_bo(fb->obj));
+ describe_obj(m, fb->obj);
seq_printf(m, "\n");
}
@@ -943,7 +1118,6 @@ i915_wedged_write(struct file *filp,
loff_t *ppos)
{
struct drm_device *dev = filp->private_data;
- drm_i915_private_t *dev_priv = dev->dev_private;
char buf[20];
int val = 1;
@@ -959,12 +1133,7 @@ i915_wedged_write(struct file *filp,
}
DRM_INFO("Manually setting wedged to %d\n", val);
-
- atomic_set(&dev_priv->mm.wedged, val);
- if (val) {
- wake_up_all(&dev_priv->irq_queue);
- queue_work(dev_priv->wq, &dev_priv->error_work);
- }
+ i915_handle_error(dev, val);
return cnt;
}
@@ -1028,9 +1197,15 @@ static struct drm_info_list i915_debugfs_list[] = {
{"i915_gem_seqno", i915_gem_seqno_info, 0},
{"i915_gem_fence_regs", i915_gem_fence_regs_info, 0},
{"i915_gem_interrupt", i915_interrupt_info, 0},
- {"i915_gem_hws", i915_hws_info, 0},
- {"i915_ringbuffer_data", i915_ringbuffer_data, 0},
- {"i915_ringbuffer_info", i915_ringbuffer_info, 0},
+ {"i915_gem_hws", i915_hws_info, 0, (void *)RCS},
+ {"i915_gem_hws_blt", i915_hws_info, 0, (void *)BCS},
+ {"i915_gem_hws_bsd", i915_hws_info, 0, (void *)VCS},
+ {"i915_ringbuffer_data", i915_ringbuffer_data, 0, (void *)RCS},
+ {"i915_ringbuffer_info", i915_ringbuffer_info, 0, (void *)RCS},
+ {"i915_bsd_ringbuffer_data", i915_ringbuffer_data, 0, (void *)VCS},
+ {"i915_bsd_ringbuffer_info", i915_ringbuffer_info, 0, (void *)VCS},
+ {"i915_blt_ringbuffer_data", i915_ringbuffer_data, 0, (void *)BCS},
+ {"i915_blt_ringbuffer_info", i915_ringbuffer_info, 0, (void *)BCS},
{"i915_batchbuffers", i915_batchbuffer_info, 0},
{"i915_error_state", i915_error_state, 0},
{"i915_rstdby_delays", i915_rstdby_delays, 0},
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 7a26f4dd21a..18746e6cb12 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -49,6 +49,8 @@
static int i915_init_phys_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
/* Program Hardware Status Page */
dev_priv->status_page_dmah =
drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
@@ -57,11 +59,10 @@ static int i915_init_phys_hws(struct drm_device *dev)
DRM_ERROR("Can not allocate hardware status page\n");
return -ENOMEM;
}
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
dev_priv->dma_status_page = dev_priv->status_page_dmah->busaddr;
- memset(dev_priv->render_ring.status_page.page_addr, 0, PAGE_SIZE);
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
if (INTEL_INFO(dev)->gen >= 4)
dev_priv->dma_status_page |= (dev_priv->dma_status_page >> 28) &
@@ -79,13 +80,15 @@ static int i915_init_phys_hws(struct drm_device *dev)
static void i915_free_hws(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
+
if (dev_priv->status_page_dmah) {
drm_pci_free(dev, dev_priv->status_page_dmah);
dev_priv->status_page_dmah = NULL;
}
- if (dev_priv->render_ring.status_page.gfx_addr) {
- dev_priv->render_ring.status_page.gfx_addr = 0;
+ if (ring->status_page.gfx_addr) {
+ ring->status_page.gfx_addr = 0;
drm_core_ioremapfree(&dev_priv->hws_map, dev);
}
@@ -97,7 +100,7 @@ void i915_kernel_lost_context(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv;
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
/*
* We should never lose context on the ring with modesetting
@@ -106,8 +109,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
- ring->head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring->tail = I915_READ(PRB0_TAIL) & TAIL_ADDR;
+ ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
@@ -123,6 +126,8 @@ void i915_kernel_lost_context(struct drm_device * dev)
static int i915_dma_cleanup(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
/* Make sure interrupts are disabled here because the uninstall ioctl
* may not have been called from userspace and after dev_private
* is freed, it's too late.
@@ -131,9 +136,8 @@ static int i915_dma_cleanup(struct drm_device * dev)
drm_irq_uninstall(dev);
mutex_lock(&dev->struct_mutex);
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
mutex_unlock(&dev->struct_mutex);
/* Clear the HWS virtual address at teardown */
@@ -147,6 +151,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
master_priv->sarea = drm_getsarea(dev);
if (master_priv->sarea) {
@@ -157,24 +162,24 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
if (init->ring_size != 0) {
- if (dev_priv->render_ring.gem_object != NULL) {
+ if (ring->obj != NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("Client tried to initialize ringbuffer in "
"GEM mode\n");
return -EINVAL;
}
- dev_priv->render_ring.size = init->ring_size;
+ ring->size = init->ring_size;
- dev_priv->render_ring.map.offset = init->ring_start;
- dev_priv->render_ring.map.size = init->ring_size;
- dev_priv->render_ring.map.type = 0;
- dev_priv->render_ring.map.flags = 0;
- dev_priv->render_ring.map.mtrr = 0;
+ ring->map.offset = init->ring_start;
+ ring->map.size = init->ring_size;
+ ring->map.type = 0;
+ ring->map.flags = 0;
+ ring->map.mtrr = 0;
- drm_core_ioremap_wc(&dev_priv->render_ring.map, dev);
+ drm_core_ioremap_wc(&ring->map, dev);
- if (dev_priv->render_ring.map.handle == NULL) {
+ if (ring->map.handle == NULL) {
i915_dma_cleanup(dev);
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -182,7 +187,7 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
}
}
- dev_priv->render_ring.virtual_start = dev_priv->render_ring.map.handle;
+ ring->virtual_start = ring->map.handle;
dev_priv->cpp = init->cpp;
dev_priv->back_offset = init->back_offset;
@@ -201,12 +206,10 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init)
static int i915_dma_resume(struct drm_device * dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
- struct intel_ring_buffer *ring;
DRM_DEBUG_DRIVER("%s\n", __func__);
- ring = &dev_priv->render_ring;
-
if (ring->map.handle == NULL) {
DRM_ERROR("can not ioremap virtual address for"
" ring buffer\n");
@@ -221,7 +224,7 @@ static int i915_dma_resume(struct drm_device * dev)
DRM_DEBUG_DRIVER("hw status page @ %p\n",
ring->status_page.page_addr);
if (ring->status_page.gfx_addr != 0)
- intel_ring_setup_status_page(dev, ring);
+ intel_ring_setup_status_page(ring);
else
I915_WRITE(HWS_PGA, dev_priv->dma_status_page);
@@ -263,7 +266,7 @@ static int i915_dma_init(struct drm_device *dev, void *data,
* instruction detected will be given a size of zero, which is a
* signal to abort the rest of the buffer.
*/
-static int do_validate_cmd(int cmd)
+static int validate_cmd(int cmd)
{
switch (((cmd >> 29) & 0x7)) {
case 0x0:
@@ -321,40 +324,27 @@ static int do_validate_cmd(int cmd)
return 0;
}
-static int validate_cmd(int cmd)
-{
- int ret = do_validate_cmd(cmd);
-
-/* printk("validate_cmd( %x ): %d\n", cmd, ret); */
-
- return ret;
-}
-
static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int i;
+ int i, ret;
- if ((dwords+1) * sizeof(int) >= dev_priv->render_ring.size - 8)
+ if ((dwords+1) * sizeof(int) >= LP_RING(dev_priv)->size - 8)
return -EINVAL;
- BEGIN_LP_RING((dwords+1)&~1);
-
for (i = 0; i < dwords;) {
- int cmd, sz;
-
- cmd = buffer[i];
-
- if ((sz = validate_cmd(cmd)) == 0 || i + sz > dwords)
+ int sz = validate_cmd(buffer[i]);
+ if (sz == 0 || i + sz > dwords)
return -EINVAL;
-
- OUT_RING(cmd);
-
- while (++i, --sz) {
- OUT_RING(buffer[i]);
- }
+ i += sz;
}
+ ret = BEGIN_LP_RING((dwords+1)&~1);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < dwords; i++)
+ OUT_RING(buffer[i]);
if (dwords & 1)
OUT_RING(0);
@@ -365,34 +355,41 @@ static int i915_emit_cmds(struct drm_device * dev, int *buffer, int dwords)
int
i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4)
+ struct drm_clip_rect *box,
+ int DR1, int DR4)
{
- struct drm_clip_rect box = boxes[i];
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
- if (box.y2 <= box.y1 || box.x2 <= box.x1 || box.y2 <= 0 || box.x2 <= 0) {
+ if (box->y2 <= box->y1 || box->x2 <= box->x1 ||
+ box->y2 <= 0 || box->x2 <= 0) {
DRM_ERROR("Bad box %d,%d..%d,%d\n",
- box.x1, box.y1, box.x2, box.y2);
+ box->x1, box->y1, box->x2, box->y2);
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO_I965);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret)
+ return ret;
+
OUT_RING(GFX_OP_DRAWRECT_INFO);
OUT_RING(DR1);
- OUT_RING((box.x1 & 0xffff) | (box.y1 << 16));
- OUT_RING(((box.x2 - 1) & 0xffff) | ((box.y2 - 1) << 16));
+ OUT_RING((box->x1 & 0xffff) | (box->y1 << 16));
+ OUT_RING(((box->x2 - 1) & 0xffff) | ((box->y2 - 1) << 16));
OUT_RING(DR4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
return 0;
}
@@ -412,12 +409,13 @@ static void i915_emit_breadcrumb(struct drm_device *dev)
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
}
static int i915_dispatch_cmdbuffer(struct drm_device * dev,
@@ -439,7 +437,7 @@ static int i915_dispatch_cmdbuffer(struct drm_device * dev,
for (i = 0; i < count; i++) {
if (i < nbox) {
- ret = i915_emit_box(dev, cliprects, i,
+ ret = i915_emit_box(dev, &cliprects[i],
cmd->DR1, cmd->DR4);
if (ret)
return ret;
@@ -458,8 +456,9 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
drm_i915_batchbuffer_t * batch,
struct drm_clip_rect *cliprects)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
int nbox = batch->num_cliprects;
- int i = 0, count;
+ int i, count, ret;
if ((batch->start | batch->used) & 0x7) {
DRM_ERROR("alignment");
@@ -469,17 +468,19 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
i915_kernel_lost_context(dev);
count = nbox ? nbox : 1;
-
for (i = 0; i < count; i++) {
if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- batch->DR1, batch->DR4);
+ ret = i915_emit_box(dev, &cliprects[i],
+ batch->DR1, batch->DR4);
if (ret)
return ret;
}
if (!IS_I830(dev) && !IS_845G(dev)) {
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ return ret;
+
if (INTEL_INFO(dev)->gen >= 4) {
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6) | MI_BATCH_NON_SECURE_I965);
OUT_RING(batch->start);
@@ -487,26 +488,29 @@ static int i915_dispatch_batchbuffer(struct drm_device * dev,
OUT_RING(MI_BATCH_BUFFER_START | (2 << 6));
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
}
- ADVANCE_LP_RING();
} else {
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ return ret;
+
OUT_RING(MI_BATCH_BUFFER);
OUT_RING(batch->start | MI_BATCH_NON_SECURE);
OUT_RING(batch->start + batch->used - 4);
OUT_RING(0);
- ADVANCE_LP_RING();
}
+ ADVANCE_LP_RING();
}
if (IS_G4X(dev) || IS_GEN5(dev)) {
- BEGIN_LP_RING(2);
- OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
- OUT_RING(MI_NOOP);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(2) == 0) {
+ OUT_RING(MI_FLUSH | MI_NO_WRITE_FLUSH | MI_INVALIDATE_ISP);
+ OUT_RING(MI_NOOP);
+ ADVANCE_LP_RING();
+ }
}
- i915_emit_breadcrumb(dev);
+ i915_emit_breadcrumb(dev);
return 0;
}
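
With this series BEGIN_LP_RING() can fail (it waits for ring space, and the wait may be interrupted or time out), so every emitter either propagates the error, as the dispatch paths above now do, or treats the write as best-effort with if (BEGIN_LP_RING(n) == 0). A sketch of the propagating variant (example_emit_flush() is illustrative):

	/* Sketch: emitting a two-dword command with error propagation. */
	static int example_emit_flush(struct drm_device *dev)
	{
		struct drm_i915_private *dev_priv = dev->dev_private;
		int ret;

		ret = BEGIN_LP_RING(2);	/* may fail while waiting for space */
		if (ret)
			return ret;

		OUT_RING(MI_FLUSH);
		OUT_RING(MI_NOOP);
		ADVANCE_LP_RING();
		return 0;
	}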
@@ -515,6 +519,7 @@ static int i915_dispatch_flip(struct drm_device * dev)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_master_private *master_priv =
dev->primary->master->driver_priv;
+ int ret;
if (!master_priv->sarea_priv)
return -EINVAL;
@@ -526,12 +531,13 @@ static int i915_dispatch_flip(struct drm_device * dev)
i915_kernel_lost_context(dev);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(10);
+ if (ret)
+ return ret;
+
OUT_RING(MI_FLUSH | MI_READ_FLUSH);
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(6);
OUT_RING(CMD_OP_DISPLAYBUFFER_INFO | ASYNC_FLIP);
OUT_RING(0);
if (dev_priv->current_page == 0) {
@@ -542,33 +548,32 @@ static int i915_dispatch_flip(struct drm_device * dev)
dev_priv->current_page = 0;
}
OUT_RING(0);
- ADVANCE_LP_RING();
- BEGIN_LP_RING(2);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_PLANE_A_FLIP);
OUT_RING(0);
+
ADVANCE_LP_RING();
master_priv->sarea_priv->last_enqueue = dev_priv->counter++;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(0);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(0);
+ ADVANCE_LP_RING();
+ }
master_priv->sarea_priv->pf_current_page = dev_priv->current_page;
return 0;
}
-static int i915_quiescent(struct drm_device * dev)
+static int i915_quiescent(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = LP_RING(dev->dev_private);
i915_kernel_lost_context(dev);
- return intel_wait_ring_buffer(dev, &dev_priv->render_ring,
- dev_priv->render_ring.size - 8);
+ return intel_wait_ring_buffer(ring, ring->size - 8);
}
static int i915_flush_ioctl(struct drm_device *dev, void *data,
@@ -767,6 +772,15 @@ static int i915_getparam(struct drm_device *dev, void *data,
case I915_PARAM_HAS_BLT:
value = HAS_BLT(dev);
break;
+ case I915_PARAM_HAS_RELAXED_FENCING:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_COHERENT_RINGS:
+ value = 1;
+ break;
+ case I915_PARAM_HAS_EXEC_CONSTANTS:
+ value = INTEL_INFO(dev)->gen >= 4;
+ break;
default:
DRM_DEBUG_DRIVER("Unknown parameter %d\n",
param->param);
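
The new parameters are visible to userspace through the I915_GETPARAM ioctl, so libdrm-based callers can probe for them before relying on relaxed fencing or coherent rings. A hedged sketch of the probe (userspace, libdrm-style):

	/* Sketch: probing a driver capability from userspace. */
	#include <xf86drm.h>
	#include <i915_drm.h>

	static int has_relaxed_fencing(int fd)
	{
		drm_i915_getparam_t gp;
		int value = 0;

		gp.param = I915_PARAM_HAS_RELAXED_FENCING;
		gp.value = &value;
		if (drmIoctl(fd, DRM_IOCTL_I915_GETPARAM, &gp))
			return 0;	/* old kernel: parameter unknown */
		return value;
	}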
@@ -822,7 +836,7 @@ static int i915_set_status_page(struct drm_device *dev, void *data,
{
drm_i915_private_t *dev_priv = dev->dev_private;
drm_i915_hws_addr_t *hws = data;
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
if (!I915_NEED_GFX_HWS(dev))
return -EINVAL;
@@ -1001,73 +1015,47 @@ intel_teardown_mchbar(struct drm_device *dev)
#define PTE_VALID (1 << 0)
/**
- * i915_gtt_to_phys - take a GTT address and turn it into a physical one
+ * i915_stolen_to_phys - take an offset into stolen memory and turn it into
+ * a physical one
* @dev: drm device
- * @gtt_addr: address to translate
+ * @offset: address to translate
*
- * Some chip functions require allocations from stolen space but need the
- * physical address of the memory in question. We use this routine
- * to get a physical address suitable for register programming from a given
- * GTT address.
+ * Some chip functions require allocations from stolen space and need the
+ * physical address of the memory in question.
*/
-static unsigned long i915_gtt_to_phys(struct drm_device *dev,
- unsigned long gtt_addr)
+static unsigned long i915_stolen_to_phys(struct drm_device *dev, u32 offset)
{
- unsigned long *gtt;
- unsigned long entry, phys;
- int gtt_bar = IS_GEN2(dev) ? 1 : 0;
- int gtt_offset, gtt_size;
-
- if (INTEL_INFO(dev)->gen >= 4) {
- if (IS_G4X(dev) || INTEL_INFO(dev)->gen > 4) {
- gtt_offset = 2*1024*1024;
- gtt_size = 2*1024*1024;
- } else {
- gtt_offset = 512*1024;
- gtt_size = 512*1024;
- }
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct pci_dev *pdev = dev_priv->bridge_dev;
+ u32 base;
+
+#if 0
+ /* On the machines I have tested the Graphics Base of Stolen Memory
+ * is unreliable, so compute the base by subtracting the stolen memory
+ * from the Top of Low Usable DRAM which is where the BIOS places
+ * the graphics stolen memory.
+ */
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ /* top 32bits are reserved = 0 */
+ pci_read_config_dword(pdev, 0xA4, &base);
} else {
- gtt_bar = 3;
- gtt_offset = 0;
- gtt_size = pci_resource_len(dev->pdev, gtt_bar);
- }
-
- gtt = ioremap_wc(pci_resource_start(dev->pdev, gtt_bar) + gtt_offset,
- gtt_size);
- if (!gtt) {
- DRM_ERROR("ioremap of GTT failed\n");
- return 0;
- }
-
- entry = *(volatile u32 *)(gtt + (gtt_addr / 1024));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, PTE: 0x%08lx\n", gtt_addr, entry);
-
- /* Mask out these reserved bits on this hardware. */
- if (INTEL_INFO(dev)->gen < 4 && !IS_G33(dev))
- entry &= ~PTE_ADDRESS_MASK_HIGH;
-
- /* If it's not a mapping type we know, then bail. */
- if ((entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_UNCACHED &&
- (entry & PTE_MAPPING_TYPE_MASK) != PTE_MAPPING_TYPE_CACHED) {
- iounmap(gtt);
- return 0;
- }
-
- if (!(entry & PTE_VALID)) {
- DRM_ERROR("bad GTT entry in stolen space\n");
- iounmap(gtt);
- return 0;
+ /* XXX presume 8xx is the same as i915 */
+ pci_bus_read_config_dword(pdev->bus, 2, 0x5C, &base);
+ }
+#else
+ if (INTEL_INFO(dev)->gen > 3 || IS_G33(dev)) {
+ u16 val;
+ pci_read_config_word(pdev, 0xb0, &val);
+ base = val >> 4 << 20;
+ } else {
+ u8 val;
+ pci_read_config_byte(pdev, 0x9c, &val);
+ base = val >> 3 << 27;
}
+ base -= dev_priv->mm.gtt->stolen_size;
+#endif
- iounmap(gtt);
-
- phys =(entry & PTE_ADDRESS_MASK) |
- ((uint64_t)(entry & PTE_ADDRESS_MASK_HIGH) << (32 - 4));
-
- DRM_DEBUG_DRIVER("GTT addr: 0x%08lx, phys addr: 0x%08lx\n", gtt_addr, phys);
-
- return phys;
+ return base + offset;
}
static void i915_warn_stolen(struct drm_device *dev)
@@ -1083,54 +1071,35 @@ static void i915_setup_compression(struct drm_device *dev, int size)
unsigned long cfb_base;
unsigned long ll_base = 0;
- /* Leave 1M for line length buffer & misc. */
- compressed_fb = drm_mm_search_free(&dev_priv->mm.vram, size, 4096, 0);
- if (!compressed_fb) {
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- i915_warn_stolen(dev);
- return;
- }
+ compressed_fb = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
+ if (compressed_fb)
+ compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
+ if (!compressed_fb)
+ goto err;
- compressed_fb = drm_mm_get_block(compressed_fb, size, 4096);
- if (!compressed_fb) {
- i915_warn_stolen(dev);
- dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
- return;
- }
+ cfb_base = i915_stolen_to_phys(dev, compressed_fb->start);
+ if (!cfb_base)
+ goto err_fb;
- cfb_base = i915_gtt_to_phys(dev, compressed_fb->start);
- if (!cfb_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- }
+ if (!(IS_GM45(dev) || HAS_PCH_SPLIT(dev))) {
+ compressed_llb = drm_mm_search_free(&dev_priv->mm.stolen,
+ 4096, 4096, 0);
+ if (compressed_llb)
+ compressed_llb = drm_mm_get_block(compressed_llb,
+ 4096, 4096);
+ if (!compressed_llb)
+ goto err_fb;
- if (!(IS_GM45(dev) || IS_IRONLAKE_M(dev))) {
- compressed_llb = drm_mm_search_free(&dev_priv->mm.vram, 4096,
- 4096, 0);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- compressed_llb = drm_mm_get_block(compressed_llb, 4096, 4096);
- if (!compressed_llb) {
- i915_warn_stolen(dev);
- return;
- }
-
- ll_base = i915_gtt_to_phys(dev, compressed_llb->start);
- if (!ll_base) {
- DRM_ERROR("failed to get stolen phys addr, disabling FBC\n");
- drm_mm_put_block(compressed_fb);
- drm_mm_put_block(compressed_llb);
- }
+ ll_base = i915_stolen_to_phys(dev, compressed_llb->start);
+ if (!ll_base)
+ goto err_llb;
}
dev_priv->cfb_size = size;
intel_disable_fbc(dev);
dev_priv->compressed_fb = compressed_fb;
- if (IS_IRONLAKE_M(dev))
+ if (HAS_PCH_SPLIT(dev))
I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
else if (IS_GM45(dev)) {
I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
@@ -1140,8 +1109,17 @@ static void i915_setup_compression(struct drm_device *dev, int size)
dev_priv->compressed_llb = compressed_llb;
}
- DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n", cfb_base,
- ll_base, size >> 20);
+ DRM_DEBUG_KMS("FBC base 0x%08lx, ll base 0x%08lx, size %dM\n",
+ cfb_base, ll_base, size >> 20);
+ return;
+
+err_llb:
+ drm_mm_put_block(compressed_llb);
+err_fb:
+ drm_mm_put_block(compressed_fb);
+err:
+ dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
+ i915_warn_stolen(dev);
}
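
Allocating from the stolen-memory manager is the classic two-step drm_mm dance: drm_mm_search_free() locates a suitable hole, drm_mm_get_block() claims it, and either step may fail, which is what the single goto ladder above now unwinds instead of the old code's duplicated warn-and-return paths. The shape in isolation (example_stolen_alloc() is hypothetical):

	/* Sketch: two-step drm_mm allocation from the stolen-memory pool. */
	static struct drm_mm_node *
	example_stolen_alloc(struct drm_i915_private *dev_priv, unsigned long size)
	{
		struct drm_mm_node *node;

		/* step 1: find a free hole of the right size and alignment */
		node = drm_mm_search_free(&dev_priv->mm.stolen, size, 4096, 0);
		if (node == NULL)
			return NULL;

		/* step 2: claim it; the hole may be split and the claim can fail */
		return drm_mm_get_block(node, size, 4096);
	}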
static void i915_cleanup_compression(struct drm_device *dev)
@@ -1192,17 +1170,20 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
return can_switch;
}
-static int i915_load_modeset_init(struct drm_device *dev,
- unsigned long prealloc_size,
- unsigned long agp_size)
+static int i915_load_modeset_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ unsigned long prealloc_size, gtt_size, mappable_size;
int ret = 0;
- /* Basic memrange allocator for stolen space (aka mm.vram) */
- drm_mm_init(&dev_priv->mm.vram, 0, prealloc_size);
+ prealloc_size = dev_priv->mm.gtt->stolen_size;
+ gtt_size = dev_priv->mm.gtt->gtt_total_entries << PAGE_SHIFT;
+ mappable_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
+ /* Basic memrange allocator for stolen space */
+ drm_mm_init(&dev_priv->mm.stolen, 0, prealloc_size);
- /* Let GEM Manage from end of prealloc space to end of aperture.
+ /* Let GEM Manage all of the aperture.
*
* However, leave one page at the end still bound to the scratch page.
* There are a number of places where the hardware apparently
@@ -1211,7 +1192,7 @@ static int i915_load_modeset_init(struct drm_device *dev,
* at the last page of the aperture. One page should be enough to
* keep any prefetching inside of the aperture.
*/
- i915_gem_do_init(dev, prealloc_size, agp_size - 4096);
+ i915_gem_do_init(dev, 0, mappable_size, gtt_size - PAGE_SIZE);
mutex_lock(&dev->struct_mutex);
ret = i915_gem_init_ringbuffer(dev);
@@ -1223,16 +1204,17 @@ static int i915_load_modeset_init(struct drm_device *dev,
if (I915_HAS_FBC(dev) && i915_powersave) {
int cfb_size;
- /* Try to get an 8M buffer... */
- if (prealloc_size > (9*1024*1024))
- cfb_size = 8*1024*1024;
+ /* Leave 1M for line length buffer & misc. */
+
+ /* Try to get a 32M buffer... */
+ if (prealloc_size > (36*1024*1024))
+ cfb_size = 32*1024*1024;
else /* fall back to 7/8 of the stolen space */
cfb_size = prealloc_size * 7 / 8;
i915_setup_compression(dev, cfb_size);
}
- /* Allow hardware batchbuffers unless told otherwise.
- */
+ /* Allow hardware batchbuffers unless told otherwise. */
dev_priv->allow_batchbuffer = 1;
ret = intel_parse_bios(dev);
@@ -1422,152 +1404,12 @@ static void i915_ironlake_get_mem_freq(struct drm_device *dev)
}
}
-struct v_table {
- u8 vid;
- unsigned long vd; /* in .1 mil */
- unsigned long vm; /* in .1 mil */
- u8 pvid;
-};
-
-static struct v_table v_table[] = {
- { 0, 16125, 15000, 0x7f, },
- { 1, 16000, 14875, 0x7e, },
- { 2, 15875, 14750, 0x7d, },
- { 3, 15750, 14625, 0x7c, },
- { 4, 15625, 14500, 0x7b, },
- { 5, 15500, 14375, 0x7a, },
- { 6, 15375, 14250, 0x79, },
- { 7, 15250, 14125, 0x78, },
- { 8, 15125, 14000, 0x77, },
- { 9, 15000, 13875, 0x76, },
- { 10, 14875, 13750, 0x75, },
- { 11, 14750, 13625, 0x74, },
- { 12, 14625, 13500, 0x73, },
- { 13, 14500, 13375, 0x72, },
- { 14, 14375, 13250, 0x71, },
- { 15, 14250, 13125, 0x70, },
- { 16, 14125, 13000, 0x6f, },
- { 17, 14000, 12875, 0x6e, },
- { 18, 13875, 12750, 0x6d, },
- { 19, 13750, 12625, 0x6c, },
- { 20, 13625, 12500, 0x6b, },
- { 21, 13500, 12375, 0x6a, },
- { 22, 13375, 12250, 0x69, },
- { 23, 13250, 12125, 0x68, },
- { 24, 13125, 12000, 0x67, },
- { 25, 13000, 11875, 0x66, },
- { 26, 12875, 11750, 0x65, },
- { 27, 12750, 11625, 0x64, },
- { 28, 12625, 11500, 0x63, },
- { 29, 12500, 11375, 0x62, },
- { 30, 12375, 11250, 0x61, },
- { 31, 12250, 11125, 0x60, },
- { 32, 12125, 11000, 0x5f, },
- { 33, 12000, 10875, 0x5e, },
- { 34, 11875, 10750, 0x5d, },
- { 35, 11750, 10625, 0x5c, },
- { 36, 11625, 10500, 0x5b, },
- { 37, 11500, 10375, 0x5a, },
- { 38, 11375, 10250, 0x59, },
- { 39, 11250, 10125, 0x58, },
- { 40, 11125, 10000, 0x57, },
- { 41, 11000, 9875, 0x56, },
- { 42, 10875, 9750, 0x55, },
- { 43, 10750, 9625, 0x54, },
- { 44, 10625, 9500, 0x53, },
- { 45, 10500, 9375, 0x52, },
- { 46, 10375, 9250, 0x51, },
- { 47, 10250, 9125, 0x50, },
- { 48, 10125, 9000, 0x4f, },
- { 49, 10000, 8875, 0x4e, },
- { 50, 9875, 8750, 0x4d, },
- { 51, 9750, 8625, 0x4c, },
- { 52, 9625, 8500, 0x4b, },
- { 53, 9500, 8375, 0x4a, },
- { 54, 9375, 8250, 0x49, },
- { 55, 9250, 8125, 0x48, },
- { 56, 9125, 8000, 0x47, },
- { 57, 9000, 7875, 0x46, },
- { 58, 8875, 7750, 0x45, },
- { 59, 8750, 7625, 0x44, },
- { 60, 8625, 7500, 0x43, },
- { 61, 8500, 7375, 0x42, },
- { 62, 8375, 7250, 0x41, },
- { 63, 8250, 7125, 0x40, },
- { 64, 8125, 7000, 0x3f, },
- { 65, 8000, 6875, 0x3e, },
- { 66, 7875, 6750, 0x3d, },
- { 67, 7750, 6625, 0x3c, },
- { 68, 7625, 6500, 0x3b, },
- { 69, 7500, 6375, 0x3a, },
- { 70, 7375, 6250, 0x39, },
- { 71, 7250, 6125, 0x38, },
- { 72, 7125, 6000, 0x37, },
- { 73, 7000, 5875, 0x36, },
- { 74, 6875, 5750, 0x35, },
- { 75, 6750, 5625, 0x34, },
- { 76, 6625, 5500, 0x33, },
- { 77, 6500, 5375, 0x32, },
- { 78, 6375, 5250, 0x31, },
- { 79, 6250, 5125, 0x30, },
- { 80, 6125, 5000, 0x2f, },
- { 81, 6000, 4875, 0x2e, },
- { 82, 5875, 4750, 0x2d, },
- { 83, 5750, 4625, 0x2c, },
- { 84, 5625, 4500, 0x2b, },
- { 85, 5500, 4375, 0x2a, },
- { 86, 5375, 4250, 0x29, },
- { 87, 5250, 4125, 0x28, },
- { 88, 5125, 4000, 0x27, },
- { 89, 5000, 3875, 0x26, },
- { 90, 4875, 3750, 0x25, },
- { 91, 4750, 3625, 0x24, },
- { 92, 4625, 3500, 0x23, },
- { 93, 4500, 3375, 0x22, },
- { 94, 4375, 3250, 0x21, },
- { 95, 4250, 3125, 0x20, },
- { 96, 4125, 3000, 0x1f, },
- { 97, 4125, 3000, 0x1e, },
- { 98, 4125, 3000, 0x1d, },
- { 99, 4125, 3000, 0x1c, },
- { 100, 4125, 3000, 0x1b, },
- { 101, 4125, 3000, 0x1a, },
- { 102, 4125, 3000, 0x19, },
- { 103, 4125, 3000, 0x18, },
- { 104, 4125, 3000, 0x17, },
- { 105, 4125, 3000, 0x16, },
- { 106, 4125, 3000, 0x15, },
- { 107, 4125, 3000, 0x14, },
- { 108, 4125, 3000, 0x13, },
- { 109, 4125, 3000, 0x12, },
- { 110, 4125, 3000, 0x11, },
- { 111, 4125, 3000, 0x10, },
- { 112, 4125, 3000, 0x0f, },
- { 113, 4125, 3000, 0x0e, },
- { 114, 4125, 3000, 0x0d, },
- { 115, 4125, 3000, 0x0c, },
- { 116, 4125, 3000, 0x0b, },
- { 117, 4125, 3000, 0x0a, },
- { 118, 4125, 3000, 0x09, },
- { 119, 4125, 3000, 0x08, },
- { 120, 1125, 0, 0x07, },
- { 121, 1000, 0, 0x06, },
- { 122, 875, 0, 0x05, },
- { 123, 750, 0, 0x04, },
- { 124, 625, 0, 0x03, },
- { 125, 500, 0, 0x02, },
- { 126, 375, 0, 0x01, },
- { 127, 0, 0, 0x00, },
-};
-
-struct cparams {
- int i;
- int t;
- int m;
- int c;
-};
-
-static struct cparams cparams[] = {
+static const struct cparams {
+ u16 i;
+ u16 t;
+ u16 m;
+ u16 c;
+} cparams[] = {
{ 1, 1333, 301, 28664 },
{ 1, 1066, 294, 24460 },
{ 1, 800, 294, 25192 },
@@ -1633,21 +1475,145 @@ unsigned long i915_mch_val(struct drm_i915_private *dev_priv)
return ((m * x) / 127) - b;
}
-static unsigned long pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
+static u16 pvid_to_extvid(struct drm_i915_private *dev_priv, u8 pxvid)
{
- unsigned long val = 0;
- int i;
-
- for (i = 0; i < ARRAY_SIZE(v_table); i++) {
- if (v_table[i].pvid == pxvid) {
- if (IS_MOBILE(dev_priv->dev))
- val = v_table[i].vm;
- else
- val = v_table[i].vd;
- }
- }
-
- return val;
+ static const struct v_table {
+ u16 vd; /* in .1 mil */
+ u16 vm; /* in .1 mil */
+ } v_table[] = {
+ { 0, 0, },
+ { 375, 0, },
+ { 500, 0, },
+ { 625, 0, },
+ { 750, 0, },
+ { 875, 0, },
+ { 1000, 0, },
+ { 1125, 0, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4125, 3000, },
+ { 4250, 3125, },
+ { 4375, 3250, },
+ { 4500, 3375, },
+ { 4625, 3500, },
+ { 4750, 3625, },
+ { 4875, 3750, },
+ { 5000, 3875, },
+ { 5125, 4000, },
+ { 5250, 4125, },
+ { 5375, 4250, },
+ { 5500, 4375, },
+ { 5625, 4500, },
+ { 5750, 4625, },
+ { 5875, 4750, },
+ { 6000, 4875, },
+ { 6125, 5000, },
+ { 6250, 5125, },
+ { 6375, 5250, },
+ { 6500, 5375, },
+ { 6625, 5500, },
+ { 6750, 5625, },
+ { 6875, 5750, },
+ { 7000, 5875, },
+ { 7125, 6000, },
+ { 7250, 6125, },
+ { 7375, 6250, },
+ { 7500, 6375, },
+ { 7625, 6500, },
+ { 7750, 6625, },
+ { 7875, 6750, },
+ { 8000, 6875, },
+ { 8125, 7000, },
+ { 8250, 7125, },
+ { 8375, 7250, },
+ { 8500, 7375, },
+ { 8625, 7500, },
+ { 8750, 7625, },
+ { 8875, 7750, },
+ { 9000, 7875, },
+ { 9125, 8000, },
+ { 9250, 8125, },
+ { 9375, 8250, },
+ { 9500, 8375, },
+ { 9625, 8500, },
+ { 9750, 8625, },
+ { 9875, 8750, },
+ { 10000, 8875, },
+ { 10125, 9000, },
+ { 10250, 9125, },
+ { 10375, 9250, },
+ { 10500, 9375, },
+ { 10625, 9500, },
+ { 10750, 9625, },
+ { 10875, 9750, },
+ { 11000, 9875, },
+ { 11125, 10000, },
+ { 11250, 10125, },
+ { 11375, 10250, },
+ { 11500, 10375, },
+ { 11625, 10500, },
+ { 11750, 10625, },
+ { 11875, 10750, },
+ { 12000, 10875, },
+ { 12125, 11000, },
+ { 12250, 11125, },
+ { 12375, 11250, },
+ { 12500, 11375, },
+ { 12625, 11500, },
+ { 12750, 11625, },
+ { 12875, 11750, },
+ { 13000, 11875, },
+ { 13125, 12000, },
+ { 13250, 12125, },
+ { 13375, 12250, },
+ { 13500, 12375, },
+ { 13625, 12500, },
+ { 13750, 12625, },
+ { 13875, 12750, },
+ { 14000, 12875, },
+ { 14125, 13000, },
+ { 14250, 13125, },
+ { 14375, 13250, },
+ { 14500, 13375, },
+ { 14625, 13500, },
+ { 14750, 13625, },
+ { 14875, 13750, },
+ { 15000, 13875, },
+ { 15125, 14000, },
+ { 15250, 14125, },
+ { 15375, 14250, },
+ { 15500, 14375, },
+ { 15625, 14500, },
+ { 15750, 14625, },
+ { 15875, 14750, },
+ { 16000, 14875, },
+ { 16125, 15000, },
+ };
+ if (dev_priv->info->is_mobile)
+ return v_table[pxvid].vm;
+ else
+ return v_table[pxvid].vd;
}
void i915_update_gfx_val(struct drm_i915_private *dev_priv)
@@ -1881,9 +1847,9 @@ EXPORT_SYMBOL_GPL(i915_gpu_turbo_disable);
int i915_driver_load(struct drm_device *dev, unsigned long flags)
{
struct drm_i915_private *dev_priv;
- resource_size_t base, size;
int ret = 0, mmio_bar;
- uint32_t agp_size, prealloc_size;
+ uint32_t agp_size;
+
/* i915 has 4 more counters */
dev->counters += 4;
dev->types[6] = _DRM_STAT_IRQ;
@@ -1899,11 +1865,6 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
dev_priv->dev = dev;
dev_priv->info = (struct intel_device_info *) flags;
- /* Add register map (needed for suspend/resume) */
- mmio_bar = IS_GEN2(dev) ? 1 : 0;
- base = pci_resource_start(dev->pdev, mmio_bar);
- size = pci_resource_len(dev->pdev, mmio_bar);
-
if (i915_get_bridge_dev(dev)) {
ret = -EIO;
goto free_priv;
@@ -1913,16 +1874,25 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (IS_GEN2(dev))
dma_set_coherent_mask(&dev->pdev->dev, DMA_BIT_MASK(30));
- dev_priv->regs = ioremap(base, size);
+ mmio_bar = IS_GEN2(dev) ? 1 : 0;
+ dev_priv->regs = pci_iomap(dev->pdev, mmio_bar, 0);
if (!dev_priv->regs) {
DRM_ERROR("failed to map registers\n");
ret = -EIO;
goto put_bridge;
}
+ dev_priv->mm.gtt = intel_gtt_get();
+ if (!dev_priv->mm.gtt) {
+ DRM_ERROR("Failed to initialize GTT\n");
+ ret = -ENODEV;
+ goto out_iomapfree;
+ }
+
+ agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
+
dev_priv->mm.gtt_mapping =
- io_mapping_create_wc(dev->agp->base,
- dev->agp->agp_info.aper_size * 1024*1024);
+ io_mapping_create_wc(dev->agp->base, agp_size);
if (dev_priv->mm.gtt_mapping == NULL) {
ret = -EIO;
goto out_rmmap;
@@ -1934,24 +1904,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* MTRR if present. Even if a UC MTRR isn't present.
*/
dev_priv->mm.gtt_mtrr = mtrr_add(dev->agp->base,
- dev->agp->agp_info.aper_size *
- 1024 * 1024,
+ agp_size,
MTRR_TYPE_WRCOMB, 1);
if (dev_priv->mm.gtt_mtrr < 0) {
DRM_INFO("MTRR allocation failed. Graphics "
"performance may suffer.\n");
}
- dev_priv->mm.gtt = intel_gtt_get();
- if (!dev_priv->mm.gtt) {
- DRM_ERROR("Failed to initialize GTT\n");
- ret = -ENODEV;
- goto out_iomapfree;
- }
-
- prealloc_size = dev_priv->mm.gtt->gtt_stolen_entries << PAGE_SHIFT;
- agp_size = dev_priv->mm.gtt->gtt_mappable_entries << PAGE_SHIFT;
-
/* The i915 workqueue is primarily used for batched retirement of
* requests (and thus managing bo) once the task has been completed
* by the GPU. i915_gem_retire_requests() is called directly when we
@@ -1959,7 +1918,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
* bo.
*
* It is also used for periodic low-priority events, such as
- * idle-timers and hangcheck.
+ * idle-timers and recording error state.
*
* All tasks on the workqueue are expected to acquire the dev mutex
* so there is no point in running more than one instance of the
@@ -1977,20 +1936,11 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* enable GEM by default */
dev_priv->has_gem = 1;
- if (prealloc_size > agp_size * 3 / 4) {
- DRM_ERROR("Detected broken video BIOS with %d/%dkB of video "
- "memory stolen.\n",
- prealloc_size / 1024, agp_size / 1024);
- DRM_ERROR("Disabling GEM. (try reducing stolen memory or "
- "updating the BIOS to fix).\n");
- dev_priv->has_gem = 0;
- }
-
if (dev_priv->has_gem == 0 &&
drm_core_check_feature(dev, DRIVER_MODESET)) {
DRM_ERROR("kernel modesetting requires GEM, disabling driver.\n");
ret = -ENODEV;
- goto out_iomapfree;
+ goto out_workqueue_free;
}
dev->driver->get_vblank_counter = i915_get_vblank_counter;
@@ -2013,8 +1963,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
/* Init HWS */
if (!I915_NEED_GFX_HWS(dev)) {
ret = i915_init_phys_hws(dev);
- if (ret != 0)
- goto out_workqueue_free;
+ if (ret)
+ goto out_gem_unload;
}
if (IS_PINEVIEW(dev))
@@ -2036,16 +1986,13 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
if (!IS_I945G(dev) && !IS_I945GM(dev))
pci_enable_msi(dev->pdev);
- spin_lock_init(&dev_priv->user_irq_lock);
+ spin_lock_init(&dev_priv->irq_lock);
spin_lock_init(&dev_priv->error_lock);
dev_priv->trace_irq_seqno = 0;
ret = drm_vblank_init(dev, I915_NUM_PIPE);
-
- if (ret) {
- (void) i915_driver_unload(dev);
- return ret;
- }
+ if (ret)
+ goto out_gem_unload;
/* Start out suspended */
dev_priv->mm.suspended = 1;
@@ -2053,10 +2000,10 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
intel_detect_pch(dev);
if (drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_load_modeset_init(dev, prealloc_size, agp_size);
+ ret = i915_load_modeset_init(dev);
if (ret < 0) {
DRM_ERROR("failed to init modeset\n");
- goto out_workqueue_free;
+ goto out_gem_unload;
}
}
@@ -2074,12 +2021,18 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
return 0;
+out_gem_unload:
+ if (dev->pdev->msi_enabled)
+ pci_disable_msi(dev->pdev);
+
+ intel_teardown_gmbus(dev);
+ intel_teardown_mchbar(dev);
out_workqueue_free:
destroy_workqueue(dev_priv->wq);
out_iomapfree:
io_mapping_free(dev_priv->mm.gtt_mapping);
out_rmmap:
- iounmap(dev_priv->regs);
+ pci_iounmap(dev->pdev, dev_priv->regs);
put_bridge:
pci_dev_put(dev_priv->bridge_dev);
free_priv:
@@ -2096,6 +2049,9 @@ int i915_driver_unload(struct drm_device *dev)
i915_mch_dev = NULL;
spin_unlock(&mchdev_lock);
+ if (dev_priv->mm.inactive_shrinker.shrink)
+ unregister_shrinker(&dev_priv->mm.inactive_shrinker);
+
mutex_lock(&dev->struct_mutex);
ret = i915_gpu_idle(dev);
if (ret)
@@ -2153,7 +2109,7 @@ int i915_driver_unload(struct drm_device *dev)
mutex_unlock(&dev->struct_mutex);
if (I915_HAS_FBC(dev) && i915_powersave)
i915_cleanup_compression(dev);
- drm_mm_takedown(&dev_priv->mm.vram);
+ drm_mm_takedown(&dev_priv->mm.stolen);
intel_cleanup_overlay(dev);
@@ -2162,7 +2118,7 @@ int i915_driver_unload(struct drm_device *dev)
}
if (dev_priv->regs != NULL)
- iounmap(dev_priv->regs);
+ pci_iounmap(dev->pdev, dev_priv->regs);
intel_teardown_gmbus(dev);
intel_teardown_mchbar(dev);
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f737960712e..9eee6cf7901 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -111,7 +111,7 @@ static const struct intel_device_info intel_i965g_info = {
static const struct intel_device_info intel_i965gm_info = {
.gen = 4, .is_crestline = 1,
- .is_mobile = 1, .has_fbc = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1,
.has_overlay = 1,
.supports_tv = 1,
};
@@ -130,7 +130,7 @@ static const struct intel_device_info intel_g45_info = {
static const struct intel_device_info intel_gm45_info = {
.gen = 4, .is_g4x = 1,
- .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_rc6 = 1,
+ .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1,
.has_pipe_cxsr = 1, .has_hotplug = 1,
.supports_tv = 1,
.has_bsd_ring = 1,
@@ -150,7 +150,7 @@ static const struct intel_device_info intel_ironlake_d_info = {
static const struct intel_device_info intel_ironlake_m_info = {
.gen = 5, .is_mobile = 1,
- .need_gfx_hws = 1, .has_rc6 = 1, .has_hotplug = 1,
+ .need_gfx_hws = 1, .has_hotplug = 1,
.has_fbc = 0, /* disabled due to buggy hardware */
.has_bsd_ring = 1,
};
@@ -165,6 +165,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
static const struct intel_device_info intel_sandybridge_m_info = {
.gen = 6, .is_mobile = 1,
.need_gfx_hws = 1, .has_hotplug = 1,
+ .has_fbc = 1,
.has_bsd_ring = 1,
.has_blt_ring = 1,
};
@@ -244,6 +245,28 @@ void intel_detect_pch (struct drm_device *dev)
}
}
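+/*
+ * Gen6 forcewake handshake: wait for any previous ACK to clear, assert
+ * FORCEWAKE, then poll FORCEWAKE_ACK so the GT core is awake before the
+ * caller touches GT registers. Each poll is bounded at 50 * 10us.
+ */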
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv)
+{
+ int count;
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1))
+ udelay(10);
+
+ I915_WRITE_NOTRACE(FORCEWAKE, 1);
+ POSTING_READ(FORCEWAKE);
+
+ count = 0;
+ while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0)
+ udelay(10);
+}
+
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv)
+{
+ I915_WRITE_NOTRACE(FORCEWAKE, 0);
+ POSTING_READ(FORCEWAKE);
+}
+
static int i915_drm_freeze(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -304,6 +327,12 @@ static int i915_drm_thaw(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int error = 0;
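+	/* The GTT mappings do not survive suspend, so rebind every object
+	 * before the rest of the saved state is restored. */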
+ if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+ mutex_lock(&dev->struct_mutex);
+ i915_gem_restore_gtt_mappings(dev);
+ mutex_unlock(&dev->struct_mutex);
+ }
+
i915_restore_state(dev);
intel_opregion_setup(dev);
@@ -405,6 +434,14 @@ static int ironlake_do_reset(struct drm_device *dev, u8 flags)
return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}
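+/* Full-chip reset on gen6: set the full-domain reset bit in GDRST and
+ * poll (for up to 500ms) until the hardware clears it again.
+ */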
+static int gen6_do_reset(struct drm_device *dev, u8 flags)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_GDRST, GEN6_GRDOM_FULL);
+ return wait_for((I915_READ(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);
+}
+
/**
* i965_reset - reset chip after a hang
* @dev: drm device to reset
@@ -431,7 +468,8 @@ int i915_reset(struct drm_device *dev, u8 flags)
bool need_display = true;
int ret;
- mutex_lock(&dev->struct_mutex);
+ if (!mutex_trylock(&dev->struct_mutex))
+ return -EBUSY;
i915_gem_reset(dev);
@@ -439,6 +477,9 @@ int i915_reset(struct drm_device *dev, u8 flags)
if (get_seconds() - dev_priv->last_gpu_reset < 5) {
DRM_ERROR("GPU hanging too fast, declaring wedged!\n");
} else switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ ret = gen6_do_reset(dev, flags);
+ break;
case 5:
ret = ironlake_do_reset(dev, flags);
break;
@@ -472,9 +513,14 @@ int i915_reset(struct drm_device *dev, u8 flags)
*/
if (drm_core_check_feature(dev, DRIVER_MODESET) ||
!dev_priv->mm.suspended) {
- struct intel_ring_buffer *ring = &dev_priv->render_ring;
dev_priv->mm.suspended = 0;
- ring->init(dev, ring);
+
+ dev_priv->ring[RCS].init(&dev_priv->ring[RCS]);
+ if (HAS_BSD(dev))
+ dev_priv->ring[VCS].init(&dev_priv->ring[VCS]);
+ if (HAS_BLT(dev))
+ dev_priv->ring[BCS].init(&dev_priv->ring[BCS]);
+
mutex_unlock(&dev->struct_mutex);
drm_irq_uninstall(dev);
drm_irq_install(dev);
@@ -606,6 +652,8 @@ static struct drm_driver driver = {
.device_is_agp = i915_driver_device_is_agp,
.enable_vblank = i915_enable_vblank,
.disable_vblank = i915_disable_vblank,
+ .get_vblank_timestamp = i915_get_vblank_timestamp,
+ .get_scanout_position = i915_get_crtc_scanoutpos,
.irq_preinstall = i915_driver_irq_preinstall,
.irq_postinstall = i915_driver_irq_postinstall,
.irq_uninstall = i915_driver_irq_uninstall,
@@ -661,8 +709,6 @@ static int __init i915_init(void)
driver.num_ioctls = i915_max_ioctl;
- i915_gem_shrinker_init();
-
/*
* If CONFIG_DRM_I915_KMS is set, default to KMS unless
 * explicitly disabled with the module parameter.
@@ -684,17 +730,11 @@ static int __init i915_init(void)
driver.driver_features &= ~DRIVER_MODESET;
#endif
- if (!(driver.driver_features & DRIVER_MODESET)) {
- driver.suspend = i915_suspend;
- driver.resume = i915_resume;
- }
-
return drm_init(&driver);
}
static void __exit i915_exit(void)
{
- i915_gem_shrinker_exit();
drm_exit(&driver);
}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 409826da309..aac1bf332f7 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -89,7 +89,7 @@ struct drm_i915_gem_phys_object {
int id;
struct page **page_list;
drm_dma_handle_t *handle;
- struct drm_gem_object *cur_obj;
+ struct drm_i915_gem_object *cur_obj;
};
struct mem_block {
@@ -124,9 +124,9 @@ struct drm_i915_master_private {
#define I915_FENCE_REG_NONE -1
struct drm_i915_fence_reg {
- struct drm_gem_object *obj;
struct list_head lru_list;
- bool gpu;
+ struct drm_i915_gem_object *obj;
+ uint32_t setup_seqno;
};
struct sdvo_device_mapping {
@@ -139,6 +139,8 @@ struct sdvo_device_mapping {
u8 ddc_pin;
};
+struct intel_display_error_state;
+
struct drm_i915_error_state {
u32 eir;
u32 pgtbl_er;
@@ -148,11 +150,23 @@ struct drm_i915_error_state {
u32 ipehr;
u32 instdone;
u32 acthd;
+ u32 error; /* gen6+ */
+ u32 bcs_acthd; /* gen6+ blt engine */
+ u32 bcs_ipehr;
+ u32 bcs_ipeir;
+ u32 bcs_instdone;
+ u32 bcs_seqno;
+ u32 vcs_acthd; /* gen6+ bsd engine */
+ u32 vcs_ipehr;
+ u32 vcs_ipeir;
+ u32 vcs_instdone;
+ u32 vcs_seqno;
u32 instpm;
u32 instps;
u32 instdone1;
u32 seqno;
u64 bbaddr;
+ u64 fence[16];
struct timeval time;
struct drm_i915_error_object {
int page_count;
@@ -171,9 +185,11 @@ struct drm_i915_error_state {
u32 tiling:2;
u32 dirty:1;
u32 purgeable:1;
- } *active_bo;
- u32 active_bo_count;
+ u32 ring:4;
+ } *active_bo, *pinned_bo;
+ u32 active_bo_count, pinned_bo_count;
struct intel_overlay_error_state *overlay;
+ struct intel_display_error_state *display;
};
struct drm_i915_display_funcs {
@@ -207,7 +223,6 @@ struct intel_device_info {
u8 is_broadwater : 1;
u8 is_crestline : 1;
u8 has_fbc : 1;
- u8 has_rc6 : 1;
u8 has_pipe_cxsr : 1;
u8 has_hotplug : 1;
u8 cursor_needs_physical : 1;
@@ -243,6 +258,7 @@ typedef struct drm_i915_private {
const struct intel_device_info *info;
int has_gem;
+ int relative_constants_mode;
void __iomem *regs;
@@ -253,20 +269,15 @@ typedef struct drm_i915_private {
} *gmbus;
struct pci_dev *bridge_dev;
- struct intel_ring_buffer render_ring;
- struct intel_ring_buffer bsd_ring;
- struct intel_ring_buffer blt_ring;
+ struct intel_ring_buffer ring[I915_NUM_RINGS];
uint32_t next_seqno;
drm_dma_handle_t *status_page_dmah;
- void *seqno_page;
dma_addr_t dma_status_page;
uint32_t counter;
- unsigned int seqno_gfx_addr;
drm_local_map_t hws_map;
- struct drm_gem_object *seqno_obj;
- struct drm_gem_object *pwrctx;
- struct drm_gem_object *renderctx;
+ struct drm_i915_gem_object *pwrctx;
+ struct drm_i915_gem_object *renderctx;
struct resource mch_res;
@@ -275,25 +286,17 @@ typedef struct drm_i915_private {
int front_offset;
int current_page;
int page_flipping;
-#define I915_DEBUG_READ (1<<0)
-#define I915_DEBUG_WRITE (1<<1)
- unsigned long debug_flags;
- wait_queue_head_t irq_queue;
atomic_t irq_received;
- /** Protects user_irq_refcount and irq_mask_reg */
- spinlock_t user_irq_lock;
u32 trace_irq_seqno;
+
+ /* protects the irq masks */
+ spinlock_t irq_lock;
/** Cached value of IMR to avoid reads in updating the bitfield */
- u32 irq_mask_reg;
u32 pipestat[2];
- /** splitted irq regs for graphics and display engine on Ironlake,
- irq_mask_reg is still used for display irq. */
- u32 gt_irq_mask_reg;
- u32 gt_irq_enable_reg;
- u32 de_irq_enable_reg;
- u32 pch_irq_mask_reg;
- u32 pch_irq_enable_reg;
+ u32 irq_mask;
+ u32 gt_irq_mask;
+ u32 pch_irq_mask;
u32 hotplug_supported_mask;
struct work_struct hotplug_work;
@@ -306,7 +309,7 @@ typedef struct drm_i915_private {
int num_pipe;
/* For hangcheck timer */
-#define DRM_I915_HANGCHECK_PERIOD 250 /* in ms */
+#define DRM_I915_HANGCHECK_PERIOD 1500 /* in ms */
struct timer_list hangcheck_timer;
int hangcheck_count;
uint32_t last_acthd;
@@ -530,23 +533,21 @@ typedef struct drm_i915_private {
struct {
/** Bridge to intel-gtt-ko */
- struct intel_gtt *gtt;
+ const struct intel_gtt *gtt;
/** Memory allocator for GTT stolen memory */
- struct drm_mm vram;
+ struct drm_mm stolen;
/** Memory allocator for GTT */
struct drm_mm gtt_space;
+ /** List of all objects in gtt_space. Used to restore gtt
+ * mappings on resume */
+ struct list_head gtt_list;
+ /** End of mappable part of GTT */
+ unsigned long gtt_mappable_end;
struct io_mapping *gtt_mapping;
int gtt_mtrr;
- /**
- * Membership on list of all loaded devices, used to evict
- * inactive buffers under memory pressure.
- *
- * Modifications should only be done whilst holding the
- * shrink_list_lock spinlock.
- */
- struct list_head shrink_list;
+ struct shrinker inactive_shrinker;
/**
* List of objects currently involved in rendering.
@@ -609,16 +610,6 @@ typedef struct drm_i915_private {
struct delayed_work retire_work;
/**
- * Waiting sequence number, if any
- */
- uint32_t waiting_gem_seqno;
-
- /**
- * Last seq seen at irq time
- */
- uint32_t irq_gem_seqno;
-
- /**
* Flag if the X Server, and thus DRM, is not currently in
* control of the device.
*
@@ -645,16 +636,11 @@ typedef struct drm_i915_private {
/* storage for physical objects */
struct drm_i915_gem_phys_object *phys_objs[I915_MAX_PHYS_OBJECT];
- uint32_t flush_rings;
-
/* accounting, useful for userland debugging */
- size_t object_memory;
- size_t pin_memory;
- size_t gtt_memory;
size_t gtt_total;
+ size_t mappable_gtt_total;
+ size_t object_memory;
u32 object_count;
- u32 pin_count;
- u32 gtt_count;
} mm;
struct sdvo_device_mapping sdvo_mappings[2];
/* indicate whether the LVDS_BORDER should be enabled or not */
@@ -688,14 +674,14 @@ typedef struct drm_i915_private {
u8 fmax;
u8 fstart;
- u64 last_count1;
- unsigned long last_time1;
- u64 last_count2;
- struct timespec last_time2;
- unsigned long gfx_power;
- int c_m;
- int r_t;
- u8 corr;
+ u64 last_count1;
+ unsigned long last_time1;
+ u64 last_count2;
+ struct timespec last_time2;
+ unsigned long gfx_power;
+ int c_m;
+ int r_t;
+ u8 corr;
spinlock_t *mchdev_lock;
enum no_fbc_reason no_fbc_reason;
@@ -709,20 +695,20 @@ typedef struct drm_i915_private {
struct intel_fbdev *fbdev;
} drm_i915_private_t;
-/** driver private structure attached to each drm_gem_object */
struct drm_i915_gem_object {
struct drm_gem_object base;
/** Current space allocated to this object in the GTT, if any. */
struct drm_mm_node *gtt_space;
+ struct list_head gtt_list;
/** This object's place on the active/flushing/inactive lists */
struct list_head ring_list;
struct list_head mm_list;
/** This object's place on GPU write list */
struct list_head gpu_write_list;
- /** This object's place on eviction list */
- struct list_head evict_list;
+ /** This object's place in the batchbuffer or on the eviction list */
+ struct list_head exec_list;
/**
* This is set if the object is on the active or flushing lists
@@ -738,6 +724,12 @@ struct drm_i915_gem_object {
unsigned int dirty : 1;
/**
+ * This is set if the object has been written to since the last
+ * GPU flush.
+ */
+ unsigned int pending_gpu_write : 1;
+
+ /**
* Fence register bits (if any) for this object. Will be set
* as needed when mapped into the GTT.
* Protected by dev->struct_mutex.
@@ -747,29 +739,15 @@ struct drm_i915_gem_object {
signed int fence_reg : 5;
/**
- * Used for checking the object doesn't appear more than once
- * in an execbuffer object list.
- */
- unsigned int in_execbuffer : 1;
-
- /**
* Advice: are the backing pages purgeable?
*/
unsigned int madv : 2;
/**
- * Refcount for the pages array. With the current locking scheme, there
- * are at most two concurrent users: Binding a bo to the gtt and
- * pwrite/pread using physical addresses. So two bits for a maximum
- * of two users are enough.
- */
- unsigned int pages_refcount : 2;
-#define DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT 0x3
-
- /**
* Current tiling mode for the object.
*/
unsigned int tiling_mode : 2;
+ unsigned int tiling_changed : 1;
/** How many users have pinned this object in GTT space. The following
* users can each hold at most one reference: pwrite/pread, pin_ioctl
@@ -783,28 +761,54 @@ struct drm_i915_gem_object {
unsigned int pin_count : 4;
#define DRM_I915_GEM_OBJECT_MAX_PIN_COUNT 0xf
- /** AGP memory structure for our GTT binding. */
- DRM_AGP_MEM *agp_mem;
+ /**
+ * Is the object at the current location in the gtt mappable and
+ * fenceable? Used to avoid costly recalculations.
+ */
+ unsigned int map_and_fenceable : 1;
+
+ /**
+ * Whether the current gtt mapping needs to be mappable (and isn't just
+ * mappable by accident). Track pin and fault separately for a more
+ * accurate mappable working set.
+ */
+ unsigned int fault_mappable : 1;
+ unsigned int pin_mappable : 1;
+
+ /*
+ * Is the GPU currently using a fence to access this buffer?
+ */
+ unsigned int pending_fenced_gpu_access:1;
+ unsigned int fenced_gpu_access:1;
struct page **pages;
/**
- * Current offset of the object in GTT space.
- *
- * This is the same as gtt_space->start
+ * DMAR support
*/
- uint32_t gtt_offset;
+ struct scatterlist *sg_list;
+ int num_sg;
- /* Which ring is refering to is this object */
- struct intel_ring_buffer *ring;
+ /**
+ * Used for performing relocations during execbuffer insertion.
+ */
+ struct hlist_node exec_node;
+ unsigned long exec_handle;
/**
- * Fake offset for use by mmap(2)
+ * Current offset of the object in GTT space.
+ *
+ * This is the same as gtt_space->start
*/
- uint64_t mmap_offset;
+ uint32_t gtt_offset;
/** Breadcrumb of last rendering to the buffer. */
uint32_t last_rendering_seqno;
+ struct intel_ring_buffer *ring;
+
+ /** Breadcrumb of last fenced GPU access to the buffer. */
+ uint32_t last_fenced_seqno;
+ struct intel_ring_buffer *last_fenced_ring;
/** Current tiling stride for the object, if it's tiled. */
uint32_t stride;
@@ -880,6 +884,68 @@ enum intel_chip_family {
CHIP_I965 = 0x08,
};
+#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
+
+#define IS_I830(dev) ((dev)->pci_device == 0x3577)
+#define IS_845G(dev) ((dev)->pci_device == 0x2562)
+#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
+#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
+#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
+#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
+#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
+#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
+#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
+#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
+#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
+#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
+#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
+#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
+#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
+#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
+#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
+#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
+#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
+
+#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
+#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
+#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
+#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
+#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
+
+#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
+#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
+#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
+
+#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
+#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
+
+/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
+ * rows, which changed the alignment requirements and fence programming.
+ */
+#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
+ IS_I915GM(dev)))
+#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
+#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
+#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
+#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
+#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
+/* dsparb controlled by hw only */
+#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
+
+#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
+#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
+#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
+
+#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
+
+#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
+#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
+#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
+
+#include "i915_trace.h"
+
extern struct drm_ioctl_desc i915_ioctls[];
extern int i915_max_ioctl;
extern unsigned int i915_fbpercrtc;
@@ -907,8 +973,8 @@ extern int i915_driver_device_is_agp(struct drm_device * dev);
extern long i915_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
extern int i915_emit_box(struct drm_device *dev,
- struct drm_clip_rect *boxes,
- int i, int DR1, int DR4);
+ struct drm_clip_rect *box,
+ int DR1, int DR4);
extern int i915_reset(struct drm_device *dev, u8 flags);
extern unsigned long i915_chipset_val(struct drm_i915_private *dev_priv);
extern unsigned long i915_mch_val(struct drm_i915_private *dev_priv);
@@ -918,6 +984,7 @@ extern void i915_update_gfx_val(struct drm_i915_private *dev_priv);
/* i915_irq.c */
void i915_hangcheck_elapsed(unsigned long data);
+void i915_handle_error(struct drm_device *dev, bool wedged);
extern int i915_irq_emit(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int i915_irq_wait(struct drm_device *dev, void *data,
@@ -953,6 +1020,13 @@ void
i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask);
void intel_enable_asle (struct drm_device *dev);
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags);
+
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos);
#ifdef CONFIG_DEBUG_FS
extern void i915_destroy_error_state(struct drm_device *dev);
@@ -1017,15 +1091,28 @@ int i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void i915_gem_load(struct drm_device *dev);
int i915_gem_init_object(struct drm_gem_object *obj);
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- size_t size);
+void i915_gem_flush_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains);
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size);
void i915_gem_free_object(struct drm_gem_object *obj);
-int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment);
-void i915_gem_object_unpin(struct drm_gem_object *obj);
-int i915_gem_object_unbind(struct drm_gem_object *obj);
-void i915_gem_release_mmap(struct drm_gem_object *obj);
+int __must_check i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable);
+void i915_gem_object_unpin(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_unbind(struct drm_i915_gem_object *obj);
+void i915_gem_release_mmap(struct drm_i915_gem_object *obj);
void i915_gem_lastclose(struct drm_device *dev);
+int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
+int __must_check i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
+ bool interruptible);
+void i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ u32 seqno);
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -1035,73 +1122,88 @@ i915_seqno_passed(uint32_t seq1, uint32_t seq2)
return (int32_t)(seq1 - seq2) >= 0;
}
-int i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
- bool interruptible);
-int i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
- bool interruptible);
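+/*
+ * Peek at the seqno that the next request will carry; recording it as
+ * the ring's outstanding lazy request means a real request can be
+ * emitted before the seqno is waited upon.
+ */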
+static inline u32
+i915_gem_next_request_seqno(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ return ring->outstanding_lazy_request = dev_priv->next_seqno;
+}
+
+int __must_check i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible);
+int __must_check i915_gem_object_put_fence(struct drm_i915_gem_object *obj);
+
void i915_gem_retire_requests(struct drm_device *dev);
void i915_gem_reset(struct drm_device *dev);
-void i915_gem_clflush_object(struct drm_gem_object *obj);
-int i915_gem_object_set_domain(struct drm_gem_object *obj,
- uint32_t read_domains,
- uint32_t write_domain);
-int i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
- bool interruptible);
-int i915_gem_init_ringbuffer(struct drm_device *dev);
+void i915_gem_clflush_object(struct drm_i915_gem_object *obj);
+int __must_check i915_gem_object_set_domain(struct drm_i915_gem_object *obj,
+ uint32_t read_domains,
+ uint32_t write_domain);
+int __must_check i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
+ bool interruptible);
+int __must_check i915_gem_init_ringbuffer(struct drm_device *dev);
void i915_gem_cleanup_ringbuffer(struct drm_device *dev);
-int i915_gem_do_init(struct drm_device *dev, unsigned long start,
- unsigned long end);
-int i915_gpu_idle(struct drm_device *dev);
-int i915_gem_idle(struct drm_device *dev);
-uint32_t i915_add_request(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_i915_gem_request *request,
- struct intel_ring_buffer *ring);
-int i915_do_wait_request(struct drm_device *dev,
- uint32_t seqno,
- bool interruptible,
- struct intel_ring_buffer *ring);
+void i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end);
+int __must_check i915_gpu_idle(struct drm_device *dev);
+int __must_check i915_gem_idle(struct drm_device *dev);
+int __must_check i915_add_request(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_i915_gem_request *request,
+ struct intel_ring_buffer *ring);
+int __must_check i915_do_wait_request(struct drm_device *dev,
+ uint32_t seqno,
+ bool interruptible,
+ struct intel_ring_buffer *ring);
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
-int i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj,
- int write);
-int i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
- bool pipelined);
+int __must_check
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj,
+ bool write);
+int __must_check
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
int i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj,
int id,
int align);
void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj);
+ struct drm_i915_gem_object *obj);
void i915_gem_free_all_phys_object(struct drm_device *dev);
-void i915_gem_release(struct drm_device * dev, struct drm_file *file_priv);
+void i915_gem_release(struct drm_device *dev, struct drm_file *file);
-void i915_gem_shrinker_init(void);
-void i915_gem_shrinker_exit(void);
+/* i915_gem_gtt.c */
+void i915_gem_restore_gtt_mappings(struct drm_device *dev);
+int __must_check i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj);
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj);
/* i915_gem_evict.c */
-int i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment);
-int i915_gem_evict_everything(struct drm_device *dev);
-int i915_gem_evict_inactive(struct drm_device *dev);
+int __must_check i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable);
+int __must_check i915_gem_evict_everything(struct drm_device *dev,
+ bool purgeable_only);
+int __must_check i915_gem_evict_inactive(struct drm_device *dev,
+ bool purgeable_only);
/* i915_gem_tiling.c */
void i915_gem_detect_bit_6_swizzle(struct drm_device *dev);
-void i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj);
-void i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj);
-bool i915_tiling_ok(struct drm_device *dev, int stride, int size,
- int tiling_mode);
-bool i915_gem_object_fence_offset_ok(struct drm_gem_object *obj,
- int tiling_mode);
+void i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj);
+void i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj);
/* i915_gem_debug.c */
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
#if WATCH_LISTS
int i915_verify_lists(struct drm_device *dev);
#else
#define i915_verify_lists(dev) 0
#endif
-void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle);
-void i915_gem_dump_object(struct drm_gem_object *obj, int len,
+void i915_gem_object_check_coherency(struct drm_i915_gem_object *obj,
+ int handle);
+void i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark);
/* i915_debugfs.c */
@@ -1163,6 +1265,7 @@ extern void intel_disable_fbc(struct drm_device *dev);
extern void intel_enable_fbc(struct drm_crtc *crtc, unsigned long interval);
extern bool intel_fbc_enabled(struct drm_device *dev);
extern bool ironlake_set_drps(struct drm_device *dev, u8 val);
+extern void gen6_set_rps(struct drm_device *dev, u8 val);
extern void intel_detect_pch (struct drm_device *dev);
extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
@@ -1170,79 +1273,120 @@ extern int intel_trans_dp_port_sel (struct drm_crtc *crtc);
#ifdef CONFIG_DEBUG_FS
extern struct intel_overlay_error_state *intel_overlay_capture_error_state(struct drm_device *dev);
extern void intel_overlay_print_error_state(struct seq_file *m, struct intel_overlay_error_state *error);
+
+extern struct intel_display_error_state *intel_display_capture_error_state(struct drm_device *dev);
+extern void intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error);
#endif
+#define LP_RING(d) (&((struct drm_i915_private *)(d))->ring[RCS])
+
+#define BEGIN_LP_RING(n) \
+ intel_ring_begin(LP_RING(dev_priv), (n))
+
+#define OUT_RING(x) \
+ intel_ring_emit(LP_RING(dev_priv), x)
+
+#define ADVANCE_LP_RING() \
+ intel_ring_advance(LP_RING(dev_priv))
+
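+/*
+ * Typical use of the legacy ring macros (a sketch; error handling of
+ * BEGIN_LP_RING's return value is elided):
+ *
+ *	BEGIN_LP_RING(2);
+ *	OUT_RING(MI_FLUSH);
+ *	OUT_RING(MI_NOOP);
+ *	ADVANCE_LP_RING();
+ */
+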
/**
* Lock test for when it's just for synchronization of ring access.
*
* In that case, we don't need to do it when GEM is initialized as nobody else
* has access to the ring.
*/
-#define RING_LOCK_TEST_WITH_RETURN(dev, file_priv) do { \
- if (((drm_i915_private_t *)dev->dev_private)->render_ring.gem_object \
- == NULL) \
- LOCK_TEST_WITH_RETURN(dev, file_priv); \
+#define RING_LOCK_TEST_WITH_RETURN(dev, file) do { \
+ if (LP_RING(dev->dev_private)->obj == NULL) \
+ LOCK_TEST_WITH_RETURN(dev, file); \
} while (0)
-static inline u32 i915_read(struct drm_i915_private *dev_priv, u32 reg)
+
+#define __i915_read(x, y) \
+static inline u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \
+ u##x val = read##y(dev_priv->regs + reg); \
+ trace_i915_reg_rw('R', reg, val, sizeof(val)); \
+ return val; \
+}
+__i915_read(8, b)
+__i915_read(16, w)
+__i915_read(32, l)
+__i915_read(64, q)
+#undef __i915_read
+
+#define __i915_write(x, y) \
+static inline void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \
+ trace_i915_reg_rw('W', reg, val, sizeof(val)); \
+ write##y(val, dev_priv->regs + reg); \
+}
+__i915_write(8, b)
+__i915_write(16, w)
+__i915_write(32, l)
+__i915_write(64, q)
+#undef __i915_write
+
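+/*
+ * The generators above expand to small traced MMIO helpers; e.g.
+ * __i915_read(32, l) becomes i915_read32(), a readl() that also logs
+ * the access via trace_i915_reg_rw.
+ */
+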
+#define I915_READ8(reg) i915_read8(dev_priv, (reg))
+#define I915_WRITE8(reg, val) i915_write8(dev_priv, (reg), (val))
+
+#define I915_READ16(reg) i915_read16(dev_priv, (reg))
+#define I915_WRITE16(reg, val) i915_write16(dev_priv, (reg), (val))
+#define I915_READ16_NOTRACE(reg) readw(dev_priv->regs + (reg))
+#define I915_WRITE16_NOTRACE(reg, val) writew(val, dev_priv->regs + (reg))
+
+#define I915_READ(reg) i915_read32(dev_priv, (reg))
+#define I915_WRITE(reg, val) i915_write32(dev_priv, (reg), (val))
+#define I915_READ_NOTRACE(reg) readl(dev_priv->regs + (reg))
+#define I915_WRITE_NOTRACE(reg, val) writel(val, dev_priv->regs + (reg))
+
+#define I915_WRITE64(reg, val) i915_write64(dev_priv, (reg), (val))
+#define I915_READ64(reg) i915_read64(dev_priv, (reg))
+
+#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
+#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
+
+
+/* On SNB the forcewake bit must be set before reading ring registers,
+ * to keep the GT core from powering down and returning stale values.
+ */
+void __gen6_force_wake_get(struct drm_i915_private *dev_priv);
+void __gen6_force_wake_put(struct drm_i915_private *dev_priv);
+static inline u32 i915_safe_read(struct drm_i915_private *dev_priv, u32 reg)
{
u32 val;
- val = readl(dev_priv->regs + reg);
- if (dev_priv->debug_flags & I915_DEBUG_READ)
- printk(KERN_ERR "read 0x%08x from 0x%08x\n", val, reg);
+ if (dev_priv->info->gen >= 6) {
+ __gen6_force_wake_get(dev_priv);
+ val = I915_READ(reg);
+ __gen6_force_wake_put(dev_priv);
+ } else
+ val = I915_READ(reg);
+
return val;
}
-static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
- u32 val)
+static inline void
+i915_write(struct drm_i915_private *dev_priv, u32 reg, u64 val, int len)
{
- writel(val, dev_priv->regs + reg);
- if (dev_priv->debug_flags & I915_DEBUG_WRITE)
- printk(KERN_ERR "wrote 0x%08x to 0x%08x\n", val, reg);
+ /* Trace the write before it reaches the hardware */
+ trace_i915_reg_rw('W', reg, val, len);
+ switch (len) {
+ case 8:
+ writeq(val, dev_priv->regs + reg);
+ break;
+ case 4:
+ writel(val, dev_priv->regs + reg);
+ break;
+ case 2:
+ writew(val, dev_priv->regs + reg);
+ break;
+ case 1:
+ writeb(val, dev_priv->regs + reg);
+ break;
+ }
}
-#define I915_READ(reg) i915_read(dev_priv, (reg))
-#define I915_WRITE(reg, val) i915_write(dev_priv, (reg), (val))
-#define I915_READ16(reg) readw(dev_priv->regs + (reg))
-#define I915_WRITE16(reg, val) writel(val, dev_priv->regs + (reg))
-#define I915_READ8(reg) readb(dev_priv->regs + (reg))
-#define I915_WRITE8(reg, val) writeb(val, dev_priv->regs + (reg))
-#define I915_WRITE64(reg, val) writeq(val, dev_priv->regs + (reg))
-#define I915_READ64(reg) readq(dev_priv->regs + (reg))
-#define POSTING_READ(reg) (void)I915_READ(reg)
-#define POSTING_READ16(reg) (void)I915_READ16(reg)
-
-#define I915_DEBUG_ENABLE_IO() (dev_priv->debug_flags |= I915_DEBUG_READ | \
- I915_DEBUG_WRITE)
-#define I915_DEBUG_DISABLE_IO() (dev_priv->debug_flags &= ~(I915_DEBUG_READ | \
- I915_DEBUG_WRITE))
-
-#define I915_VERBOSE 0
-
-#define BEGIN_LP_RING(n) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG(" BEGIN_LP_RING %x\n", (int)(n)); \
- intel_ring_begin(dev, &dev_priv__->render_ring, (n)); \
-} while (0)
-
-
-#define OUT_RING(x) do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG(" OUT_RING %x\n", (int)(x)); \
- intel_ring_emit(dev, &dev_priv__->render_ring, x); \
-} while (0)
-
-#define ADVANCE_LP_RING() do { \
- drm_i915_private_t *dev_priv__ = dev->dev_private; \
- if (I915_VERBOSE) \
- DRM_DEBUG("ADVANCE_LP_RING %x\n", \
- dev_priv__->render_ring.tail); \
- intel_ring_advance(dev, &dev_priv__->render_ring); \
-} while(0)
-
/**
* Reads a dword out of the status page, which is written to from the command
* queue by automatic updates, MI_REPORT_HEAD, MI_STORE_DATA_INDEX, or
@@ -1259,72 +1403,9 @@ static inline void i915_write(struct drm_i915_private *dev_priv, u32 reg,
* The area from dword 0x20 to 0x3ff is available for driver usage.
*/
#define READ_HWSP(dev_priv, reg) (((volatile u32 *)\
- (dev_priv->render_ring.status_page.page_addr))[reg])
+ (LP_RING(dev_priv)->status_page.page_addr))[reg])
#define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, I915_BREADCRUMB_INDEX)
#define I915_GEM_HWS_INDEX 0x20
#define I915_BREADCRUMB_INDEX 0x21
-#define INTEL_INFO(dev) (((struct drm_i915_private *) (dev)->dev_private)->info)
-
-#define IS_I830(dev) ((dev)->pci_device == 0x3577)
-#define IS_845G(dev) ((dev)->pci_device == 0x2562)
-#define IS_I85X(dev) (INTEL_INFO(dev)->is_i85x)
-#define IS_I865G(dev) ((dev)->pci_device == 0x2572)
-#define IS_I915G(dev) (INTEL_INFO(dev)->is_i915g)
-#define IS_I915GM(dev) ((dev)->pci_device == 0x2592)
-#define IS_I945G(dev) ((dev)->pci_device == 0x2772)
-#define IS_I945GM(dev) (INTEL_INFO(dev)->is_i945gm)
-#define IS_BROADWATER(dev) (INTEL_INFO(dev)->is_broadwater)
-#define IS_CRESTLINE(dev) (INTEL_INFO(dev)->is_crestline)
-#define IS_GM45(dev) ((dev)->pci_device == 0x2A42)
-#define IS_G4X(dev) (INTEL_INFO(dev)->is_g4x)
-#define IS_PINEVIEW_G(dev) ((dev)->pci_device == 0xa001)
-#define IS_PINEVIEW_M(dev) ((dev)->pci_device == 0xa011)
-#define IS_PINEVIEW(dev) (INTEL_INFO(dev)->is_pineview)
-#define IS_G33(dev) (INTEL_INFO(dev)->is_g33)
-#define IS_IRONLAKE_D(dev) ((dev)->pci_device == 0x0042)
-#define IS_IRONLAKE_M(dev) ((dev)->pci_device == 0x0046)
-#define IS_MOBILE(dev) (INTEL_INFO(dev)->is_mobile)
-
-#define IS_GEN2(dev) (INTEL_INFO(dev)->gen == 2)
-#define IS_GEN3(dev) (INTEL_INFO(dev)->gen == 3)
-#define IS_GEN4(dev) (INTEL_INFO(dev)->gen == 4)
-#define IS_GEN5(dev) (INTEL_INFO(dev)->gen == 5)
-#define IS_GEN6(dev) (INTEL_INFO(dev)->gen == 6)
-
-#define HAS_BSD(dev) (INTEL_INFO(dev)->has_bsd_ring)
-#define HAS_BLT(dev) (INTEL_INFO(dev)->has_blt_ring)
-#define I915_NEED_GFX_HWS(dev) (INTEL_INFO(dev)->need_gfx_hws)
-
-#define HAS_OVERLAY(dev) (INTEL_INFO(dev)->has_overlay)
-#define OVERLAY_NEEDS_PHYSICAL(dev) (INTEL_INFO(dev)->overlay_needs_physical)
-
-/* With the 945 and later, Y tiling got adjusted so that it was 32 128-byte
- * rows, which changed the alignment requirements and fence programming.
- */
-#define HAS_128_BYTE_Y_TILING(dev) (!IS_GEN2(dev) && !(IS_I915G(dev) || \
- IS_I915GM(dev)))
-#define SUPPORTS_DIGITAL_OUTPUTS(dev) (!IS_GEN2(dev) && !IS_PINEVIEW(dev))
-#define SUPPORTS_INTEGRATED_HDMI(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_INTEGRATED_DP(dev) (IS_G4X(dev) || IS_GEN5(dev))
-#define SUPPORTS_EDP(dev) (IS_IRONLAKE_M(dev))
-#define SUPPORTS_TV(dev) (INTEL_INFO(dev)->supports_tv)
-#define I915_HAS_HOTPLUG(dev) (INTEL_INFO(dev)->has_hotplug)
-/* dsparb controlled by hw only */
-#define DSPARB_HWCONTROL(dev) (IS_G4X(dev) || IS_IRONLAKE(dev))
-
-#define HAS_FW_BLC(dev) (INTEL_INFO(dev)->gen > 2)
-#define HAS_PIPE_CXSR(dev) (INTEL_INFO(dev)->has_pipe_cxsr)
-#define I915_HAS_FBC(dev) (INTEL_INFO(dev)->has_fbc)
-#define I915_HAS_RC6(dev) (INTEL_INFO(dev)->has_rc6)
-
-#define HAS_PCH_SPLIT(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-#define HAS_PIPE_CONTROL(dev) (IS_GEN5(dev) || IS_GEN6(dev))
-
-#define INTEL_PCH_TYPE(dev) (((struct drm_i915_private *)(dev)->dev_private)->pch_type)
-#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
-#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
-
-#define PRIMARY_RINGBUFFER_SIZE (128*1024)
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 17b1cba3b5f..c79c0b62ef6 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -34,39 +34,31 @@
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
-#include <linux/intel-gtt.h>
-static uint32_t i915_gem_get_gtt_alignment(struct drm_gem_object *obj);
-
-static int i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
- bool pipelined);
-static void i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj);
-static void i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj);
-static int i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj,
- int write);
-static int i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+static void i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
+static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj,
+ bool write);
+static int i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset,
uint64_t size);
-static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj);
-static int i915_gem_object_wait_rendering(struct drm_gem_object *obj,
- bool interruptible);
-static int i915_gem_object_bind_to_gtt(struct drm_gem_object *obj,
- unsigned alignment);
-static void i915_gem_clear_fence_reg(struct drm_gem_object *obj);
-static int i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+static void i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj);
+static int i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable);
+static void i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg);
+static int i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv);
-static void i915_gem_free_object_tail(struct drm_gem_object *obj);
+ struct drm_file *file);
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj);
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask);
+static int i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask);
-static void
-i915_gem_object_put_pages(struct drm_gem_object *obj);
-
-static LIST_HEAD(shrink_list);
-static DEFINE_SPINLOCK(shrink_list_lock);
/* some bookkeeping */
static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
@@ -83,34 +75,6 @@ static void i915_gem_info_remove_obj(struct drm_i915_private *dev_priv,
dev_priv->mm.object_memory -= size;
}
-static void i915_gem_info_add_gtt(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.gtt_count++;
- dev_priv->mm.gtt_memory += size;
-}
-
-static void i915_gem_info_remove_gtt(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.gtt_count--;
- dev_priv->mm.gtt_memory -= size;
-}
-
-static void i915_gem_info_add_pin(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.pin_count++;
- dev_priv->mm.pin_memory += size;
-}
-
-static void i915_gem_info_remove_pin(struct drm_i915_private *dev_priv,
- size_t size)
-{
- dev_priv->mm.pin_count--;
- dev_priv->mm.pin_memory -= size;
-}
-
int
i915_gem_check_is_wedged(struct drm_device *dev)
{
@@ -141,7 +105,7 @@ i915_gem_check_is_wedged(struct drm_device *dev)
return -EIO;
}
-static int i915_mutex_lock_interruptible(struct drm_device *dev)
+int i915_mutex_lock_interruptible(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
int ret;
@@ -164,75 +128,76 @@ static int i915_mutex_lock_interruptible(struct drm_device *dev)
}
static inline bool
-i915_gem_object_is_inactive(struct drm_i915_gem_object *obj_priv)
+i915_gem_object_is_inactive(struct drm_i915_gem_object *obj)
{
- return obj_priv->gtt_space &&
- !obj_priv->active &&
- obj_priv->pin_count == 0;
+ return obj->gtt_space && !obj->active && obj->pin_count == 0;
}
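+/*
+ * Carve up the GTT: [start, end) is handed to GEM for object placement,
+ * of which only [start, mappable_end) is reachable through the CPU
+ * aperture.
+ */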
-int i915_gem_do_init(struct drm_device *dev,
- unsigned long start,
- unsigned long end)
+void i915_gem_do_init(struct drm_device *dev,
+ unsigned long start,
+ unsigned long mappable_end,
+ unsigned long end)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- if (start >= end ||
- (start & (PAGE_SIZE - 1)) != 0 ||
- (end & (PAGE_SIZE - 1)) != 0) {
- return -EINVAL;
- }
-
drm_mm_init(&dev_priv->mm.gtt_space, start,
end - start);
dev_priv->mm.gtt_total = end - start;
-
- return 0;
+ dev_priv->mm.mappable_gtt_total = min(end, mappable_end) - start;
+ dev_priv->mm.gtt_mappable_end = mappable_end;
}
int
i915_gem_init_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_init *args = data;
- int ret;
+
+ if (args->gtt_start >= args->gtt_end ||
+ (args->gtt_end | args->gtt_start) & (PAGE_SIZE - 1))
+ return -EINVAL;
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_do_init(dev, args->gtt_start, args->gtt_end);
+ i915_gem_do_init(dev, args->gtt_start, args->gtt_end, args->gtt_end);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
int
i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_get_aperture *args = data;
+ struct drm_i915_gem_object *obj;
+ size_t pinned;
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
+ pinned = 0;
mutex_lock(&dev->struct_mutex);
- args->aper_size = dev_priv->mm.gtt_total;
- args->aper_available_size = args->aper_size - dev_priv->mm.pin_memory;
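+	/* Sum up everything currently pinned into the GTT; the remainder
+	 * is what userspace can still expect to map. */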
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ pinned += obj->gtt_space->size;
mutex_unlock(&dev->struct_mutex);
+ args->aper_size = dev_priv->mm.gtt_total;
+ args->aper_available_size = args->aper_size - pinned;
+
return 0;
}
-
/**
* Creates a new mm object and returns a handle to it.
*/
int
i915_gem_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_create *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret;
u32 handle;
@@ -243,45 +208,28 @@ i915_gem_create_ioctl(struct drm_device *dev, void *data,
if (obj == NULL)
return -ENOMEM;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ ret = drm_gem_handle_create(file, &obj->base, &handle);
if (ret) {
- drm_gem_object_release(obj);
- i915_gem_info_remove_obj(dev->dev_private, obj->size);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev->dev_private, obj->base.size);
kfree(obj);
return ret;
}
/* drop reference from allocate - handle holds it now */
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
trace_i915_gem_object_create(obj);
args->handle = handle;
return 0;
}
-static inline int
-fast_shmem_read(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
+static int i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_object *obj)
{
- char *vaddr;
- int ret;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
- ret = __copy_to_user_inatomic(data, vaddr + page_offset, length);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
-static int i915_gem_object_needs_bit17_swizzle(struct drm_gem_object *obj)
-{
- drm_i915_private_t *dev_priv = obj->dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ drm_i915_private_t *dev_priv = obj->base.dev->dev_private;
return dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_9_10_17 &&
- obj_priv->tiling_mode != I915_TILING_NONE;
+ obj->tiling_mode != I915_TILING_NONE;
}
static inline void
@@ -357,38 +305,51 @@ slow_shmem_bit17_copy(struct page *gpu_page,
 * fault, it fails so we can fall back to i915_gem_shmem_pread_slow().
*/
static int
-i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
- loff_t offset, page_base;
+ loff_t offset;
char __user *user_data;
int page_offset, page_length;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
+ struct page *page;
+ char *vaddr;
+ int ret;
+
/* Operation in this page
*
- * page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- if (fast_shmem_read(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length))
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page);
+ ret = __copy_to_user_inatomic(user_data,
+ vaddr + page_offset,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ mark_page_accessed(page);
+ page_cache_release(page);
+ if (ret)
return -EFAULT;
remain -= page_length;
@@ -399,30 +360,6 @@ i915_gem_shmem_pread_fast(struct drm_device *dev, struct drm_gem_object *obj,
return 0;
}
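
The per-page clamping in the loop above generalizes to any span copy. A
user-space analogue (copy_span() and the flat src buffer are stand-ins for the
shmem pages and the kmap-and-copy step; this is illustrative, not driver code):

#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Copy size bytes starting at offset, never crossing a page boundary in a
 * single step -- mirrors the page_offset/page_length arithmetic above. */
static void copy_span(const char *src, char *dst, size_t offset, size_t size)
{
    size_t remain = size;

    while (remain > 0) {
        size_t page_offset = offset & (PAGE_SIZE - 1);
        size_t page_length = remain;

        if (page_offset + remain > PAGE_SIZE)
            page_length = PAGE_SIZE - page_offset;

        memcpy(dst, src + offset, page_length); /* kmap + copy in the driver */

        remain -= page_length;
        offset += page_length;
        dst += page_length;
    }
}

int main(void)
{
    static char src[3 * PAGE_SIZE], dst[PAGE_SIZE];

    memset(src, 'x', sizeof(src));
    copy_span(src, dst, PAGE_SIZE - 100, 300); /* straddles a page boundary */
    printf("%c\n", dst[0]);
    return 0;
}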
-static int
-i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
-{
- int ret;
-
- ret = i915_gem_object_get_pages(obj, __GFP_NORETRY | __GFP_NOWARN);
-
- /* If we've insufficient memory to map in the pages, attempt
- * to make some space by throwing out some old buffers.
- */
- if (ret == -ENOMEM) {
- struct drm_device *dev = obj->dev;
-
- ret = i915_gem_evict_something(dev, obj->size,
- i915_gem_get_gtt_alignment(obj));
- if (ret)
- return ret;
-
- ret = i915_gem_object_get_pages(obj, 0);
- }
-
- return ret;
-}
-
/**
* This is the fallback shmem pread path, which allocates temporary storage
 * in kernel space to copy_to_user from, outside of the struct_mutex, so we
@@ -430,18 +367,19 @@ i915_gem_object_get_pages_or_evict(struct drm_gem_object *obj)
* and not take page faults.
*/
static int
-i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pread_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pread *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
- int data_page_index, data_page_offset;
+ int shmem_page_offset;
+ int data_page_index, data_page_offset;
int page_length;
int ret;
uint64_t data_ptr = args->data_ptr;
@@ -480,19 +418,18 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- obj_priv = to_intel_bo(obj);
offset = args->offset;
while (remain > 0) {
+ struct page *page;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
 * data_page_offset = offset within data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
shmem_page_offset = offset & ~PAGE_MASK;
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = data_ptr & ~PAGE_MASK;
@@ -503,8 +440,13 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
@@ -513,11 +455,14 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
} else {
slow_shmem_copy(user_pages[data_page_index],
data_page_offset,
- obj_priv->pages[shmem_page_index],
+ page,
shmem_page_offset,
page_length);
}
+ mark_page_accessed(page);
+ page_cache_release(page);
+
remain -= page_length;
data_ptr += page_length;
offset += page_length;
@@ -526,6 +471,7 @@ i915_gem_shmem_pread_slow(struct drm_device *dev, struct drm_gem_object *obj,
out:
for (i = 0; i < pinned_pages; i++) {
SetPageDirty(user_pages[i]);
+ mark_page_accessed(user_pages[i]);
page_cache_release(user_pages[i]);
}
drm_free_large(user_pages);
@@ -540,11 +486,10 @@ out:
*/
int
i915_gem_pread_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pread *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
if (args->size == 0)
@@ -564,39 +509,33 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Bounds check source. */
- if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto out;
-
ret = i915_gem_object_set_cpu_read_domain_range(obj,
args->offset,
args->size);
if (ret)
- goto out_put;
+ goto out;
ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj))
- ret = i915_gem_shmem_pread_fast(dev, obj, args, file_priv);
+ ret = i915_gem_shmem_pread_fast(dev, obj, args, file);
if (ret == -EFAULT)
- ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
+ ret = i915_gem_shmem_pread_slow(dev, obj, args, file);
-out_put:
- i915_gem_object_put_pages(obj);
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -646,32 +585,16 @@ slow_kernel_write(struct io_mapping *mapping,
io_mapping_unmap(dst_vaddr);
}
-static inline int
-fast_shmem_write(struct page **pages,
- loff_t page_base, int page_offset,
- char __user *data,
- int length)
-{
- char *vaddr;
- int ret;
-
- vaddr = kmap_atomic(pages[page_base >> PAGE_SHIFT]);
- ret = __copy_from_user_inatomic(vaddr + page_offset, data, length);
- kunmap_atomic(vaddr);
-
- return ret;
-}
-
/**
* This is the fast pwrite path, where we copy the data directly from the
* user into the GTT, uncached.
*/
static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t offset, page_base;
@@ -681,8 +604,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
- offset = obj_priv->gtt_offset + args->offset;
+ offset = obj->gtt_offset + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -722,11 +644,11 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* than using i915_gem_gtt_pwrite_fast on a G45 (32-bit).
*/
static int
-i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_gtt_pwrite_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
drm_i915_private_t *dev_priv = dev->dev_private;
ssize_t remain;
loff_t gtt_page_base, offset;
@@ -763,12 +685,15 @@ i915_gem_gtt_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
goto out_unpin_pages;
}
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin_pages;
+
+ ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin_pages;
- obj_priv = to_intel_bo(obj);
- offset = obj_priv->gtt_offset + args->offset;
+ offset = obj->gtt_offset + args->offset;
while (remain > 0) {
/* Operation in this page
@@ -814,39 +739,58 @@ out_unpin_pages:
* copy_from_user into the kmapped pages backing the object.
*/
static int
-i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_fast(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
ssize_t remain;
- loff_t offset, page_base;
+ loff_t offset;
char __user *user_data;
int page_offset, page_length;
user_data = (char __user *) (uintptr_t) args->data_ptr;
remain = args->size;
- obj_priv = to_intel_bo(obj);
offset = args->offset;
- obj_priv->dirty = 1;
+ obj->dirty = 1;
while (remain > 0) {
+ struct page *page;
+ char *vaddr;
+ int ret;
+
/* Operation in this page
*
- * page_base = page offset within aperture
* page_offset = offset within page
* page_length = bytes to copy for this page
*/
- page_base = (offset & ~(PAGE_SIZE-1));
page_offset = offset & (PAGE_SIZE-1);
page_length = remain;
if ((page_offset + remain) > PAGE_SIZE)
page_length = PAGE_SIZE - page_offset;
- if (fast_shmem_write(obj_priv->pages,
- page_base, page_offset,
- user_data, page_length))
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ vaddr = kmap_atomic(page);
+ ret = __copy_from_user_inatomic(vaddr + page_offset,
+ user_data,
+ page_length);
+ kunmap_atomic(vaddr);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+
+ /* If we get a fault while copying data, then (presumably) our
+ * source page isn't available. Return the error and we'll
+ * retry in the slow path.
+ */
+ if (ret)
return -EFAULT;
remain -= page_length;
@@ -865,17 +809,18 @@ i915_gem_shmem_pwrite_fast(struct drm_device *dev, struct drm_gem_object *obj,
* struct_mutex is held.
*/
static int
-i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_shmem_pwrite_slow(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
struct mm_struct *mm = current->mm;
struct page **user_pages;
ssize_t remain;
loff_t offset, pinned_pages, i;
loff_t first_data_page, last_data_page, num_pages;
- int shmem_page_index, shmem_page_offset;
+ int shmem_page_offset;
int data_page_index, data_page_offset;
int page_length;
int ret;
@@ -913,20 +858,19 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
- obj_priv = to_intel_bo(obj);
offset = args->offset;
- obj_priv->dirty = 1;
+ obj->dirty = 1;
while (remain > 0) {
+ struct page *page;
+
/* Operation in this page
*
- * shmem_page_index = page number within shmem file
* shmem_page_offset = offset within page in shmem file
* data_page_index = page number in get_user_pages return
 * data_page_offset = offset within data_page_index page.
* page_length = bytes to copy for this page
*/
- shmem_page_index = offset / PAGE_SIZE;
shmem_page_offset = offset & ~PAGE_MASK;
data_page_index = data_ptr / PAGE_SIZE - first_data_page;
data_page_offset = data_ptr & ~PAGE_MASK;
@@ -937,21 +881,32 @@ i915_gem_shmem_pwrite_slow(struct drm_device *dev, struct drm_gem_object *obj,
if ((data_page_offset + page_length) > PAGE_SIZE)
page_length = PAGE_SIZE - data_page_offset;
+ page = read_cache_page_gfp(mapping, offset >> PAGE_SHIFT,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page)) {
+ ret = PTR_ERR(page);
+ goto out;
+ }
+
if (do_bit17_swizzling) {
- slow_shmem_bit17_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_bit17_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length,
0);
} else {
- slow_shmem_copy(obj_priv->pages[shmem_page_index],
+ slow_shmem_copy(page,
shmem_page_offset,
user_pages[data_page_index],
data_page_offset,
page_length);
}
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+
remain -= page_length;
data_ptr += page_length;
offset += page_length;
@@ -975,8 +930,7 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_i915_gem_pwrite *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
if (args->size == 0)
@@ -996,15 +950,15 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Bounds check destination. */
- if (args->offset > obj->size || args->size > obj->size - args->offset) {
+ if (args->offset > obj->base.size ||
+ args->size > obj->base.size - args->offset) {
ret = -EINVAL;
goto out;
}
@@ -1015,16 +969,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
* pread/pwrite currently are reading and writing from the CPU
* perspective, requiring manual detiling by the client.
*/
- if (obj_priv->phys_obj)
+ if (obj->phys_obj)
ret = i915_gem_phys_pwrite(dev, obj, args, file);
- else if (obj_priv->tiling_mode == I915_TILING_NONE &&
- obj_priv->gtt_space &&
- obj->write_domain != I915_GEM_DOMAIN_CPU) {
- ret = i915_gem_object_pin(obj, 0);
+ else if (obj->gtt_space &&
+ obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
+ ret = i915_gem_object_pin(obj, 0, true);
if (ret)
goto out;
- ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ ret = i915_gem_object_set_to_gtt_domain(obj, true);
+ if (ret)
+ goto out_unpin;
+
+ ret = i915_gem_object_put_fence(obj);
if (ret)
goto out_unpin;
@@ -1035,26 +992,19 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
out_unpin:
i915_gem_object_unpin(obj);
} else {
- ret = i915_gem_object_get_pages_or_evict(obj);
- if (ret)
- goto out;
-
ret = i915_gem_object_set_to_cpu_domain(obj, 1);
if (ret)
- goto out_put;
+ goto out;
ret = -EFAULT;
if (!i915_gem_object_needs_bit17_swizzle(obj))
ret = i915_gem_shmem_pwrite_fast(dev, obj, args, file);
if (ret == -EFAULT)
ret = i915_gem_shmem_pwrite_slow(dev, obj, args, file);
-
-out_put:
- i915_gem_object_put_pages(obj);
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1066,12 +1016,10 @@ unlock:
*/
int
i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_set_domain *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t read_domains = args->read_domains;
uint32_t write_domain = args->write_domain;
int ret;
@@ -1096,28 +1044,15 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
-
- intel_mark_busy(dev, obj);
if (read_domains & I915_GEM_DOMAIN_GTT) {
ret = i915_gem_object_set_to_gtt_domain(obj, write_domain != 0);
- /* Update the LRU on the fence for the CPU access that's
- * about to occur.
- */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
-
/* Silently promote "you're not bound, there was nothing to do"
* to success, since the client was just asking us to
* make sure everything was done.
@@ -1128,11 +1063,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
}
- /* Maintain LRU order of "inactive" objects */
- if (ret == 0 && i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
-
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1143,10 +1074,10 @@ unlock:
*/
int
i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_sw_finish *args = data;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret = 0;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1156,17 +1087,17 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
/* Pinned buffers may be scanout, so flush the cache */
- if (to_intel_bo(obj)->pin_count)
+ if (obj->pin_count)
i915_gem_object_flush_cpu_write_domain(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1181,8 +1112,9 @@ unlock:
*/
int
i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap *args = data;
struct drm_gem_object *obj;
loff_t offset;
@@ -1191,10 +1123,15 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
if (!(dev->driver->driver_features & DRIVER_GEM))
return -ENODEV;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = drm_gem_object_lookup(dev, file, args->handle);
if (obj == NULL)
return -ENOENT;
+ if (obj->size > dev_priv->mm.gtt_mappable_end) {
+ drm_gem_object_unreference_unlocked(obj);
+ return -E2BIG;
+ }
+
offset = args->offset;
down_write(&current->mm->mmap_sem);
@@ -1229,10 +1166,9 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
*/
int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
- struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
+ struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
pgoff_t page_offset;
unsigned long pfn;
int ret = 0;
@@ -1244,27 +1180,35 @@ int i915_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
/* Now bind it into the GTT if needed */
mutex_lock(&dev->struct_mutex);
- if (!obj_priv->gtt_space) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto unlock;
- ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (!obj->map_and_fenceable) {
+ ret = i915_gem_object_unbind(obj);
if (ret)
goto unlock;
}
-
- /* Need a new fence register? */
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, true);
+ if (!obj->gtt_space) {
+ ret = i915_gem_object_bind_to_gtt(obj, 0, true);
if (ret)
goto unlock;
}
- if (i915_gem_object_is_inactive(obj_priv))
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
+ ret = i915_gem_object_set_to_gtt_domain(obj, write);
+ if (ret)
+ goto unlock;
+
+ if (obj->tiling_mode == I915_TILING_NONE)
+ ret = i915_gem_object_put_fence(obj);
+ else
+ ret = i915_gem_object_get_fence(obj, NULL, true);
+ if (ret)
+ goto unlock;
+
+ if (i915_gem_object_is_inactive(obj))
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ obj->fault_mappable = true;
- pfn = ((dev->agp->base + obj_priv->gtt_offset) >> PAGE_SHIFT) +
+ pfn = ((dev->agp->base + obj->gtt_offset) >> PAGE_SHIFT) +
page_offset;
/* Finally, remap it using the new GTT offset */
@@ -1273,11 +1217,12 @@ unlock:
mutex_unlock(&dev->struct_mutex);
switch (ret) {
+ case -EAGAIN:
+ set_need_resched();
+ /* fall through -- retry the fault as NOPAGE */
case 0:
case -ERESTARTSYS:
return VM_FAULT_NOPAGE;
case -ENOMEM:
- case -EAGAIN:
return VM_FAULT_OOM;
default:
return VM_FAULT_SIGBUS;
@@ -1296,37 +1241,39 @@ unlock:
* This routine allocates and attaches a fake offset for @obj.
*/
static int
-i915_gem_create_mmap_offset(struct drm_gem_object *obj)
+i915_gem_create_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_map_list *list;
struct drm_local_map *map;
int ret = 0;
/* Set the object up for mmap'ing */
- list = &obj->map_list;
+ list = &obj->base.map_list;
list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
if (!list->map)
return -ENOMEM;
map = list->map;
map->type = _DRM_GEM;
- map->size = obj->size;
+ map->size = obj->base.size;
map->handle = obj;
/* Get a DRM GEM mmap offset allocated... */
list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
- obj->size / PAGE_SIZE, 0, 0);
+ obj->base.size / PAGE_SIZE,
+ 0, 0);
if (!list->file_offset_node) {
- DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
+ DRM_ERROR("failed to allocate offset for bo %d\n",
+ obj->base.name);
ret = -ENOSPC;
goto out_free_list;
}
list->file_offset_node = drm_mm_get_block(list->file_offset_node,
- obj->size / PAGE_SIZE, 0);
+ obj->base.size / PAGE_SIZE,
+ 0);
if (!list->file_offset_node) {
ret = -ENOMEM;
goto out_free_list;
@@ -1339,16 +1286,13 @@ i915_gem_create_mmap_offset(struct drm_gem_object *obj)
goto out_free_mm;
}
- /* By now we should be all set, any drm_mmap request on the offset
- * below will get to our mmap & fault handler */
- obj_priv->mmap_offset = ((uint64_t) list->hash.key) << PAGE_SHIFT;
-
return 0;
out_free_mm:
drm_mm_put_block(list->file_offset_node);
out_free_list:
kfree(list->map);
+ list->map = NULL;
return ret;
}
@@ -1368,38 +1312,51 @@ out_free_list:
* fixup by i915_gem_fault().
*/
void
-i915_gem_release_mmap(struct drm_gem_object *obj)
+i915_gem_release_mmap(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ if (!obj->fault_mappable)
+ return;
+
+ unmap_mapping_range(obj->base.dev->dev_mapping,
+ (loff_t)obj->base.map_list.hash.key << PAGE_SHIFT,
+ obj->base.size, 1);
- if (dev->dev_mapping)
- unmap_mapping_range(dev->dev_mapping,
- obj_priv->mmap_offset, obj->size, 1);
+ obj->fault_mappable = false;
}
static void
-i915_gem_free_mmap_offset(struct drm_gem_object *obj)
+i915_gem_free_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
struct drm_gem_mm *mm = dev->mm_private;
- struct drm_map_list *list;
+ struct drm_map_list *list = &obj->base.map_list;
- list = &obj->map_list;
drm_ht_remove_item(&mm->offset_hash, &list->hash);
+ drm_mm_put_block(list->file_offset_node);
+ kfree(list->map);
+ list->map = NULL;
+}
- if (list->file_offset_node) {
- drm_mm_put_block(list->file_offset_node);
- list->file_offset_node = NULL;
- }
+static uint32_t
+i915_gem_get_gtt_size(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ uint32_t size;
- if (list->map) {
- kfree(list->map);
- list->map = NULL;
- }
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ obj->tiling_mode == I915_TILING_NONE)
+ return obj->base.size;
- obj_priv->mmap_offset = 0;
+ /* Previous chips need a power-of-two fence region when tiling */
+ if (INTEL_INFO(dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
+ while (size < obj->base.size)
+ size <<= 1;
+
+ return size;
}
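
i915_gem_get_gtt_size() rounds tiled objects up to the next power of two,
starting from the per-generation minimum fence size. A standalone sketch of
that rounding with hypothetical inputs:

#include <stdio.h>
#include <stdint.h>

/* Mirrors the loop above: start at the per-generation minimum fence
 * size and double until the object fits. gen and obj_size here are
 * hypothetical inputs, not values read from hardware. */
static uint32_t fence_size(int gen, uint32_t obj_size)
{
    uint32_t size = (gen == 3) ? 1024 * 1024 : 512 * 1024;

    while (size < obj_size)
        size <<= 1;
    return size;
}

int main(void)
{
    /* A 600K tiled object needs a 1M fence region on gen3, and also
     * rounds to 1M on gen2 (512K doubled once). */
    printf("%u %u\n", fence_size(3, 600 * 1024), fence_size(2, 600 * 1024));
    return 0;
}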
/**
@@ -1407,42 +1364,68 @@ i915_gem_free_mmap_offset(struct drm_gem_object *obj)
* @obj: object to check
*
* Return the required GTT alignment for an object, taking into account
- * potential fence register mapping if needed.
+ * potential fence register mapping.
*/
static uint32_t
-i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
+i915_gem_get_gtt_alignment(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int start, i;
+ struct drm_device *dev = obj->base.dev;
/*
* Minimum alignment is 4k (GTT page size), but might be greater
* if a fence register is needed for the object.
*/
- if (INTEL_INFO(dev)->gen >= 4 || obj_priv->tiling_mode == I915_TILING_NONE)
+ if (INTEL_INFO(dev)->gen >= 4 ||
+ obj->tiling_mode == I915_TILING_NONE)
return 4096;
/*
* Previous chips need to be aligned to the size of the smallest
* fence register that can contain the object.
*/
- if (INTEL_INFO(dev)->gen == 3)
- start = 1024*1024;
- else
- start = 512*1024;
+ return i915_gem_get_gtt_size(obj);
+}
- for (i = start; i < obj->size; i <<= 1)
- ;
+/**
+ * i915_gem_get_unfenced_gtt_alignment - return required GTT alignment for an
+ * unfenced object
+ * @obj: object to check
+ *
+ * Return the required GTT alignment for an object, only taking into account
+ * unfenced tiled surface requirements.
+ */
+static uint32_t
+i915_gem_get_unfenced_gtt_alignment(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ int tile_height;
+
+ /*
+ * Minimum alignment is 4k (GTT page size) for sane hw.
+ */
+ if (INTEL_INFO(dev)->gen >= 4 || IS_G33(dev) ||
+ obj->tiling_mode == I915_TILING_NONE)
+ return 4096;
+
+ /*
+ * Older chips need unfenced tiled buffers to be aligned to the left
+ * edge of an even tile row (where tile rows are counted as if the bo is
+ * placed in a fenced gtt region).
+ */
+ if (IS_GEN2(dev) ||
+ (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev)))
+ tile_height = 32;
+ else
+ tile_height = 8;
- return i;
+ return tile_height * obj->stride * 2;
}
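
Worked numbers for the two-tile-row alignment above (the 2048-byte stride is
chosen purely for illustration):

    gen2, or Y tiling with 128-byte tiles:  32 * 2048 * 2 = 128 KiB
    gen3 X tiling:                           8 * 2048 * 2 =  32 KiB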
/**
* i915_gem_mmap_gtt_ioctl - prepare an object for GTT mmap'ing
* @dev: DRM device
* @data: GTT mapping ioctl data
- * @file_priv: GEM object info
+ * @file: GEM object info
*
* Simply returns the fake offset to userspace so it can mmap it.
* The mmap call will end up in drm_gem_mmap(), which will set things
@@ -1455,11 +1438,11 @@ i915_gem_get_gtt_alignment(struct drm_gem_object *obj)
*/
int
i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_mmap_gtt *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
if (!(dev->driver->driver_features & DRIVER_GEM))
@@ -1469,130 +1452,196 @@ i915_gem_mmap_gtt_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->base.size > dev_priv->mm.gtt_mappable_end) {
+ ret = -E2BIG;
+ goto unlock;
+ }
+
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to mmap a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
- if (!obj_priv->mmap_offset) {
+ if (!obj->base.map_list.map) {
ret = i915_gem_create_mmap_offset(obj);
if (ret)
goto out;
}
- args->offset = obj_priv->mmap_offset;
-
- /*
- * Pull it into the GTT so that we have a page list (makes the
- * initial fault faster and any subsequent flushing possible).
- */
- if (!obj_priv->agp_mem) {
- ret = i915_gem_object_bind_to_gtt(obj, 0);
- if (ret)
- goto out;
- }
+ args->offset = (u64)obj->base.map_list.hash.key << PAGE_SHIFT;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
+static int
+i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj,
+ gfp_t gfpmask)
+{
+ int page_count, i;
+ struct address_space *mapping;
+ struct inode *inode;
+ struct page *page;
+
+ /* Get the list of pages out of our struct file. They'll be pinned
+ * at this point until we release them.
+ */
+ page_count = obj->base.size / PAGE_SIZE;
+ BUG_ON(obj->pages != NULL);
+ obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
+ if (obj->pages == NULL)
+ return -ENOMEM;
+
+ inode = obj->base.filp->f_path.dentry->d_inode;
+ mapping = inode->i_mapping;
+ for (i = 0; i < page_count; i++) {
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER |
+ __GFP_COLD |
+ __GFP_RECLAIMABLE |
+ gfpmask);
+ if (IS_ERR(page))
+ goto err_pages;
+
+ obj->pages[i] = page;
+ }
+
+ if (obj->tiling_mode != I915_TILING_NONE)
+ i915_gem_object_do_bit_17_swizzle(obj);
+
+ return 0;
+
+err_pages:
+ while (i--)
+ page_cache_release(obj->pages[i]);
+
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
+ return PTR_ERR(page);
+}
+
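
The err_pages unwind above releases only the pages acquired before the
failure. The same idiom in isolation (user-space sketch; acquire() simulates a
failure part-way through):

#include <stdlib.h>

static void *acquire(int i) { return (i == 3) ? NULL : malloc(16); }

int main(void)
{
    void *pages[8];
    int i;

    for (i = 0; i < 8; i++) {
        pages[i] = acquire(i);  /* simulated failure at i == 3 */
        if (!pages[i])
            goto err;
    }
    return 0;
err:
    while (i--)                 /* walks back over 2, 1, 0 only */
        free(pages[i]);
    return 1;
}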
static void
-i915_gem_object_put_pages(struct drm_gem_object *obj)
+i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size / PAGE_SIZE;
+ int page_count = obj->base.size / PAGE_SIZE;
int i;
- BUG_ON(obj_priv->pages_refcount == 0);
- BUG_ON(obj_priv->madv == __I915_MADV_PURGED);
+ BUG_ON(obj->madv == __I915_MADV_PURGED);
- if (--obj_priv->pages_refcount != 0)
- return;
-
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
i915_gem_object_save_bit_17_swizzle(obj);
- if (obj_priv->madv == I915_MADV_DONTNEED)
- obj_priv->dirty = 0;
+ if (obj->madv == I915_MADV_DONTNEED)
+ obj->dirty = 0;
for (i = 0; i < page_count; i++) {
- if (obj_priv->dirty)
- set_page_dirty(obj_priv->pages[i]);
+ if (obj->dirty)
+ set_page_dirty(obj->pages[i]);
- if (obj_priv->madv == I915_MADV_WILLNEED)
- mark_page_accessed(obj_priv->pages[i]);
+ if (obj->madv == I915_MADV_WILLNEED)
+ mark_page_accessed(obj->pages[i]);
- page_cache_release(obj_priv->pages[i]);
+ page_cache_release(obj->pages[i]);
}
- obj_priv->dirty = 0;
-
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
-}
-
-static uint32_t
-i915_gem_next_request_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ obj->dirty = 0;
- ring->outstanding_lazy_request = true;
- return dev_priv->next_seqno;
+ drm_free_large(obj->pages);
+ obj->pages = NULL;
}
-static void
-i915_gem_object_move_to_active(struct drm_gem_object *obj,
- struct intel_ring_buffer *ring)
+void
+i915_gem_object_move_to_active(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t seqno = i915_gem_next_request_seqno(dev, ring);
BUG_ON(ring == NULL);
- obj_priv->ring = ring;
+ obj->ring = ring;
/* Add a reference if we're newly entering the active list. */
- if (!obj_priv->active) {
- drm_gem_object_reference(obj);
- obj_priv->active = 1;
+ if (!obj->active) {
+ drm_gem_object_reference(&obj->base);
+ obj->active = 1;
}
/* Move from whatever list we were on to the tail of execution. */
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.active_list);
- list_move_tail(&obj_priv->ring_list, &ring->active_list);
- obj_priv->last_rendering_seqno = seqno;
+ list_move_tail(&obj->mm_list, &dev_priv->mm.active_list);
+ list_move_tail(&obj->ring_list, &ring->active_list);
+
+ obj->last_rendering_seqno = seqno;
+ if (obj->fenced_gpu_access) {
+ struct drm_i915_fence_reg *reg;
+
+ BUG_ON(obj->fence_reg == I915_FENCE_REG_NONE);
+
+ obj->last_fenced_seqno = seqno;
+ obj->last_fenced_ring = ring;
+
+ reg = &dev_priv->fence_regs[obj->fence_reg];
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ }
+}
+
+static void
+i915_gem_object_move_off_active(struct drm_i915_gem_object *obj)
+{
+ list_del_init(&obj->ring_list);
+ obj->last_rendering_seqno = 0;
}
static void
-i915_gem_object_move_to_flushing(struct drm_gem_object *obj)
+i915_gem_object_move_to_flushing(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- BUG_ON(!obj_priv->active);
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.flushing_list);
- list_del_init(&obj_priv->ring_list);
- obj_priv->last_rendering_seqno = 0;
+ BUG_ON(!obj->active);
+ list_move_tail(&obj->mm_list, &dev_priv->mm.flushing_list);
+
+ i915_gem_object_move_off_active(obj);
+}
+
+static void
+i915_gem_object_move_to_inactive(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (obj->pin_count != 0)
+ list_move_tail(&obj->mm_list, &dev_priv->mm.pinned_list);
+ else
+ list_move_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
+
+ BUG_ON(!list_empty(&obj->gpu_write_list));
+ BUG_ON(!obj->active);
+ obj->ring = NULL;
+
+ i915_gem_object_move_off_active(obj);
+ obj->fenced_gpu_access = false;
+
+ obj->active = 0;
+ obj->pending_gpu_write = false;
+ drm_gem_object_unreference(&obj->base);
+
+ WARN_ON(i915_verify_lists(dev));
}
/* Immediately discard the backing storage */
static void
-i915_gem_object_truncate(struct drm_gem_object *obj)
+i915_gem_object_truncate(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct inode *inode;
/* Our goal here is to return as much of the memory as
@@ -1601,42 +1650,18 @@ i915_gem_object_truncate(struct drm_gem_object *obj)
* backing pages, *now*. Here we mirror the actions taken
 * by shmem_delete_inode() to release the backing store.
*/
- inode = obj->filp->f_path.dentry->d_inode;
+ inode = obj->base.filp->f_path.dentry->d_inode;
truncate_inode_pages(inode->i_mapping, 0);
if (inode->i_op->truncate_range)
inode->i_op->truncate_range(inode, 0, (loff_t)-1);
- obj_priv->madv = __I915_MADV_PURGED;
+ obj->madv = __I915_MADV_PURGED;
}
static inline int
-i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj_priv)
-{
- return obj_priv->madv == I915_MADV_DONTNEED;
-}
-
-static void
-i915_gem_object_move_to_inactive(struct drm_gem_object *obj)
+i915_gem_object_is_purgeable(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (obj_priv->pin_count != 0)
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.pinned_list);
- else
- list_move_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- list_del_init(&obj_priv->ring_list);
-
- BUG_ON(!list_empty(&obj_priv->gpu_write_list));
-
- obj_priv->last_rendering_seqno = 0;
- obj_priv->ring = NULL;
- if (obj_priv->active) {
- obj_priv->active = 0;
- drm_gem_object_unreference(obj);
- }
- WARN_ON(i915_verify_lists(dev));
+ return obj->madv == I915_MADV_DONTNEED;
}
static void
@@ -1644,37 +1669,27 @@ i915_gem_process_flushing_list(struct drm_device *dev,
uint32_t flush_domains,
struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv, *next;
+ struct drm_i915_gem_object *obj, *next;
- list_for_each_entry_safe(obj_priv, next,
+ list_for_each_entry_safe(obj, next,
&ring->gpu_write_list,
gpu_write_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- if (obj->write_domain & flush_domains) {
- uint32_t old_write_domain = obj->write_domain;
+ if (obj->base.write_domain & flush_domains) {
+ uint32_t old_write_domain = obj->base.write_domain;
- obj->write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_active(obj, ring);
-
- /* update the fence lru list */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- list_move_tail(&reg->lru_list,
- &dev_priv->mm.fence_list);
- }
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_active(obj, ring,
+ i915_gem_next_request_seqno(dev, ring));
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
}
}
-uint32_t
+int
i915_add_request(struct drm_device *dev,
struct drm_file *file,
struct drm_i915_gem_request *request,
@@ -1684,17 +1699,17 @@ i915_add_request(struct drm_device *dev,
struct drm_i915_file_private *file_priv = NULL;
uint32_t seqno;
int was_empty;
+ int ret;
+
+ BUG_ON(request == NULL);
if (file != NULL)
file_priv = file->driver_priv;
- if (request == NULL) {
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL)
- return 0;
- }
+ ret = ring->add_request(ring, &seqno);
+ if (ret)
+ return ret;
- seqno = ring->add_request(dev, ring, 0);
ring->outstanding_lazy_request = false;
request->seqno = seqno;
@@ -1718,26 +1733,7 @@ i915_add_request(struct drm_device *dev,
queue_delayed_work(dev_priv->wq,
&dev_priv->mm.retire_work, HZ);
}
- return seqno;
-}
-
-/**
- * Command execution barrier
- *
- * Ensures that all commands in the ring are finished
- * before signalling the CPU
- */
-static void
-i915_retire_commands(struct drm_device *dev, struct intel_ring_buffer *ring)
-{
- uint32_t flush_domains = 0;
-
- /* The sampler always gets flushed on i965 (sigh) */
- if (INTEL_INFO(dev)->gen >= 4)
- flush_domains |= I915_GEM_DOMAIN_SAMPLER;
-
- ring->flush(dev, ring,
- I915_GEM_DOMAIN_COMMAND, flush_domains);
+ return 0;
}
static inline void
@@ -1770,62 +1766,76 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
}
while (!list_empty(&ring->active_list)) {
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- obj_priv->base.write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_inactive(&obj_priv->base);
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
+ }
+}
+
+static void i915_gem_reset_fences(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ for (i = 0; i < 16; i++) {
+ struct drm_i915_fence_reg *reg = &dev_priv->fence_regs[i];
+ struct drm_i915_gem_object *obj = reg->obj;
+
+ if (!obj)
+ continue;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ reg->obj->fence_reg = I915_FENCE_REG_NONE;
+ reg->obj->fenced_gpu_access = false;
+ reg->obj->last_fenced_seqno = 0;
+ reg->obj->last_fenced_ring = NULL;
+ i915_gem_clear_fence_reg(dev, reg);
}
}
void i915_gem_reset(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int i;
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->render_ring);
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->bsd_ring);
- i915_gem_reset_ring_lists(dev_priv, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_reset_ring_lists(dev_priv, &dev_priv->ring[i]);
/* Remove anything from the flushing lists. The GPU cache is likely
* to be lost on reset along with the data, so simply move the
* lost bo to the inactive list.
*/
while (!list_empty(&dev_priv->mm.flushing_list)) {
- obj_priv = list_first_entry(&dev_priv->mm.flushing_list,
- struct drm_i915_gem_object,
- mm_list);
+ obj = list_first_entry(&dev_priv->mm.flushing_list,
+ struct drm_i915_gem_object,
+ mm_list);
- obj_priv->base.write_domain = 0;
- list_del_init(&obj_priv->gpu_write_list);
- i915_gem_object_move_to_inactive(&obj_priv->base);
+ obj->base.write_domain = 0;
+ list_del_init(&obj->gpu_write_list);
+ i915_gem_object_move_to_inactive(obj);
}
/* Move everything out of the GPU domains to ensure we do any
* necessary invalidation upon reuse.
*/
- list_for_each_entry(obj_priv,
+ list_for_each_entry(obj,
&dev_priv->mm.inactive_list,
mm_list)
{
- obj_priv->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
+ obj->base.read_domains &= ~I915_GEM_GPU_DOMAINS;
}
/* The fence registers are invalidated so clear them out */
- for (i = 0; i < 16; i++) {
- struct drm_i915_fence_reg *reg;
-
- reg = &dev_priv->fence_regs[i];
- if (!reg->obj)
- continue;
-
- i915_gem_clear_fence_reg(reg->obj);
- }
+ i915_gem_reset_fences(dev);
}
/**
@@ -1837,6 +1847,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
{
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t seqno;
+ int i;
if (!ring->status_page.page_addr ||
list_empty(&ring->request_list))
@@ -1844,7 +1855,12 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
WARN_ON(i915_verify_lists(dev));
- seqno = ring->get_seqno(dev, ring);
+ seqno = ring->get_seqno(ring);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (seqno >= ring->sync_seqno[i])
+ ring->sync_seqno[i] = 0;
+
while (!list_empty(&ring->request_list)) {
struct drm_i915_gem_request *request;
@@ -1866,18 +1882,16 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
* by the ringbuffer to the flushing/inactive lists as appropriate.
*/
while (!list_empty(&ring->active_list)) {
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj_priv = list_first_entry(&ring->active_list,
- struct drm_i915_gem_object,
- ring_list);
+ obj = list_first_entry(&ring->active_list,
+ struct drm_i915_gem_object,
+ ring_list);
- if (!i915_seqno_passed(seqno, obj_priv->last_rendering_seqno))
+ if (!i915_seqno_passed(seqno, obj->last_rendering_seqno))
break;
- obj = &obj_priv->base;
- if (obj->write_domain != 0)
+ if (obj->base.write_domain != 0)
i915_gem_object_move_to_flushing(obj);
else
i915_gem_object_move_to_inactive(obj);
@@ -1885,7 +1899,7 @@ i915_gem_retire_requests_ring(struct drm_device *dev,
if (unlikely (dev_priv->trace_irq_seqno &&
i915_seqno_passed(dev_priv->trace_irq_seqno, seqno))) {
- ring->user_irq_put(dev, ring);
+ ring->irq_put(ring);
dev_priv->trace_irq_seqno = 0;
}
@@ -1896,24 +1910,24 @@ void
i915_gem_retire_requests(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
if (!list_empty(&dev_priv->mm.deferred_free_list)) {
- struct drm_i915_gem_object *obj_priv, *tmp;
+ struct drm_i915_gem_object *obj, *next;
/* We must be careful that during unbind() we do not
* accidentally infinitely recurse into retire requests.
* Currently:
* retire -> free -> unbind -> wait -> retire_ring
*/
- list_for_each_entry_safe(obj_priv, tmp,
+ list_for_each_entry_safe(obj, next,
&dev_priv->mm.deferred_free_list,
mm_list)
- i915_gem_free_object_tail(&obj_priv->base);
+ i915_gem_free_object_tail(obj);
}
- i915_gem_retire_requests_ring(dev, &dev_priv->render_ring);
- i915_gem_retire_requests_ring(dev, &dev_priv->bsd_ring);
- i915_gem_retire_requests_ring(dev, &dev_priv->blt_ring);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ i915_gem_retire_requests_ring(dev, &dev_priv->ring[i]);
}
static void
@@ -1935,9 +1949,9 @@ i915_gem_retire_work_handler(struct work_struct *work)
i915_gem_retire_requests(dev);
if (!dev_priv->mm.suspended &&
- (!list_empty(&dev_priv->render_ring.request_list) ||
- !list_empty(&dev_priv->bsd_ring.request_list) ||
- !list_empty(&dev_priv->blt_ring.request_list)))
+ (!list_empty(&dev_priv->ring[RCS].request_list) ||
+ !list_empty(&dev_priv->ring[VCS].request_list) ||
+ !list_empty(&dev_priv->ring[BCS].request_list)))
queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, HZ);
mutex_unlock(&dev->struct_mutex);
}
@@ -1955,14 +1969,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (atomic_read(&dev_priv->mm.wedged))
return -EAGAIN;
- if (ring->outstanding_lazy_request) {
- seqno = i915_add_request(dev, NULL, NULL, ring);
- if (seqno == 0)
+ if (seqno == ring->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
return -ENOMEM;
+
+ ret = i915_add_request(dev, NULL, request, ring);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
}
- BUG_ON(seqno == dev_priv->next_seqno);
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
if (HAS_PCH_SPLIT(dev))
ier = I915_READ(DEIER) | I915_READ(GTIER);
else
@@ -1976,21 +1999,23 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
trace_i915_gem_request_wait_begin(dev, seqno);
- ring->waiting_gem_seqno = seqno;
- ring->user_irq_get(dev, ring);
- if (interruptible)
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- else
- wait_event(ring->irq_queue,
- i915_seqno_passed(
- ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
+ ring->waiting_seqno = seqno;
+ if (ring->irq_get(ring)) {
+ if (interruptible)
+ ret = wait_event_interruptible(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
+ else
+ wait_event(ring->irq_queue,
+ i915_seqno_passed(ring->get_seqno(ring), seqno)
+ || atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
- ring->waiting_gem_seqno = 0;
+ ring->irq_put(ring);
+ } else if (wait_for(i915_seqno_passed(ring->get_seqno(ring),
+ seqno) ||
+ atomic_read(&dev_priv->mm.wedged), 3000))
+ ret = -EBUSY;
+ ring->waiting_seqno = 0;
trace_i915_gem_request_wait_end(dev, seqno);
}
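
When the ring IRQ cannot be taken, the wait above degrades to a bounded poll.
A standalone sketch of that strategy, including the wraparound-safe comparison
used by i915_seqno_passed() (the simulated progress and iteration budget stand
in for real hardware and the 3000 ms timeout):

#include <stdbool.h>
#include <stdio.h>

static unsigned hw_seqno;  /* stand-in for ring->get_seqno() */

/* Wraparound-safe: true once a has reached or passed b. */
static bool seqno_passed(unsigned a, unsigned b) { return (int)(a - b) >= 0; }

static int wait_seqno_poll(unsigned seqno, int budget)
{
    while (budget--) {
        hw_seqno++;  /* simulated GPU progress */
        if (seqno_passed(hw_seqno, seqno))
            return 0;
    }
    return -1;       /* -EBUSY in the driver */
}

int main(void)
{
    printf("%d\n", wait_seqno_poll(100, 3000));
    return 0;
}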
@@ -1999,7 +2024,7 @@ i915_do_wait_request(struct drm_device *dev, uint32_t seqno,
if (ret && ret != -ERESTARTSYS)
DRM_ERROR("%s returns %d (awaiting %d at %d, next %d)\n",
- __func__, ret, seqno, ring->get_seqno(dev, ring),
+ __func__, ret, seqno, ring->get_seqno(ring),
dev_priv->next_seqno);
/* Directly dispatch request retiring. While we have the work queue
@@ -2024,70 +2049,30 @@ i915_wait_request(struct drm_device *dev, uint32_t seqno,
return i915_do_wait_request(dev, seqno, 1, ring);
}
-static void
-i915_gem_flush_ring(struct drm_device *dev,
- struct drm_file *file_priv,
- struct intel_ring_buffer *ring,
- uint32_t invalidate_domains,
- uint32_t flush_domains)
-{
- ring->flush(dev, ring, invalidate_domains, flush_domains);
- i915_gem_process_flushing_list(dev, flush_domains, ring);
-}
-
-static void
-i915_gem_flush(struct drm_device *dev,
- struct drm_file *file_priv,
- uint32_t invalidate_domains,
- uint32_t flush_domains,
- uint32_t flush_rings)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
-
- if (flush_domains & I915_GEM_DOMAIN_CPU)
- drm_agp_chipset_flush(dev);
-
- if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
- if (flush_rings & RING_RENDER)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->render_ring,
- invalidate_domains, flush_domains);
- if (flush_rings & RING_BSD)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->bsd_ring,
- invalidate_domains, flush_domains);
- if (flush_rings & RING_BLT)
- i915_gem_flush_ring(dev, file_priv,
- &dev_priv->blt_ring,
- invalidate_domains, flush_domains);
- }
-}
-
/**
* Ensures that all rendering to the object has completed and the object is
* safe to unbind from the GTT or access from the CPU.
*/
-static int
-i915_gem_object_wait_rendering(struct drm_gem_object *obj,
+int
+i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
bool interruptible)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
int ret;
/* This function only exists to support waiting for existing rendering,
* not for emitting required flushes.
*/
- BUG_ON((obj->write_domain & I915_GEM_GPU_DOMAINS) != 0);
+ BUG_ON((obj->base.write_domain & I915_GEM_GPU_DOMAINS) != 0);
/* If there is rendering queued on the buffer being evicted, wait for
* it.
*/
- if (obj_priv->active) {
+ if (obj->active) {
ret = i915_do_wait_request(dev,
- obj_priv->last_rendering_seqno,
+ obj->last_rendering_seqno,
interruptible,
- obj_priv->ring);
+ obj->ring);
if (ret)
return ret;
}
@@ -2099,17 +2084,14 @@ i915_gem_object_wait_rendering(struct drm_gem_object *obj,
* Unbinds an object from the GTT aperture.
*/
int
-i915_gem_object_unbind(struct drm_gem_object *obj)
+i915_gem_object_unbind(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret = 0;
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return 0;
- if (obj_priv->pin_count != 0) {
+ if (obj->pin_count != 0) {
DRM_ERROR("Attempting to unbind pinned buffer\n");
return -EINVAL;
}
@@ -2132,27 +2114,27 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
*/
if (ret) {
i915_gem_clflush_object(obj);
- obj->read_domains = obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
/* release the fence reg _after_ flushing */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- i915_gem_clear_fence_reg(obj);
-
- drm_unbind_agp(obj_priv->agp_mem);
- drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE);
+ ret = i915_gem_object_put_fence(obj);
+ if (ret == -ERESTARTSYS)
+ return ret;
- i915_gem_object_put_pages(obj);
- BUG_ON(obj_priv->pages_refcount);
+ i915_gem_gtt_unbind_object(obj);
+ i915_gem_object_put_pages_gtt(obj);
- i915_gem_info_remove_gtt(dev_priv, obj->size);
- list_del_init(&obj_priv->mm_list);
+ list_del_init(&obj->gtt_list);
+ list_del_init(&obj->mm_list);
+ /* Avoid an unnecessary call to unbind on rebind. */
+ obj->map_and_fenceable = true;
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
- obj_priv->gtt_offset = 0;
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+ obj->gtt_offset = 0;
- if (i915_gem_object_is_purgeable(obj_priv))
+ if (i915_gem_object_is_purgeable(obj))
i915_gem_object_truncate(obj);
trace_i915_gem_object_unbind(obj);
@@ -2160,14 +2142,25 @@ i915_gem_object_unbind(struct drm_gem_object *obj)
return ret;
}
+void
+i915_gem_flush_ring(struct drm_device *dev,
+ struct intel_ring_buffer *ring,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains)
+{
+ ring->flush(ring, invalidate_domains, flush_domains);
+ i915_gem_process_flushing_list(dev, flush_domains, ring);
+}
+
static int i915_ring_idle(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
if (list_empty(&ring->gpu_write_list) && list_empty(&ring->active_list))
return 0;
- i915_gem_flush_ring(dev, NULL, ring,
- I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
+ if (!list_empty(&ring->gpu_write_list))
+ i915_gem_flush_ring(dev, ring,
+ I915_GEM_GPU_DOMAINS, I915_GEM_GPU_DOMAINS);
return i915_wait_request(dev,
i915_gem_next_request_seqno(dev, ring),
ring);
@@ -2178,7 +2171,7 @@ i915_gpu_idle(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
bool lists_empty;
- int ret;
+ int ret, i;
lists_empty = (list_empty(&dev_priv->mm.flushing_list) &&
list_empty(&dev_priv->mm.active_list));
@@ -2186,258 +2179,296 @@ i915_gpu_idle(struct drm_device *dev)
return 0;
/* Flush everything onto the inactive list. */
- ret = i915_ring_idle(dev, &dev_priv->render_ring);
- if (ret)
- return ret;
-
- ret = i915_ring_idle(dev, &dev_priv->bsd_ring);
- if (ret)
- return ret;
-
- ret = i915_ring_idle(dev, &dev_priv->blt_ring);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int
-i915_gem_object_get_pages(struct drm_gem_object *obj,
- gfp_t gfpmask)
-{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count, i;
- struct address_space *mapping;
- struct inode *inode;
- struct page *page;
-
- BUG_ON(obj_priv->pages_refcount
- == DRM_I915_GEM_OBJECT_MAX_PAGES_REFCOUNT);
-
- if (obj_priv->pages_refcount++ != 0)
- return 0;
-
- /* Get the list of pages out of our struct file. They'll be pinned
- * at this point until we release them.
- */
- page_count = obj->size / PAGE_SIZE;
- BUG_ON(obj_priv->pages != NULL);
- obj_priv->pages = drm_calloc_large(page_count, sizeof(struct page *));
- if (obj_priv->pages == NULL) {
- obj_priv->pages_refcount--;
- return -ENOMEM;
- }
-
- inode = obj->filp->f_path.dentry->d_inode;
- mapping = inode->i_mapping;
- for (i = 0; i < page_count; i++) {
- page = read_cache_page_gfp(mapping, i,
- GFP_HIGHUSER |
- __GFP_COLD |
- __GFP_RECLAIMABLE |
- gfpmask);
- if (IS_ERR(page))
- goto err_pages;
-
- obj_priv->pages[i] = page;
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ ret = i915_ring_idle(dev, &dev_priv->ring[i]);
+ if (ret)
+ return ret;
}
- if (obj_priv->tiling_mode != I915_TILING_NONE)
- i915_gem_object_do_bit_17_swizzle(obj);
-
return 0;
-
-err_pages:
- while (i--)
- page_cache_release(obj_priv->pages[i]);
-
- drm_free_large(obj_priv->pages);
- obj_priv->pages = NULL;
- obj_priv->pages_refcount--;
- return PTR_ERR(page);
}
-static void sandybridge_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int sandybridge_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
- 0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= (uint64_t)((obj_priv->stride / 128) - 1) <<
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
+ 0xfffff000) << 32;
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= (uint64_t)((obj->stride / 128) - 1) <<
SANDYBRIDGE_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (regnum * 8), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_SANDYBRIDGE_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + regnum * 8, val);
+
+ return 0;
}
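The pipelined branch above rewards a closer look: a 64-bit fence register cannot be written atomically from the ring, so the value is queued as two 32-bit MI_LOAD_REGISTER_IMM writes, low dword to the register and high dword to register+4. The following stand-alone sketch shows how a fence value decomposes into those two payloads; all input values are hypothetical and SANDYBRIDGE_FENCE_PITCH_SHIFT is assumed to be 32, matching i915_reg.h.

/* Sketch only: split a Sandybridge fence value into the two dwords
 * emitted via MI_LOAD_REGISTER_IMM above. Inputs are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define SANDYBRIDGE_FENCE_PITCH_SHIFT 32 /* assumed from i915_reg.h */

int main(void)
{
    uint32_t gtt_offset = 0x00100000; /* 1MiB-aligned start */
    uint32_t size       = 0x00100000; /* power-of-two object size */
    uint32_t stride     = 4096;       /* fence pitch in bytes */

    uint64_t val = (uint64_t)((gtt_offset + size - 4096) & 0xfffff000) << 32;
    val |= gtt_offset & 0xfffff000;
    val |= (uint64_t)((stride / 128) - 1) << SANDYBRIDGE_FENCE_PITCH_SHIFT;
    /* tiling and VALID bits omitted for brevity */

    printf("LRI dword at reg+0: 0x%08x\n", (uint32_t)val);
    printf("LRI dword at reg+4: 0x%08x\n", (uint32_t)(val >> 32));
    return 0;
}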
-static void i965_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i965_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint64_t val;
- val = (uint64_t)((obj_priv->gtt_offset + obj->size - 4096) &
+ val = (uint64_t)((obj->gtt_offset + size - 4096) &
0xfffff000) << 32;
- val |= obj_priv->gtt_offset & 0xfffff000;
- val |= ((obj_priv->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val |= obj->gtt_offset & 0xfffff000;
+ val |= ((obj->stride / 128) - 1) << I965_FENCE_PITCH_SHIFT;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I965_FENCE_TILING_Y_SHIFT;
val |= I965_FENCE_REG_VALID;
- I915_WRITE64(FENCE_REG_965_0 + (regnum * 8), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 6);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(2));
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8);
+ intel_ring_emit(pipelined, (u32)val);
+ intel_ring_emit(pipelined, FENCE_REG_965_0 + regnum*8 + 4);
+ intel_ring_emit(pipelined, (u32)(val >> 32));
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE64(FENCE_REG_965_0 + regnum * 8, val);
+
+ return 0;
}
-static void i915_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i915_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ u32 fence_reg, val, pitch_val;
int tile_width;
- uint32_t fence_reg, val;
- uint32_t pitch_val;
- if ((obj_priv->gtt_offset & ~I915_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 1M or size (0x%zx) aligned\n",
- __func__, obj_priv->gtt_offset, obj->size);
- return;
- }
+ if (WARN((obj->gtt_offset & ~I915_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x [fenceable? %d] not 1M or pot-size (0x%08x) aligned\n",
+ obj->gtt_offset, obj->map_and_fenceable, size))
+ return -EINVAL;
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
+ if (obj->tiling_mode == I915_TILING_Y && HAS_128_BYTE_Y_TILING(dev))
tile_width = 128;
else
tile_width = 512;
/* Note: pitch better be a power of two tile widths */
- pitch_val = obj_priv->stride / tile_width;
+ pitch_val = obj->stride / tile_width;
pitch_val = ffs(pitch_val) - 1;
- if (obj_priv->tiling_mode == I915_TILING_Y &&
- HAS_128_BYTE_Y_TILING(dev))
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- else
- WARN_ON(pitch_val > I915_FENCE_MAX_PITCH_VAL);
-
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- val |= I915_FENCE_SIZE_BITS(obj->size);
+ val |= I915_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- if (regnum < 8)
- fence_reg = FENCE_REG_830_0 + (regnum * 4);
+ fence_reg = obj->fence_reg;
+ if (fence_reg < 8)
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
else
- fence_reg = FENCE_REG_945_8 + ((regnum - 8) * 4);
- I915_WRITE(fence_reg, val);
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
+
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, fence_reg);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(fence_reg, val);
+
+ return 0;
}
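The ffs() arithmetic above encodes the fence pitch as log2 of the stride measured in tile widths; a stand-alone sketch of the same computation, with hypothetical values:

/* Sketch only: the pre-965 fence pitch encoding used above. The stride
 * must be a power-of-two number of tile widths; the register field
 * stores log2 of that count.
 */
#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
    unsigned int tile_width = 512; /* X-tiled on gen3 */
    unsigned int stride = 4096;    /* hypothetical: 8 tiles wide */
    unsigned int pitch_val = ffs(stride / tile_width) - 1;

    printf("pitch_val = %u\n", pitch_val); /* prints 3, i.e. log2(8) */
    return 0;
}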
-static void i830_write_fence_reg(struct drm_i915_fence_reg *reg)
+static int i830_write_fence_reg(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_gem_object *obj = reg->obj;
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int regnum = obj_priv->fence_reg;
+ u32 size = obj->gtt_space->size;
+ int regnum = obj->fence_reg;
uint32_t val;
uint32_t pitch_val;
- uint32_t fence_size_bits;
- if ((obj_priv->gtt_offset & ~I830_FENCE_START_MASK) ||
- (obj_priv->gtt_offset & (obj->size - 1))) {
- WARN(1, "%s: object 0x%08x not 512K or size aligned\n",
- __func__, obj_priv->gtt_offset);
- return;
- }
+ if (WARN((obj->gtt_offset & ~I830_FENCE_START_MASK) ||
+ (size & -size) != size ||
+ (obj->gtt_offset & (size - 1)),
+ "object 0x%08x not 512K or pot-size 0x%08x aligned\n",
+ obj->gtt_offset, size))
+ return -EINVAL;
- pitch_val = obj_priv->stride / 128;
+ pitch_val = obj->stride / 128;
pitch_val = ffs(pitch_val) - 1;
- WARN_ON(pitch_val > I830_FENCE_MAX_PITCH_VAL);
- val = obj_priv->gtt_offset;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ val = obj->gtt_offset;
+ if (obj->tiling_mode == I915_TILING_Y)
val |= 1 << I830_FENCE_TILING_Y_SHIFT;
- fence_size_bits = I830_FENCE_SIZE_BITS(obj->size);
- WARN_ON(fence_size_bits & ~0x00000f00);
- val |= fence_size_bits;
+ val |= I830_FENCE_SIZE_BITS(size);
val |= pitch_val << I830_FENCE_PITCH_SHIFT;
val |= I830_FENCE_REG_VALID;
- I915_WRITE(FENCE_REG_830_0 + (regnum * 4), val);
+ if (pipelined) {
+ int ret = intel_ring_begin(pipelined, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(pipelined, MI_NOOP);
+ intel_ring_emit(pipelined, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(pipelined, FENCE_REG_830_0 + regnum*4);
+ intel_ring_emit(pipelined, val);
+ intel_ring_advance(pipelined);
+ } else
+ I915_WRITE(FENCE_REG_830_0 + regnum * 4, val);
+
+ return 0;
}
-static int i915_find_fence_reg(struct drm_device *dev,
- bool interruptible)
+static bool ring_passed_seqno(struct intel_ring_buffer *ring, u32 seqno)
+{
+ return i915_seqno_passed(ring->get_seqno(ring), seqno);
+}
+
+static int
+i915_gem_object_flush_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
+{
+ int ret;
+
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev,
+ obj->last_fenced_ring,
+ 0, obj->base.write_domain);
+
+ obj->fenced_gpu_access = false;
+ }
+
+ if (obj->last_fenced_seqno && pipelined != obj->last_fenced_ring) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ obj->last_fenced_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ obj->last_fenced_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ obj->last_fenced_seqno = 0;
+ obj->last_fenced_ring = NULL;
+ }
+
+ return 0;
+}
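ring_passed_seqno() above reduces to the driver's wrap-safe seqno comparison; a minimal sketch of that comparison, assuming i915_seqno_passed() keeps its usual signed-subtraction form:

/* Sketch only: wrap-safe 32-bit seqno comparison of the kind
 * ring_passed_seqno() relies on.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool seqno_passed(uint32_t current, uint32_t target)
{
    /* Signed difference tolerates counter wrap-around. */
    return (int32_t)(current - target) >= 0;
}

int main(void)
{
    printf("%d\n", seqno_passed(5, 3));           /* 1: already retired */
    printf("%d\n", seqno_passed(2, 0xfffffffeu)); /* 1: retired across wrap */
    printf("%d\n", seqno_passed(3, 5));           /* 0: still outstanding */
    return 0;
}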
+
+int
+i915_gem_object_put_fence(struct drm_i915_gem_object *obj)
+{
+ int ret;
+
+ if (obj->tiling_mode)
+ i915_gem_release_mmap(obj);
+
+ ret = i915_gem_object_flush_fence(obj, NULL, true);
+ if (ret)
+ return ret;
+
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+ i915_gem_clear_fence_reg(obj->base.dev,
+ &dev_priv->fence_regs[obj->fence_reg]);
+
+ obj->fence_reg = I915_FENCE_REG_NONE;
+ }
+
+ return 0;
+}
+
+static struct drm_i915_fence_reg *
+i915_find_fence_reg(struct drm_device *dev,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_fence_reg *reg = NULL;
- struct drm_i915_gem_object *obj_priv = NULL;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj = NULL;
- int i, avail, ret;
+ struct drm_i915_fence_reg *reg, *first, *avail;
+ int i;
/* First try to find a free reg */
- avail = 0;
+ avail = NULL;
for (i = dev_priv->fence_reg_start; i < dev_priv->num_fence_regs; i++) {
reg = &dev_priv->fence_regs[i];
if (!reg->obj)
- return i;
+ return reg;
- obj_priv = to_intel_bo(reg->obj);
- if (!obj_priv->pin_count)
- avail++;
+ if (!reg->obj->pin_count)
+ avail = reg;
}
- if (avail == 0)
- return -ENOSPC;
+ if (avail == NULL)
+ return NULL;
/* None available, try to steal one or wait for a user to finish */
- i = I915_FENCE_REG_NONE;
- list_for_each_entry(reg, &dev_priv->mm.fence_list,
- lru_list) {
- obj = reg->obj;
- obj_priv = to_intel_bo(obj);
-
- if (obj_priv->pin_count)
+ avail = first = NULL;
+ list_for_each_entry(reg, &dev_priv->mm.fence_list, lru_list) {
+ if (reg->obj->pin_count)
continue;
- /* found one! */
- i = obj_priv->fence_reg;
- break;
+ if (first == NULL)
+ first = reg;
+
+ if (!pipelined ||
+ !reg->obj->last_fenced_ring ||
+ reg->obj->last_fenced_ring == pipelined) {
+ avail = reg;
+ break;
+ }
}
- BUG_ON(i == I915_FENCE_REG_NONE);
+ if (avail == NULL)
+ avail = first;
- /* We only have a reference on obj from the active list. put_fence_reg
- * might drop that one, causing a use-after-free in it. So hold a
- * private reference to obj like the other callers of put_fence_reg
- * (set_tiling ioctl) do. */
- drm_gem_object_reference(obj);
- ret = i915_gem_object_put_fence_reg(obj, interruptible);
- drm_gem_object_unreference(obj);
- if (ret != 0)
- return ret;
-
- return i;
+ return avail;
}
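Restated, the selection policy above is: take a completely free register if one exists; otherwise steal the least-recently-used unpinned register, preferring one whose last fence activity was on the requested ring so that no cross-ring wait is incurred. A compact sketch of that policy over a toy array (types and names hypothetical):

/* Sketch only: the two-pass fence-register selection above, over a
 * toy LRU-ordered array instead of the driver's fence_list.
 */
#include <stddef.h>

struct toy_reg {
    int in_use;    /* reg->obj != NULL */
    int pinned;    /* reg->obj->pin_count */
    int last_ring; /* last fencing ring, -1 if none */
};

static struct toy_reg *pick_fence(struct toy_reg *lru, size_t n, int ring)
{
    struct toy_reg *first = NULL;
    size_t i;

    for (i = 0; i < n; i++) /* pass 1: a free register wins outright */
        if (!lru[i].in_use)
            return &lru[i];

    for (i = 0; i < n; i++) { /* pass 2: steal from the LRU */
        if (lru[i].pinned)
            continue;
        if (!first)
            first = &lru[i];
        if (ring < 0 || lru[i].last_ring < 0 || lru[i].last_ring == ring)
            return &lru[i]; /* no cross-ring stall needed */
    }
    return first; /* may be NULL: everything is pinned */
}

int main(void)
{
    struct toy_reg regs[3] = { {1, 1, 0}, {1, 0, 1}, {1, 0, 0} };
    return pick_fence(regs, 3, 0) == &regs[2] ? 0 : 1; /* same-ring victim */
}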
/**
- * i915_gem_object_get_fence_reg - set up a fence reg for an object
+ * i915_gem_object_get_fence - set up a fence reg for an object
* @obj: object to map through a fence reg
+ * @pipelined: ring on which to queue the change, or NULL for CPU access
+ * @interruptible: whether the wait for the register to retire may be interrupted
*
* When mapping objects through the GTT, userspace wants to be able to write
* to them without having to worry about swizzling if the object is tiled.
@@ -2449,72 +2480,138 @@ static int i915_find_fence_reg(struct drm_device *dev,
* and tiling format.
*/
int
-i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
- bool interruptible)
+i915_gem_object_get_fence(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined,
+ bool interruptible)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg = NULL;
+ struct drm_i915_fence_reg *reg;
int ret;
- /* Just update our place in the LRU if our fence is getting used. */
- if (obj_priv->fence_reg != I915_FENCE_REG_NONE) {
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
+ /* XXX disable pipelining. There are bugs. Shocking. */
+ pipelined = NULL;
+
+ /* Just update our place in the LRU if our fence is getting reused. */
+ if (obj->fence_reg != I915_FENCE_REG_NONE) {
+ reg = &dev_priv->fence_regs[obj->fence_reg];
list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+
+ if (!pipelined) {
+ if (reg->setup_seqno) {
+ if (!ring_passed_seqno(obj->last_fenced_ring,
+ reg->setup_seqno)) {
+ ret = i915_do_wait_request(obj->base.dev,
+ reg->setup_seqno,
+ interruptible,
+ obj->last_fenced_ring);
+ if (ret)
+ return ret;
+ }
+
+ reg->setup_seqno = 0;
+ }
+ } else if (obj->last_fenced_ring &&
+ obj->last_fenced_ring != pipelined) {
+ ret = i915_gem_object_flush_fence(obj,
+ pipelined,
+ interruptible);
+ if (ret)
+ return ret;
+ } else if (obj->tiling_changed) {
+ if (obj->fenced_gpu_access) {
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
+ i915_gem_flush_ring(obj->base.dev, obj->ring,
+ 0, obj->base.write_domain);
+
+ obj->fenced_gpu_access = false;
+ }
+ }
+
+ if (!obj->fenced_gpu_access && !obj->last_fenced_seqno)
+ pipelined = NULL;
+ BUG_ON(!pipelined && reg->setup_seqno);
+
+ if (obj->tiling_changed) {
+ if (pipelined) {
+ reg->setup_seqno =
+ i915_gem_next_request_seqno(dev, pipelined);
+ obj->last_fenced_seqno = reg->setup_seqno;
+ obj->last_fenced_ring = pipelined;
+ }
+ goto update;
+ }
+
return 0;
}
- switch (obj_priv->tiling_mode) {
- case I915_TILING_NONE:
- WARN(1, "allocating a fence for non-tiled object?\n");
- break;
- case I915_TILING_X:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (512 - 1)),
- "object 0x%08x is X tiled but has non-512B pitch\n",
- obj_priv->gtt_offset);
- break;
- case I915_TILING_Y:
- if (!obj_priv->stride)
- return -EINVAL;
- WARN((obj_priv->stride & (128 - 1)),
- "object 0x%08x is Y tiled but has non-128B pitch\n",
- obj_priv->gtt_offset);
- break;
- }
+ reg = i915_find_fence_reg(dev, pipelined);
+ if (reg == NULL)
+ return -ENOSPC;
- ret = i915_find_fence_reg(dev, interruptible);
- if (ret < 0)
+ ret = i915_gem_object_flush_fence(obj, pipelined, interruptible);
+ if (ret)
return ret;
- obj_priv->fence_reg = ret;
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- list_add_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ if (reg->obj) {
+ struct drm_i915_gem_object *old = reg->obj;
+
+ drm_gem_object_reference(&old->base);
+
+ if (old->tiling_mode)
+ i915_gem_release_mmap(old);
+
+ ret = i915_gem_object_flush_fence(old,
+ pipelined,
+ interruptible);
+ if (ret) {
+ drm_gem_object_unreference(&old->base);
+ return ret;
+ }
+
+ if (old->last_fenced_seqno == 0 && obj->last_fenced_seqno == 0)
+ pipelined = NULL;
+
+ old->fence_reg = I915_FENCE_REG_NONE;
+ old->last_fenced_ring = pipelined;
+ old->last_fenced_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+
+ drm_gem_object_unreference(&old->base);
+ } else if (obj->last_fenced_seqno == 0)
+ pipelined = NULL;
reg->obj = obj;
+ list_move_tail(&reg->lru_list, &dev_priv->mm.fence_list);
+ obj->fence_reg = reg - dev_priv->fence_regs;
+ obj->last_fenced_ring = pipelined;
+
+ reg->setup_seqno =
+ pipelined ? i915_gem_next_request_seqno(dev, pipelined) : 0;
+ obj->last_fenced_seqno = reg->setup_seqno;
+update:
+ obj->tiling_changed = false;
switch (INTEL_INFO(dev)->gen) {
case 6:
- sandybridge_write_fence_reg(reg);
+ ret = sandybridge_write_fence_reg(obj, pipelined);
break;
case 5:
case 4:
- i965_write_fence_reg(reg);
+ ret = i965_write_fence_reg(obj, pipelined);
break;
case 3:
- i915_write_fence_reg(reg);
+ ret = i915_write_fence_reg(obj, pipelined);
break;
case 2:
- i830_write_fence_reg(reg);
+ ret = i830_write_fence_reg(obj, pipelined);
break;
}
- trace_i915_gem_object_get_fence(obj, obj_priv->fence_reg,
- obj_priv->tiling_mode);
-
- return 0;
+ return ret;
}
/**
@@ -2522,154 +2619,127 @@ i915_gem_object_get_fence_reg(struct drm_gem_object *obj,
* @obj: object to clear
*
* Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
+ * data structures in dev_priv and obj.
*/
static void
-i915_gem_clear_fence_reg(struct drm_gem_object *obj)
+i915_gem_clear_fence_reg(struct drm_device *dev,
+ struct drm_i915_fence_reg *reg)
{
- struct drm_device *dev = obj->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg =
- &dev_priv->fence_regs[obj_priv->fence_reg];
- uint32_t fence_reg;
+ uint32_t fence_reg = reg - dev_priv->fence_regs;
switch (INTEL_INFO(dev)->gen) {
case 6:
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 +
- (obj_priv->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + fence_reg*8, 0);
break;
case 5:
case 4:
- I915_WRITE64(FENCE_REG_965_0 + (obj_priv->fence_reg * 8), 0);
+ I915_WRITE64(FENCE_REG_965_0 + fence_reg*8, 0);
break;
case 3:
- if (obj_priv->fence_reg >= 8)
- fence_reg = FENCE_REG_945_8 + (obj_priv->fence_reg - 8) * 4;
+ if (fence_reg >= 8)
+ fence_reg = FENCE_REG_945_8 + (fence_reg - 8) * 4;
else
case 2:
- fence_reg = FENCE_REG_830_0 + obj_priv->fence_reg * 4;
+ fence_reg = FENCE_REG_830_0 + fence_reg * 4;
I915_WRITE(fence_reg, 0);
break;
}
- reg->obj = NULL;
- obj_priv->fence_reg = I915_FENCE_REG_NONE;
list_del_init(&reg->lru_list);
-}
-
-/**
- * i915_gem_object_put_fence_reg - waits on outstanding fenced access
- * to the buffer to finish, and then resets the fence register.
- * @obj: tiled object holding a fence register.
- * @bool: whether the wait upon the fence is interruptible
- *
- * Zeroes out the fence register itself and clears out the associated
- * data structures in dev_priv and obj_priv.
- */
-int
-i915_gem_object_put_fence_reg(struct drm_gem_object *obj,
- bool interruptible)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- struct drm_i915_fence_reg *reg;
-
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE)
- return 0;
-
- /* If we've changed tiling, GTT-mappings of the object
- * need to re-fault to ensure that the correct fence register
- * setup is in place.
- */
- i915_gem_release_mmap(obj);
-
- /* On the i915, GPU access to tiled buffers is via a fence,
- * therefore we must wait for any outstanding access to complete
- * before clearing the fence.
- */
- reg = &dev_priv->fence_regs[obj_priv->fence_reg];
- if (reg->gpu) {
- int ret;
-
- ret = i915_gem_object_flush_gpu_write_domain(obj, true);
- if (ret)
- return ret;
-
- ret = i915_gem_object_wait_rendering(obj, interruptible);
- if (ret)
- return ret;
-
- reg->gpu = false;
- }
-
- i915_gem_object_flush_gtt_write_domain(obj);
- i915_gem_clear_fence_reg(obj);
-
- return 0;
+ reg->obj = NULL;
+ reg->setup_seqno = 0;
}
/**
* Finds free space in the GTT aperture and binds the object there.
*/
static int
-i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
+i915_gem_object_bind_to_gtt(struct drm_i915_gem_object *obj,
+ unsigned alignment,
+ bool map_and_fenceable)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
struct drm_mm_node *free_space;
- gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ gfp_t gfpmask = __GFP_NORETRY | __GFP_NOWARN;
+ u32 size, fence_size, fence_alignment, unfenced_alignment;
+ bool mappable, fenceable;
int ret;
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to bind a purgeable object\n");
return -EINVAL;
}
+ fence_size = i915_gem_get_gtt_size(obj);
+ fence_alignment = i915_gem_get_gtt_alignment(obj);
+ unfenced_alignment = i915_gem_get_unfenced_gtt_alignment(obj);
+
if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (alignment & (i915_gem_get_gtt_alignment(obj) - 1)) {
+ alignment = map_and_fenceable ? fence_alignment :
+ unfenced_alignment;
+ if (map_and_fenceable && alignment & (fence_alignment - 1)) {
DRM_ERROR("Invalid object alignment requested %u\n", alignment);
return -EINVAL;
}
+ size = map_and_fenceable ? fence_size : obj->base.size;
+
/* If the object is bigger than the entire aperture, reject it early
* before evicting everything in a vain attempt to find space.
*/
- if (obj->size > dev_priv->mm.gtt_total) {
+ if (obj->base.size >
+ (map_and_fenceable ? dev_priv->mm.gtt_mappable_end : dev_priv->mm.gtt_total)) {
DRM_ERROR("Attempting to bind an object larger than the aperture\n");
return -E2BIG;
}
search_free:
- free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
- obj->size, alignment, 0);
- if (free_space != NULL)
- obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size,
- alignment);
- if (obj_priv->gtt_space == NULL) {
+ if (map_and_fenceable)
+ free_space =
+ drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ free_space = drm_mm_search_free(&dev_priv->mm.gtt_space,
+ size, alignment, 0);
+
+ if (free_space != NULL) {
+ if (map_and_fenceable)
+ obj->gtt_space =
+ drm_mm_get_block_range_generic(free_space,
+ size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0);
+ else
+ obj->gtt_space =
+ drm_mm_get_block(free_space, size, alignment);
+ }
+ if (obj->gtt_space == NULL) {
/* If the gtt is empty and we're still having trouble
* fitting our object in, we're out of memory.
*/
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_evict_something(dev, size, alignment,
+ map_and_fenceable);
if (ret)
return ret;
goto search_free;
}
- ret = i915_gem_object_get_pages(obj, gfpmask);
+ ret = i915_gem_object_get_pages_gtt(obj, gfpmask);
if (ret) {
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
if (ret == -ENOMEM) {
/* first try to clear up some space from the GTT */
- ret = i915_gem_evict_something(dev, obj->size,
- alignment);
+ ret = i915_gem_evict_something(dev, size,
+ alignment,
+ map_and_fenceable);
if (ret) {
/* now try to shrink everyone else */
if (gfpmask) {
@@ -2686,126 +2756,113 @@ i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment)
return ret;
}
- /* Create an AGP memory structure pointing at our pages, and bind it
- * into the GTT.
- */
- obj_priv->agp_mem = drm_agp_bind_pages(dev,
- obj_priv->pages,
- obj->size >> PAGE_SHIFT,
- obj_priv->gtt_space->start,
- obj_priv->agp_type);
- if (obj_priv->agp_mem == NULL) {
- i915_gem_object_put_pages(obj);
- drm_mm_put_block(obj_priv->gtt_space);
- obj_priv->gtt_space = NULL;
-
- ret = i915_gem_evict_something(dev, obj->size, alignment);
+ ret = i915_gem_gtt_bind_object(obj);
+ if (ret) {
+ i915_gem_object_put_pages_gtt(obj);
+ drm_mm_put_block(obj->gtt_space);
+ obj->gtt_space = NULL;
+
+ ret = i915_gem_evict_something(dev, size,
+ alignment, map_and_fenceable);
if (ret)
return ret;
goto search_free;
}
- /* keep track of bound object by adding it to the inactive list */
- list_add_tail(&obj_priv->mm_list, &dev_priv->mm.inactive_list);
- i915_gem_info_add_gtt(dev_priv, obj->size);
+ list_add_tail(&obj->gtt_list, &dev_priv->mm.gtt_list);
+ list_add_tail(&obj->mm_list, &dev_priv->mm.inactive_list);
/* Assert that the object is not currently in any GPU domain. As it
* wasn't in the GTT, there shouldn't be any way it could have been in
* a GPU cache
*/
- BUG_ON(obj->read_domains & I915_GEM_GPU_DOMAINS);
- BUG_ON(obj->write_domain & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
+ BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
- obj_priv->gtt_offset = obj_priv->gtt_space->start;
- trace_i915_gem_object_bind(obj, obj_priv->gtt_offset);
+ obj->gtt_offset = obj->gtt_space->start;
+ fenceable =
+ obj->gtt_space->size == fence_size &&
+ (obj->gtt_space->start & (fence_alignment - 1)) == 0;
+
+ mappable =
+ obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end;
+
+ obj->map_and_fenceable = mappable && fenceable;
+
+ trace_i915_gem_object_bind(obj, obj->gtt_offset, map_and_fenceable);
return 0;
}
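The classification at the end of bind is subtle enough to merit a gloss: the object is fenceable only when its node has exactly the fence-sized, fence-aligned shape, and mappable only when it lies wholly below gtt_mappable_end; map_and_fenceable is the conjunction. A stand-alone sketch of the same test with hypothetical aperture numbers:

/* Sketch only: the fenceable/mappable classification performed after
 * binding. All numbers are hypothetical.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t fence_size      = 1u << 20;   /* size a fence would cover */
    uint32_t fence_alignment = 1u << 20;
    uint32_t node_start      = 2u << 20;   /* where the object was bound */
    uint32_t node_size       = 1u << 20;
    uint32_t obj_size        = 1u << 20;
    uint32_t mappable_end    = 256u << 20; /* end of CPU-visible aperture */

    bool fenceable = node_size == fence_size &&
                     (node_start & (fence_alignment - 1)) == 0;
    bool mappable  = node_start + obj_size <= mappable_end;

    printf("map_and_fenceable = %d\n", fenceable && mappable); /* 1 */
    return 0;
}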
void
-i915_gem_clflush_object(struct drm_gem_object *obj)
+i915_gem_clflush_object(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
/* If we don't have a page list set up, then we're not pinned
* to GPU, and we can ignore the cache flush because it'll happen
* again at bind time.
*/
- if (obj_priv->pages == NULL)
+ if (obj->pages == NULL)
return;
trace_i915_gem_object_clflush(obj);
- drm_clflush_pages(obj_priv->pages, obj->size / PAGE_SIZE);
+ drm_clflush_pages(obj->pages, obj->base.size / PAGE_SIZE);
}
/** Flushes any GPU write domain for the object if it's dirty. */
-static int
-i915_gem_object_flush_gpu_write_domain(struct drm_gem_object *obj,
- bool pipelined)
+static void
+i915_gem_object_flush_gpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
- uint32_t old_write_domain;
+ struct drm_device *dev = obj->base.dev;
- if ((obj->write_domain & I915_GEM_GPU_DOMAINS) == 0)
- return 0;
+ if ((obj->base.write_domain & I915_GEM_GPU_DOMAINS) == 0)
+ return;
/* Queue the GPU write cache flushing we need. */
- old_write_domain = obj->write_domain;
- i915_gem_flush_ring(dev, NULL,
- to_intel_bo(obj)->ring,
- 0, obj->write_domain);
- BUG_ON(obj->write_domain);
-
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
-
- if (pipelined)
- return 0;
-
- return i915_gem_object_wait_rendering(obj, true);
+ i915_gem_flush_ring(dev, obj->ring, 0, obj->base.write_domain);
+ BUG_ON(obj->base.write_domain);
}
/** Flushes the GTT write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_gtt_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj)
{
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_GTT)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_GTT)
return;
/* No actual flushing is required for the GTT write domain. Writes
* to it immediately go to main memory as far as we know, so there's
* no chipset flush. It also doesn't land in render cache.
*/
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ i915_gem_release_mmap(obj);
+
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
/** Flushes the CPU write domain for the object if it's dirty. */
static void
-i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
+i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
uint32_t old_write_domain;
- if (obj->write_domain != I915_GEM_DOMAIN_CPU)
+ if (obj->base.write_domain != I915_GEM_DOMAIN_CPU)
return;
i915_gem_clflush_object(obj);
- drm_agp_chipset_flush(dev);
- old_write_domain = obj->write_domain;
- obj->write_domain = 0;
+ intel_gtt_chipset_flush();
+ old_write_domain = obj->base.write_domain;
+ obj->base.write_domain = 0;
trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
+ obj->base.read_domains,
old_write_domain);
}
@@ -2816,40 +2873,36 @@ i915_gem_object_flush_cpu_write_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
int
-i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_write_domain, old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
- if (ret != 0)
- return ret;
-
- i915_gem_object_flush_cpu_write_domain(obj);
-
- if (write) {
+ i915_gem_object_flush_gpu_write_domain(obj);
+ if (obj->pending_gpu_write || write) {
ret = i915_gem_object_wait_rendering(obj, true);
if (ret)
return ret;
}
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ i915_gem_object_flush_cpu_write_domain(obj);
+
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_GTT) != 0);
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
if (write) {
- obj->read_domains = I915_GEM_DOMAIN_GTT;
- obj->write_domain = I915_GEM_DOMAIN_GTT;
- obj_priv->dirty = 1;
+ obj->base.read_domains = I915_GEM_DOMAIN_GTT;
+ obj->base.write_domain = I915_GEM_DOMAIN_GTT;
+ obj->dirty = 1;
}
trace_i915_gem_object_change_domain(obj,
@@ -2864,23 +2917,20 @@ i915_gem_object_set_to_gtt_domain(struct drm_gem_object *obj, int write)
* wait, as in the modesetting process we're not supposed to be interrupted.
*/
int
-i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
- bool pipelined)
+i915_gem_object_set_to_display_plane(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int ret;
/* Not valid to be called on unbound objects. */
- if (obj_priv->gtt_space == NULL)
+ if (obj->gtt_space == NULL)
return -EINVAL;
- ret = i915_gem_object_flush_gpu_write_domain(obj, true);
- if (ret)
- return ret;
+ i915_gem_object_flush_gpu_write_domain(obj);
/* Currently, we are always called from a non-interruptible context. */
- if (!pipelined) {
+ if (pipelined != obj->ring) {
ret = i915_gem_object_wait_rendering(obj, false);
if (ret)
return ret;
@@ -2888,12 +2938,12 @@ i915_gem_object_set_to_display_plane(struct drm_gem_object *obj,
i915_gem_object_flush_cpu_write_domain(obj);
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_GTT;
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_GTT;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
+ obj->base.write_domain);
return 0;
}
@@ -2906,10 +2956,10 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
return 0;
if (obj->base.write_domain & I915_GEM_GPU_DOMAINS)
- i915_gem_flush_ring(obj->base.dev, NULL, obj->ring,
+ i915_gem_flush_ring(obj->base.dev, obj->ring,
0, obj->base.write_domain);
- return i915_gem_object_wait_rendering(&obj->base, interruptible);
+ return i915_gem_object_wait_rendering(obj, interruptible);
}
/**
@@ -2919,13 +2969,14 @@ i915_gem_object_flush_gpu(struct drm_i915_gem_object *obj,
* flushes to occur.
*/
static int
-i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
+i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
{
uint32_t old_write_domain, old_read_domains;
int ret;
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
- if (ret != 0)
+ i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
return ret;
i915_gem_object_flush_gtt_write_domain(obj);
@@ -2935,33 +2986,27 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
*/
i915_gem_object_set_to_full_cpu_read_domain(obj);
- if (write) {
- ret = i915_gem_object_wait_rendering(obj, true);
- if (ret)
- return ret;
- }
-
- old_write_domain = obj->write_domain;
- old_read_domains = obj->read_domains;
+ old_write_domain = obj->base.write_domain;
+ old_read_domains = obj->base.read_domains;
/* Flush the CPU cache if it's still invalid. */
- if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0) {
+ if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0) {
i915_gem_clflush_object(obj);
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
/* If we're writing through the CPU, then the GPU read domains will
* need to be invalidated at next use.
*/
if (write) {
- obj->read_domains = I915_GEM_DOMAIN_CPU;
- obj->write_domain = I915_GEM_DOMAIN_CPU;
+ obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+ obj->base.write_domain = I915_GEM_DOMAIN_CPU;
}
trace_i915_gem_object_change_domain(obj,
@@ -2971,184 +3016,6 @@ i915_gem_object_set_to_cpu_domain(struct drm_gem_object *obj, int write)
return 0;
}
-/*
- * Set the next domain for the specified object. This
- * may not actually perform the necessary flushing/invalidating though,
- * as that may want to be batched with other set_domain operations
- *
- * This is (we hope) the only really tricky part of gem. The goal
- * is fairly simple -- track which caches hold bits of the object
- * and make sure they remain coherent. A few concrete examples may
- * help to explain how it works. For shorthand, we use the notation
- * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
- * a pair of read and write domain masks.
- *
- * Case 1: the batch buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Mapped to GTT
- * 4. Read by GPU
- * 5. Unmapped from GTT
- * 6. Freed
- *
- * Let's take these a step at a time
- *
- * 1. Allocated
- * Pages allocated from the kernel may still have
- * cache contents, so we set them to (CPU, CPU) always.
- * 2. Written by CPU (using pwrite)
- * The pwrite function calls set_domain (CPU, CPU) and
- * this function does nothing (as nothing changes)
- * 3. Mapped to GTT
- * This function asserts that the object is not
- * currently in any GPU-based read or write domains
- * 4. Read by GPU
- * i915_gem_execbuffer calls set_domain (COMMAND, 0).
- * As write_domain is zero, this function adds in the
- * current read domains (CPU+COMMAND, 0).
- * flush_domains is set to CPU.
- * invalidate_domains is set to COMMAND
- * clflush is run to get data out of the CPU caches
- * then i915_dev_set_domain calls i915_gem_flush to
- * emit an MI_FLUSH and drm_agp_chipset_flush
- * 5. Unmapped from GTT
- * i915_gem_object_unbind calls set_domain (CPU, CPU)
- * flush_domains and invalidate_domains end up both zero
- * so no flushing/invalidating happens
- * 6. Freed
- * yay, done
- *
- * Case 2: The shared render buffer
- *
- * 1. Allocated
- * 2. Mapped to GTT
- * 3. Read/written by GPU
- * 4. set_domain to (CPU,CPU)
- * 5. Read/written by CPU
- * 6. Read/written by GPU
- *
- * 1. Allocated
- * Same as last example, (CPU, CPU)
- * 2. Mapped to GTT
- * Nothing changes (assertions find that it is not in the GPU)
- * 3. Read/written by GPU
- * execbuffer calls set_domain (RENDER, RENDER)
- * flush_domains gets CPU
- * invalidate_domains gets GPU
- * clflush (obj)
- * MI_FLUSH and drm_agp_chipset_flush
- * 4. set_domain (CPU, CPU)
- * flush_domains gets GPU
- * invalidate_domains gets CPU
- * wait_rendering (obj) to make sure all drawing is complete.
- * This will include an MI_FLUSH to get the data from GPU
- * to memory
- * clflush (obj) to invalidate the CPU cache
- * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
- * 5. Read/written by CPU
- * cache lines are loaded and dirtied
- * 6. Read/written by GPU
- * Same as last GPU access
- *
- * Case 3: The constant buffer
- *
- * 1. Allocated
- * 2. Written by CPU
- * 3. Read by GPU
- * 4. Updated (written) by CPU again
- * 5. Read by GPU
- *
- * 1. Allocated
- * (CPU, CPU)
- * 2. Written by CPU
- * (CPU, CPU)
- * 3. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- * 4. Updated (written) by CPU again
- * (CPU, CPU)
- * flush_domains = 0 (no previous write domain)
- * invalidate_domains = 0 (no new read domains)
- * 5. Read by GPU
- * (CPU+RENDER, 0)
- * flush_domains = CPU
- * invalidate_domains = RENDER
- * clflush (obj)
- * MI_FLUSH
- * drm_agp_chipset_flush
- */
-static void
-i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
- struct intel_ring_buffer *ring)
-{
- struct drm_device *dev = obj->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- uint32_t invalidate_domains = 0;
- uint32_t flush_domains = 0;
- uint32_t old_read_domains;
-
- intel_mark_busy(dev, obj);
-
- /*
- * If the object isn't moving to a new write domain,
- * let the object stay in multiple read domains
- */
- if (obj->pending_write_domain == 0)
- obj->pending_read_domains |= obj->read_domains;
- else
- obj_priv->dirty = 1;
-
- /*
- * Flush the current write domain if
- * the new read domains don't match. Invalidate
- * any read domains which differ from the old
- * write domain
- */
- if (obj->write_domain &&
- (obj->write_domain != obj->pending_read_domains ||
- obj_priv->ring != ring)) {
- flush_domains |= obj->write_domain;
- invalidate_domains |=
- obj->pending_read_domains & ~obj->write_domain;
- }
- /*
- * Invalidate any read caches which may have
- * stale data. That is, any new read domains.
- */
- invalidate_domains |= obj->pending_read_domains & ~obj->read_domains;
- if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
- i915_gem_clflush_object(obj);
-
- old_read_domains = obj->read_domains;
-
- /* The actual obj->write_domain will be updated with
- * pending_write_domain after we emit the accumulated flush for all
- * of our domain changes in execbuffers (which clears objects'
- * write_domains). So if we have a current write domain that we
- * aren't changing, set pending_write_domain to that.
- */
- if (flush_domains == 0 && obj->pending_write_domain == 0)
- obj->pending_write_domain = obj->write_domain;
- obj->read_domains = obj->pending_read_domains;
-
- dev->invalidate_domains |= invalidate_domains;
- dev->flush_domains |= flush_domains;
- if (flush_domains & I915_GEM_GPU_DOMAINS)
- dev_priv->mm.flush_rings |= obj_priv->ring->id;
- if (invalidate_domains & I915_GEM_GPU_DOMAINS)
- dev_priv->mm.flush_rings |= ring->id;
-
- trace_i915_gem_object_change_domain(obj,
- old_read_domains,
- obj->write_domain);
-}
-
/**
* Moves the object from a partially CPU read to a full one.
*
@@ -3156,30 +3023,28 @@ i915_gem_object_set_to_gpu_domain(struct drm_gem_object *obj,
* and doesn't handle transitioning from !(read_domains & I915_GEM_DOMAIN_CPU).
*/
static void
-i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
+i915_gem_object_set_to_full_cpu_read_domain(struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (!obj_priv->page_cpu_valid)
+ if (!obj->page_cpu_valid)
return;
/* If we're partially in the CPU read domain, finish moving it in.
*/
- if (obj->read_domains & I915_GEM_DOMAIN_CPU) {
+ if (obj->base.read_domains & I915_GEM_DOMAIN_CPU) {
int i;
- for (i = 0; i <= (obj->size - 1) / PAGE_SIZE; i++) {
- if (obj_priv->page_cpu_valid[i])
+ for (i = 0; i <= (obj->base.size - 1) / PAGE_SIZE; i++) {
+ if (obj->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ drm_clflush_pages(obj->pages + i, 1);
}
}
/* Free the page_cpu_valid mappings which are now stale, whether
* or not we've got I915_GEM_DOMAIN_CPU.
*/
- kfree(obj_priv->page_cpu_valid);
- obj_priv->page_cpu_valid = NULL;
+ kfree(obj->page_cpu_valid);
+ obj->page_cpu_valid = NULL;
}
/**
@@ -3195,354 +3060,62 @@ i915_gem_object_set_to_full_cpu_read_domain(struct drm_gem_object *obj)
* flushes to occur.
*/
static int
-i915_gem_object_set_cpu_read_domain_range(struct drm_gem_object *obj,
+i915_gem_object_set_cpu_read_domain_range(struct drm_i915_gem_object *obj,
uint64_t offset, uint64_t size)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
uint32_t old_read_domains;
int i, ret;
- if (offset == 0 && size == obj->size)
+ if (offset == 0 && size == obj->base.size)
return i915_gem_object_set_to_cpu_domain(obj, 0);
- ret = i915_gem_object_flush_gpu_write_domain(obj, false);
- if (ret != 0)
+ i915_gem_object_flush_gpu_write_domain(obj);
+ ret = i915_gem_object_wait_rendering(obj, true);
+ if (ret)
return ret;
+
i915_gem_object_flush_gtt_write_domain(obj);
/* If we're already fully in the CPU read domain, we're done. */
- if (obj_priv->page_cpu_valid == NULL &&
- (obj->read_domains & I915_GEM_DOMAIN_CPU) != 0)
+ if (obj->page_cpu_valid == NULL &&
+ (obj->base.read_domains & I915_GEM_DOMAIN_CPU) != 0)
return 0;
/* Otherwise, create/clear the per-page CPU read domain flag if we're
* newly adding I915_GEM_DOMAIN_CPU
*/
- if (obj_priv->page_cpu_valid == NULL) {
- obj_priv->page_cpu_valid = kzalloc(obj->size / PAGE_SIZE,
- GFP_KERNEL);
- if (obj_priv->page_cpu_valid == NULL)
+ if (obj->page_cpu_valid == NULL) {
+ obj->page_cpu_valid = kzalloc(obj->base.size / PAGE_SIZE,
+ GFP_KERNEL);
+ if (obj->page_cpu_valid == NULL)
return -ENOMEM;
- } else if ((obj->read_domains & I915_GEM_DOMAIN_CPU) == 0)
- memset(obj_priv->page_cpu_valid, 0, obj->size / PAGE_SIZE);
+ } else if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0)
+ memset(obj->page_cpu_valid, 0, obj->base.size / PAGE_SIZE);
/* Flush the cache on any pages that are still invalid from the CPU's
* perspective.
*/
for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE;
i++) {
- if (obj_priv->page_cpu_valid[i])
+ if (obj->page_cpu_valid[i])
continue;
- drm_clflush_pages(obj_priv->pages + i, 1);
+ drm_clflush_pages(obj->pages + i, 1);
- obj_priv->page_cpu_valid[i] = 1;
+ obj->page_cpu_valid[i] = 1;
}
/* It should now be out of any other write domains, and we can update
* the domain values for our changes.
*/
- BUG_ON((obj->write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
+ BUG_ON((obj->base.write_domain & ~I915_GEM_DOMAIN_CPU) != 0);
- old_read_domains = obj->read_domains;
- obj->read_domains |= I915_GEM_DOMAIN_CPU;
+ old_read_domains = obj->base.read_domains;
+ obj->base.read_domains |= I915_GEM_DOMAIN_CPU;
trace_i915_gem_object_change_domain(obj,
old_read_domains,
- obj->write_domain);
-
- return 0;
-}
-
-/**
- * Pin an object to the GTT and evaluate the relocations landing in it.
- */
-static int
-i915_gem_execbuffer_relocate(struct drm_i915_gem_object *obj,
- struct drm_file *file_priv,
- struct drm_i915_gem_exec_object2 *entry)
-{
- struct drm_device *dev = obj->base.dev;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_relocation_entry __user *user_relocs;
- struct drm_gem_object *target_obj = NULL;
- uint32_t target_handle = 0;
- int i, ret = 0;
-
- user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
- for (i = 0; i < entry->relocation_count; i++) {
- struct drm_i915_gem_relocation_entry reloc;
- uint32_t target_offset;
-
- if (__copy_from_user_inatomic(&reloc,
- user_relocs+i,
- sizeof(reloc))) {
- ret = -EFAULT;
- break;
- }
-
- if (reloc.target_handle != target_handle) {
- drm_gem_object_unreference(target_obj);
-
- target_obj = drm_gem_object_lookup(dev, file_priv,
- reloc.target_handle);
- if (target_obj == NULL) {
- ret = -ENOENT;
- break;
- }
-
- target_handle = reloc.target_handle;
- }
- target_offset = to_intel_bo(target_obj)->gtt_offset;
-
-#if WATCH_RELOC
- DRM_INFO("%s: obj %p offset %08x target %d "
- "read %08x write %08x gtt %08x "
- "presumed %08x delta %08x\n",
- __func__,
- obj,
- (int) reloc.offset,
- (int) reloc.target_handle,
- (int) reloc.read_domains,
- (int) reloc.write_domain,
- (int) target_offset,
- (int) reloc.presumed_offset,
- reloc.delta);
-#endif
-
- /* The target buffer should have appeared before us in the
- * exec_object list, so it should have a GTT space bound by now.
- */
- if (target_offset == 0) {
- DRM_ERROR("No GTT space found for object %d\n",
- reloc.target_handle);
- ret = -EINVAL;
- break;
- }
-
- /* Validate that the target is in a valid r/w GPU domain */
- if (reloc.write_domain & (reloc.write_domain - 1)) {
- DRM_ERROR("reloc with multiple write domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- ret = -EINVAL;
- break;
- }
- if (reloc.write_domain & I915_GEM_DOMAIN_CPU ||
- reloc.read_domains & I915_GEM_DOMAIN_CPU) {
- DRM_ERROR("reloc with read/write CPU domains: "
- "obj %p target %d offset %d "
- "read %08x write %08x",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.read_domains,
- reloc.write_domain);
- ret = -EINVAL;
- break;
- }
- if (reloc.write_domain && target_obj->pending_write_domain &&
- reloc.write_domain != target_obj->pending_write_domain) {
- DRM_ERROR("Write domain conflict: "
- "obj %p target %d offset %d "
- "new %08x old %08x\n",
- obj, reloc.target_handle,
- (int) reloc.offset,
- reloc.write_domain,
- target_obj->pending_write_domain);
- ret = -EINVAL;
- break;
- }
-
- target_obj->pending_read_domains |= reloc.read_domains;
- target_obj->pending_write_domain |= reloc.write_domain;
-
- /* If the relocation already has the right value in it, no
- * more work needs to be done.
- */
- if (target_offset == reloc.presumed_offset)
- continue;
-
- /* Check that the relocation address is valid... */
- if (reloc.offset > obj->base.size - 4) {
- DRM_ERROR("Relocation beyond object bounds: "
- "obj %p target %d offset %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset, (int) obj->base.size);
- ret = -EINVAL;
- break;
- }
- if (reloc.offset & 3) {
- DRM_ERROR("Relocation not 4-byte aligned: "
- "obj %p target %d offset %d.\n",
- obj, reloc.target_handle,
- (int) reloc.offset);
- ret = -EINVAL;
- break;
- }
-
- /* and points to somewhere within the target object. */
- if (reloc.delta >= target_obj->size) {
- DRM_ERROR("Relocation beyond target object bounds: "
- "obj %p target %d delta %d size %d.\n",
- obj, reloc.target_handle,
- (int) reloc.delta, (int) target_obj->size);
- ret = -EINVAL;
- break;
- }
-
- reloc.delta += target_offset;
- if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
- uint32_t page_offset = reloc.offset & ~PAGE_MASK;
- char *vaddr;
-
- vaddr = kmap_atomic(obj->pages[reloc.offset >> PAGE_SHIFT]);
- *(uint32_t *)(vaddr + page_offset) = reloc.delta;
- kunmap_atomic(vaddr);
- } else {
- uint32_t __iomem *reloc_entry;
- void __iomem *reloc_page;
-
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, 1);
- if (ret)
- break;
-
- /* Map the page containing the relocation we're going to perform. */
- reloc.offset += obj->gtt_offset;
- reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
- reloc.offset & PAGE_MASK);
- reloc_entry = (uint32_t __iomem *)
- (reloc_page + (reloc.offset & ~PAGE_MASK));
- iowrite32(reloc.delta, reloc_entry);
- io_mapping_unmap_atomic(reloc_page);
- }
-
- /* and update the user's relocation entry */
- reloc.presumed_offset = target_offset;
- if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
- &reloc.presumed_offset,
- sizeof(reloc.presumed_offset))) {
- ret = -EFAULT;
- break;
- }
- }
-
- drm_gem_object_unreference(target_obj);
- return ret;
-}
-
-static int
-i915_gem_execbuffer_pin(struct drm_device *dev,
- struct drm_file *file,
- struct drm_gem_object **object_list,
- struct drm_i915_gem_exec_object2 *exec_list,
- int count)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i, retry;
-
- /* attempt to pin all of the buffers into the GTT */
- for (retry = 0; retry < 2; retry++) {
- ret = 0;
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_exec_object2 *entry = &exec_list[i];
- struct drm_i915_gem_object *obj= to_intel_bo(object_list[i]);
- bool need_fence =
- entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
- obj->tiling_mode != I915_TILING_NONE;
-
- /* Check fence reg constraints and rebind if necessary */
- if (need_fence &&
- !i915_gem_object_fence_offset_ok(&obj->base,
- obj->tiling_mode)) {
- ret = i915_gem_object_unbind(&obj->base);
- if (ret)
- break;
- }
-
- ret = i915_gem_object_pin(&obj->base, entry->alignment);
- if (ret)
- break;
-
- /*
- * Pre-965 chips need a fence register set up in order
- * to properly handle blits to/from tiled surfaces.
- */
- if (need_fence) {
- ret = i915_gem_object_get_fence_reg(&obj->base, true);
- if (ret) {
- i915_gem_object_unpin(&obj->base);
- break;
- }
-
- dev_priv->fence_regs[obj->fence_reg].gpu = true;
- }
-
- entry->offset = obj->gtt_offset;
- }
-
- while (i--)
- i915_gem_object_unpin(object_list[i]);
-
- if (ret == 0)
- break;
-
- if (ret != -ENOSPC || retry)
- return ret;
-
- ret = i915_gem_evict_everything(dev);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-static int
-i915_gem_execbuffer_move_to_gpu(struct drm_device *dev,
- struct drm_file *file,
- struct intel_ring_buffer *ring,
- struct drm_gem_object **objects,
- int count)
-{
- struct drm_i915_private *dev_priv = dev->dev_private;
- int ret, i;
-
- /* Zero the global flush/invalidate flags. These
- * will be modified as new domains are computed
- * for each object
- */
- dev->invalidate_domains = 0;
- dev->flush_domains = 0;
- dev_priv->mm.flush_rings = 0;
- for (i = 0; i < count; i++)
- i915_gem_object_set_to_gpu_domain(objects[i], ring);
-
- if (dev->invalidate_domains | dev->flush_domains) {
-#if WATCH_EXEC
- DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
- __func__,
- dev->invalidate_domains,
- dev->flush_domains);
-#endif
- i915_gem_flush(dev, file,
- dev->invalidate_domains,
- dev->flush_domains,
- dev_priv->mm.flush_rings);
- }
-
- for (i = 0; i < count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(objects[i]);
- /* XXX replace with semaphores */
- if (obj->ring && ring != obj->ring) {
- ret = i915_gem_object_wait_rendering(&obj->base, true);
- if (ret)
- return ret;
- }
- }
+ obj->base.write_domain);
return 0;
}
@@ -3582,586 +3155,129 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
return 0;
ret = 0;
- if (!i915_seqno_passed(ring->get_seqno(dev, ring), seqno)) {
+ if (!i915_seqno_passed(ring->get_seqno(ring), seqno)) {
/* And wait for the seqno passing without holding any locks and
* causing extra latency for others. This is safe as the irq
* generation is designed to be run atomically and so is
* lockless.
*/
- ring->user_irq_get(dev, ring);
- ret = wait_event_interruptible(ring->irq_queue,
- i915_seqno_passed(ring->get_seqno(dev, ring), seqno)
- || atomic_read(&dev_priv->mm.wedged));
- ring->user_irq_put(dev, ring);
-
- if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
- ret = -EIO;
- }
-
- if (ret == 0)
- queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
- return ret;
-}
-
-static int
-i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec,
- uint64_t exec_offset)
-{
- uint32_t exec_start, exec_len;
-
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
-
- if ((exec_start | exec_len) & 0x7)
- return -EINVAL;
-
- if (!exec_start)
- return -EINVAL;
-
- return 0;
-}
-
-static int
-validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
- int count)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
- size_t length = exec[i].relocation_count * sizeof(struct drm_i915_gem_relocation_entry);
-
- if (!access_ok(VERIFY_READ, ptr, length))
- return -EFAULT;
-
- /* we may also need to update the presumed offsets */
- if (!access_ok(VERIFY_WRITE, ptr, length))
- return -EFAULT;
-
- if (fault_in_pages_readable(ptr, length))
- return -EFAULT;
- }
-
- return 0;
-}
-
-static int
-i915_gem_do_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file,
- struct drm_i915_gem_execbuffer2 *args,
- struct drm_i915_gem_exec_object2 *exec_list)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object **object_list = NULL;
- struct drm_gem_object *batch_obj;
- struct drm_i915_gem_object *obj_priv;
- struct drm_clip_rect *cliprects = NULL;
- struct drm_i915_gem_request *request = NULL;
- int ret, i, flips;
- uint64_t exec_offset;
-
- struct intel_ring_buffer *ring = NULL;
-
- ret = i915_gem_check_is_wedged(dev);
- if (ret)
- return ret;
-
- ret = validate_exec_list(exec_list, args->buffer_count);
- if (ret)
- return ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
- switch (args->flags & I915_EXEC_RING_MASK) {
- case I915_EXEC_DEFAULT:
- case I915_EXEC_RENDER:
- ring = &dev_priv->render_ring;
- break;
- case I915_EXEC_BSD:
- if (!HAS_BSD(dev)) {
- DRM_ERROR("execbuf with invalid ring (BSD)\n");
- return -EINVAL;
- }
- ring = &dev_priv->bsd_ring;
- break;
- case I915_EXEC_BLT:
- if (!HAS_BLT(dev)) {
- DRM_ERROR("execbuf with invalid ring (BLT)\n");
- return -EINVAL;
- }
- ring = &dev_priv->blt_ring;
- break;
- default:
- DRM_ERROR("execbuf with unknown ring: %d\n",
- (int)(args->flags & I915_EXEC_RING_MASK));
- return -EINVAL;
- }
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
- object_list = drm_malloc_ab(sizeof(*object_list), args->buffer_count);
- if (object_list == NULL) {
- DRM_ERROR("Failed to allocate object list for %d buffers\n",
- args->buffer_count);
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- if (args->num_cliprects != 0) {
- cliprects = kcalloc(args->num_cliprects, sizeof(*cliprects),
- GFP_KERNEL);
- if (cliprects == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = copy_from_user(cliprects,
- (struct drm_clip_rect __user *)
- (uintptr_t) args->cliprects_ptr,
- sizeof(*cliprects) * args->num_cliprects);
- if (ret != 0) {
- DRM_ERROR("copy %d cliprects failed: %d\n",
- args->num_cliprects, ret);
- ret = -EFAULT;
- goto pre_mutex_err;
- }
- }
-
- request = kzalloc(sizeof(*request), GFP_KERNEL);
- if (request == NULL) {
- ret = -ENOMEM;
- goto pre_mutex_err;
- }
-
- ret = i915_mutex_lock_interruptible(dev);
- if (ret)
- goto pre_mutex_err;
-
- if (dev_priv->mm.suspended) {
- mutex_unlock(&dev->struct_mutex);
- ret = -EBUSY;
- goto pre_mutex_err;
- }
-
- /* Look up object handles */
- for (i = 0; i < args->buffer_count; i++) {
- object_list[i] = drm_gem_object_lookup(dev, file,
- exec_list[i].handle);
- if (object_list[i] == NULL) {
- DRM_ERROR("Invalid object handle %d at index %d\n",
- exec_list[i].handle, i);
- /* prevent error path from reading uninitialized data */
- args->buffer_count = i + 1;
- ret = -ENOENT;
- goto err;
- }
-
- obj_priv = to_intel_bo(object_list[i]);
- if (obj_priv->in_execbuffer) {
- DRM_ERROR("Object %p appears more than once in object list\n",
- object_list[i]);
- /* prevent error path from reading uninitialized data */
- args->buffer_count = i + 1;
- ret = -EINVAL;
- goto err;
- }
- obj_priv->in_execbuffer = true;
- }
-
- /* Move the objects en-masse into the GTT, evicting if necessary. */
- ret = i915_gem_execbuffer_pin(dev, file,
- object_list, exec_list,
- args->buffer_count);
- if (ret)
- goto err;
-
- /* The objects are in their final locations, apply the relocations. */
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_i915_gem_object *obj = to_intel_bo(object_list[i]);
- obj->base.pending_read_domains = 0;
- obj->base.pending_write_domain = 0;
- ret = i915_gem_execbuffer_relocate(obj, file, &exec_list[i]);
- if (ret)
- goto err;
- }
-
- /* Set the pending read domains for the batch buffer to COMMAND */
- batch_obj = object_list[args->buffer_count-1];
- if (batch_obj->pending_write_domain) {
- DRM_ERROR("Attempting to use self-modifying batch buffer\n");
- ret = -EINVAL;
- goto err;
- }
- batch_obj->pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
-
- /* Sanity check the batch buffer */
- exec_offset = to_intel_bo(batch_obj)->gtt_offset;
- ret = i915_gem_check_execbuffer(args, exec_offset);
- if (ret != 0) {
- DRM_ERROR("execbuf with invalid offset/length\n");
- goto err;
- }
-
- ret = i915_gem_execbuffer_move_to_gpu(dev, file, ring,
- object_list, args->buffer_count);
- if (ret)
- goto err;
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
- uint32_t old_write_domain = obj->write_domain;
- obj->write_domain = obj->pending_write_domain;
- trace_i915_gem_object_change_domain(obj,
- obj->read_domains,
- old_write_domain);
- }
-
-#if WATCH_COHERENCY
- for (i = 0; i < args->buffer_count; i++) {
- i915_gem_object_check_coherency(object_list[i],
- exec_list[i].handle);
- }
-#endif
-
-#if WATCH_EXEC
- i915_gem_dump_object(batch_obj,
- args->batch_len,
- __func__,
- ~0);
-#endif
-
- /* Check for any pending flips. As we only maintain a flip queue depth
- * of 1, we can simply insert a WAIT for the next display flip prior
- * to executing the batch and avoid stalling the CPU.
- */
- flips = 0;
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]->write_domain)
- flips |= atomic_read(&to_intel_bo(object_list[i])->pending_flip);
- }
- if (flips) {
- int plane, flip_mask;
-
- for (plane = 0; flips >> plane; plane++) {
- if (((flips >> plane) & 1) == 0)
- continue;
-
- if (plane)
- flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
- else
- flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
-
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_WAIT_FOR_EVENT | flip_mask);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
- }
-
- /* Exec the batchbuffer */
- ret = ring->dispatch_gem_execbuffer(dev, ring, args,
- cliprects, exec_offset);
- if (ret) {
- DRM_ERROR("dispatch failed %d\n", ret);
- goto err;
- }
-
- /*
- * Ensure that the commands in the batch buffer are
- * finished before the interrupt fires
- */
- i915_retire_commands(dev, ring);
-
- for (i = 0; i < args->buffer_count; i++) {
- struct drm_gem_object *obj = object_list[i];
-
- i915_gem_object_move_to_active(obj, ring);
- if (obj->write_domain)
- list_move_tail(&to_intel_bo(obj)->gpu_write_list,
- &ring->gpu_write_list);
- }
-
- i915_add_request(dev, file, request, ring);
- request = NULL;
-
-err:
- for (i = 0; i < args->buffer_count; i++) {
- if (object_list[i]) {
- obj_priv = to_intel_bo(object_list[i]);
- obj_priv->in_execbuffer = false;
- }
- drm_gem_object_unreference(object_list[i]);
- }
-
- mutex_unlock(&dev->struct_mutex);
-
-pre_mutex_err:
- drm_free_large(object_list);
- kfree(cliprects);
- kfree(request);
-
- return ret;
-}
-
-/*
- * Legacy execbuffer just creates an exec2 list from the original exec object
- * list array and passes it to the real function.
- */
-int
-i915_gem_execbuffer(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer *args = data;
- struct drm_i915_gem_execbuffer2 exec2;
- struct drm_i915_gem_exec_object *exec_list = NULL;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret, i;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- /* Copy in the exec list from userland */
- exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec_list == NULL || exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -ENOMEM;
- }
- ret = copy_from_user(exec_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- for (i = 0; i < args->buffer_count; i++) {
- exec2_list[i].handle = exec_list[i].handle;
- exec2_list[i].relocation_count = exec_list[i].relocation_count;
- exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
- exec2_list[i].alignment = exec_list[i].alignment;
- exec2_list[i].offset = exec_list[i].offset;
- if (INTEL_INFO(dev)->gen < 4)
- exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
- else
- exec2_list[i].flags = 0;
- }
-	exec2.buffers_ptr = args->buffers_ptr;
-	exec2.buffer_count = args->buffer_count;
-	exec2.batch_start_offset = args->batch_start_offset;
-	exec2.batch_len = args->batch_len;
-	exec2.DR1 = args->DR1;
-	exec2.DR4 = args->DR4;
-	exec2.num_cliprects = args->num_cliprects;
-	exec2.cliprects_ptr = args->cliprects_ptr;
-	exec2.flags = I915_EXEC_RENDER;
-
-	ret = i915_gem_do_execbuffer(dev, data, file_priv, &exec2, exec2_list);
-	if (!ret) {
-		/* Copy the new buffer offsets back to the user's exec list. */
-		for (i = 0; i < args->buffer_count; i++)
-			exec_list[i].offset = exec2_list[i].offset;
-		/* ... and back out to userspace */
-		ret = copy_to_user((struct drm_i915_relocation_entry __user *)
-				   (uintptr_t) args->buffers_ptr,
-				   exec_list,
-				   sizeof(*exec_list) * args->buffer_count);
-		if (ret) {
-			ret = -EFAULT;
-			DRM_ERROR("failed to copy %d exec entries "
-				  "back to user (%d)\n",
-				  args->buffer_count, ret);
+	if (ring->irq_get(ring)) {
+		ret = wait_event_interruptible(ring->irq_queue,
+			i915_seqno_passed(ring->get_seqno(ring), seqno)
+			|| atomic_read(&dev_priv->mm.wedged));
+		ring->irq_put(ring);
+		if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
+			ret = -EIO;
		}
	}
- drm_free_large(exec_list);
- drm_free_large(exec2_list);
- return ret;
-}
-
-int
-i915_gem_execbuffer2(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
-{
- struct drm_i915_gem_execbuffer2 *args = data;
- struct drm_i915_gem_exec_object2 *exec2_list = NULL;
- int ret;
-
-#if WATCH_EXEC
- DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
- (int) args->buffers_ptr, args->buffer_count, args->batch_len);
-#endif
-
- if (args->buffer_count < 1) {
- DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
- return -EINVAL;
- }
-
- exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
- if (exec2_list == NULL) {
- DRM_ERROR("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
- return -ENOMEM;
- }
- ret = copy_from_user(exec2_list,
- (struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret != 0) {
- DRM_ERROR("copy %d exec entries failed %d\n",
- args->buffer_count, ret);
- drm_free_large(exec2_list);
- return -EFAULT;
- }
-
- ret = i915_gem_do_execbuffer(dev, data, file_priv, args, exec2_list);
- if (!ret) {
- /* Copy the new buffer offsets back to the user's exec list. */
- ret = copy_to_user((struct drm_i915_relocation_entry __user *)
- (uintptr_t) args->buffers_ptr,
- exec2_list,
- sizeof(*exec2_list) * args->buffer_count);
- if (ret) {
- ret = -EFAULT;
- DRM_ERROR("failed to copy %d exec entries "
- "back to user (%d)\n",
- args->buffer_count, ret);
- }
-	}
-	drm_free_large(exec2_list);
+	if (ret == 0)
+		queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
return ret;
}
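
The hunk above replaces polling with an interrupt-driven seqno wait. A minimal sketch of that pattern, assuming the ring vtable introduced by this series (irq_get/irq_put/get_seqno and i915_seqno_passed are driver symbols here; the wrapper function itself is hypothetical):

    /* Sketch only: sleep until 'seqno' retires or the GPU wedges. */
    static int wait_for_seqno(struct intel_ring_buffer *ring, u32 seqno,
                              struct drm_i915_private *dev_priv)
    {
            int ret = 0;

            if (ring->irq_get(ring)) {      /* enable completion interrupts */
                    ret = wait_event_interruptible(ring->irq_queue,
                            i915_seqno_passed(ring->get_seqno(ring), seqno) ||
                            atomic_read(&dev_priv->mm.wedged));
                    ring->irq_put(ring);    /* release the irq reference */

                    /* a wedged GPU never signals completion */
                    if (ret == 0 && atomic_read(&dev_priv->mm.wedged))
                            ret = -EIO;
            }
            return ret;
    }
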
int
-i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment)
+i915_gem_object_pin(struct drm_i915_gem_object *obj,
+ uint32_t alignment,
+ bool map_and_fenceable)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
- BUG_ON(obj_priv->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
+ BUG_ON(obj->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT);
WARN_ON(i915_verify_lists(dev));
- if (obj_priv->gtt_space != NULL) {
- if (alignment == 0)
- alignment = i915_gem_get_gtt_alignment(obj);
- if (obj_priv->gtt_offset & (alignment - 1)) {
- WARN(obj_priv->pin_count,
- "bo is already pinned with incorrect alignment: offset=%x, req.alignment=%x\n",
- obj_priv->gtt_offset, alignment);
+ if (obj->gtt_space != NULL) {
+ if ((alignment && obj->gtt_offset & (alignment - 1)) ||
+ (map_and_fenceable && !obj->map_and_fenceable)) {
+ WARN(obj->pin_count,
+ "bo is already pinned with incorrect alignment:"
+ " offset=%x, req.alignment=%x, req.map_and_fenceable=%d,"
+ " obj->map_and_fenceable=%d\n",
+ obj->gtt_offset, alignment,
+ map_and_fenceable,
+ obj->map_and_fenceable);
ret = i915_gem_object_unbind(obj);
if (ret)
return ret;
}
}
- if (obj_priv->gtt_space == NULL) {
- ret = i915_gem_object_bind_to_gtt(obj, alignment);
+ if (obj->gtt_space == NULL) {
+ ret = i915_gem_object_bind_to_gtt(obj, alignment,
+ map_and_fenceable);
if (ret)
return ret;
}
- obj_priv->pin_count++;
-
- /* If the object is not active and not pending a flush,
- * remove it from the inactive list
- */
- if (obj_priv->pin_count == 1) {
- i915_gem_info_add_pin(dev_priv, obj->size);
- if (!obj_priv->active)
- list_move_tail(&obj_priv->mm_list,
+ if (obj->pin_count++ == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
&dev_priv->mm.pinned_list);
}
+ obj->pin_mappable |= map_and_fenceable;
WARN_ON(i915_verify_lists(dev));
return 0;
}
void
-i915_gem_object_unpin(struct drm_gem_object *obj)
+i915_gem_object_unpin(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
WARN_ON(i915_verify_lists(dev));
- obj_priv->pin_count--;
- BUG_ON(obj_priv->pin_count < 0);
- BUG_ON(obj_priv->gtt_space == NULL);
+ BUG_ON(obj->pin_count == 0);
+ BUG_ON(obj->gtt_space == NULL);
- /* If the object is no longer pinned, and is
- * neither active nor being flushed, then stick it on
- * the inactive list
- */
- if (obj_priv->pin_count == 0) {
- if (!obj_priv->active)
- list_move_tail(&obj_priv->mm_list,
+ if (--obj->pin_count == 0) {
+ if (!obj->active)
+ list_move_tail(&obj->mm_list,
&dev_priv->mm.inactive_list);
- i915_gem_info_remove_pin(dev_priv, obj->size);
+ obj->pin_mappable = false;
}
WARN_ON(i915_verify_lists(dev));
}
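
Pinning now takes an explicit map_and_fenceable flag. A hedged usage sketch (the helper and the alignment value are illustrative, not from the patch):

    /* Sketch only: a scanout buffer must live in the mappable, fenceable
     * aperture, so pass map_and_fenceable = true; a GPU-only buffer may
     * pass false and be placed anywhere in the GTT.
     */
    static int pin_for_scanout(struct drm_i915_gem_object *obj)
    {
            int ret = i915_gem_object_pin(obj, 4096, true);
            if (ret)
                    return ret;

            /* ... program the display plane with obj->gtt_offset ... */

            i915_gem_object_unpin(obj);
            return 0;
    }
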
int
i915_gem_pin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->madv != I915_MADV_WILLNEED) {
+ if (obj->madv != I915_MADV_WILLNEED) {
DRM_ERROR("Attempting to pin a purgeable buffer\n");
ret = -EINVAL;
goto out;
}
- if (obj_priv->pin_filp != NULL && obj_priv->pin_filp != file_priv) {
+ if (obj->pin_filp != NULL && obj->pin_filp != file) {
DRM_ERROR("Already pinned in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
- obj_priv->user_pin_count++;
- obj_priv->pin_filp = file_priv;
- if (obj_priv->user_pin_count == 1) {
- ret = i915_gem_object_pin(obj, args->alignment);
+ obj->user_pin_count++;
+ obj->pin_filp = file;
+ if (obj->user_pin_count == 1) {
+ ret = i915_gem_object_pin(obj, args->alignment, true);
if (ret)
goto out;
}
@@ -4170,9 +3286,9 @@ i915_gem_pin_ioctl(struct drm_device *dev, void *data,
* as the X server doesn't manage domains yet
*/
i915_gem_object_flush_cpu_write_domain(obj);
- args->offset = obj_priv->gtt_offset;
+ args->offset = obj->gtt_offset;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4180,38 +3296,36 @@ unlock:
int
i915_gem_unpin_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_pin *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->pin_filp != file_priv) {
+ if (obj->pin_filp != file) {
DRM_ERROR("Not pinned by caller in i915_gem_pin_ioctl(): %d\n",
args->handle);
ret = -EINVAL;
goto out;
}
- obj_priv->user_pin_count--;
- if (obj_priv->user_pin_count == 0) {
- obj_priv->pin_filp = NULL;
+ obj->user_pin_count--;
+ if (obj->user_pin_count == 0) {
+ obj->pin_filp = NULL;
i915_gem_object_unpin(obj);
}
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -4219,52 +3333,64 @@ unlock:
int
i915_gem_busy_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_busy *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_mutex_lock_interruptible(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
/* Count all active objects as busy, even if they are currently not used
* by the gpu. Users of this interface expect objects to eventually
* become non-busy without any further actions, therefore emit any
* necessary flushes here.
*/
- args->busy = obj_priv->active;
+ args->busy = obj->active;
if (args->busy) {
/* Unconditionally flush objects, even when the gpu still uses this
* object. Userspace calling this function indicates that it wants to
* use this buffer rather sooner than later, so issuing the required
* flush earlier is beneficial.
*/
- if (obj->write_domain & I915_GEM_GPU_DOMAINS)
- i915_gem_flush_ring(dev, file_priv,
- obj_priv->ring,
- 0, obj->write_domain);
+ if (obj->base.write_domain & I915_GEM_GPU_DOMAINS) {
+ i915_gem_flush_ring(dev, obj->ring,
+ 0, obj->base.write_domain);
+ } else if (obj->ring->outstanding_lazy_request ==
+ obj->last_rendering_seqno) {
+ struct drm_i915_gem_request *request;
+
+ /* This ring is not being cleared by active usage,
+ * so emit a request to do so.
+ */
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request)
+ ret = i915_add_request(dev,
+ NULL, request,
+ obj->ring);
+ else
+ ret = -ENOMEM;
+ }
/* Update the active list for the hardware's current position.
* Otherwise this only updates on a delayed timer or when irqs
* are actually unmasked, and our working set ends up being
* larger than required.
*/
- i915_gem_retire_requests_ring(dev, obj_priv->ring);
+ i915_gem_retire_requests_ring(dev, obj->ring);
- args->busy = obj_priv->active;
+ args->busy = obj->active;
}
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
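
For reference, the userspace side this services is the busy ioctl; a sketch (fd and bo_handle are assumptions):

    /* Sketch only: poll whether a buffer is still in use by the GPU. */
    struct drm_i915_gem_busy busy = { .handle = bo_handle };

    if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0 && !busy.busy) {
            /* safe to write the buffer without stalling on the GPU */
    }
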
@@ -4282,8 +3408,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_i915_gem_madvise *args = data;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
switch (args->madv) {
@@ -4298,37 +3423,36 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
if (obj == NULL) {
ret = -ENOENT;
goto unlock;
}
- obj_priv = to_intel_bo(obj);
- if (obj_priv->pin_count) {
+ if (obj->pin_count) {
ret = -EINVAL;
goto out;
}
- if (obj_priv->madv != __I915_MADV_PURGED)
- obj_priv->madv = args->madv;
+ if (obj->madv != __I915_MADV_PURGED)
+ obj->madv = args->madv;
/* if the object is no longer bound, discard its backing storage */
- if (i915_gem_object_is_purgeable(obj_priv) &&
- obj_priv->gtt_space == NULL)
+ if (i915_gem_object_is_purgeable(obj) &&
+ obj->gtt_space == NULL)
i915_gem_object_truncate(obj);
- args->retained = obj_priv->madv != __I915_MADV_PURGED;
+ args->retained = obj->madv != __I915_MADV_PURGED;
out:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
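
The madvise ioctl pairs with this kernel logic; a hedged userspace sketch (fd and handle are assumptions, error handling elided):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch only: mark an idle buffer purgeable, then reclaim it. */
    int cache_buffer(int fd, uint32_t handle)
    {
            struct drm_i915_gem_madvise madv = {
                    .handle = handle,
                    .madv = I915_MADV_DONTNEED,     /* may be discarded */
            };
            ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);

            madv.madv = I915_MADV_WILLNEED;         /* before reuse */
            ioctl(fd, DRM_IOCTL_I915_GEM_MADVISE, &madv);
            return madv.retained;   /* 0 => backing pages were purged */
    }
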
-struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
- size_t size)
+struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+ size_t size)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_object *obj;
@@ -4351,11 +3475,15 @@ struct drm_gem_object * i915_gem_alloc_object(struct drm_device *dev,
obj->base.driver_private = NULL;
obj->fence_reg = I915_FENCE_REG_NONE;
INIT_LIST_HEAD(&obj->mm_list);
+ INIT_LIST_HEAD(&obj->gtt_list);
INIT_LIST_HEAD(&obj->ring_list);
+ INIT_LIST_HEAD(&obj->exec_list);
INIT_LIST_HEAD(&obj->gpu_write_list);
obj->madv = I915_MADV_WILLNEED;
+ /* Avoid an unnecessary call to unbind on the first bind. */
+ obj->map_and_fenceable = true;
- return &obj->base;
+ return obj;
}
int i915_gem_init_object(struct drm_gem_object *obj)
@@ -4365,42 +3493,41 @@ int i915_gem_init_object(struct drm_gem_object *obj)
return 0;
}
-static void i915_gem_free_object_tail(struct drm_gem_object *obj)
+static void i915_gem_free_object_tail(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
ret = i915_gem_object_unbind(obj);
if (ret == -ERESTARTSYS) {
- list_move(&obj_priv->mm_list,
+ list_move(&obj->mm_list,
&dev_priv->mm.deferred_free_list);
return;
}
- if (obj_priv->mmap_offset)
+ if (obj->base.map_list.map)
i915_gem_free_mmap_offset(obj);
- drm_gem_object_release(obj);
- i915_gem_info_remove_obj(dev_priv, obj->size);
+ drm_gem_object_release(&obj->base);
+ i915_gem_info_remove_obj(dev_priv, obj->base.size);
- kfree(obj_priv->page_cpu_valid);
- kfree(obj_priv->bit_17);
- kfree(obj_priv);
+ kfree(obj->page_cpu_valid);
+ kfree(obj->bit_17);
+ kfree(obj);
}
-void i915_gem_free_object(struct drm_gem_object *obj)
+void i915_gem_free_object(struct drm_gem_object *gem_obj)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
+ struct drm_device *dev = obj->base.dev;
trace_i915_gem_object_destroy(obj);
- while (obj_priv->pin_count > 0)
+ while (obj->pin_count > 0)
i915_gem_object_unpin(obj);
- if (obj_priv->phys_obj)
+ if (obj->phys_obj)
i915_gem_detach_phys_object(dev, obj);
i915_gem_free_object_tail(obj);
@@ -4427,13 +3554,15 @@ i915_gem_idle(struct drm_device *dev)
/* Under UMS, be paranoid and evict. */
if (!drm_core_check_feature(dev, DRIVER_MODESET)) {
- ret = i915_gem_evict_inactive(dev);
+ ret = i915_gem_evict_inactive(dev, false);
if (ret) {
mutex_unlock(&dev->struct_mutex);
return ret;
}
}
+ i915_gem_reset_fences(dev);
+
/* Hack! Don't let anybody do execbuf while we don't control the chip.
* We need to replace this with a semaphore, or something.
* And not confound mm.suspended!
@@ -4452,82 +3581,15 @@ i915_gem_idle(struct drm_device *dev)
return 0;
}
-/*
- * 965+ support PIPE_CONTROL commands, which provide finer grained control
- * over cache flushing.
- */
-static int
-i915_gem_init_pipe_control(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
- int ret;
-
- obj = i915_gem_alloc_object(dev, 4096);
- if (obj == NULL) {
- DRM_ERROR("Failed to allocate seqno page\n");
- ret = -ENOMEM;
- goto err;
- }
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
-
- ret = i915_gem_object_pin(obj, 4096);
- if (ret)
- goto err_unref;
-
- dev_priv->seqno_gfx_addr = obj_priv->gtt_offset;
- dev_priv->seqno_page = kmap(obj_priv->pages[0]);
- if (dev_priv->seqno_page == NULL)
- goto err_unpin;
-
- dev_priv->seqno_obj = obj;
- memset(dev_priv->seqno_page, 0, PAGE_SIZE);
-
- return 0;
-
-err_unpin:
- i915_gem_object_unpin(obj);
-err_unref:
- drm_gem_object_unreference(obj);
-err:
- return ret;
-}
-
-
-static void
-i915_gem_cleanup_pipe_control(struct drm_device *dev)
-{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
-
- obj = dev_priv->seqno_obj;
- obj_priv = to_intel_bo(obj);
- kunmap(obj_priv->pages[0]);
- i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
- dev_priv->seqno_obj = NULL;
-
- dev_priv->seqno_page = NULL;
-}
-
int
i915_gem_init_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
- if (HAS_PIPE_CONTROL(dev)) {
- ret = i915_gem_init_pipe_control(dev);
- if (ret)
- return ret;
- }
-
ret = intel_init_render_ring_buffer(dev);
if (ret)
- goto cleanup_pipe_control;
+ return ret;
if (HAS_BSD(dev)) {
ret = intel_init_bsd_ring_buffer(dev);
@@ -4546,12 +3608,9 @@ i915_gem_init_ringbuffer(struct drm_device *dev)
return 0;
cleanup_bsd_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
+ intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
cleanup_render_ring:
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
-cleanup_pipe_control:
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
+ intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
return ret;
}
@@ -4559,12 +3618,10 @@ void
i915_gem_cleanup_ringbuffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
- intel_cleanup_ring_buffer(dev, &dev_priv->render_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->bsd_ring);
- intel_cleanup_ring_buffer(dev, &dev_priv->blt_ring);
- if (HAS_PIPE_CONTROL(dev))
- i915_gem_cleanup_pipe_control(dev);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ intel_cleanup_ring_buffer(&dev_priv->ring[i]);
}
int
@@ -4572,7 +3629,7 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
drm_i915_private_t *dev_priv = dev->dev_private;
- int ret;
+ int ret, i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return 0;
@@ -4592,14 +3649,12 @@ i915_gem_entervt_ioctl(struct drm_device *dev, void *data,
}
BUG_ON(!list_empty(&dev_priv->mm.active_list));
- BUG_ON(!list_empty(&dev_priv->render_ring.active_list));
- BUG_ON(!list_empty(&dev_priv->bsd_ring.active_list));
- BUG_ON(!list_empty(&dev_priv->blt_ring.active_list));
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
BUG_ON(!list_empty(&dev_priv->mm.inactive_list));
- BUG_ON(!list_empty(&dev_priv->render_ring.request_list));
- BUG_ON(!list_empty(&dev_priv->bsd_ring.request_list));
- BUG_ON(!list_empty(&dev_priv->blt_ring.request_list));
+ for (i = 0; i < I915_NUM_RINGS; i++) {
+ BUG_ON(!list_empty(&dev_priv->ring[i].active_list));
+ BUG_ON(!list_empty(&dev_priv->ring[i].request_list));
+ }
mutex_unlock(&dev->struct_mutex);
ret = drm_irq_install(dev);
@@ -4661,17 +3716,14 @@ i915_gem_load(struct drm_device *dev)
INIT_LIST_HEAD(&dev_priv->mm.pinned_list);
INIT_LIST_HEAD(&dev_priv->mm.fence_list);
INIT_LIST_HEAD(&dev_priv->mm.deferred_free_list);
- init_ring_lists(&dev_priv->render_ring);
- init_ring_lists(&dev_priv->bsd_ring);
- init_ring_lists(&dev_priv->blt_ring);
+ INIT_LIST_HEAD(&dev_priv->mm.gtt_list);
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ init_ring_lists(&dev_priv->ring[i]);
for (i = 0; i < 16; i++)
INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
i915_gem_retire_work_handler);
init_completion(&dev_priv->error_completion);
- spin_lock(&shrink_list_lock);
- list_add(&dev_priv->mm.shrink_list, &shrink_list);
- spin_unlock(&shrink_list_lock);
/* On GEN3 we really need to make sure the ARB C3 LP bit is set */
if (IS_GEN3(dev)) {
@@ -4683,6 +3735,8 @@ i915_gem_load(struct drm_device *dev)
}
}
+ dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
+
/* Old X drivers will take 0-2 for front, back, depth buffers */
if (!drm_core_check_feature(dev, DRIVER_MODESET))
dev_priv->fence_reg_start = 3;
@@ -4714,6 +3768,10 @@ i915_gem_load(struct drm_device *dev)
}
i915_gem_detect_bit_6_swizzle(dev);
init_waitqueue_head(&dev_priv->pending_flip_queue);
+
+ dev_priv->mm.inactive_shrinker.shrink = i915_gem_inactive_shrink;
+ dev_priv->mm.inactive_shrinker.seeks = DEFAULT_SEEKS;
+ register_shrinker(&dev_priv->mm.inactive_shrinker);
}
/*
@@ -4783,47 +3841,47 @@ void i915_gem_free_all_phys_object(struct drm_device *dev)
}
void i915_gem_detach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv;
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
+ char *vaddr;
int i;
- int ret;
int page_count;
- obj_priv = to_intel_bo(obj);
- if (!obj_priv->phys_obj)
+ if (!obj->phys_obj)
return;
+ vaddr = obj->phys_obj->handle->vaddr;
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret)
- goto out;
-
- page_count = obj->size / PAGE_SIZE;
-
+ page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *dst = kmap_atomic(obj_priv->pages[i]);
- char *src = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
-
- memcpy(dst, src, PAGE_SIZE);
- kunmap_atomic(dst);
+ struct page *page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (!IS_ERR(page)) {
+ char *dst = kmap_atomic(page);
+ memcpy(dst, vaddr + i*PAGE_SIZE, PAGE_SIZE);
+ kunmap_atomic(dst);
+
+ drm_clflush_pages(&page, 1);
+
+ set_page_dirty(page);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
}
- drm_clflush_pages(obj_priv->pages, page_count);
- drm_agp_chipset_flush(dev);
+ intel_gtt_chipset_flush();
- i915_gem_object_put_pages(obj);
-out:
- obj_priv->phys_obj->cur_obj = NULL;
- obj_priv->phys_obj = NULL;
+ obj->phys_obj->cur_obj = NULL;
+ obj->phys_obj = NULL;
}
int
i915_gem_attach_phys_object(struct drm_device *dev,
- struct drm_gem_object *obj,
+ struct drm_i915_gem_object *obj,
int id,
int align)
{
+ struct address_space *mapping = obj->base.filp->f_path.dentry->d_inode->i_mapping;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
int ret = 0;
int page_count;
int i;
@@ -4831,10 +3889,8 @@ i915_gem_attach_phys_object(struct drm_device *dev,
if (id > I915_MAX_PHYS_OBJECT)
return -EINVAL;
- obj_priv = to_intel_bo(obj);
-
- if (obj_priv->phys_obj) {
- if (obj_priv->phys_obj->id == id)
+ if (obj->phys_obj) {
+ if (obj->phys_obj->id == id)
return 0;
i915_gem_detach_phys_object(dev, obj);
}
@@ -4842,51 +3898,50 @@ i915_gem_attach_phys_object(struct drm_device *dev,
/* create a new object */
if (!dev_priv->mm.phys_objs[id - 1]) {
ret = i915_gem_init_phys_object(dev, id,
- obj->size, align);
+ obj->base.size, align);
if (ret) {
- DRM_ERROR("failed to init phys object %d size: %zu\n", id, obj->size);
- goto out;
+ DRM_ERROR("failed to init phys object %d size: %zu\n",
+ id, obj->base.size);
+ return ret;
}
}
/* bind to the object */
- obj_priv->phys_obj = dev_priv->mm.phys_objs[id - 1];
- obj_priv->phys_obj->cur_obj = obj;
-
- ret = i915_gem_object_get_pages(obj, 0);
- if (ret) {
- DRM_ERROR("failed to get page list\n");
- goto out;
- }
+ obj->phys_obj = dev_priv->mm.phys_objs[id - 1];
+ obj->phys_obj->cur_obj = obj;
- page_count = obj->size / PAGE_SIZE;
+ page_count = obj->base.size / PAGE_SIZE;
for (i = 0; i < page_count; i++) {
- char *src = kmap_atomic(obj_priv->pages[i]);
- char *dst = obj_priv->phys_obj->handle->vaddr + (i * PAGE_SIZE);
+ struct page *page;
+ char *dst, *src;
+ page = read_cache_page_gfp(mapping, i,
+ GFP_HIGHUSER | __GFP_RECLAIMABLE);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
+ src = kmap_atomic(page);
+ dst = obj->phys_obj->handle->vaddr + (i * PAGE_SIZE);
memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(src);
- }
- i915_gem_object_put_pages(obj);
+ mark_page_accessed(page);
+ page_cache_release(page);
+ }
return 0;
-out:
- return ret;
}
static int
-i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
+i915_gem_phys_pwrite(struct drm_device *dev,
+ struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file_priv)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- void *vaddr = obj_priv->phys_obj->handle->vaddr + args->offset;
+ void *vaddr = obj->phys_obj->handle->vaddr + args->offset;
char __user *user_data = (char __user *) (uintptr_t) args->data_ptr;
- DRM_DEBUG_DRIVER("vaddr %p, %lld\n", vaddr, args->size);
-
if (__copy_from_user_inatomic_nocache(vaddr, user_data, args->size)) {
unsigned long unwritten;
@@ -4901,7 +3956,7 @@ i915_gem_phys_pwrite(struct drm_device *dev, struct drm_gem_object *obj,
return -EFAULT;
}
- drm_agp_chipset_flush(dev);
+ intel_gtt_chipset_flush();
return 0;
}
@@ -4939,144 +3994,68 @@ i915_gpu_is_active(struct drm_device *dev)
}
static int
-i915_gem_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
-{
- drm_i915_private_t *dev_priv, *next_dev;
- struct drm_i915_gem_object *obj_priv, *next_obj;
- int cnt = 0;
- int would_deadlock = 1;
+i915_gem_inactive_shrink(struct shrinker *shrinker,
+ int nr_to_scan,
+ gfp_t gfp_mask)
+{
+ struct drm_i915_private *dev_priv =
+ container_of(shrinker,
+ struct drm_i915_private,
+ mm.inactive_shrinker);
+ struct drm_device *dev = dev_priv->dev;
+ struct drm_i915_gem_object *obj, *next;
+ int cnt;
+
+ if (!mutex_trylock(&dev->struct_mutex))
+ return 0;
/* "fast-path" to count number of available objects */
if (nr_to_scan == 0) {
- spin_lock(&shrink_list_lock);
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (mutex_trylock(&dev->struct_mutex)) {
- list_for_each_entry(obj_priv,
- &dev_priv->mm.inactive_list,
- mm_list)
- cnt++;
- mutex_unlock(&dev->struct_mutex);
- }
- }
- spin_unlock(&shrink_list_lock);
-
- return (cnt / 100) * sysctl_vfs_cache_pressure;
+ cnt = 0;
+ list_for_each_entry(obj,
+ &dev_priv->mm.inactive_list,
+ mm_list)
+ cnt++;
+ mutex_unlock(&dev->struct_mutex);
+ return cnt / 100 * sysctl_vfs_cache_pressure;
}
- spin_lock(&shrink_list_lock);
-
rescan:
/* first scan for clean buffers */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
- i915_gem_retire_requests(dev);
+ i915_gem_retire_requests(dev);
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (i915_gem_object_is_purgeable(obj_priv)) {
- i915_gem_object_unbind(&obj_priv->base);
- if (--nr_to_scan <= 0)
- break;
- }
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (i915_gem_object_is_purgeable(obj)) {
+ if (i915_gem_object_unbind(obj) == 0 &&
+ --nr_to_scan == 0)
+ break;
}
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
-
- would_deadlock = 0;
-
- if (nr_to_scan <= 0)
- break;
}
/* second pass, evict/count anything still on the inactive list */
- list_for_each_entry_safe(dev_priv, next_dev,
- &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (! mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
-
- list_for_each_entry_safe(obj_priv, next_obj,
- &dev_priv->mm.inactive_list,
- mm_list) {
- if (nr_to_scan > 0) {
- i915_gem_object_unbind(&obj_priv->base);
- nr_to_scan--;
- } else
- cnt++;
- }
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
-
- would_deadlock = 0;
+ cnt = 0;
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list,
+ mm_list) {
+ if (nr_to_scan &&
+ i915_gem_object_unbind(obj) == 0)
+ nr_to_scan--;
+ else
+ cnt++;
}
- if (nr_to_scan) {
- int active = 0;
-
+ if (nr_to_scan && i915_gpu_is_active(dev)) {
/*
* We are desperate for pages, so as a last resort, wait
* for the GPU to finish and discard whatever we can.
* This has a dramatic impact to reduce the number of
* OOM-killer events whilst running the GPU aggressively.
*/
- list_for_each_entry(dev_priv, &shrink_list, mm.shrink_list) {
- struct drm_device *dev = dev_priv->dev;
-
- if (!mutex_trylock(&dev->struct_mutex))
- continue;
-
- spin_unlock(&shrink_list_lock);
-
- if (i915_gpu_is_active(dev)) {
- i915_gpu_idle(dev);
- active++;
- }
-
- spin_lock(&shrink_list_lock);
- mutex_unlock(&dev->struct_mutex);
- }
-
- if (active)
+ if (i915_gpu_idle(dev) == 0)
goto rescan;
}
-
- spin_unlock(&shrink_list_lock);
-
- if (would_deadlock)
- return -1;
- else if (cnt > 0)
- return (cnt / 100) * sysctl_vfs_cache_pressure;
- else
- return 0;
-}
-
-static struct shrinker shrinker = {
- .shrink = i915_gem_shrink,
- .seeks = DEFAULT_SEEKS,
-};
-
-__init void
-i915_gem_shrinker_init(void)
-{
- register_shrinker(&shrinker);
-}
-
-__exit void
-i915_gem_shrinker_exit(void)
-{
- unregister_shrinker(&shrinker);
+ mutex_unlock(&dev->struct_mutex);
+ return cnt / 100 * sysctl_vfs_cache_pressure;
}
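
The rewritten shrinker drops the global shrink_list in favour of a per-device registration. Its general shape, as a sketch (the matching unregister_shrinker() call lives in the driver unload path, which this hunk does not show):

    /* Sketch only: the non-blocking shrinker convention of this era. */
    static int example_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp)
    {
            struct drm_i915_private *dev_priv =
                    container_of(s, struct drm_i915_private,
                                 mm.inactive_shrinker);

            if (!mutex_trylock(&dev_priv->dev->struct_mutex))
                    return 0;       /* contended: report no progress */

            /* ... unbind up to nr_to_scan inactive objects ... */

            mutex_unlock(&dev_priv->dev->struct_mutex);
            return 0;
    }
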
diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c
index 48644b840a8..29d014c48ca 100644
--- a/drivers/gpu/drm/i915/i915_gem_debug.c
+++ b/drivers/gpu/drm/i915/i915_gem_debug.c
@@ -152,13 +152,12 @@ i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end,
}
void
-i915_gem_dump_object(struct drm_gem_object *obj, int len,
+i915_gem_dump_object(struct drm_i915_gem_object *obj, int len,
const char *where, uint32_t mark)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int page;
- DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset);
+ DRM_INFO("%s: object at offset %08x\n", where, obj->gtt_offset);
for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) {
int page_len, chunk, chunk_len;
@@ -170,9 +169,9 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
chunk_len = page_len - chunk;
if (chunk_len > 128)
chunk_len = 128;
- i915_gem_dump_page(obj_priv->pages[page],
+ i915_gem_dump_page(obj->pages[page],
chunk, chunk + chunk_len,
- obj_priv->gtt_offset +
+ obj->gtt_offset +
page * PAGE_SIZE,
mark);
}
@@ -182,21 +181,19 @@ i915_gem_dump_object(struct drm_gem_object *obj, int len,
#if WATCH_COHERENCY
void
-i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
+i915_gem_object_check_coherency(struct drm_i915_gem_object *obj, int handle)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_device *dev = obj->base.dev;
int page;
uint32_t *gtt_mapping;
uint32_t *backing_map = NULL;
int bad_count = 0;
DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %zdkb):\n",
- __func__, obj, obj_priv->gtt_offset, handle,
+ __func__, obj, obj->gtt_offset, handle,
obj->size / 1024);
- gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset,
- obj->size);
+ gtt_mapping = ioremap(dev->agp->base + obj->gtt_offset, obj->base.size);
if (gtt_mapping == NULL) {
DRM_ERROR("failed to map GTT space\n");
return;
@@ -205,7 +202,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
for (page = 0; page < obj->size / PAGE_SIZE; page++) {
int i;
- backing_map = kmap_atomic(obj_priv->pages[page], KM_USER0);
+ backing_map = kmap_atomic(obj->pages[page], KM_USER0);
if (backing_map == NULL) {
DRM_ERROR("failed to map backing page\n");
@@ -220,7 +217,7 @@ i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle)
if (cpuval != gttval) {
DRM_INFO("incoherent CPU vs GPU at 0x%08x: "
"0x%08x vs 0x%08x\n",
- (int)(obj_priv->gtt_offset +
+ (int)(obj->gtt_offset +
page * PAGE_SIZE + i * 4),
cpuval, gttval);
if (bad_count++ >= 8) {
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index d8ae7d1d0cc..78b8cf90c92 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -32,28 +32,36 @@
#include "i915_drm.h"
static bool
-mark_free(struct drm_i915_gem_object *obj_priv,
- struct list_head *unwind)
+mark_free(struct drm_i915_gem_object *obj, struct list_head *unwind)
{
- list_add(&obj_priv->evict_list, unwind);
- drm_gem_object_reference(&obj_priv->base);
- return drm_mm_scan_add_block(obj_priv->gtt_space);
+ list_add(&obj->exec_list, unwind);
+ drm_gem_object_reference(&obj->base);
+ return drm_mm_scan_add_block(obj->gtt_space);
}
int
-i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignment)
+i915_gem_evict_something(struct drm_device *dev, int min_size,
+ unsigned alignment, bool mappable)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct list_head eviction_list, unwind_list;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret = 0;
i915_gem_retire_requests(dev);
/* Re-check for free space after retiring requests */
- if (drm_mm_search_free(&dev_priv->mm.gtt_space,
- min_size, alignment, 0))
- return 0;
+ if (mappable) {
+ if (drm_mm_search_free_in_range(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0,
+ dev_priv->mm.gtt_mappable_end,
+ 0))
+ return 0;
+ } else {
+ if (drm_mm_search_free(&dev_priv->mm.gtt_space,
+ min_size, alignment, 0))
+ return 0;
+ }
/*
* The goal is to evict objects and amalgamate space in LRU order.
@@ -79,45 +87,50 @@ i915_gem_evict_something(struct drm_device *dev, int min_size, unsigned alignmen
*/
INIT_LIST_HEAD(&unwind_list);
- drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
+ if (mappable)
+ drm_mm_init_scan_with_range(&dev_priv->mm.gtt_space, min_size,
+ alignment, 0,
+ dev_priv->mm.gtt_mappable_end);
+ else
+ drm_mm_init_scan(&dev_priv->mm.gtt_space, min_size, alignment);
/* First see if there is a large enough contiguous idle region... */
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- if (mark_free(obj_priv, &unwind_list))
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Now merge in the soon-to-be-expired objects... */
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
/* Does the object require an outstanding flush? */
- if (obj_priv->base.write_domain || obj_priv->pin_count)
+ if (obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Finally add anything with a pending flush (in order of retirement) */
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- if (obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
+ if (obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- if (! obj_priv->base.write_domain || obj_priv->pin_count)
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
+ if (! obj->base.write_domain || obj->pin_count)
continue;
- if (mark_free(obj_priv, &unwind_list))
+ if (mark_free(obj, &unwind_list))
goto found;
}
/* Nothing found, clean up and bail out! */
- list_for_each_entry(obj_priv, &unwind_list, evict_list) {
- ret = drm_mm_scan_remove_block(obj_priv->gtt_space);
+ list_for_each_entry(obj, &unwind_list, exec_list) {
+ ret = drm_mm_scan_remove_block(obj->gtt_space);
BUG_ON(ret);
- drm_gem_object_unreference(&obj_priv->base);
+ drm_gem_object_unreference(&obj->base);
}
/* We expect the caller to unpin, evict all and try again, or give up.
@@ -131,33 +144,33 @@ found:
* temporary list. */
INIT_LIST_HEAD(&eviction_list);
while (!list_empty(&unwind_list)) {
- obj_priv = list_first_entry(&unwind_list,
- struct drm_i915_gem_object,
- evict_list);
- if (drm_mm_scan_remove_block(obj_priv->gtt_space)) {
- list_move(&obj_priv->evict_list, &eviction_list);
+ obj = list_first_entry(&unwind_list,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (drm_mm_scan_remove_block(obj->gtt_space)) {
+ list_move(&obj->exec_list, &eviction_list);
continue;
}
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
/* Unbinding will emit any required flushes */
while (!list_empty(&eviction_list)) {
- obj_priv = list_first_entry(&eviction_list,
- struct drm_i915_gem_object,
- evict_list);
+ obj = list_first_entry(&eviction_list,
+ struct drm_i915_gem_object,
+ exec_list);
if (ret == 0)
- ret = i915_gem_object_unbind(&obj_priv->base);
- list_del(&obj_priv->evict_list);
- drm_gem_object_unreference(&obj_priv->base);
+ ret = i915_gem_object_unbind(obj);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
}
return ret;
}
int
-i915_gem_evict_everything(struct drm_device *dev)
+i915_gem_evict_everything(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
int ret;
@@ -176,36 +189,22 @@ i915_gem_evict_everything(struct drm_device *dev)
BUG_ON(!list_empty(&dev_priv->mm.flushing_list));
- ret = i915_gem_evict_inactive(dev);
- if (ret)
- return ret;
-
- lists_empty = (list_empty(&dev_priv->mm.inactive_list) &&
- list_empty(&dev_priv->mm.flushing_list) &&
- list_empty(&dev_priv->mm.active_list));
- BUG_ON(!lists_empty);
-
- return 0;
+ return i915_gem_evict_inactive(dev, purgeable_only);
}
/** Unbinds all inactive objects. */
int
-i915_gem_evict_inactive(struct drm_device *dev)
+i915_gem_evict_inactive(struct drm_device *dev, bool purgeable_only)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- while (!list_empty(&dev_priv->mm.inactive_list)) {
- struct drm_gem_object *obj;
- int ret;
-
- obj = &list_first_entry(&dev_priv->mm.inactive_list,
- struct drm_i915_gem_object,
- mm_list)->base;
-
- ret = i915_gem_object_unbind(obj);
- if (ret != 0) {
- DRM_ERROR("Error unbinding object: %d\n", ret);
- return ret;
+ struct drm_i915_gem_object *obj, *next;
+
+ list_for_each_entry_safe(obj, next,
+ &dev_priv->mm.inactive_list, mm_list) {
+ if (!purgeable_only || obj->madv != I915_MADV_WILLNEED) {
+ int ret = i915_gem_object_unbind(obj);
+ if (ret)
+ return ret;
}
}
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
new file mode 100644
index 00000000000..61129e6759e
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -0,0 +1,1343 @@
+/*
+ * Copyright © 2008,2010 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Eric Anholt <eric@anholt.net>
+ * Chris Wilson <chris@chris-wilson.co.uk>
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+struct change_domains {
+ uint32_t invalidate_domains;
+ uint32_t flush_domains;
+ uint32_t flush_rings;
+};
+
+/*
+ * Set the next domain for the specified object. This
+ * may not actually perform the necessary flushing/invalidating though,
+ * as that may want to be batched with other set_domain operations.
+ *
+ * This is (we hope) the only really tricky part of gem. The goal
+ * is fairly simple -- track which caches hold bits of the object
+ * and make sure they remain coherent. A few concrete examples may
+ * help to explain how it works. For shorthand, we use the notation
+ * (read_domains, write_domain), e.g. (CPU, CPU) to indicate
+ * a pair of read and write domain masks.
+ *
+ * Case 1: the batch buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Mapped to GTT
+ * 4. Read by GPU
+ * 5. Unmapped from GTT
+ * 6. Freed
+ *
+ * Let's take these a step at a time
+ *
+ * 1. Allocated
+ * Pages allocated from the kernel may still have
+ * cache contents, so we set them to (CPU, CPU) always.
+ * 2. Written by CPU (using pwrite)
+ * The pwrite function calls set_domain (CPU, CPU) and
+ * this function does nothing (as nothing changes)
+ * 3. Mapped by GTT
+ * This function asserts that the object is not
+ * currently in any GPU-based read or write domains
+ * 4. Read by GPU
+ * i915_gem_execbuffer calls set_domain (COMMAND, 0).
+ * As write_domain is zero, this function adds in the
+ * current read domains (CPU+COMMAND, 0).
+ * flush_domains is set to CPU.
+ * invalidate_domains is set to COMMAND
+ * clflush is run to get data out of the CPU caches
+ * then i915_dev_set_domain calls i915_gem_flush to
+ * emit an MI_FLUSH and drm_agp_chipset_flush
+ * 5. Unmapped from GTT
+ * i915_gem_object_unbind calls set_domain (CPU, CPU)
+ * flush_domains and invalidate_domains end up both zero
+ * so no flushing/invalidating happens
+ * 6. Freed
+ * yay, done
+ *
+ * Case 2: The shared render buffer
+ *
+ * 1. Allocated
+ * 2. Mapped to GTT
+ * 3. Read/written by GPU
+ * 4. set_domain to (CPU,CPU)
+ * 5. Read/written by CPU
+ * 6. Read/written by GPU
+ *
+ * 1. Allocated
+ * Same as last example, (CPU, CPU)
+ * 2. Mapped to GTT
+ * Nothing changes (assertions find that it is not in the GPU)
+ * 3. Read/written by GPU
+ * execbuffer calls set_domain (RENDER, RENDER)
+ * flush_domains gets CPU
+ * invalidate_domains gets GPU
+ * clflush (obj)
+ * MI_FLUSH and drm_agp_chipset_flush
+ * 4. set_domain (CPU, CPU)
+ * flush_domains gets GPU
+ * invalidate_domains gets CPU
+ * wait_rendering (obj) to make sure all drawing is complete.
+ * This will include an MI_FLUSH to get the data from GPU
+ * to memory
+ * clflush (obj) to invalidate the CPU cache
+ * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?)
+ * 5. Read/written by CPU
+ * cache lines are loaded and dirtied
+ *	6. Read/written by GPU
+ * Same as last GPU access
+ *
+ * Case 3: The constant buffer
+ *
+ * 1. Allocated
+ * 2. Written by CPU
+ * 3. Read by GPU
+ * 4. Updated (written) by CPU again
+ * 5. Read by GPU
+ *
+ * 1. Allocated
+ * (CPU, CPU)
+ * 2. Written by CPU
+ * (CPU, CPU)
+ * 3. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ * 4. Updated (written) by CPU again
+ * (CPU, CPU)
+ * flush_domains = 0 (no previous write domain)
+ * invalidate_domains = 0 (no new read domains)
+ * 5. Read by GPU
+ * (CPU+RENDER, 0)
+ * flush_domains = CPU
+ * invalidate_domains = RENDER
+ * clflush (obj)
+ * MI_FLUSH
+ * drm_agp_chipset_flush
+ */
+static void
+i915_gem_object_set_to_gpu_domain(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *ring,
+ struct change_domains *cd)
+{
+ uint32_t invalidate_domains = 0, flush_domains = 0;
+
+ /*
+ * If the object isn't moving to a new write domain,
+ * let the object stay in multiple read domains
+ */
+ if (obj->base.pending_write_domain == 0)
+ obj->base.pending_read_domains |= obj->base.read_domains;
+
+ /*
+ * Flush the current write domain if
+ * the new read domains don't match. Invalidate
+ * any read domains which differ from the old
+ * write domain
+ */
+ if (obj->base.write_domain &&
+	    ((obj->base.write_domain != obj->base.pending_read_domains ||
+	      obj->ring != ring) ||
+	     (obj->fenced_gpu_access && !obj->pending_fenced_gpu_access))) {
+ flush_domains |= obj->base.write_domain;
+ invalidate_domains |=
+ obj->base.pending_read_domains & ~obj->base.write_domain;
+ }
+ /*
+ * Invalidate any read caches which may have
+ * stale data. That is, any new read domains.
+ */
+ invalidate_domains |= obj->base.pending_read_domains & ~obj->base.read_domains;
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU)
+ i915_gem_clflush_object(obj);
+
+ /* blow away mappings if mapped through GTT */
+ if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_GTT)
+ i915_gem_release_mmap(obj);
+
+ /* The actual obj->write_domain will be updated with
+ * pending_write_domain after we emit the accumulated flush for all
+ * of our domain changes in execbuffers (which clears objects'
+ * write_domains). So if we have a current write domain that we
+ * aren't changing, set pending_write_domain to that.
+ */
+ if (flush_domains == 0 && obj->base.pending_write_domain == 0)
+ obj->base.pending_write_domain = obj->base.write_domain;
+
+ cd->invalidate_domains |= invalidate_domains;
+ cd->flush_domains |= flush_domains;
+ if (flush_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= obj->ring->id;
+ if (invalidate_domains & I915_GEM_GPU_DOMAINS)
+ cd->flush_rings |= ring->id;
+}
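
To make the long comment above concrete, tracing its Case 3, step 3 (a constant buffer written by the CPU, then read by the render ring) through this function:

    read_domains = CPU, write_domain = CPU, pending_read_domains = RENDER
    => flush_domains      = CPU     (old CPU writes must be flushed out)
       invalidate_domains = RENDER  (new read domain was not yet present)
       i915_gem_clflush_object() runs, and cd->flush_rings gains the target
       ring, so the accumulated flush is emitted before the batch executes.
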
+
+struct eb_objects {
+ int and;
+ struct hlist_head buckets[0];
+};
+
+static struct eb_objects *
+eb_create(int size)
+{
+ struct eb_objects *eb;
+ int count = PAGE_SIZE / sizeof(struct hlist_head) / 2;
+ while (count > size)
+ count >>= 1;
+ eb = kzalloc(count*sizeof(struct hlist_head) +
+ sizeof(struct eb_objects),
+ GFP_KERNEL);
+ if (eb == NULL)
+ return eb;
+
+ eb->and = count - 1;
+ return eb;
+}
+
+static void
+eb_reset(struct eb_objects *eb)
+{
+ memset(eb->buckets, 0, (eb->and+1)*sizeof(struct hlist_head));
+}
+
+static void
+eb_add_object(struct eb_objects *eb, struct drm_i915_gem_object *obj)
+{
+ hlist_add_head(&obj->exec_node,
+ &eb->buckets[obj->exec_handle & eb->and]);
+}
+
+static struct drm_i915_gem_object *
+eb_get_object(struct eb_objects *eb, unsigned long handle)
+{
+ struct hlist_head *head;
+ struct hlist_node *node;
+ struct drm_i915_gem_object *obj;
+
+ head = &eb->buckets[handle & eb->and];
+ hlist_for_each(node, head) {
+ obj = hlist_entry(node, struct drm_i915_gem_object, exec_node);
+ if (obj->exec_handle == handle)
+ return obj;
+ }
+
+ return NULL;
+}
+
+static void
+eb_destroy(struct eb_objects *eb)
+{
+ kfree(eb);
+}
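
eb_create() sizes the table to the largest power of two not exceeding the object count (capped at half a page of hlist_heads), so handle & eb->and is a cheap bucket index. A usage sketch (names mirror the execbuffer path; the sequence is illustrative):

    /* Sketch only: build the handle->object map once per execbuffer. */
    struct eb_objects *eb = eb_create(args->buffer_count);

    /* while looking up each object: */
    obj->exec_handle = exec[i].handle;
    eb_add_object(eb, obj);

    /* later, once per relocation: */
    target = eb_get_object(eb, reloc->target_handle);

    eb_destroy(eb);
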
+
+static int
+i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *reloc)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_gem_object *target_obj;
+ uint32_t target_offset;
+ int ret = -EINVAL;
+
+	/* we already hold a reference to all valid objects */
+ target_obj = &eb_get_object(eb, reloc->target_handle)->base;
+ if (unlikely(target_obj == NULL))
+ return -ENOENT;
+
+ target_offset = to_intel_bo(target_obj)->gtt_offset;
+
+#if WATCH_RELOC
+ DRM_INFO("%s: obj %p offset %08x target %d "
+ "read %08x write %08x gtt %08x "
+ "presumed %08x delta %08x\n",
+ __func__,
+ obj,
+ (int) reloc->offset,
+ (int) reloc->target_handle,
+ (int) reloc->read_domains,
+ (int) reloc->write_domain,
+ (int) target_offset,
+ (int) reloc->presumed_offset,
+ reloc->delta);
+#endif
+
+ /* The target buffer should have appeared before us in the
+ * exec_object list, so it should have a GTT space bound by now.
+ */
+ if (unlikely(target_offset == 0)) {
+ DRM_ERROR("No GTT space found for object %d\n",
+ reloc->target_handle);
+ return ret;
+ }
+
+ /* Validate that the target is in a valid r/w GPU domain */
+ if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
+ DRM_ERROR("reloc with multiple write domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely((reloc->write_domain | reloc->read_domains) & I915_GEM_DOMAIN_CPU)) {
+ DRM_ERROR("reloc with read/write CPU domains: "
+ "obj %p target %d offset %d "
+ "read %08x write %08x",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->read_domains,
+ reloc->write_domain);
+ return ret;
+ }
+ if (unlikely(reloc->write_domain && target_obj->pending_write_domain &&
+ reloc->write_domain != target_obj->pending_write_domain)) {
+ DRM_ERROR("Write domain conflict: "
+ "obj %p target %d offset %d "
+ "new %08x old %08x\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ reloc->write_domain,
+ target_obj->pending_write_domain);
+ return ret;
+ }
+
+ target_obj->pending_read_domains |= reloc->read_domains;
+ target_obj->pending_write_domain |= reloc->write_domain;
+
+ /* If the relocation already has the right value in it, no
+ * more work needs to be done.
+ */
+ if (target_offset == reloc->presumed_offset)
+ return 0;
+
+ /* Check that the relocation address is valid... */
+ if (unlikely(reloc->offset > obj->base.size - 4)) {
+ DRM_ERROR("Relocation beyond object bounds: "
+ "obj %p target %d offset %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset,
+ (int) obj->base.size);
+ return ret;
+ }
+ if (unlikely(reloc->offset & 3)) {
+ DRM_ERROR("Relocation not 4-byte aligned: "
+ "obj %p target %d offset %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->offset);
+ return ret;
+ }
+
+ /* and points to somewhere within the target object. */
+ if (unlikely(reloc->delta >= target_obj->size)) {
+ DRM_ERROR("Relocation beyond target object bounds: "
+ "obj %p target %d delta %d size %d.\n",
+ obj, reloc->target_handle,
+ (int) reloc->delta,
+ (int) target_obj->size);
+ return ret;
+ }
+
+ reloc->delta += target_offset;
+ if (obj->base.write_domain == I915_GEM_DOMAIN_CPU) {
+ uint32_t page_offset = reloc->offset & ~PAGE_MASK;
+ char *vaddr;
+
+ vaddr = kmap_atomic(obj->pages[reloc->offset >> PAGE_SHIFT]);
+ *(uint32_t *)(vaddr + page_offset) = reloc->delta;
+ kunmap_atomic(vaddr);
+ } else {
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ uint32_t __iomem *reloc_entry;
+ void __iomem *reloc_page;
+
+ ret = i915_gem_object_set_to_gtt_domain(obj, 1);
+ if (ret)
+ return ret;
+
+ /* Map the page containing the relocation we're going to perform. */
+ reloc->offset += obj->gtt_offset;
+ reloc_page = io_mapping_map_atomic_wc(dev_priv->mm.gtt_mapping,
+ reloc->offset & PAGE_MASK);
+ reloc_entry = (uint32_t __iomem *)
+ (reloc_page + (reloc->offset & ~PAGE_MASK));
+ iowrite32(reloc->delta, reloc_entry);
+ io_mapping_unmap_atomic(reloc_page);
+ }
+
+ /* and update the user's relocation entry */
+ reloc->presumed_offset = target_offset;
+
+ return 0;
+}
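
For context, the structure this function consumes is filled in by userspace; a hedged sketch of one entry (the *_handle and *_offset variables are assumptions):

    /* Sketch only: patch the dword at 'offset' in this object to point
     * at target_offset + delta; presumed_offset lets the kernel skip
     * the rewrite when nothing has moved.
     */
    struct drm_i915_gem_relocation_entry reloc = {
            .target_handle   = target_bo_handle,
            .offset          = batch_byte_offset,
            .delta           = 0,
            .presumed_offset = last_known_gtt_offset,
            .read_domains    = I915_GEM_DOMAIN_RENDER,
            .write_domain    = 0,
    };
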
+
+static int
+i915_gem_execbuffer_relocate_object(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry)
+{
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+ int i, ret;
+
+ user_relocs = (void __user *)(uintptr_t)entry->relocs_ptr;
+ for (i = 0; i < entry->relocation_count; i++) {
+ struct drm_i915_gem_relocation_entry reloc;
+
+ if (__copy_from_user_inatomic(&reloc,
+ user_relocs+i,
+ sizeof(reloc)))
+ return -EFAULT;
+
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &reloc);
+ if (ret)
+ return ret;
+
+ if (__copy_to_user_inatomic(&user_relocs[i].presumed_offset,
+ &reloc.presumed_offset,
+ sizeof(reloc.presumed_offset)))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate_object_slow(struct drm_i915_gem_object *obj,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *entry,
+ struct drm_i915_gem_relocation_entry *relocs)
+{
+ int i, ret;
+
+ for (i = 0; i < entry->relocation_count; i++) {
+ ret = i915_gem_execbuffer_relocate_entry(obj, eb, entry, &relocs[i]);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_relocate(struct drm_device *dev,
+ struct eb_objects *eb,
+ struct list_head *objects,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object(obj, eb, exec++);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_reserve(struct intel_ring_buffer *ring,
+ struct drm_file *file,
+ struct list_head *objects,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ struct drm_i915_gem_object *obj;
+ struct drm_i915_gem_exec_object2 *entry;
+ int ret, retry;
+ bool has_fenced_gpu_access = INTEL_INFO(ring->dev)->gen < 4;
+
+ /* Attempt to pin all of the buffers into the GTT.
+ * This is done in 3 phases:
+ *
+ * 1a. Unbind all objects that do not match the GTT constraints for
+ * the execbuffer (fenceable, mappable, alignment etc).
+ * 1b. Increment pin count for already bound objects.
+ * 2. Bind new objects.
+ * 3. Decrement pin count.
+ *
+	 * This avoids unnecessary unbinding of later objects in order to make
+ * room for the earlier objects *unless* we need to defragment.
+ */
+ retry = 0;
+ do {
+ ret = 0;
+
+ /* Unbind any ill-fitting objects or pin. */
+ entry = exec;
+ list_for_each_entry(obj, objects, exec_list) {
+ bool need_fence, need_mappable;
+
+ if (!obj->gtt_space) {
+ entry++;
+ continue;
+ }
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+ need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ if ((entry->alignment && obj->gtt_offset & (entry->alignment - 1)) ||
+ (need_mappable && !obj->map_and_fenceable))
+ ret = i915_gem_object_unbind(obj);
+ else
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ goto err;
+
+ entry++;
+ }
+
+ /* Bind fresh objects */
+ entry = exec;
+ list_for_each_entry(obj, objects, exec_list) {
+ bool need_fence;
+
+ need_fence =
+ has_fenced_gpu_access &&
+ entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode != I915_TILING_NONE;
+
+ if (!obj->gtt_space) {
+ bool need_mappable =
+ entry->relocation_count ? true : need_fence;
+
+ ret = i915_gem_object_pin(obj,
+ entry->alignment,
+ need_mappable);
+ if (ret)
+ break;
+ }
+
+ if (has_fenced_gpu_access) {
+ if (need_fence) {
+ ret = i915_gem_object_get_fence(obj, ring, 1);
+ if (ret)
+ break;
+ } else if (entry->flags & EXEC_OBJECT_NEEDS_FENCE &&
+ obj->tiling_mode == I915_TILING_NONE) {
+ /* XXX pipelined! */
+ ret = i915_gem_object_put_fence(obj);
+ if (ret)
+ break;
+ }
+ obj->pending_fenced_gpu_access = need_fence;
+ }
+
+ entry->offset = obj->gtt_offset;
+ entry++;
+ }
+
+ /* Decrement pin count for bound objects */
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
+ }
+
+ if (ret != -ENOSPC || retry > 1)
+ return ret;
+
+ /* First attempt, just clear anything that is purgeable.
+ * Second attempt, clear the entire GTT.
+ */
+ ret = i915_gem_evict_everything(ring->dev, retry == 0);
+ if (ret)
+ return ret;
+
+ retry++;
+ } while (1);
+
+err:
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ while (objects != &obj->exec_list) {
+ if (obj->gtt_space)
+ i915_gem_object_unpin(obj);
+
+ obj = list_entry(obj->exec_list.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ }
+
+ return ret;
+}
+
+static int
+i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring,
+ struct list_head *objects,
+ struct eb_objects *eb,
+ struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ struct drm_i915_gem_relocation_entry *reloc;
+ struct drm_i915_gem_object *obj;
+ int i, total, ret;
+
+ /* We may process another execbuffer during the unlock... */
+ while (!list_empty(objects)) {
+ obj = list_first_entry(objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
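+ /* Take a copy of all the user relocations while unlocked, so
+ * that any page faults are resolved here rather than during
+ * the relocation pass under struct_mutex.
+ */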
+ total = 0;
+ for (i = 0; i < count; i++)
+ total += exec[i].relocation_count;
+
+ reloc = drm_malloc_ab(total, sizeof(*reloc));
+ if (reloc == NULL) {
+ mutex_lock(&dev->struct_mutex);
+ return -ENOMEM;
+ }
+
+ total = 0;
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_relocation_entry __user *user_relocs;
+
+ user_relocs = (void __user *)(uintptr_t)exec[i].relocs_ptr;
+
+ if (copy_from_user(reloc+total, user_relocs,
+ exec[i].relocation_count * sizeof(*reloc))) {
+ ret = -EFAULT;
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ total += exec[i].relocation_count;
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret) {
+ mutex_lock(&dev->struct_mutex);
+ goto err;
+ }
+
+ /* reacquire the objects */
+ INIT_LIST_HEAD(objects);
+ eb_reset(eb);
+ for (i = 0; i < count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ ret = -ENOENT;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, objects);
+ obj->exec_handle = exec[i].handle;
+ eb_add_object(eb, obj);
+ }
+
+ ret = i915_gem_execbuffer_reserve(ring, file, objects, exec);
+ if (ret)
+ goto err;
+
+ total = 0;
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.pending_read_domains = 0;
+ obj->base.pending_write_domain = 0;
+ ret = i915_gem_execbuffer_relocate_object_slow(obj, eb,
+ exec,
+ reloc + total);
+ if (ret)
+ goto err;
+
+ total += exec->relocation_count;
+ exec++;
+ }
+
+ /* Leave the user relocations as they are; this is the painfully slow path,
+ * and we want to avoid the complication of dropping the lock whilst
+ * having buffers reserved in the aperture and so causing spurious
+ * ENOSPC for random operations.
+ */
+
+err:
+ drm_free_large(reloc);
+ return ret;
+}
+
+static void
+i915_gem_execbuffer_flush(struct drm_device *dev,
+ uint32_t invalidate_domains,
+ uint32_t flush_domains,
+ uint32_t flush_rings)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ int i;
+
+ if (flush_domains & I915_GEM_DOMAIN_CPU)
+ intel_gtt_chipset_flush();
+
+ if ((flush_domains | invalidate_domains) & I915_GEM_GPU_DOMAINS) {
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ if (flush_rings & (1 << i))
+ i915_gem_flush_ring(dev, &dev_priv->ring[i],
+ invalidate_domains,
+ flush_domains);
+ }
+}
+
+static int
+i915_gem_execbuffer_sync_rings(struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *to)
+{
+ struct intel_ring_buffer *from = obj->ring;
+ u32 seqno;
+ int ret, idx;
+
+ if (from == NULL || to == from)
+ return 0;
+
+ if (INTEL_INFO(obj->base.dev)->gen < 6)
+ return i915_gem_object_wait_rendering(obj, true);
+
+ idx = intel_ring_sync_index(from, to);
+
+ seqno = obj->last_rendering_seqno;
+ if (seqno <= from->sync_seqno[idx])
+ return 0;
+
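+ /* A lazy seqno has not yet been written to the ring, so emit
+ * a real request now for the other ring's semaphore to wait
+ * upon.
+ */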
+ if (seqno == from->outstanding_lazy_request) {
+ struct drm_i915_gem_request *request;
+
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
+ if (request == NULL)
+ return -ENOMEM;
+
+ ret = i915_add_request(obj->base.dev, NULL, request, from);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ seqno = request->seqno;
+ }
+
+ from->sync_seqno[idx] = seqno;
+ return intel_ring_sync(to, from, seqno - 1);
+}
+
+static int
+i915_gem_execbuffer_move_to_gpu(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ struct change_domains cd;
+ int ret;
+
+ cd.invalidate_domains = 0;
+ cd.flush_domains = 0;
+ cd.flush_rings = 0;
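+ /* Accumulate the required domain transitions over all objects,
+ * then emit the combined flushes and invalidations in one pass.
+ */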
+ list_for_each_entry(obj, objects, exec_list)
+ i915_gem_object_set_to_gpu_domain(obj, ring, &cd);
+
+ if (cd.invalidate_domains | cd.flush_domains) {
+#if WATCH_EXEC
+ DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n",
+ __func__,
+ cd.invalidate_domains,
+ cd.flush_domains);
+#endif
+ i915_gem_execbuffer_flush(ring->dev,
+ cd.invalidate_domains,
+ cd.flush_domains,
+ cd.flush_rings);
+ }
+
+ list_for_each_entry(obj, objects, exec_list) {
+ ret = i915_gem_execbuffer_sync_rings(obj, ring);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static bool
+i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
+{
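+ /* The batch must start and end on a qword (8 byte) boundary. */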
+ return ((exec->batch_start_offset | exec->batch_len) & 0x7) == 0;
+}
+
+static int
+validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
+ int count)
+{
+ int i;
+
+ for (i = 0; i < count; i++) {
+ char __user *ptr = (char __user *)(uintptr_t)exec[i].relocs_ptr;
+ int length; /* limited by fault_in_pages_readable() */
+
+ /* First check for malicious input causing overflow */
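+ /* relocation_count is a u32, and each entry is 32 bytes, so an
+ * unchecked multiply could overflow the signed length below.
+ */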
+ if (exec[i].relocation_count >
+ INT_MAX / sizeof(struct drm_i915_gem_relocation_entry))
+ return -EINVAL;
+
+ length = exec[i].relocation_count *
+ sizeof(struct drm_i915_gem_relocation_entry);
+ if (!access_ok(VERIFY_READ, ptr, length))
+ return -EFAULT;
+
+ /* we may also need to update the presumed offsets */
+ if (!access_ok(VERIFY_WRITE, ptr, length))
+ return -EFAULT;
+
+ if (fault_in_pages_readable(ptr, length))
+ return -EFAULT;
+ }
+
+ return 0;
+}
+
+static int
+i915_gem_execbuffer_wait_for_flips(struct intel_ring_buffer *ring,
+ struct list_head *objects)
+{
+ struct drm_i915_gem_object *obj;
+ int flips;
+
+ /* Check for any pending flips. As we only maintain a flip queue depth
+ * of 1, we can simply insert a WAIT for the next display flip prior
+ * to executing the batch and avoid stalling the CPU.
+ */
+ flips = 0;
+ list_for_each_entry(obj, objects, exec_list) {
+ if (obj->base.write_domain)
+ flips |= atomic_read(&obj->pending_flip);
+ }
+ if (flips) {
+ int plane, flip_mask, ret;
+
+ for (plane = 0; flips >> plane; plane++) {
+ if (((flips >> plane) & 1) == 0)
+ continue;
+
+ if (plane)
+ flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
+ else
+ flip_mask = MI_WAIT_FOR_PLANE_A_FLIP;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_WAIT_FOR_EVENT | flip_mask);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
+ }
+
+ return 0;
+}
+
+static void
+i915_gem_execbuffer_move_to_active(struct list_head *objects,
+ struct intel_ring_buffer *ring,
+ u32 seqno)
+{
+ struct drm_i915_gem_object *obj;
+
+ list_for_each_entry(obj, objects, exec_list) {
+ obj->base.read_domains = obj->base.pending_read_domains;
+ obj->base.write_domain = obj->base.pending_write_domain;
+ obj->fenced_gpu_access = obj->pending_fenced_gpu_access;
+
+ i915_gem_object_move_to_active(obj, ring, seqno);
+ if (obj->base.write_domain) {
+ obj->dirty = 1;
+ obj->pending_gpu_write = true;
+ list_move_tail(&obj->gpu_write_list,
+ &ring->gpu_write_list);
+ intel_mark_busy(ring->dev, obj);
+ }
+
+ trace_i915_gem_object_change_domain(obj,
+ obj->base.read_domains,
+ obj->base.write_domain);
+ }
+}
+
+static void
+i915_gem_execbuffer_retire_commands(struct drm_device *dev,
+ struct drm_file *file,
+ struct intel_ring_buffer *ring)
+{
+ struct drm_i915_gem_request *request;
+ u32 flush_domains;
+
+ /*
+ * Ensure that the commands in the batch buffer are
+ * finished before the interrupt fires.
+ *
+ * The sampler always gets flushed on i965 (sigh).
+ */
+ flush_domains = 0;
+ if (INTEL_INFO(dev)->gen >= 4)
+ flush_domains |= I915_GEM_DOMAIN_SAMPLER;
+
+ ring->flush(ring, I915_GEM_DOMAIN_COMMAND, flush_domains);
+
+ /* Add a breadcrumb for the completion of the batch buffer */
+ request = kzalloc(sizeof(*request), GFP_KERNEL);
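+ /* If the request cannot be queued, fall back to noting the
+ * seqno as an outstanding lazy request so that it is not lost.
+ */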
+ if (request == NULL || i915_add_request(dev, file, request, ring)) {
+ i915_gem_next_request_seqno(dev, ring);
+ kfree(request);
+ }
+}
+
+static int
+i915_gem_do_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file,
+ struct drm_i915_gem_execbuffer2 *args,
+ struct drm_i915_gem_exec_object2 *exec)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct list_head objects;
+ struct eb_objects *eb;
+ struct drm_i915_gem_object *batch_obj;
+ struct drm_clip_rect *cliprects = NULL;
+ struct intel_ring_buffer *ring;
+ u32 exec_start, exec_len;
+ u32 seqno;
+ int ret, mode, i;
+
+ if (!i915_gem_check_execbuffer(args)) {
+ DRM_ERROR("execbuf with invalid offset/length\n");
+ return -EINVAL;
+ }
+
+ ret = validate_exec_list(exec, args->buffer_count);
+ if (ret)
+ return ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+ switch (args->flags & I915_EXEC_RING_MASK) {
+ case I915_EXEC_DEFAULT:
+ case I915_EXEC_RENDER:
+ ring = &dev_priv->ring[RCS];
+ break;
+ case I915_EXEC_BSD:
+ if (!HAS_BSD(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BSD)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->ring[VCS];
+ break;
+ case I915_EXEC_BLT:
+ if (!HAS_BLT(dev)) {
+ DRM_ERROR("execbuf with invalid ring (BLT)\n");
+ return -EINVAL;
+ }
+ ring = &dev_priv->ring[BCS];
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown ring: %d\n",
+ (int)(args->flags & I915_EXEC_RING_MASK));
+ return -EINVAL;
+ }
+
+ mode = args->flags & I915_EXEC_CONSTANTS_MASK;
+ switch (mode) {
+ case I915_EXEC_CONSTANTS_REL_GENERAL:
+ case I915_EXEC_CONSTANTS_ABSOLUTE:
+ case I915_EXEC_CONSTANTS_REL_SURFACE:
+ if (ring == &dev_priv->ring[RCS] &&
+ mode != dev_priv->relative_constants_mode) {
+ if (INTEL_INFO(dev)->gen < 4)
+ return -EINVAL;
+
+ if (INTEL_INFO(dev)->gen > 5 &&
+ mode == I915_EXEC_CONSTANTS_REL_SURFACE)
+ return -EINVAL;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
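+ /* INSTPM is a masked register: the high 16 bits select
+ * which of the low bits are actually written.
+ */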
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
+ intel_ring_emit(ring, INSTPM);
+ intel_ring_emit(ring,
+ I915_EXEC_CONSTANTS_MASK << 16 | mode);
+ intel_ring_advance(ring);
+
+ dev_priv->relative_constants_mode = mode;
+ }
+ break;
+ default:
+ DRM_ERROR("execbuf with unknown constants: %d\n", mode);
+ return -EINVAL;
+ }
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ if (args->num_cliprects != 0) {
+ if (ring != &dev_priv->ring[RCS]) {
+ DRM_ERROR("clip rectangles are only valid with the render ring\n");
+ return -EINVAL;
+ }
+
+ cliprects = kmalloc(args->num_cliprects * sizeof(*cliprects),
+ GFP_KERNEL);
+ if (cliprects == NULL) {
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ if (copy_from_user(cliprects,
+ (struct drm_clip_rect __user *)(uintptr_t)
+ args->cliprects_ptr,
+ sizeof(*cliprects)*args->num_cliprects)) {
+ ret = -EFAULT;
+ goto pre_mutex_err;
+ }
+ }
+
+ ret = i915_mutex_lock_interruptible(dev);
+ if (ret)
+ goto pre_mutex_err;
+
+ if (dev_priv->mm.suspended) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -EBUSY;
+ goto pre_mutex_err;
+ }
+
+ eb = eb_create(args->buffer_count);
+ if (eb == NULL) {
+ mutex_unlock(&dev->struct_mutex);
+ ret = -ENOMEM;
+ goto pre_mutex_err;
+ }
+
+ /* Look up object handles */
+ INIT_LIST_HEAD(&objects);
+ for (i = 0; i < args->buffer_count; i++) {
+ struct drm_i915_gem_object *obj;
+
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file,
+ exec[i].handle));
+ if (obj == NULL) {
+ DRM_ERROR("Invalid object handle %d at index %d\n",
+ exec[i].handle, i);
+ /* prevent error path from reading uninitialized data */
+ ret = -ENOENT;
+ goto err;
+ }
+
+ if (!list_empty(&obj->exec_list)) {
+ DRM_ERROR("Object %p [handle %d, index %d] appears more than once in object list\n",
+ obj, exec[i].handle, i);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ list_add_tail(&obj->exec_list, &objects);
+ obj->exec_handle = exec[i].handle;
+ eb_add_object(eb, obj);
+ }
+
+ /* Move the objects en masse into the GTT, evicting if necessary. */
+ ret = i915_gem_execbuffer_reserve(ring, file, &objects, exec);
+ if (ret)
+ goto err;
+
+ /* The objects are in their final locations, apply the relocations. */
+ ret = i915_gem_execbuffer_relocate(dev, eb, &objects, exec);
+ if (ret) {
+ if (ret == -EFAULT) {
+ ret = i915_gem_execbuffer_relocate_slow(dev, file, ring,
+ &objects, eb,
+ exec,
+ args->buffer_count);
+ BUG_ON(!mutex_is_locked(&dev->struct_mutex));
+ }
+ if (ret)
+ goto err;
+ }
+
+ /* Set the pending read domains for the batch buffer to COMMAND */
+ batch_obj = list_entry(objects.prev,
+ struct drm_i915_gem_object,
+ exec_list);
+ if (batch_obj->base.pending_write_domain) {
+ DRM_ERROR("Attempting to use self-modifying batch buffer\n");
+ ret = -EINVAL;
+ goto err;
+ }
+ batch_obj->base.pending_read_domains |= I915_GEM_DOMAIN_COMMAND;
+
+ ret = i915_gem_execbuffer_move_to_gpu(ring, &objects);
+ if (ret)
+ goto err;
+
+ ret = i915_gem_execbuffer_wait_for_flips(ring, &objects);
+ if (ret)
+ goto err;
+
+ seqno = i915_gem_next_request_seqno(dev, ring);
+ for (i = 0; i < I915_NUM_RINGS-1; i++) {
+ if (seqno < ring->sync_seqno[i]) {
+ /* The GPU cannot handle its semaphore value wrapping,
+ * so every billion or so execbuffers, we need to stall
+ * the GPU in order to reset the counters.
+ */
+ ret = i915_gpu_idle(dev);
+ if (ret)
+ goto err;
+
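+ /* After a full GPU idle, every ring's semaphore bookkeeping
+ * is expected to have been reset, as the BUG_ON below asserts.
+ */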
+ BUG_ON(ring->sync_seqno[i]);
+ }
+ }
+
+ exec_start = batch_obj->gtt_offset + args->batch_start_offset;
+ exec_len = args->batch_len;
+ if (cliprects) {
+ for (i = 0; i < args->num_cliprects; i++) {
+ ret = i915_emit_box(dev, &cliprects[i],
+ args->DR1, args->DR4);
+ if (ret)
+ goto err;
+
+ ret = ring->dispatch_execbuffer(ring,
+ exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+ } else {
+ ret = ring->dispatch_execbuffer(ring, exec_start, exec_len);
+ if (ret)
+ goto err;
+ }
+
+ i915_gem_execbuffer_move_to_active(&objects, ring, seqno);
+ i915_gem_execbuffer_retire_commands(dev, file, ring);
+
+err:
+ eb_destroy(eb);
+ while (!list_empty(&objects)) {
+ struct drm_i915_gem_object *obj;
+
+ obj = list_first_entry(&objects,
+ struct drm_i915_gem_object,
+ exec_list);
+ list_del_init(&obj->exec_list);
+ drm_gem_object_unreference(&obj->base);
+ }
+
+ mutex_unlock(&dev->struct_mutex);
+
+pre_mutex_err:
+ kfree(cliprects);
+ return ret;
+}
+
+/*
+ * Legacy execbuffer just creates an exec2 list from the original exec object
+ * list array and passes it to the real function.
+ */
+int
+i915_gem_execbuffer(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer *args = data;
+ struct drm_i915_gem_execbuffer2 exec2;
+ struct drm_i915_gem_exec_object *exec_list = NULL;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret, i;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ /* Copy in the exec list from userland */
+ exec_list = drm_malloc_ab(sizeof(*exec_list), args->buffer_count);
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec_list == NULL || exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ for (i = 0; i < args->buffer_count; i++) {
+ exec2_list[i].handle = exec_list[i].handle;
+ exec2_list[i].relocation_count = exec_list[i].relocation_count;
+ exec2_list[i].relocs_ptr = exec_list[i].relocs_ptr;
+ exec2_list[i].alignment = exec_list[i].alignment;
+ exec2_list[i].offset = exec_list[i].offset;
+ if (INTEL_INFO(dev)->gen < 4)
+ exec2_list[i].flags = EXEC_OBJECT_NEEDS_FENCE;
+ else
+ exec2_list[i].flags = 0;
+ }
+
+ exec2.buffers_ptr = args->buffers_ptr;
+ exec2.buffer_count = args->buffer_count;
+ exec2.batch_start_offset = args->batch_start_offset;
+ exec2.batch_len = args->batch_len;
+ exec2.DR1 = args->DR1;
+ exec2.DR4 = args->DR4;
+ exec2.num_cliprects = args->num_cliprects;
+ exec2.cliprects_ptr = args->cliprects_ptr;
+ exec2.flags = I915_EXEC_RENDER;
+
+ ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ for (i = 0; i < args->buffer_count; i++)
+ exec_list[i].offset = exec2_list[i].offset;
+ /* ... and back out to userspace */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec_list,
+ sizeof(*exec_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec_list);
+ drm_free_large(exec2_list);
+ return ret;
+}
+
+int
+i915_gem_execbuffer2(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_i915_gem_execbuffer2 *args = data;
+ struct drm_i915_gem_exec_object2 *exec2_list = NULL;
+ int ret;
+
+#if WATCH_EXEC
+ DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n",
+ (int) args->buffers_ptr, args->buffer_count, args->batch_len);
+#endif
+
+ if (args->buffer_count < 1) {
+ DRM_ERROR("execbuf2 with %d buffers\n", args->buffer_count);
+ return -EINVAL;
+ }
+
+ exec2_list = drm_malloc_ab(sizeof(*exec2_list), args->buffer_count);
+ if (exec2_list == NULL) {
+ DRM_ERROR("Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
+ return -ENOMEM;
+ }
+ ret = copy_from_user(exec2_list,
+ (struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret != 0) {
+ DRM_ERROR("copy %d exec entries failed %d\n",
+ args->buffer_count, ret);
+ drm_free_large(exec2_list);
+ return -EFAULT;
+ }
+
+ ret = i915_gem_do_execbuffer(dev, data, file, args, exec2_list);
+ if (!ret) {
+ /* Copy the new buffer offsets back to the user's exec list. */
+ ret = copy_to_user((struct drm_i915_relocation_entry __user *)
+ (uintptr_t) args->buffers_ptr,
+ exec2_list,
+ sizeof(*exec2_list) * args->buffer_count);
+ if (ret) {
+ ret = -EFAULT;
+ DRM_ERROR("failed to copy %d exec entries "
+ "back to user (%d)\n",
+ args->buffer_count, ret);
+ }
+ }
+
+ drm_free_large(exec2_list);
+ return ret;
+}
+
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
new file mode 100644
index 00000000000..86673e77d7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright © 2010 Daniel Vetter
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ */
+
+#include "drmP.h"
+#include "drm.h"
+#include "i915_drm.h"
+#include "i915_drv.h"
+#include "i915_trace.h"
+#include "intel_drv.h"
+
+void i915_gem_restore_gtt_mappings(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ struct drm_i915_gem_object *obj;
+
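+ /* The GTT contents were lost across suspend: flush any cached
+ * data and rewrite the entries for every bound object.
+ */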
+ list_for_each_entry(obj, &dev_priv->mm.gtt_list, gtt_list) {
+ i915_gem_clflush_object(obj);
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ BUG_ON(!obj->sg_list);
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start
+ >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ obj->agp_type);
+ }
+
+ intel_gtt_chipset_flush();
+}
+
+int i915_gem_gtt_bind_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret;
+
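+ /* With an IOMMU (DMAR) active, the pages must first be mapped
+ * into a scatter/gather list for DMA before the GTT entries
+ * can be written; otherwise they can be inserted directly.
+ */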
+ if (dev_priv->mm.gtt->needs_dmar) {
+ ret = intel_gtt_map_memory(obj->pages,
+ obj->base.size >> PAGE_SHIFT,
+ &obj->sg_list,
+ &obj->num_sg);
+ if (ret != 0)
+ return ret;
+
+ intel_gtt_insert_sg_entries(obj->sg_list,
+ obj->num_sg,
+ obj->gtt_space->start >> PAGE_SHIFT,
+ obj->agp_type);
+ } else
+ intel_gtt_insert_pages(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT,
+ obj->pages,
+ obj->agp_type);
+
+ return 0;
+}
+
+void i915_gem_gtt_unbind_object(struct drm_i915_gem_object *obj)
+{
+ struct drm_device *dev = obj->base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->mm.gtt->needs_dmar) {
+ intel_gtt_unmap_memory(obj->sg_list, obj->num_sg);
+ obj->sg_list = NULL;
+ obj->num_sg = 0;
+ }
+
+ intel_gtt_clear_range(obj->gtt_space->start >> PAGE_SHIFT,
+ obj->base.size >> PAGE_SHIFT);
+}
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index af352de70be..22a32b9932c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -181,7 +181,7 @@ i915_gem_detect_bit_6_swizzle(struct drm_device *dev)
}
/* Check pitch constraints for all chips & tiling formats */
-bool
+static bool
i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
{
int tile_width;
@@ -232,32 +232,44 @@ i915_tiling_ok(struct drm_device *dev, int stride, int size, int tiling_mode)
return true;
}
-bool
-i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
+/* Is the current GTT allocation valid for the change in tiling? */
+static bool
+i915_gem_object_fence_ok(struct drm_i915_gem_object *obj, int tiling_mode)
{
- struct drm_device *dev = obj->dev;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
-
- if (obj_priv->gtt_space == NULL)
- return true;
+ u32 size;
if (tiling_mode == I915_TILING_NONE)
return true;
- if (INTEL_INFO(dev)->gen >= 4)
+ if (INTEL_INFO(obj->base.dev)->gen >= 4)
return true;
- if (obj_priv->gtt_offset & (obj->size - 1))
- return false;
-
- if (IS_GEN3(dev)) {
- if (obj_priv->gtt_offset & ~I915_FENCE_START_MASK)
+ if (INTEL_INFO(obj->base.dev)->gen == 3) {
+ if (obj->gtt_offset & ~I915_FENCE_START_MASK)
return false;
} else {
- if (obj_priv->gtt_offset & ~I830_FENCE_START_MASK)
+ if (obj->gtt_offset & ~I830_FENCE_START_MASK)
return false;
}
+ /*
+ * Previous chips need to be aligned to the size of the smallest
+ * fence register that can contain the object.
+ */
+ if (INTEL_INFO(obj->base.dev)->gen == 3)
+ size = 1024*1024;
+ else
+ size = 512*1024;
+
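+ /* e.g. a 600KB object on gen3 occupies a 1MB fence region
+ * and must be bound at a 1MB-aligned offset.
+ */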
+ while (size < obj->base.size)
+ size <<= 1;
+
+ if (obj->gtt_space->size != size)
+ return false;
+
+ if (obj->gtt_offset & (size - 1))
+ return false;
+
return true;
}
@@ -267,30 +279,29 @@ i915_gem_object_fence_offset_ok(struct drm_gem_object *obj, int tiling_mode)
*/
int
i915_gem_set_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_set_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
ret = i915_gem_check_is_wedged(dev);
if (ret)
return ret;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
- obj_priv = to_intel_bo(obj);
- if (!i915_tiling_ok(dev, args->stride, obj->size, args->tiling_mode)) {
- drm_gem_object_unreference_unlocked(obj);
+ if (!i915_tiling_ok(dev,
+ args->stride, obj->base.size, args->tiling_mode)) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EINVAL;
}
- if (obj_priv->pin_count) {
- drm_gem_object_unreference_unlocked(obj);
+ if (obj->pin_count) {
+ drm_gem_object_unreference_unlocked(&obj->base);
return -EBUSY;
}
@@ -324,34 +335,28 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
}
mutex_lock(&dev->struct_mutex);
- if (args->tiling_mode != obj_priv->tiling_mode ||
- args->stride != obj_priv->stride) {
+ if (args->tiling_mode != obj->tiling_mode ||
+ args->stride != obj->stride) {
/* We need to rebind the object if its current allocation
* no longer meets the alignment restrictions for its new
* tiling mode. Otherwise we can just leave it alone, but
* need to ensure that any fence register is cleared.
*/
- if (!i915_gem_object_fence_offset_ok(obj, args->tiling_mode))
- ret = i915_gem_object_unbind(obj);
- else if (obj_priv->fence_reg != I915_FENCE_REG_NONE)
- ret = i915_gem_object_put_fence_reg(obj, true);
- else
- i915_gem_release_mmap(obj);
+ i915_gem_release_mmap(obj);
- if (ret != 0) {
- args->tiling_mode = obj_priv->tiling_mode;
- args->stride = obj_priv->stride;
- goto err;
- }
+ obj->map_and_fenceable =
+ obj->gtt_space == NULL ||
+ (obj->gtt_offset + obj->base.size <= dev_priv->mm.gtt_mappable_end &&
+ i915_gem_object_fence_ok(obj, args->tiling_mode));
- obj_priv->tiling_mode = args->tiling_mode;
- obj_priv->stride = args->stride;
+ obj->tiling_changed = true;
+ obj->tiling_mode = args->tiling_mode;
+ obj->stride = args->stride;
}
-err:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
- return ret;
+ return 0;
}
/**
@@ -359,22 +364,20 @@ err:
*/
int
i915_gem_get_tiling(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_i915_gem_get_tiling *args = data;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
- obj = drm_gem_object_lookup(dev, file_priv, args->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
if (obj == NULL)
return -ENOENT;
- obj_priv = to_intel_bo(obj);
mutex_lock(&dev->struct_mutex);
- args->tiling_mode = obj_priv->tiling_mode;
- switch (obj_priv->tiling_mode) {
+ args->tiling_mode = obj->tiling_mode;
+ switch (obj->tiling_mode) {
case I915_TILING_X:
args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x;
break;
@@ -394,7 +397,7 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
args->swizzle_mode = I915_BIT_6_SWIZZLE_9_10;
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
return 0;
@@ -424,46 +427,44 @@ i915_gem_swizzle_page(struct page *page)
}
void
-i915_gem_object_do_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_do_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size >> PAGE_SHIFT;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
- if (obj_priv->bit_17 == NULL)
+ if (obj->bit_17 == NULL)
return;
for (i = 0; i < page_count; i++) {
- char new_bit_17 = page_to_phys(obj_priv->pages[i]) >> 17;
+ char new_bit_17 = page_to_phys(obj->pages[i]) >> 17;
if ((new_bit_17 & 0x1) !=
- (test_bit(i, obj_priv->bit_17) != 0)) {
- i915_gem_swizzle_page(obj_priv->pages[i]);
- set_page_dirty(obj_priv->pages[i]);
+ (test_bit(i, obj->bit_17) != 0)) {
+ i915_gem_swizzle_page(obj->pages[i]);
+ set_page_dirty(obj->pages[i]);
}
}
}
void
-i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
+i915_gem_object_save_bit_17_swizzle(struct drm_i915_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct drm_device *dev = obj->base.dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
- int page_count = obj->size >> PAGE_SHIFT;
+ int page_count = obj->base.size >> PAGE_SHIFT;
int i;
if (dev_priv->mm.bit_6_swizzle_x != I915_BIT_6_SWIZZLE_9_10_17)
return;
- if (obj_priv->bit_17 == NULL) {
- obj_priv->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
+ if (obj->bit_17 == NULL) {
+ obj->bit_17 = kmalloc(BITS_TO_LONGS(page_count) *
sizeof(long), GFP_KERNEL);
- if (obj_priv->bit_17 == NULL) {
+ if (obj->bit_17 == NULL) {
DRM_ERROR("Failed to allocate memory for bit 17 "
"record\n");
return;
@@ -471,9 +472,9 @@ i915_gem_object_save_bit_17_swizzle(struct drm_gem_object *obj)
}
for (i = 0; i < page_count; i++) {
- if (page_to_phys(obj_priv->pages[i]) & (1 << 17))
- __set_bit(i, obj_priv->bit_17);
+ if (page_to_phys(obj->pages[i]) & (1 << 17))
+ __set_bit(i, obj->bit_17);
else
- __clear_bit(i, obj_priv->bit_17);
+ __clear_bit(i, obj->bit_17);
}
}
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 729fd0c91d7..0dadc025b77 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -67,20 +67,20 @@
void
ironlake_enable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->gt_irq_mask_reg & mask) != 0) {
- dev_priv->gt_irq_mask_reg &= ~mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
+ if ((dev_priv->gt_irq_mask & mask) != 0) {
+ dev_priv->gt_irq_mask &= ~mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
}
void
ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->gt_irq_mask_reg & mask) != mask) {
- dev_priv->gt_irq_mask_reg |= mask;
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
- (void) I915_READ(GTIMR);
+ if ((dev_priv->gt_irq_mask & mask) != mask) {
+ dev_priv->gt_irq_mask |= mask;
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
+ POSTING_READ(GTIMR);
}
}
@@ -88,40 +88,40 @@ ironlake_disable_graphics_irq(drm_i915_private_t *dev_priv, u32 mask)
static void
ironlake_enable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
}
}
static inline void
ironlake_disable_display_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- (void) I915_READ(DEIMR);
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ POSTING_READ(DEIMR);
}
}
void
i915_enable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != 0) {
- dev_priv->irq_mask_reg &= ~mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ if ((dev_priv->irq_mask & mask) != 0) {
+ dev_priv->irq_mask &= ~mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
}
}
void
i915_disable_irq(drm_i915_private_t *dev_priv, u32 mask)
{
- if ((dev_priv->irq_mask_reg & mask) != mask) {
- dev_priv->irq_mask_reg |= mask;
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
- (void) I915_READ(IMR);
+ if ((dev_priv->irq_mask & mask) != mask) {
+ dev_priv->irq_mask |= mask;
+ I915_WRITE(IMR, dev_priv->irq_mask);
+ POSTING_READ(IMR);
}
}
@@ -144,7 +144,7 @@ i915_enable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
dev_priv->pipestat[pipe] |= mask;
/* Enable the interrupt, clear any pending status */
I915_WRITE(reg, dev_priv->pipestat[pipe] | (mask >> 16));
- (void) I915_READ(reg);
+ POSTING_READ(reg);
}
}
@@ -156,16 +156,19 @@ i915_disable_pipestat(drm_i915_private_t *dev_priv, int pipe, u32 mask)
dev_priv->pipestat[pipe] &= ~mask;
I915_WRITE(reg, dev_priv->pipestat[pipe]);
- (void) I915_READ(reg);
+ POSTING_READ(reg);
}
}
/**
* intel_enable_asle - enable ASLE interrupt for OpRegion
*/
-void intel_enable_asle (struct drm_device *dev)
+void intel_enable_asle(struct drm_device *dev)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
ironlake_enable_display_irq(dev_priv, DE_GSE);
@@ -176,6 +179,8 @@ void intel_enable_asle (struct drm_device *dev)
i915_enable_pipestat(dev_priv, 0,
PIPE_LEGACY_BLC_EVENT_ENABLE);
}
+
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
/**
@@ -243,6 +248,92 @@ u32 gm45_get_vblank_counter(struct drm_device *dev, int pipe)
return I915_READ(reg);
}
+int i915_get_crtc_scanoutpos(struct drm_device *dev, int pipe,
+ int *vpos, int *hpos)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u32 vbl = 0, position = 0;
+ int vbl_start, vbl_end, htotal, vtotal;
+ bool in_vbl = true;
+ int ret = 0;
+
+ if (!i915_pipe_enabled(dev, pipe)) {
+ DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
+ "pipe %d\n", pipe);
+ return 0;
+ }
+
+ /* Get vtotal. */
+ vtotal = 1 + ((I915_READ(VTOTAL(pipe)) >> 16) & 0x1fff);
+
+ if (INTEL_INFO(dev)->gen >= 4) {
+ /* No obvious pixelcount register. Only query vertical
+ * scanout position from Display scan line register.
+ */
+ position = I915_READ(PIPEDSL(pipe));
+
+ /* Decode into vertical scanout position. Don't have
+ * horizontal scanout position.
+ */
+ *vpos = position & 0x1fff;
+ *hpos = 0;
+ } else {
+ /* Have access to pixelcount since start of frame.
+ * We can split this into vertical and horizontal
+ * scanout position.
+ */
+ position = (I915_READ(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+
+ htotal = 1 + ((I915_READ(HTOTAL(pipe)) >> 16) & 0x1fff);
+ *vpos = position / htotal;
+ *hpos = position - (*vpos * htotal);
+ }
+
+ /* Query vblank area. */
+ vbl = I915_READ(VBLANK(pipe));
+
+ /* Test position against vblank region. */
+ vbl_start = vbl & 0x1fff;
+ vbl_end = (vbl >> 16) & 0x1fff;
+
+ if ((*vpos < vbl_start) || (*vpos > vbl_end))
+ in_vbl = false;
+
+ /* Inside "upper part" of vblank area? Apply corrective offset: */
+ if (in_vbl && (*vpos >= vbl_start))
+ *vpos = *vpos - vtotal;
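+ /* vpos is now negative while inside the vblank, counting
+ * up towards zero at the start of active scanout.
+ */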
+
+ /* Readouts valid? */
+ if (vbl > 0)
+ ret |= DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE;
+
+ /* In vblank? */
+ if (in_vbl)
+ ret |= DRM_SCANOUTPOS_INVBL;
+
+ return ret;
+}
+
+int i915_get_vblank_timestamp(struct drm_device *dev, int crtc,
+ int *max_error,
+ struct timeval *vblank_time,
+ unsigned flags)
+{
+ struct drm_crtc *drmcrtc;
+
+ if (crtc < 0 || crtc >= dev->num_crtcs) {
+ DRM_ERROR("Invalid crtc %d\n", crtc);
+ return -EINVAL;
+ }
+
+ /* Get drm_crtc to timestamp: */
+ drmcrtc = intel_get_crtc_for_pipe(dev, crtc);
+
+ /* Helper routine in DRM core does all the work: */
+ return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
+ vblank_time, flags, drmcrtc);
+}
+
/*
* Handle hotplug events outside the interrupt handler proper.
*/
@@ -297,8 +388,8 @@ static void notify_ring(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- u32 seqno = ring->get_seqno(dev, ring);
- ring->irq_gem_seqno = seqno;
+ u32 seqno = ring->get_seqno(ring);
+ ring->irq_seqno = seqno;
trace_i915_gem_request_complete(dev, seqno);
wake_up_all(&ring->irq_queue);
dev_priv->hangcheck_count = 0;
@@ -306,11 +397,49 @@ static void notify_ring(struct drm_device *dev,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
}
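+
+/* Step the render p-state up or down by one in response to the
+ * RPS up/down threshold interrupts (Sandybridge dynamic render
+ * p-state support).
+ */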
+static void gen6_pm_irq_handler(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
+ u8 new_delay = dev_priv->cur_delay;
+ u32 pm_iir;
+
+ pm_iir = I915_READ(GEN6_PMIIR);
+ if (!pm_iir)
+ return;
+
+ if (pm_iir & GEN6_PM_RP_UP_THRESHOLD) {
+ if (dev_priv->cur_delay != dev_priv->max_delay)
+ new_delay = dev_priv->cur_delay + 1;
+ if (new_delay > dev_priv->max_delay)
+ new_delay = dev_priv->max_delay;
+ } else if (pm_iir & (GEN6_PM_RP_DOWN_THRESHOLD | GEN6_PM_RP_DOWN_TIMEOUT)) {
+ if (dev_priv->cur_delay != dev_priv->min_delay)
+ new_delay = dev_priv->cur_delay - 1;
+ if (new_delay < dev_priv->min_delay) {
+ new_delay = dev_priv->min_delay;
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) |
+ ((new_delay << 16) & 0x3f0000));
+ } else {
+ /* Make sure we continue to get down interrupts
+ * until we hit the minimum frequency */
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ I915_READ(GEN6_RP_INTERRUPT_LIMITS) & ~0x3f0000);
+ }
+ }
+
+ gen6_set_rps(dev, new_delay);
+ dev_priv->cur_delay = new_delay;
+
+ I915_WRITE(GEN6_PMIIR, pm_iir);
+}
+
static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
int ret = IRQ_NONE;
- u32 de_iir, gt_iir, de_ier, pch_iir;
+ u32 de_iir, gt_iir, de_ier, pch_iir, pm_iir;
u32 hotplug_mask;
struct drm_i915_master_private *master_priv;
u32 bsd_usr_interrupt = GT_BSD_USER_INTERRUPT;
@@ -321,13 +450,15 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
/* disable master interrupt before clearing iir */
de_ier = I915_READ(DEIER);
I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
- (void)I915_READ(DEIER);
+ POSTING_READ(DEIER);
de_iir = I915_READ(DEIIR);
gt_iir = I915_READ(GTIIR);
pch_iir = I915_READ(SDEIIR);
+ pm_iir = I915_READ(GEN6_PMIIR);
- if (de_iir == 0 && gt_iir == 0 && pch_iir == 0)
+ if (de_iir == 0 && gt_iir == 0 && pch_iir == 0 &&
+ (!IS_GEN6(dev) || pm_iir == 0))
goto done;
if (HAS_PCH_CPT(dev))
@@ -344,12 +475,12 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
READ_BREADCRUMB(dev_priv);
}
- if (gt_iir & GT_PIPE_NOTIFY)
- notify_ring(dev, &dev_priv->render_ring);
+ if (gt_iir & (GT_USER_INTERRUPT | GT_PIPE_NOTIFY))
+ notify_ring(dev, &dev_priv->ring[RCS]);
if (gt_iir & bsd_usr_interrupt)
- notify_ring(dev, &dev_priv->bsd_ring);
- if (HAS_BLT(dev) && gt_iir & GT_BLT_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->blt_ring);
+ notify_ring(dev, &dev_priv->ring[VCS]);
+ if (gt_iir & GT_BLT_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[BCS]);
if (de_iir & DE_GSE)
intel_opregion_gse_intr(dev);
@@ -379,6 +510,9 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
i915_handle_rps_change(dev);
}
+ if (IS_GEN6(dev))
+ gen6_pm_irq_handler(dev);
+
/* should clear PCH hotplug event before clear CPU irq */
I915_WRITE(SDEIIR, pch_iir);
I915_WRITE(GTIIR, gt_iir);
@@ -386,7 +520,7 @@ static irqreturn_t ironlake_irq_handler(struct drm_device *dev)
done:
I915_WRITE(DEIER, de_ier);
- (void)I915_READ(DEIER);
+ POSTING_READ(DEIER);
return ret;
}
@@ -423,28 +557,23 @@ static void i915_error_work_func(struct work_struct *work)
#ifdef CONFIG_DEBUG_FS
static struct drm_i915_error_object *
i915_error_object_create(struct drm_device *dev,
- struct drm_gem_object *src)
+ struct drm_i915_gem_object *src)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_error_object *dst;
- struct drm_i915_gem_object *src_priv;
int page, page_count;
u32 reloc_offset;
- if (src == NULL)
- return NULL;
-
- src_priv = to_intel_bo(src);
- if (src_priv->pages == NULL)
+ if (src == NULL || src->pages == NULL)
return NULL;
- page_count = src->size / PAGE_SIZE;
+ page_count = src->base.size / PAGE_SIZE;
dst = kmalloc(sizeof(*dst) + page_count * sizeof (u32 *), GFP_ATOMIC);
if (dst == NULL)
return NULL;
- reloc_offset = src_priv->gtt_offset;
+ reloc_offset = src->gtt_offset;
for (page = 0; page < page_count; page++) {
unsigned long flags;
void __iomem *s;
@@ -466,7 +595,7 @@ i915_error_object_create(struct drm_device *dev,
reloc_offset += PAGE_SIZE;
}
dst->page_count = page_count;
- dst->gtt_offset = src_priv->gtt_offset;
+ dst->gtt_offset = src->gtt_offset;
return dst;
@@ -520,36 +649,96 @@ i915_get_bbaddr(struct drm_device *dev, u32 *ring)
}
static u32
-i915_ringbuffer_last_batch(struct drm_device *dev)
+i915_ringbuffer_last_batch(struct drm_device *dev,
+ struct intel_ring_buffer *ring)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 head, bbaddr;
- u32 *ring;
+ u32 *val;
/* Locate the current position in the ringbuffer and walk back
* to find the most recently dispatched batch buffer.
*/
- bbaddr = 0;
- head = I915_READ(PRB0_HEAD) & HEAD_ADDR;
- ring = (u32 *)(dev_priv->render_ring.virtual_start + head);
+ head = I915_READ_HEAD(ring) & HEAD_ADDR;
- while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
- bbaddr = i915_get_bbaddr(dev, ring);
+ val = (u32 *)(ring->virtual_start + head);
+ while (--val >= (u32 *)ring->virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, val);
if (bbaddr)
- break;
+ return bbaddr;
}
- if (bbaddr == 0) {
- ring = (u32 *)(dev_priv->render_ring.virtual_start
- + dev_priv->render_ring.size);
- while (--ring >= (u32 *)dev_priv->render_ring.virtual_start) {
- bbaddr = i915_get_bbaddr(dev, ring);
- if (bbaddr)
- break;
- }
+ val = (u32 *)(ring->virtual_start + ring->size);
+ while (--val >= (u32 *)ring->virtual_start) {
+ bbaddr = i915_get_bbaddr(dev, val);
+ if (bbaddr)
+ return bbaddr;
}
- return bbaddr;
+ return 0;
+}
+
+static u32 capture_bo_list(struct drm_i915_error_buffer *err,
+ int count,
+ struct list_head *head)
+{
+ struct drm_i915_gem_object *obj;
+ int i = 0;
+
+ list_for_each_entry(obj, head, mm_list) {
+ err->size = obj->base.size;
+ err->name = obj->base.name;
+ err->seqno = obj->last_rendering_seqno;
+ err->gtt_offset = obj->gtt_offset;
+ err->read_domains = obj->base.read_domains;
+ err->write_domain = obj->base.write_domain;
+ err->fence_reg = obj->fence_reg;
+ err->pinned = 0;
+ if (obj->pin_count > 0)
+ err->pinned = 1;
+ if (obj->user_pin_count > 0)
+ err->pinned = -1;
+ err->tiling = obj->tiling_mode;
+ err->dirty = obj->dirty;
+ err->purgeable = obj->madv != I915_MADV_WILLNEED;
+ err->ring = obj->ring ? obj->ring->id : 0;
+
+ if (++i == count)
+ break;
+
+ err++;
+ }
+
+ return i;
+}
+
+static void i915_gem_record_fences(struct drm_device *dev,
+ struct drm_i915_error_state *error)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ error->fence[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ error->fence[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
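+ /* fall through */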
+ case 2:
+ for (i = 0; i < 8; i++)
+ error->fence[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+
+ }
}
/**
@@ -564,9 +753,9 @@ i915_ringbuffer_last_batch(struct drm_device *dev)
static void i915_capture_error_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_i915_error_state *error;
- struct drm_gem_object *batchbuffer[2];
+ struct drm_i915_gem_object *batchbuffer[2];
unsigned long flags;
u32 bbaddr;
int count;
@@ -585,20 +774,33 @@ static void i915_capture_error_state(struct drm_device *dev)
DRM_DEBUG_DRIVER("generating error event\n");
- error->seqno =
- dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring);
+ error->seqno = dev_priv->ring[RCS].get_seqno(&dev_priv->ring[RCS]);
error->eir = I915_READ(EIR);
error->pgtbl_er = I915_READ(PGTBL_ER);
error->pipeastat = I915_READ(PIPEASTAT);
error->pipebstat = I915_READ(PIPEBSTAT);
error->instpm = I915_READ(INSTPM);
- if (INTEL_INFO(dev)->gen < 4) {
- error->ipeir = I915_READ(IPEIR);
- error->ipehr = I915_READ(IPEHR);
- error->instdone = I915_READ(INSTDONE);
- error->acthd = I915_READ(ACTHD);
- error->bbaddr = 0;
- } else {
+ error->error = 0;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ error->error = I915_READ(ERROR_GEN6);
+
+ error->bcs_acthd = I915_READ(BCS_ACTHD);
+ error->bcs_ipehr = I915_READ(BCS_IPEHR);
+ error->bcs_ipeir = I915_READ(BCS_IPEIR);
+ error->bcs_instdone = I915_READ(BCS_INSTDONE);
+ error->bcs_seqno = 0;
+ if (dev_priv->ring[BCS].get_seqno)
+ error->bcs_seqno = dev_priv->ring[BCS].get_seqno(&dev_priv->ring[BCS]);
+
+ error->vcs_acthd = I915_READ(VCS_ACTHD);
+ error->vcs_ipehr = I915_READ(VCS_IPEHR);
+ error->vcs_ipeir = I915_READ(VCS_IPEIR);
+ error->vcs_instdone = I915_READ(VCS_INSTDONE);
+ error->vcs_seqno = 0;
+ if (dev_priv->ring[VCS].get_seqno)
+ error->vcs_seqno = dev_priv->ring[VCS].get_seqno(&dev_priv->ring[VCS]);
+ }
+ if (INTEL_INFO(dev)->gen >= 4) {
error->ipeir = I915_READ(IPEIR_I965);
error->ipehr = I915_READ(IPEHR_I965);
error->instdone = I915_READ(INSTDONE_I965);
@@ -606,42 +808,45 @@ static void i915_capture_error_state(struct drm_device *dev)
error->instdone1 = I915_READ(INSTDONE1);
error->acthd = I915_READ(ACTHD_I965);
error->bbaddr = I915_READ64(BB_ADDR);
+ } else {
+ error->ipeir = I915_READ(IPEIR);
+ error->ipehr = I915_READ(IPEHR);
+ error->instdone = I915_READ(INSTDONE);
+ error->acthd = I915_READ(ACTHD);
+ error->bbaddr = 0;
}
+ i915_gem_record_fences(dev, error);
- bbaddr = i915_ringbuffer_last_batch(dev);
+ bbaddr = i915_ringbuffer_last_batch(dev, &dev_priv->ring[RCS]);
/* Grab the current batchbuffer, most likely to have crashed. */
batchbuffer[0] = NULL;
batchbuffer[1] = NULL;
count = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.active_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
count++;
}
/* Scan the other lists for completeness for those bizarre errors. */
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.flushing_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
@@ -649,17 +854,15 @@ static void i915_capture_error_state(struct drm_device *dev)
}
}
if (batchbuffer[0] == NULL || batchbuffer[1] == NULL) {
- list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
+ list_for_each_entry(obj, &dev_priv->mm.inactive_list, mm_list) {
if (batchbuffer[0] == NULL &&
- bbaddr >= obj_priv->gtt_offset &&
- bbaddr < obj_priv->gtt_offset + obj->size)
+ bbaddr >= obj->gtt_offset &&
+ bbaddr < obj->gtt_offset + obj->base.size)
batchbuffer[0] = obj;
if (batchbuffer[1] == NULL &&
- error->acthd >= obj_priv->gtt_offset &&
- error->acthd < obj_priv->gtt_offset + obj->size)
+ error->acthd >= obj->gtt_offset &&
+ error->acthd < obj->gtt_offset + obj->base.size)
batchbuffer[1] = obj;
if (batchbuffer[0] && batchbuffer[1])
@@ -678,46 +881,41 @@ static void i915_capture_error_state(struct drm_device *dev)
/* Record the ringbuffer */
error->ringbuffer = i915_error_object_create(dev,
- dev_priv->render_ring.gem_object);
+ dev_priv->ring[RCS].obj);
- /* Record buffers on the active list. */
+ /* Record buffers on the active and pinned lists. */
error->active_bo = NULL;
- error->active_bo_count = 0;
+ error->pinned_bo = NULL;
- if (count)
+ error->active_bo_count = count;
+ list_for_each_entry(obj, &dev_priv->mm.pinned_list, mm_list)
+ count++;
+ error->pinned_bo_count = count - error->active_bo_count;
+
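+ /* A single allocation holds both lists: the pinned entries
+ * follow directly after the active ones.
+ */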
+ if (count) {
error->active_bo = kmalloc(sizeof(*error->active_bo)*count,
GFP_ATOMIC);
-
- if (error->active_bo) {
- int i = 0;
- list_for_each_entry(obj_priv, &dev_priv->mm.active_list, mm_list) {
- struct drm_gem_object *obj = &obj_priv->base;
-
- error->active_bo[i].size = obj->size;
- error->active_bo[i].name = obj->name;
- error->active_bo[i].seqno = obj_priv->last_rendering_seqno;
- error->active_bo[i].gtt_offset = obj_priv->gtt_offset;
- error->active_bo[i].read_domains = obj->read_domains;
- error->active_bo[i].write_domain = obj->write_domain;
- error->active_bo[i].fence_reg = obj_priv->fence_reg;
- error->active_bo[i].pinned = 0;
- if (obj_priv->pin_count > 0)
- error->active_bo[i].pinned = 1;
- if (obj_priv->user_pin_count > 0)
- error->active_bo[i].pinned = -1;
- error->active_bo[i].tiling = obj_priv->tiling_mode;
- error->active_bo[i].dirty = obj_priv->dirty;
- error->active_bo[i].purgeable = obj_priv->madv != I915_MADV_WILLNEED;
-
- if (++i == count)
- break;
- }
- error->active_bo_count = i;
+ if (error->active_bo)
+ error->pinned_bo =
+ error->active_bo + error->active_bo_count;
}
+ if (error->active_bo)
+ error->active_bo_count =
+ capture_bo_list(error->active_bo,
+ error->active_bo_count,
+ &dev_priv->mm.active_list);
+
+ if (error->pinned_bo)
+ error->pinned_bo_count =
+ capture_bo_list(error->pinned_bo,
+ error->pinned_bo_count,
+ &dev_priv->mm.pinned_list);
+
do_gettimeofday(&error->time);
error->overlay = intel_overlay_capture_error_state(dev);
+ error->display = intel_display_capture_error_state(dev);
spin_lock_irqsave(&dev_priv->error_lock, flags);
if (dev_priv->first_error == NULL) {
@@ -775,7 +973,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
if (eir & GM45_ERROR_PAGE_TABLE) {
u32 pgtbl_err = I915_READ(PGTBL_ER);
@@ -783,7 +981,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
@@ -794,7 +992,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " PGTBL_ER: 0x%08x\n",
pgtbl_err);
I915_WRITE(PGTBL_ER, pgtbl_err);
- (void)I915_READ(PGTBL_ER);
+ POSTING_READ(PGTBL_ER);
}
}
@@ -825,7 +1023,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD));
I915_WRITE(IPEIR, ipeir);
- (void)I915_READ(IPEIR);
+ POSTING_READ(IPEIR);
} else {
u32 ipeir = I915_READ(IPEIR_I965);
@@ -842,12 +1040,12 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
printk(KERN_ERR " ACTHD: 0x%08x\n",
I915_READ(ACTHD_I965));
I915_WRITE(IPEIR_I965, ipeir);
- (void)I915_READ(IPEIR_I965);
+ POSTING_READ(IPEIR_I965);
}
}
I915_WRITE(EIR, eir);
- (void)I915_READ(EIR);
+ POSTING_READ(EIR);
eir = I915_READ(EIR);
if (eir) {
/*
@@ -870,7 +1068,7 @@ static void i915_report_and_clear_eir(struct drm_device *dev)
* so userspace knows something bad happened (should trigger collection
* of a ring dump etc.).
*/
-static void i915_handle_error(struct drm_device *dev, bool wedged)
+void i915_handle_error(struct drm_device *dev, bool wedged)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -884,11 +1082,11 @@ static void i915_handle_error(struct drm_device *dev, bool wedged)
/*
* Wakeup waiting processes so they don't hang
*/
- wake_up_all(&dev_priv->render_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
- wake_up_all(&dev_priv->bsd_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
- wake_up_all(&dev_priv->blt_ring.irq_queue);
+ wake_up_all(&dev_priv->ring[BCS].irq_queue);
}
queue_work(dev_priv->wq, &dev_priv->error_work);
@@ -899,7 +1097,7 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = dev_priv->pipe_to_crtc_mapping[pipe];
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct intel_unpin_work *work;
unsigned long flags;
bool stall_detected;
@@ -918,13 +1116,13 @@ static void i915_pageflip_stall_check(struct drm_device *dev, int pipe)
}
/* Potential stall - if we see that the flip has happened, assume a missed interrupt */
- obj_priv = to_intel_bo(work->pending_flip_obj);
+ obj = work->pending_flip_obj;
if (INTEL_INFO(dev)->gen >= 4) {
int dspsurf = intel_crtc->plane == 0 ? DSPASURF : DSPBSURF;
- stall_detected = I915_READ(dspsurf) == obj_priv->gtt_offset;
+ stall_detected = I915_READ(dspsurf) == obj->gtt_offset;
} else {
int dspaddr = intel_crtc->plane == 0 ? DSPAADDR : DSPBADDR;
- stall_detected = I915_READ(dspaddr) == (obj_priv->gtt_offset +
+ stall_detected = I915_READ(dspaddr) == (obj->gtt_offset +
crtc->y * crtc->fb->pitch +
crtc->x * crtc->fb->bits_per_pixel/8);
}
@@ -970,7 +1168,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
* It doesn't set the bit in iir again, but it still produces
* interrupts (for non-MSI).
*/
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
pipea_stats = I915_READ(PIPEASTAT);
pipeb_stats = I915_READ(PIPEBSTAT);
@@ -993,7 +1191,7 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
I915_WRITE(PIPEBSTAT, pipeb_stats);
irq_received = 1;
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
if (!irq_received)
break;
@@ -1026,9 +1224,9 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS)
}
if (iir & I915_USER_INTERRUPT)
- notify_ring(dev, &dev_priv->render_ring);
- if (HAS_BSD(dev) && (iir & I915_BSD_USER_INTERRUPT))
- notify_ring(dev, &dev_priv->bsd_ring);
+ notify_ring(dev, &dev_priv->ring[RCS]);
+ if (iir & I915_BSD_USER_INTERRUPT)
+ notify_ring(dev, &dev_priv->ring[VCS]);
if (iir & I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT) {
intel_prepare_page_flip(dev, 0);
@@ -1101,12 +1299,13 @@ static int i915_emit_irq(struct drm_device * dev)
if (master_priv->sarea_priv)
master_priv->sarea_priv->last_enqueue = dev_priv->counter;
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(dev_priv->counter);
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
+ if (BEGIN_LP_RING(4) == 0) {
+ OUT_RING(MI_STORE_DWORD_INDEX);
+ OUT_RING(I915_BREADCRUMB_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ OUT_RING(dev_priv->counter);
+ OUT_RING(MI_USER_INTERRUPT);
+ ADVANCE_LP_RING();
+ }
return dev_priv->counter;
}
@@ -1114,12 +1313,11 @@ static int i915_emit_irq(struct drm_device * dev)
void i915_trace_irq_get(struct drm_device *dev, u32 seqno)
{
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
- if (dev_priv->trace_irq_seqno == 0)
- render_ring->user_irq_get(dev, render_ring);
-
- dev_priv->trace_irq_seqno = seqno;
+ if (dev_priv->trace_irq_seqno == 0 &&
+ ring->irq_get(ring))
+ dev_priv->trace_irq_seqno = seqno;
}
static int i915_wait_irq(struct drm_device * dev, int irq_nr)
@@ -1127,7 +1325,7 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
int ret = 0;
- struct intel_ring_buffer *render_ring = &dev_priv->render_ring;
+ struct intel_ring_buffer *ring = LP_RING(dev_priv);
DRM_DEBUG_DRIVER("irq_nr=%d breadcrumb=%d\n", irq_nr,
READ_BREADCRUMB(dev_priv));
@@ -1141,10 +1339,12 @@ static int i915_wait_irq(struct drm_device * dev, int irq_nr)
if (master_priv->sarea_priv)
master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
- render_ring->user_irq_get(dev, render_ring);
- DRM_WAIT_ON(ret, dev_priv->render_ring.irq_queue, 3 * DRM_HZ,
- READ_BREADCRUMB(dev_priv) >= irq_nr);
- render_ring->user_irq_put(dev, render_ring);
+ ret = -ENODEV;
+ if (ring->irq_get(ring)) {
+ DRM_WAIT_ON(ret, ring->irq_queue, 3 * DRM_HZ,
+ READ_BREADCRUMB(dev_priv) >= irq_nr);
+ ring->irq_put(ring);
+ }
if (ret == -EBUSY) {
DRM_ERROR("EBUSY -- rec: %d emitted: %d\n",
@@ -1163,7 +1363,7 @@ int i915_irq_emit(struct drm_device *dev, void *data,
drm_i915_irq_emit_t *emit = data;
int result;
- if (!dev_priv || !dev_priv->render_ring.virtual_start) {
+ if (!dev_priv || !LP_RING(dev_priv)->virtual_start) {
DRM_ERROR("called with no initialization\n");
return -EINVAL;
}
@@ -1209,9 +1409,9 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
if (!i915_pipe_enabled(dev, pipe))
return -EINVAL;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
+ ironlake_enable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else if (INTEL_INFO(dev)->gen >= 4)
i915_enable_pipestat(dev_priv, pipe,
@@ -1219,7 +1419,7 @@ int i915_enable_vblank(struct drm_device *dev, int pipe)
else
i915_enable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return 0;
}
@@ -1231,15 +1431,15 @@ void i915_disable_vblank(struct drm_device *dev, int pipe)
drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
unsigned long irqflags;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
+ ironlake_disable_display_irq(dev_priv, (pipe == 0) ?
DE_PIPEA_VBLANK: DE_PIPEB_VBLANK);
else
i915_disable_pipestat(dev_priv, pipe,
PIPE_VBLANK_INTERRUPT_ENABLE |
PIPE_START_VBLANK_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
void i915_enable_interrupt (struct drm_device *dev)
@@ -1306,12 +1506,50 @@ int i915_vblank_swap(struct drm_device *dev, void *data,
return -EINVAL;
}
-static struct drm_i915_gem_request *
-i915_get_tail_request(struct drm_device *dev)
+static u32
+ring_last_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- return list_entry(dev_priv->render_ring.request_list.prev,
- struct drm_i915_gem_request, list);
+ return list_entry(ring->request_list.prev,
+ struct drm_i915_gem_request, list)->seqno;
+}
+
+static bool i915_hangcheck_ring_idle(struct intel_ring_buffer *ring, bool *err)
+{
+ if (list_empty(&ring->request_list) ||
+ i915_seqno_passed(ring->get_seqno(ring), ring_last_seqno(ring))) {
+ /* Issue a wake-up to catch stuck h/w. */
+ if (ring->waiting_seqno && waitqueue_active(&ring->irq_queue)) {
+ DRM_ERROR("Hangcheck timer elapsed... %s idle [waiting on %d, at %d], missed IRQ?\n",
+ ring->name,
+ ring->waiting_seqno,
+ ring->get_seqno(ring));
+ wake_up_all(&ring->irq_queue);
+ *err = true;
+ }
+ return true;
+ }
+ return false;
+}
+
+static bool kick_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT) {
+ DRM_ERROR("Kicking stuck wait on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ if (IS_GEN6(dev) &&
+ (tmp & RING_WAIT_SEMAPHORE)) {
+ DRM_ERROR("Kicking stuck semaphore on %s\n",
+ ring->name);
+ I915_WRITE_CTL(ring, tmp);
+ return true;
+ }
+ return false;
}
/**
@@ -1325,6 +1563,17 @@ void i915_hangcheck_elapsed(unsigned long data)
struct drm_device *dev = (struct drm_device *)data;
drm_i915_private_t *dev_priv = dev->dev_private;
uint32_t acthd, instdone, instdone1;
+ bool err = false;
+
+ /* If all work is done then ACTHD clearly hasn't advanced. */
+ if (i915_hangcheck_ring_idle(&dev_priv->ring[RCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->ring[VCS], &err) &&
+ i915_hangcheck_ring_idle(&dev_priv->ring[BCS], &err)) {
+ dev_priv->hangcheck_count = 0;
+ if (err)
+ goto repeat;
+ return;
+ }
if (INTEL_INFO(dev)->gen < 4) {
acthd = I915_READ(ACTHD);
@@ -1336,38 +1585,6 @@ void i915_hangcheck_elapsed(unsigned long data)
instdone1 = I915_READ(INSTDONE1);
}
- /* If all work is done then ACTHD clearly hasn't advanced. */
- if (list_empty(&dev_priv->render_ring.request_list) ||
- i915_seqno_passed(dev_priv->render_ring.get_seqno(dev, &dev_priv->render_ring),
- i915_get_tail_request(dev)->seqno)) {
- bool missed_wakeup = false;
-
- dev_priv->hangcheck_count = 0;
-
- /* Issue a wake-up to catch stuck h/w. */
- if (dev_priv->render_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->render_ring.irq_queue)) {
- wake_up_all(&dev_priv->render_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->bsd_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->bsd_ring.irq_queue)) {
- wake_up_all(&dev_priv->bsd_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (dev_priv->blt_ring.waiting_gem_seqno &&
- waitqueue_active(&dev_priv->blt_ring.irq_queue)) {
- wake_up_all(&dev_priv->blt_ring.irq_queue);
- missed_wakeup = true;
- }
-
- if (missed_wakeup)
- DRM_ERROR("Hangcheck timer elapsed... GPU idle, missed IRQ.\n");
- return;
- }
-
if (dev_priv->last_acthd == acthd &&
dev_priv->last_instdone == instdone &&
dev_priv->last_instdone1 == instdone1) {
@@ -1380,12 +1597,17 @@ void i915_hangcheck_elapsed(unsigned long data)
* and break the hang. This should work on
* all but the second generation chipsets.
*/
- u32 tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- POSTING_READ(PRB0_CTL);
- goto out;
- }
+
+ if (kick_ring(&dev_priv->ring[RCS]))
+ goto repeat;
+
+ if (HAS_BSD(dev) &&
+ kick_ring(&dev_priv->ring[VCS]))
+ goto repeat;
+
+ if (HAS_BLT(dev) &&
+ kick_ring(&dev_priv->ring[BCS]))
+ goto repeat;
}
i915_handle_error(dev, true);
@@ -1399,7 +1621,7 @@ void i915_hangcheck_elapsed(unsigned long data)
dev_priv->last_instdone1 = instdone1;
}
-out:
+repeat:
/* Reset timer in case chip hangs without another request being added */
mod_timer(&dev_priv->hangcheck_timer,
jiffies + msecs_to_jiffies(DRM_I915_HANGCHECK_PERIOD));
@@ -1417,17 +1639,17 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
I915_WRITE(DEIMR, 0xffffffff);
I915_WRITE(DEIER, 0x0);
- (void) I915_READ(DEIER);
+ POSTING_READ(DEIER);
/* and GT */
I915_WRITE(GTIMR, 0xffffffff);
I915_WRITE(GTIER, 0x0);
- (void) I915_READ(GTIER);
+ POSTING_READ(GTIER);
/* south display irq */
I915_WRITE(SDEIMR, 0xffffffff);
I915_WRITE(SDEIER, 0x0);
- (void) I915_READ(SDEIER);
+ POSTING_READ(SDEIER);
}
static int ironlake_irq_postinstall(struct drm_device *dev)
@@ -1436,38 +1658,39 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
/* enable the kinds of interrupts that are always enabled */
u32 display_mask = DE_MASTER_IRQ_CONTROL | DE_GSE | DE_PCH_EVENT |
DE_PLANEA_FLIP_DONE | DE_PLANEB_FLIP_DONE;
- u32 render_mask = GT_PIPE_NOTIFY | GT_BSD_USER_INTERRUPT;
+ u32 render_irqs;
u32 hotplug_mask;
- dev_priv->irq_mask_reg = ~display_mask;
- dev_priv->de_irq_enable_reg = display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK;
+ dev_priv->irq_mask = ~display_mask;
/* should always be able to generate irqs */
I915_WRITE(DEIIR, I915_READ(DEIIR));
- I915_WRITE(DEIMR, dev_priv->irq_mask_reg);
- I915_WRITE(DEIER, dev_priv->de_irq_enable_reg);
- (void) I915_READ(DEIER);
+ I915_WRITE(DEIMR, dev_priv->irq_mask);
+ I915_WRITE(DEIER, display_mask | DE_PIPEA_VBLANK | DE_PIPEB_VBLANK);
+ POSTING_READ(DEIER);
- if (IS_GEN6(dev)) {
- render_mask =
- GT_PIPE_NOTIFY |
- GT_GEN6_BSD_USER_INTERRUPT |
- GT_BLT_USER_INTERRUPT;
- }
-
- dev_priv->gt_irq_mask_reg = ~render_mask;
- dev_priv->gt_irq_enable_reg = render_mask;
+ dev_priv->gt_irq_mask = ~0;
I915_WRITE(GTIIR, I915_READ(GTIIR));
- I915_WRITE(GTIMR, dev_priv->gt_irq_mask_reg);
+ I915_WRITE(GTIMR, dev_priv->gt_irq_mask);
if (IS_GEN6(dev)) {
- I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_PIPE_CONTROL_NOTIFY_INTERRUPT);
- I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_IMR_USER_INTERRUPT);
+ I915_WRITE(GEN6_RENDER_IMR, ~GEN6_RENDER_USER_INTERRUPT);
+ I915_WRITE(GEN6_BSD_IMR, ~GEN6_BSD_USER_INTERRUPT);
I915_WRITE(GEN6_BLITTER_IMR, ~GEN6_BLITTER_USER_INTERRUPT);
}
- I915_WRITE(GTIER, dev_priv->gt_irq_enable_reg);
- (void) I915_READ(GTIER);
+ if (IS_GEN6(dev))
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_GEN6_BSD_USER_INTERRUPT |
+ GT_BLT_USER_INTERRUPT;
+ else
+ render_irqs =
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY |
+ GT_BSD_USER_INTERRUPT;
+ I915_WRITE(GTIER, render_irqs);
+ POSTING_READ(GTIER);
if (HAS_PCH_CPT(dev)) {
hotplug_mask = SDE_CRT_HOTPLUG_CPT | SDE_PORTB_HOTPLUG_CPT |
@@ -1477,13 +1700,12 @@ static int ironlake_irq_postinstall(struct drm_device *dev)
SDE_PORTC_HOTPLUG | SDE_PORTD_HOTPLUG;
}
- dev_priv->pch_irq_mask_reg = ~hotplug_mask;
- dev_priv->pch_irq_enable_reg = hotplug_mask;
+ dev_priv->pch_irq_mask = ~hotplug_mask;
I915_WRITE(SDEIIR, I915_READ(SDEIIR));
- I915_WRITE(SDEIMR, dev_priv->pch_irq_mask_reg);
- I915_WRITE(SDEIER, dev_priv->pch_irq_enable_reg);
- (void) I915_READ(SDEIER);
+ I915_WRITE(SDEIMR, dev_priv->pch_irq_mask);
+ I915_WRITE(SDEIER, hotplug_mask);
+ POSTING_READ(SDEIER);
if (IS_IRONLAKE_M(dev)) {
/* Clear & enable PCU event interrupts */
@@ -1519,7 +1741,7 @@ void i915_driver_irq_preinstall(struct drm_device * dev)
I915_WRITE(PIPEBSTAT, 0);
I915_WRITE(IMR, 0xffffffff);
I915_WRITE(IER, 0x0);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
}
/*
@@ -1532,11 +1754,11 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
u32 enable_mask = I915_INTERRUPT_ENABLE_FIX | I915_INTERRUPT_ENABLE_VAR;
u32 error_mask;
- DRM_INIT_WAITQUEUE(&dev_priv->render_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[RCS].irq_queue);
if (HAS_BSD(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->bsd_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[VCS].irq_queue);
if (HAS_BLT(dev))
- DRM_INIT_WAITQUEUE(&dev_priv->blt_ring.irq_queue);
+ DRM_INIT_WAITQUEUE(&dev_priv->ring[BCS].irq_queue);
dev_priv->vblank_pipe = DRM_I915_VBLANK_PIPE_A | DRM_I915_VBLANK_PIPE_B;
@@ -1544,7 +1766,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
return ironlake_irq_postinstall(dev);
/* Unmask the interrupts that we always want on. */
- dev_priv->irq_mask_reg = ~I915_INTERRUPT_ENABLE_FIX;
+ dev_priv->irq_mask = ~I915_INTERRUPT_ENABLE_FIX;
dev_priv->pipestat[0] = 0;
dev_priv->pipestat[1] = 0;
@@ -1553,7 +1775,7 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
/* Enable in IER... */
enable_mask |= I915_DISPLAY_PORT_INTERRUPT;
/* and unmask in IMR */
- dev_priv->irq_mask_reg &= ~I915_DISPLAY_PORT_INTERRUPT;
+ dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT;
}
/*
@@ -1571,9 +1793,9 @@ int i915_driver_irq_postinstall(struct drm_device *dev)
}
I915_WRITE(EMR, error_mask);
- I915_WRITE(IMR, dev_priv->irq_mask_reg);
+ I915_WRITE(IMR, dev_priv->irq_mask);
I915_WRITE(IER, enable_mask);
- (void) I915_READ(IER);
+ POSTING_READ(IER);
if (I915_HAS_HOTPLUG(dev)) {
u32 hotplug_en = I915_READ(PORT_HOTPLUG_EN);
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 25ed911a311..d60860ec8cf 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -78,6 +78,12 @@
#define GRDOM_RENDER (1<<2)
#define GRDOM_MEDIA (3<<2)
+#define GEN6_GDRST 0x941c
+#define GEN6_GRDOM_FULL (1 << 0)
+#define GEN6_GRDOM_RENDER (1 << 1)
+#define GEN6_GRDOM_MEDIA (1 << 2)
+#define GEN6_GRDOM_BLT (1 << 3)
+
/* VGA stuff */
#define VGA_ST01_MDA 0x3ba
@@ -158,12 +164,23 @@
#define MI_MEM_VIRTUAL (1 << 22) /* 965+ only */
#define MI_STORE_DWORD_INDEX MI_INSTR(0x21, 1)
#define MI_STORE_DWORD_INDEX_SHIFT 2
-#define MI_LOAD_REGISTER_IMM MI_INSTR(0x22, 1)
+/* Official intel docs are somewhat sloppy concerning MI_LOAD_REGISTER_IMM:
+ * - Always issue a MI_NOOP _before_ the MI_LOAD_REGISTER_IMM - otherwise hw
+ * simply ignores the register load under certain conditions.
+ * - One can actually load arbitrarily many arbitrary registers: simply issue x
+ * address/value pairs. Don't overdo it, though; x <= 2^4 must hold!
+ */
+#define MI_LOAD_REGISTER_IMM(x) MI_INSTR(0x22, 2*x-1)
#define MI_FLUSH_DW MI_INSTR(0x26, 2) /* for GEN6 */
#define MI_BATCH_BUFFER MI_INSTR(0x30, 1)
#define MI_BATCH_NON_SECURE (1)
#define MI_BATCH_NON_SECURE_I965 (1<<8)
#define MI_BATCH_BUFFER_START MI_INSTR(0x31, 0)
+#define MI_SEMAPHORE_MBOX MI_INSTR(0x16, 1) /* gen6+ */
+#define MI_SEMAPHORE_GLOBAL_GTT (1<<22)
+#define MI_SEMAPHORE_UPDATE (1<<21)
+#define MI_SEMAPHORE_COMPARE (1<<20)
+#define MI_SEMAPHORE_REGISTER (1<<18)
/*
* 3D instructions used by the kernel
*/
@@ -256,10 +273,6 @@
* Instruction and interrupt control regs
*/
#define PGTBL_ER 0x02024
-#define PRB0_TAIL 0x02030
-#define PRB0_HEAD 0x02034
-#define PRB0_START 0x02038
-#define PRB0_CTL 0x0203c
#define RENDER_RING_BASE 0x02000
#define BSD_RING_BASE 0x04000
#define GEN6_BSD_RING_BASE 0x12000
@@ -268,9 +281,13 @@
#define RING_HEAD(base) ((base)+0x34)
#define RING_START(base) ((base)+0x38)
#define RING_CTL(base) ((base)+0x3c)
+#define RING_SYNC_0(base) ((base)+0x40)
+#define RING_SYNC_1(base) ((base)+0x44)
+#define RING_MAX_IDLE(base) ((base)+0x54)
#define RING_HWS_PGA(base) ((base)+0x80)
#define RING_HWS_PGA_GEN6(base) ((base)+0x2080)
#define RING_ACTHD(base) ((base)+0x74)
+#define RING_NOPID(base) ((base)+0x94)
#define TAIL_ADDR 0x001FFFF8
#define HEAD_WRAP_COUNT 0xFFE00000
#define HEAD_WRAP_ONE 0x00200000
@@ -285,10 +302,17 @@
#define RING_INVALID 0x00000000
#define RING_WAIT_I8XX (1<<0) /* gen2, PRBx_HEAD */
#define RING_WAIT (1<<11) /* gen3+, PRBx_CTL */
+#define RING_WAIT_SEMAPHORE (1<<10) /* gen6+ */
+#if 0
+#define PRB0_TAIL 0x02030
+#define PRB0_HEAD 0x02034
+#define PRB0_START 0x02038
+#define PRB0_CTL 0x0203c
#define PRB1_TAIL 0x02040 /* 915+ only */
#define PRB1_HEAD 0x02044 /* 915+ only */
#define PRB1_START 0x02048 /* 915+ only */
#define PRB1_CTL 0x0204c /* 915+ only */
+#endif
#define IPEIR_I965 0x02064
#define IPEHR_I965 0x02068
#define INSTDONE_I965 0x0206c
@@ -305,11 +329,42 @@
#define INSTDONE 0x02090
#define NOPID 0x02094
#define HWSTAM 0x02098
+#define VCS_INSTDONE 0x1206C
+#define VCS_IPEIR 0x12064
+#define VCS_IPEHR 0x12068
+#define VCS_ACTHD 0x12074
+#define BCS_INSTDONE 0x2206C
+#define BCS_IPEIR 0x22064
+#define BCS_IPEHR 0x22068
+#define BCS_ACTHD 0x22074
+
+#define ERROR_GEN6 0x040a0
+
+/* GM45+ chicken bits -- debug workaround bits that may be required
+ * for various sorts of correct behavior. The top 16 bits of each are
+ * the enables for writing to the corresponding low bit.
+ */
+#define _3D_CHICKEN 0x02084
+#define _3D_CHICKEN2 0x0208c
+/* Disables pipelining of read flushes past the SF-WIZ interface.
+ * Required on all Ironlake steppings according to the B-Spec, but the
+ * particular danger of not doing so is not specified.
+ */
+# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
+#define _3D_CHICKEN3 0x02090
#define MI_MODE 0x0209c
# define VS_TIMER_DISPATCH (1 << 6)
# define MI_FLUSH_ENABLE (1 << 11)
+#define GFX_MODE 0x02520
+#define GFX_RUN_LIST_ENABLE (1<<15)
+#define GFX_TLB_INVALIDATE_ALWAYS (1<<13)
+#define GFX_SURFACE_FAULT_ENABLE (1<<12)
+#define GFX_REPLAY_MODE (1<<11)
+#define GFX_PSMI_GRANULARITY (1<<10)
+#define GFX_PPGTT_ENABLE (1<<9)
+
#define SCPD0 0x0209c /* 915+ only */
#define IER 0x020a0
#define IIR 0x020a4
@@ -461,7 +516,7 @@
#define GEN6_BSD_SLEEP_PSMI_CONTROL_IDLE_INDICATOR (1 << 3)
#define GEN6_BSD_IMR 0x120a8
-#define GEN6_BSD_IMR_USER_INTERRUPT (1 << 12)
+#define GEN6_BSD_USER_INTERRUPT (1 << 12)
#define GEN6_BSD_RNCID 0x12198
@@ -541,6 +596,18 @@
#define ILK_DISPLAY_CHICKEN1 0x42000
#define ILK_FBCQ_DIS (1<<22)
+#define ILK_PABSTRETCH_DIS (1<<21)
+
+
+/*
+ * Framebuffer compression for Sandybridge
+ *
+ * The following two registers are of type GTTMMADR
+ */
+#define SNB_DPFC_CTL_SA 0x100100
+#define SNB_CPU_FENCE_ENABLE (1<<29)
+#define DPFC_CPU_FENCE_OFFSET 0x100104
+
/*
* GPIO regs
@@ -900,6 +967,8 @@
*/
#define MCHBAR_MIRROR_BASE 0x10000
+#define MCHBAR_MIRROR_BASE_SNB 0x140000
+
/** 915-945 and GM965 MCH register controlling DRAM channel access */
#define DCC 0x10200
#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0)
@@ -1119,6 +1188,10 @@
#define DDRMPLL1 0X12c20
#define PEG_BAND_GAP_DATA 0x14d68
+#define GEN6_GT_PERF_STATUS 0x145948
+#define GEN6_RP_STATE_LIMITS 0x145994
+#define GEN6_RP_STATE_CAP 0x145998
+
/*
* Logical Context regs
*/
@@ -1168,7 +1241,6 @@
#define VTOTAL(pipe) _PIPE(pipe, VTOTAL_A, VTOTAL_B)
#define VBLANK(pipe) _PIPE(pipe, VBLANK_A, VBLANK_B)
#define VSYNC(pipe) _PIPE(pipe, VSYNC_A, VSYNC_B)
-#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define BCLRPAT(pipe) _PIPE(pipe, BCLRPAT_A, BCLRPAT_B)
/* VGA port control */
@@ -2182,8 +2254,10 @@
#define PIPE_6BPC (2 << 5)
#define PIPE_12BPC (3 << 5)
+#define PIPESRC(pipe) _PIPE(pipe, PIPEASRC, PIPEBSRC)
#define PIPECONF(pipe) _PIPE(pipe, PIPEACONF, PIPEBCONF)
#define PIPEDSL(pipe) _PIPE(pipe, PIPEADSL, PIPEBDSL)
+#define PIPEFRAMEPIXEL(pipe) _PIPE(pipe, PIPEAFRAMEPIXEL, PIPEBFRAMEPIXEL)
#define DSPARB 0x70030
#define DSPARB_CSTART_MASK (0x7f << 7)
@@ -2291,6 +2365,40 @@
#define ILK_FIFO_LINE_SIZE 64
+/* define the WM info on Sandybridge */
+#define SNB_DISPLAY_FIFO 128
+#define SNB_DISPLAY_MAXWM 0x7f /* bits 22:16 */
+#define SNB_DISPLAY_DFTWM 8
+#define SNB_CURSOR_FIFO 32
+#define SNB_CURSOR_MAXWM 0x1f /* bit 4:0 */
+#define SNB_CURSOR_DFTWM 8
+
+#define SNB_DISPLAY_SR_FIFO 512
+#define SNB_DISPLAY_MAX_SRWM 0x1ff /* bit 16:8 */
+#define SNB_DISPLAY_DFT_SRWM 0x3f
+#define SNB_CURSOR_SR_FIFO 64
+#define SNB_CURSOR_MAX_SRWM 0x3f /* bit 5:0 */
+#define SNB_CURSOR_DFT_SRWM 8
+
+#define SNB_FBC_MAX_SRWM 0xf /* bit 23:20 */
+
+#define SNB_FIFO_LINE_SIZE 64
+
+
+/* the address from which we read all the watermark latency values */
+#define SSKPD 0x5d10
+#define SSKPD_WM_MASK 0x3f
+#define SSKPD_WM0_SHIFT 0
+#define SSKPD_WM1_SHIFT 8
+#define SSKPD_WM2_SHIFT 16
+#define SSKPD_WM3_SHIFT 24
+
+#define SNB_LATENCY(shift) (I915_READ(MCHBAR_MIRROR_BASE_SNB + SSKPD) >> (shift) & SSKPD_WM_MASK)
+#define SNB_READ_WM0_LATENCY() SNB_LATENCY(SSKPD_WM0_SHIFT)
+#define SNB_READ_WM1_LATENCY() SNB_LATENCY(SSKPD_WM1_SHIFT)
+#define SNB_READ_WM2_LATENCY() SNB_LATENCY(SSKPD_WM2_SHIFT)
+#define SNB_READ_WM3_LATENCY() SNB_LATENCY(SSKPD_WM3_SHIFT)
+
/*
* The two pipe frame counter registers are not synchronized, so
* reading a stable value is somewhat tricky. The following code
@@ -2351,6 +2459,10 @@
#define CURBBASE 0x700c4
#define CURBPOS 0x700c8
+#define CURCNTR(pipe) _PIPE(pipe, CURACNTR, CURBCNTR)
+#define CURBASE(pipe) _PIPE(pipe, CURABASE, CURBBASE)
+#define CURPOS(pipe) _PIPE(pipe, CURAPOS, CURBPOS)
+
/* Display A control */
#define DSPACNTR 0x70180
#define DISPLAY_PLANE_ENABLE (1<<31)
@@ -2586,10 +2698,14 @@
#define GTIER 0x4401c
#define ILK_DISPLAY_CHICKEN2 0x42004
+/* Required on all Ironlake and Sandybridge according to the B-Spec. */
+#define ILK_ELPIN_409_SELECT (1 << 25)
#define ILK_DPARB_GATE (1<<22)
#define ILK_VSDPFD_FULL (1<<21)
#define ILK_DSPCLK_GATE 0x42020
#define ILK_DPARB_CLK_GATE (1<<5)
+#define ILK_DPFD_CLK_GATE (1<<7)
+
/* According to spec, bits 7/8/9 of 0x42020 should be set to enable FBC */
#define ILK_CLK_FBC (1<<7)
#define ILK_DPFC_DIS1 (1<<8)
@@ -2669,6 +2785,7 @@
#define PCH_DPLL(pipe) _PIPE(pipe, PCH_DPLL_A, PCH_DPLL_B)
#define PCH_FPA0 0xc6040
+#define FP_CB_TUNE (0x3<<22)
#define PCH_FPA1 0xc6044
#define PCH_FPB0 0xc6048
#define PCH_FPB1 0xc604c
@@ -3033,6 +3150,7 @@
#define TRANS_DP_10BPC (1<<9)
#define TRANS_DP_6BPC (2<<9)
#define TRANS_DP_12BPC (3<<9)
+#define TRANS_DP_BPC_MASK (3<<9)
#define TRANS_DP_VSYNC_ACTIVE_HIGH (1<<4)
#define TRANS_DP_VSYNC_ACTIVE_LOW 0
#define TRANS_DP_HSYNC_ACTIVE_HIGH (1<<3)
@@ -3052,4 +3170,66 @@
#define EDP_LINK_TRAIN_800MV_0DB_SNB_B (0x38<<22)
#define EDP_LINK_TRAIN_VOL_EMP_MASK_SNB (0x3f<<22)
+#define FORCEWAKE 0xA18C
+#define FORCEWAKE_ACK 0x130090
+
+#define GEN6_RPNSWREQ 0xA008
+#define GEN6_TURBO_DISABLE (1<<31)
+#define GEN6_FREQUENCY(x) ((x)<<25)
+#define GEN6_OFFSET(x) ((x)<<19)
+#define GEN6_AGGRESSIVE_TURBO (0<<15)
+#define GEN6_RC_VIDEO_FREQ 0xA00C
+#define GEN6_RC_CONTROL 0xA090
+#define GEN6_RC_CTL_RC6pp_ENABLE (1<<16)
+#define GEN6_RC_CTL_RC6p_ENABLE (1<<17)
+#define GEN6_RC_CTL_RC6_ENABLE (1<<18)
+#define GEN6_RC_CTL_RC1e_ENABLE (1<<20)
+#define GEN6_RC_CTL_RC7_ENABLE (1<<22)
+#define GEN6_RC_CTL_EI_MODE(x) ((x)<<27)
+#define GEN6_RC_CTL_HW_ENABLE (1<<31)
+#define GEN6_RP_DOWN_TIMEOUT 0xA010
+#define GEN6_RP_INTERRUPT_LIMITS 0xA014
+#define GEN6_RPSTAT1 0xA01C
+#define GEN6_RP_CONTROL 0xA024
+#define GEN6_RP_MEDIA_TURBO (1<<11)
+#define GEN6_RP_USE_NORMAL_FREQ (1<<9)
+#define GEN6_RP_MEDIA_IS_GFX (1<<8)
+#define GEN6_RP_ENABLE (1<<7)
+#define GEN6_RP_UP_BUSY_MAX (0x2<<3)
+#define GEN6_RP_DOWN_BUSY_MIN (0x2<<0)
+#define GEN6_RP_UP_THRESHOLD 0xA02C
+#define GEN6_RP_DOWN_THRESHOLD 0xA030
+#define GEN6_RP_UP_EI 0xA068
+#define GEN6_RP_DOWN_EI 0xA06C
+#define GEN6_RP_IDLE_HYSTERSIS 0xA070
+#define GEN6_RC_STATE 0xA094
+#define GEN6_RC1_WAKE_RATE_LIMIT 0xA098
+#define GEN6_RC6_WAKE_RATE_LIMIT 0xA09C
+#define GEN6_RC6pp_WAKE_RATE_LIMIT 0xA0A0
+#define GEN6_RC_EVALUATION_INTERVAL 0xA0A8
+#define GEN6_RC_IDLE_HYSTERSIS 0xA0AC
+#define GEN6_RC_SLEEP 0xA0B0
+#define GEN6_RC1e_THRESHOLD 0xA0B4
+#define GEN6_RC6_THRESHOLD 0xA0B8
+#define GEN6_RC6p_THRESHOLD 0xA0BC
+#define GEN6_RC6pp_THRESHOLD 0xA0C0
+#define GEN6_PMINTRMSK 0xA168
+
+#define GEN6_PMISR 0x44020
+#define GEN6_PMIMR 0x44024
+#define GEN6_PMIIR 0x44028
+#define GEN6_PMIER 0x4402C
+#define GEN6_PM_MBOX_EVENT (1<<25)
+#define GEN6_PM_THERMAL_EVENT (1<<24)
+#define GEN6_PM_RP_DOWN_TIMEOUT (1<<6)
+#define GEN6_PM_RP_UP_THRESHOLD (1<<5)
+#define GEN6_PM_RP_DOWN_THRESHOLD (1<<4)
+#define GEN6_PM_RP_UP_EI_EXPIRED (1<<2)
+#define GEN6_PM_RP_DOWN_EI_EXPIRED (1<<1)
+
+#define GEN6_PCODE_MAILBOX 0x138124
+#define GEN6_PCODE_READY (1<<31)
+#define GEN6_PCODE_WRITE_MIN_FREQ_TABLE 0x9
+#define GEN6_PCODE_DATA 0x138128
+
#endif /* _I915_REG_H_ */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 454c064f8ef..410772466fa 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -235,10 +235,21 @@ static void i915_restore_vga(struct drm_device *dev)
static void i915_save_modeset_reg(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Cursor state */
+ dev_priv->saveCURACNTR = I915_READ(CURACNTR);
+ dev_priv->saveCURAPOS = I915_READ(CURAPOS);
+ dev_priv->saveCURABASE = I915_READ(CURABASE);
+ dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
+ dev_priv->saveCURBPOS = I915_READ(CURBPOS);
+ dev_priv->saveCURBBASE = I915_READ(CURBBASE);
+ if (IS_GEN2(dev))
+ dev_priv->saveCURSIZE = I915_READ(CURSIZE);
+
if (HAS_PCH_SPLIT(dev)) {
dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
@@ -357,6 +368,28 @@ static void i915_save_modeset_reg(struct drm_device *dev)
}
i915_save_palette(dev, PIPE_B);
dev_priv->savePIPEBSTAT = I915_READ(PIPEBSTAT);
+
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
+ break;
+ case 3:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
+ case 2:
+ for (i = 0; i < 8; i++)
+ dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
+ break;
+ }
+
return;
}
@@ -365,10 +398,33 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
int dpll_a_reg, fpa0_reg, fpa1_reg;
int dpll_b_reg, fpb0_reg, fpb1_reg;
+ int i;
if (drm_core_check_feature(dev, DRIVER_MODESET))
return;
+ /* Fences */
+ switch (INTEL_INFO(dev)->gen) {
+ case 6:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 5:
+ case 4:
+ for (i = 0; i < 16; i++)
+ I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
+ break;
+ case 3:
+ case 2:
+ if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
+ for (i = 0; i < 8; i++)
+ I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
+ break;
+ }
+
if (HAS_PCH_SPLIT(dev)) {
dpll_a_reg = PCH_DPLL_A;
dpll_b_reg = PCH_DPLL_B;
@@ -529,6 +585,16 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
I915_WRITE(DSPBCNTR, dev_priv->saveDSPBCNTR);
I915_WRITE(DSPBADDR, I915_READ(DSPBADDR));
+ /* Cursor state */
+ I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
+ I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
+ I915_WRITE(CURABASE, dev_priv->saveCURABASE);
+ I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
+ I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
+ I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
+ if (IS_GEN2(dev))
+ I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
+
return;
}
@@ -543,16 +609,6 @@ void i915_save_display(struct drm_device *dev)
/* Don't save them in KMS mode */
i915_save_modeset_reg(dev);
- /* Cursor state */
- dev_priv->saveCURACNTR = I915_READ(CURACNTR);
- dev_priv->saveCURAPOS = I915_READ(CURAPOS);
- dev_priv->saveCURABASE = I915_READ(CURABASE);
- dev_priv->saveCURBCNTR = I915_READ(CURBCNTR);
- dev_priv->saveCURBPOS = I915_READ(CURBPOS);
- dev_priv->saveCURBBASE = I915_READ(CURBBASE);
- if (IS_GEN2(dev))
- dev_priv->saveCURSIZE = I915_READ(CURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev)) {
dev_priv->saveADPA = I915_READ(PCH_ADPA);
@@ -657,16 +713,6 @@ void i915_restore_display(struct drm_device *dev)
/* Don't restore them in KMS mode */
i915_restore_modeset_reg(dev);
- /* Cursor state */
- I915_WRITE(CURAPOS, dev_priv->saveCURAPOS);
- I915_WRITE(CURACNTR, dev_priv->saveCURACNTR);
- I915_WRITE(CURABASE, dev_priv->saveCURABASE);
- I915_WRITE(CURBPOS, dev_priv->saveCURBPOS);
- I915_WRITE(CURBCNTR, dev_priv->saveCURBCNTR);
- I915_WRITE(CURBBASE, dev_priv->saveCURBBASE);
- if (IS_GEN2(dev))
- I915_WRITE(CURSIZE, dev_priv->saveCURSIZE);
-
/* CRT state */
if (HAS_PCH_SPLIT(dev))
I915_WRITE(PCH_ADPA, dev_priv->saveADPA);
@@ -771,8 +817,14 @@ int i915_save_state(struct drm_device *dev)
dev_priv->saveIMR = I915_READ(IMR);
}
- if (HAS_PCH_SPLIT(dev))
+ if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
+
+ /* XXX disabling the clock gating breaks suspend on gm45
+ intel_disable_clock_gating(dev);
+ */
/* Cache mode state */
dev_priv->saveCACHE_MODE_0 = I915_READ(CACHE_MODE_0);
@@ -788,28 +840,6 @@ int i915_save_state(struct drm_device *dev)
for (i = 0; i < 3; i++)
dev_priv->saveSWF2[i] = I915_READ(SWF30 + (i << 2));
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 6:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_SANDYBRIDGE_0 + (i * 8));
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- dev_priv->saveFENCE[i] = I915_READ64(FENCE_REG_965_0 + (i * 8));
- break;
- case 3:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i+8] = I915_READ(FENCE_REG_945_8 + (i * 4));
- case 2:
- for (i = 0; i < 8; i++)
- dev_priv->saveFENCE[i] = I915_READ(FENCE_REG_830_0 + (i * 4));
- break;
-
- }
-
return 0;
}
@@ -823,27 +853,6 @@ int i915_restore_state(struct drm_device *dev)
/* Hardware status page */
I915_WRITE(HWS_PGA, dev_priv->saveHWS);
- /* Fences */
- switch (INTEL_INFO(dev)->gen) {
- case 6:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_SANDYBRIDGE_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 5:
- case 4:
- for (i = 0; i < 16; i++)
- I915_WRITE64(FENCE_REG_965_0 + (i * 8), dev_priv->saveFENCE[i]);
- break;
- case 3:
- case 2:
- if (IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_945_8 + (i * 4), dev_priv->saveFENCE[i+8]);
- for (i = 0; i < 8; i++)
- I915_WRITE(FENCE_REG_830_0 + (i * 4), dev_priv->saveFENCE[i]);
- break;
- }
-
i915_restore_display(dev);
/* Interrupt state */
@@ -860,13 +869,16 @@ int i915_restore_state(struct drm_device *dev)
}
/* Clock gating state */
- intel_init_clock_gating(dev);
+ intel_enable_clock_gating(dev);
- if (HAS_PCH_SPLIT(dev)) {
+ if (IS_IRONLAKE_M(dev)) {
ironlake_enable_drps(dev);
intel_init_emon(dev);
}
+ if (IS_GEN6(dev))
+ gen6_enable_rps(dev_priv);
+
/* Cache mode state */
I915_WRITE (CACHE_MODE_0, dev_priv->saveCACHE_MODE_0 | 0xffff0000);
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index fea97a21cc1..7f0fc3ed61a 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -6,6 +6,7 @@
#include <linux/tracepoint.h>
#include <drm/drmP.h>
+#include "i915_drv.h"
#undef TRACE_SYSTEM
#define TRACE_SYSTEM i915
@@ -16,18 +17,18 @@
TRACE_EVENT(i915_gem_object_create,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, size)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->size = obj->size;
+ __entry->size = obj->base.size;
),
TP_printk("obj=%p, size=%u", __entry->obj, __entry->size)
@@ -35,40 +36,43 @@ TRACE_EVENT(i915_gem_object_create,
TRACE_EVENT(i915_gem_object_bind,
- TP_PROTO(struct drm_gem_object *obj, u32 gtt_offset),
+ TP_PROTO(struct drm_i915_gem_object *obj, u32 gtt_offset, bool mappable),
- TP_ARGS(obj, gtt_offset),
+ TP_ARGS(obj, gtt_offset, mappable),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, gtt_offset)
+ __field(bool, mappable)
),
TP_fast_assign(
__entry->obj = obj;
__entry->gtt_offset = gtt_offset;
+ __entry->mappable = mappable;
),
- TP_printk("obj=%p, gtt_offset=%08x",
- __entry->obj, __entry->gtt_offset)
+ TP_printk("obj=%p, gtt_offset=%08x%s",
+ __entry->obj, __entry->gtt_offset,
+ __entry->mappable ? ", mappable" : "")
);
TRACE_EVENT(i915_gem_object_change_domain,
- TP_PROTO(struct drm_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
+ TP_PROTO(struct drm_i915_gem_object *obj, uint32_t old_read_domains, uint32_t old_write_domain),
TP_ARGS(obj, old_read_domains, old_write_domain),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
__field(u32, read_domains)
__field(u32, write_domain)
),
TP_fast_assign(
__entry->obj = obj;
- __entry->read_domains = obj->read_domains | (old_read_domains << 16);
- __entry->write_domain = obj->write_domain | (old_write_domain << 16);
+ __entry->read_domains = obj->base.read_domains | (old_read_domains << 16);
+ __entry->write_domain = obj->base.write_domain | (old_write_domain << 16);
),
TP_printk("obj=%p, read=%04x, write=%04x",
@@ -76,36 +80,14 @@ TRACE_EVENT(i915_gem_object_change_domain,
__entry->read_domains, __entry->write_domain)
);
-TRACE_EVENT(i915_gem_object_get_fence,
-
- TP_PROTO(struct drm_gem_object *obj, int fence, int tiling_mode),
-
- TP_ARGS(obj, fence, tiling_mode),
-
- TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
- __field(int, fence)
- __field(int, tiling_mode)
- ),
-
- TP_fast_assign(
- __entry->obj = obj;
- __entry->fence = fence;
- __entry->tiling_mode = tiling_mode;
- ),
-
- TP_printk("obj=%p, fence=%d, tiling=%d",
- __entry->obj, __entry->fence, __entry->tiling_mode)
-);
-
DECLARE_EVENT_CLASS(i915_gem_object,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj),
TP_STRUCT__entry(
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -117,21 +99,21 @@ DECLARE_EVENT_CLASS(i915_gem_object,
DEFINE_EVENT(i915_gem_object, i915_gem_object_clflush,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_unbind,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
DEFINE_EVENT(i915_gem_object, i915_gem_object_destroy,
- TP_PROTO(struct drm_gem_object *obj),
+ TP_PROTO(struct drm_i915_gem_object *obj),
TP_ARGS(obj)
);
@@ -263,13 +245,13 @@ DEFINE_EVENT(i915_ring, i915_ring_wait_end,
);
TRACE_EVENT(i915_flip_request,
- TP_PROTO(int plane, struct drm_gem_object *obj),
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -281,13 +263,13 @@ TRACE_EVENT(i915_flip_request,
);
TRACE_EVENT(i915_flip_complete,
- TP_PROTO(int plane, struct drm_gem_object *obj),
+ TP_PROTO(int plane, struct drm_i915_gem_object *obj),
TP_ARGS(plane, obj),
TP_STRUCT__entry(
__field(int, plane)
- __field(struct drm_gem_object *, obj)
+ __field(struct drm_i915_gem_object *, obj)
),
TP_fast_assign(
@@ -298,6 +280,29 @@ TRACE_EVENT(i915_flip_complete,
TP_printk("plane=%d, obj=%p", __entry->plane, __entry->obj)
);
+TRACE_EVENT(i915_reg_rw,
+ TP_PROTO(int cmd, uint32_t reg, uint64_t val, int len),
+
+ TP_ARGS(cmd, reg, val, len),
+
+ TP_STRUCT__entry(
+ __field(int, cmd)
+ __field(uint32_t, reg)
+ __field(uint64_t, val)
+ __field(int, len)
+ ),
+
+ TP_fast_assign(
+ __entry->cmd = cmd;
+ __entry->reg = reg;
+ __entry->val = (uint64_t)val;
+ __entry->len = len;
+ ),
+
+ TP_printk("cmd=%c, reg=0x%x, val=0x%llx, len=%d",
+ __entry->cmd, __entry->reg, __entry->val, __entry->len)
+);
+
#endif /* _I915_TRACE_H_ */
/* This part must be outside protection */
diff --git a/drivers/gpu/drm/i915/intel_acpi.c b/drivers/gpu/drm/i915/intel_acpi.c
index 65c88f9ba12..2cb8e0b9f1e 100644
--- a/drivers/gpu/drm/i915/intel_acpi.c
+++ b/drivers/gpu/drm/i915/intel_acpi.c
@@ -190,37 +190,6 @@ out:
kfree(output.pointer);
}
-static int intel_dsm_switchto(enum vga_switcheroo_client_id id)
-{
- return 0;
-}
-
-static int intel_dsm_power_state(enum vga_switcheroo_client_id id,
- enum vga_switcheroo_state state)
-{
- return 0;
-}
-
-static int intel_dsm_init(void)
-{
- return 0;
-}
-
-static int intel_dsm_get_client_id(struct pci_dev *pdev)
-{
- if (intel_dsm_priv.dhandle == DEVICE_ACPI_HANDLE(&pdev->dev))
- return VGA_SWITCHEROO_IGD;
- else
- return VGA_SWITCHEROO_DIS;
-}
-
-static struct vga_switcheroo_handler intel_dsm_handler = {
- .switchto = intel_dsm_switchto,
- .power_state = intel_dsm_power_state,
- .init = intel_dsm_init,
- .get_client_id = intel_dsm_get_client_id,
-};
-
static bool intel_dsm_pci_probe(struct pci_dev *pdev)
{
acpi_handle dhandle, intel_handle;
@@ -276,11 +245,8 @@ void intel_register_dsm_handler(void)
{
if (!intel_dsm_detect())
return;
-
- vga_switcheroo_register_handler(&intel_dsm_handler);
}
void intel_unregister_dsm_handler(void)
{
- vga_switcheroo_unregister_handler();
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index bee24b1a58e..880659680d0 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -642,26 +642,23 @@ static const intel_limit_t intel_limits_ironlake_display_port = {
.find_pll = intel_find_pll_ironlake_dp,
};
-static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc,
+ int refclk)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
const intel_limit_t *limit;
- int refclk = 120;
if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) {
- if (dev_priv->lvds_use_ssc && dev_priv->lvds_ssc_freq == 100)
- refclk = 100;
-
if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) ==
LVDS_CLKB_POWER_UP) {
/* LVDS dual channel */
- if (refclk == 100)
+ if (refclk == 100000)
limit = &intel_limits_ironlake_dual_lvds_100m;
else
limit = &intel_limits_ironlake_dual_lvds;
} else {
- if (refclk == 100)
+ if (refclk == 100000)
limit = &intel_limits_ironlake_single_lvds_100m;
else
limit = &intel_limits_ironlake_single_lvds;
@@ -702,13 +699,13 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc)
return limit;
}
-static const intel_limit_t *intel_limit(struct drm_crtc *crtc)
+static const intel_limit_t *intel_limit(struct drm_crtc *crtc, int refclk)
{
struct drm_device *dev = crtc->dev;
const intel_limit_t *limit;
if (HAS_PCH_SPLIT(dev))
- limit = intel_ironlake_limit(crtc);
+ limit = intel_ironlake_limit(crtc, refclk);
else if (IS_G4X(dev)) {
limit = intel_g4x_limit(crtc);
} else if (IS_PINEVIEW(dev)) {
@@ -773,11 +770,10 @@ bool intel_pipe_has_type(struct drm_crtc *crtc, int type)
* the given connectors.
*/
-static bool intel_PLL_is_valid(struct drm_crtc *crtc, intel_clock_t *clock)
+static bool intel_PLL_is_valid(struct drm_device *dev,
+ const intel_limit_t *limit,
+ const intel_clock_t *clock)
{
- const intel_limit_t *limit = intel_limit (crtc);
- struct drm_device *dev = crtc->dev;
-
if (clock->p1 < limit->p1.min || limit->p1.max < clock->p1)
INTELPllInvalid ("p1 out of range\n");
if (clock->p < limit->p.min || limit->p.max < clock->p)
@@ -849,8 +845,8 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int this_err;
intel_clock(dev, refclk, &clock);
-
- if (!intel_PLL_is_valid(crtc, &clock))
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
this_err = abs(clock.dot - target);
@@ -912,9 +908,11 @@ intel_g4x_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
int this_err;
intel_clock(dev, refclk, &clock);
- if (!intel_PLL_is_valid(crtc, &clock))
+ if (!intel_PLL_is_valid(dev, limit,
+ &clock))
continue;
- this_err = abs(clock.dot - target) ;
+
+ this_err = abs(clock.dot - target);
if (this_err < err_most) {
*best_clock = clock;
err_most = this_err;
@@ -1066,13 +1064,13 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane, i;
u32 fbc_ctl, fbc_ctl2;
if (fb->pitch == dev_priv->cfb_pitch &&
- obj_priv->fence_reg == dev_priv->cfb_fence &&
+ obj->fence_reg == dev_priv->cfb_fence &&
intel_crtc->plane == dev_priv->cfb_plane &&
I915_READ(FBC_CONTROL) & FBC_CTL_EN)
return;
@@ -1086,7 +1084,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* FBC_CTL wants 64B units */
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
plane = dev_priv->cfb_plane == 0 ? FBC_CTL_PLANEA : FBC_CTL_PLANEB;
@@ -1096,7 +1094,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
/* Set it up... */
fbc_ctl2 = FBC_CTL_FENCE_DBL | FBC_CTL_IDLE_IMM | plane;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
I915_WRITE(FBC_CONTROL2, fbc_ctl2);
I915_WRITE(FBC_FENCE_OFF, crtc->y);
@@ -1107,7 +1105,7 @@ static void i8xx_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
fbc_ctl |= FBC_CTL_C3_IDLE; /* 945 needs special SR handling */
fbc_ctl |= (dev_priv->cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
fbc_ctl |= (interval & 0x2fff) << FBC_CTL_INTERVAL_SHIFT;
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
fbc_ctl |= dev_priv->cfb_fence;
I915_WRITE(FBC_CONTROL, fbc_ctl);
@@ -1150,7 +1148,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1159,7 +1157,7 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1170,12 +1168,12 @@ static void g4x_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
dev_priv->cfb_y = crtc->y;
dpfc_ctl = plane | DPFC_SR_EN | DPFC_CTL_LIMIT_1X;
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | dev_priv->cfb_fence;
I915_WRITE(DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1221,7 +1219,7 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_framebuffer *fb = crtc->fb;
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_i915_gem_object *obj_priv = to_intel_bo(intel_fb->obj);
+ struct drm_i915_gem_object *obj = intel_fb->obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
int plane = intel_crtc->plane == 0 ? DPFC_CTL_PLANEA : DPFC_CTL_PLANEB;
unsigned long stall_watermark = 200;
@@ -1230,9 +1228,9 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
if (dev_priv->cfb_pitch == dev_priv->cfb_pitch / 64 - 1 &&
- dev_priv->cfb_fence == obj_priv->fence_reg &&
+ dev_priv->cfb_fence == obj->fence_reg &&
dev_priv->cfb_plane == intel_crtc->plane &&
- dev_priv->cfb_offset == obj_priv->gtt_offset &&
+ dev_priv->cfb_offset == obj->gtt_offset &&
dev_priv->cfb_y == crtc->y)
return;
@@ -1242,14 +1240,14 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
}
dev_priv->cfb_pitch = (dev_priv->cfb_pitch / 64) - 1;
- dev_priv->cfb_fence = obj_priv->fence_reg;
+ dev_priv->cfb_fence = obj->fence_reg;
dev_priv->cfb_plane = intel_crtc->plane;
- dev_priv->cfb_offset = obj_priv->gtt_offset;
+ dev_priv->cfb_offset = obj->gtt_offset;
dev_priv->cfb_y = crtc->y;
dpfc_ctl &= DPFC_RESERVED;
dpfc_ctl |= (plane | DPFC_CTL_LIMIT_1X);
- if (obj_priv->tiling_mode != I915_TILING_NONE) {
+ if (obj->tiling_mode != I915_TILING_NONE) {
dpfc_ctl |= (DPFC_CTL_FENCE_EN | dev_priv->cfb_fence);
I915_WRITE(ILK_DPFC_CHICKEN, DPFC_HT_MODIFY);
} else {
@@ -1260,10 +1258,16 @@ static void ironlake_enable_fbc(struct drm_crtc *crtc, unsigned long interval)
(stall_watermark << DPFC_RECOMP_STALL_WM_SHIFT) |
(interval << DPFC_RECOMP_TIMER_COUNT_SHIFT));
I915_WRITE(ILK_DPFC_FENCE_YOFF, crtc->y);
- I915_WRITE(ILK_FBC_RT_BASE, obj_priv->gtt_offset | ILK_FBC_RT_VALID);
+ I915_WRITE(ILK_FBC_RT_BASE, obj->gtt_offset | ILK_FBC_RT_VALID);
/* enable it... */
I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ if (IS_GEN6(dev)) {
+ I915_WRITE(SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | dev_priv->cfb_fence);
+ I915_WRITE(DPFC_CPU_FENCE_OFFSET, crtc->y);
+ }
+
DRM_DEBUG_KMS("enabled fbc on plane %d\n", intel_crtc->plane);
}
@@ -1345,7 +1349,7 @@ static void intel_update_fbc(struct drm_device *dev)
struct intel_crtc *intel_crtc;
struct drm_framebuffer *fb;
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
DRM_DEBUG_KMS("\n");
@@ -1384,9 +1388,9 @@ static void intel_update_fbc(struct drm_device *dev)
intel_crtc = to_intel_crtc(crtc);
fb = crtc->fb;
intel_fb = to_intel_framebuffer(fb);
- obj_priv = to_intel_bo(intel_fb->obj);
+ obj = intel_fb->obj;
- if (intel_fb->obj->size > dev_priv->cfb_size) {
+ if (intel_fb->obj->base.size > dev_priv->cfb_size) {
DRM_DEBUG_KMS("framebuffer too large, disabling "
"compression\n");
dev_priv->no_fbc_reason = FBC_STOLEN_TOO_SMALL;
@@ -1410,7 +1414,7 @@ static void intel_update_fbc(struct drm_device *dev)
dev_priv->no_fbc_reason = FBC_BAD_PLANE;
goto out_disable;
}
- if (obj_priv->tiling_mode != I915_TILING_X) {
+ if (obj->tiling_mode != I915_TILING_X) {
DRM_DEBUG_KMS("framebuffer not tiled, disabling compression\n");
dev_priv->no_fbc_reason = FBC_NOT_TILED;
goto out_disable;
@@ -1433,14 +1437,13 @@ out_disable:
int
intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj,
- bool pipelined)
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
u32 alignment;
int ret;
- switch (obj_priv->tiling_mode) {
+ switch (obj->tiling_mode) {
case I915_TILING_NONE:
if (IS_BROADWATER(dev) || IS_CRESTLINE(dev))
alignment = 128 * 1024;
@@ -1461,7 +1464,7 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
BUG();
}
- ret = i915_gem_object_pin(obj, alignment);
+ ret = i915_gem_object_pin(obj, alignment, true);
if (ret)
return ret;
@@ -1474,9 +1477,8 @@ intel_pin_and_fence_fb_obj(struct drm_device *dev,
* framebuffer compression. For simplicity, we always install
* a fence as the cost is not that onerous.
*/
- if (obj_priv->fence_reg == I915_FENCE_REG_NONE &&
- obj_priv->tiling_mode != I915_TILING_NONE) {
- ret = i915_gem_object_get_fence_reg(obj, false);
+ if (obj->tiling_mode != I915_TILING_NONE) {
+ ret = i915_gem_object_get_fence(obj, pipelined, false);
if (ret)
goto err_unpin;
}
@@ -1497,8 +1499,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int plane = intel_crtc->plane;
unsigned long Start, Offset;
u32 dspcntr;
@@ -1515,7 +1516,6 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
intel_fb = to_intel_framebuffer(fb);
obj = intel_fb->obj;
- obj_priv = to_intel_bo(obj);
reg = DSPCNTR(plane);
dspcntr = I915_READ(reg);
@@ -1540,7 +1540,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
return -EINVAL;
}
if (INTEL_INFO(dev)->gen >= 4) {
- if (obj_priv->tiling_mode != I915_TILING_NONE)
+ if (obj->tiling_mode != I915_TILING_NONE)
dspcntr |= DISPPLANE_TILED;
else
dspcntr &= ~DISPPLANE_TILED;
@@ -1552,7 +1552,7 @@ intel_pipe_set_base_atomic(struct drm_crtc *crtc, struct drm_framebuffer *fb,
I915_WRITE(reg, dspcntr);
- Start = obj_priv->gtt_offset;
+ Start = obj->gtt_offset;
Offset = y * fb->pitch + x * (fb->bits_per_pixel / 8);
DRM_DEBUG_KMS("Writing base %08lX %08lX %d %d %d\n",
@@ -1598,7 +1598,7 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
mutex_lock(&dev->struct_mutex);
ret = intel_pin_and_fence_fb_obj(dev,
to_intel_framebuffer(crtc->fb)->obj,
- false);
+ NULL);
if (ret != 0) {
mutex_unlock(&dev->struct_mutex);
return ret;
@@ -1606,18 +1606,17 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
if (old_fb) {
struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
+ struct drm_i915_gem_object *obj = to_intel_framebuffer(old_fb)->obj;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj_priv->pending_flip) == 0);
+ atomic_read(&obj->pending_flip) == 0);
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
* current scanout is retired before unpinning the old
* framebuffer.
*/
- ret = i915_gem_object_flush_gpu(obj_priv, false);
+ ret = i915_gem_object_flush_gpu(obj, false);
if (ret) {
i915_gem_object_unpin(to_intel_framebuffer(crtc->fb)->obj);
mutex_unlock(&dev->struct_mutex);
@@ -1633,8 +1632,10 @@ intel_pipe_set_base(struct drm_crtc *crtc, int x, int y,
return ret;
}
- if (old_fb)
+ if (old_fb) {
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
i915_gem_object_unpin(to_intel_framebuffer(old_fb)->obj);
+ }
mutex_unlock(&dev->struct_mutex);
@@ -1996,31 +1997,31 @@ static void intel_flush_display_plane(struct drm_device *dev,
static void intel_clear_scanline_wait(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring;
u32 tmp;
if (IS_GEN2(dev))
/* Can't break the hang on i8xx */
return;
- tmp = I915_READ(PRB0_CTL);
- if (tmp & RING_WAIT) {
- I915_WRITE(PRB0_CTL, tmp);
- POSTING_READ(PRB0_CTL);
- }
+ ring = LP_RING(dev_priv);
+ tmp = I915_READ_CTL(ring);
+ if (tmp & RING_WAIT)
+ I915_WRITE_CTL(ring, tmp);
}
static void intel_crtc_wait_for_pending_flips(struct drm_crtc *crtc)
{
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_i915_private *dev_priv;
if (crtc->fb == NULL)
return;
- obj_priv = to_intel_bo(to_intel_framebuffer(crtc->fb)->obj);
+ obj = to_intel_framebuffer(crtc->fb)->obj;
dev_priv = crtc->dev->dev_private;
wait_event(dev_priv->pending_flip_queue,
- atomic_read(&obj_priv->pending_flip) == 0);
+ atomic_read(&obj->pending_flip) == 0);
}
static void ironlake_crtc_enable(struct drm_crtc *crtc)
@@ -2120,9 +2121,11 @@ static void ironlake_crtc_enable(struct drm_crtc *crtc)
reg = TRANS_DP_CTL(pipe);
temp = I915_READ(reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
- TRANS_DP_SYNC_MASK);
+ TRANS_DP_SYNC_MASK |
+ TRANS_DP_BPC_MASK);
temp |= (TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_ENH_FRAMING);
+ temp |= TRANS_DP_8BPC;
if (crtc->mode.flags & DRM_MODE_FLAG_PHSYNC)
temp |= TRANS_DP_HSYNC_ACTIVE_HIGH;
@@ -2712,27 +2715,19 @@ fdi_reduce_ratio(u32 *num, u32 *den)
}
}
-#define DATA_N 0x800000
-#define LINK_N 0x80000
-
static void
ironlake_compute_m_n(int bits_per_pixel, int nlanes, int pixel_clock,
int link_clock, struct fdi_m_n *m_n)
{
- u64 temp;
-
m_n->tu = 64; /* default size */
- temp = (u64) DATA_N * pixel_clock;
- temp = div_u64(temp, link_clock);
- m_n->gmch_m = div_u64(temp * bits_per_pixel, nlanes);
- m_n->gmch_m >>= 3; /* convert to bytes_per_pixel */
- m_n->gmch_n = DATA_N;
+ /* BUG_ON(pixel_clock > INT_MAX / 36); */
+ m_n->gmch_m = bits_per_pixel * pixel_clock;
+ m_n->gmch_n = link_clock * nlanes * 8;
fdi_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n);
- temp = (u64) LINK_N * pixel_clock;
- m_n->link_m = div_u64(temp, link_clock);
- m_n->link_n = LINK_N;
+ m_n->link_m = pixel_clock;
+ m_n->link_n = link_clock;
fdi_reduce_ratio(&m_n->link_m, &m_n->link_n);
}
@@ -2856,6 +2851,39 @@ static struct intel_watermark_params ironlake_cursor_srwm_info = {
ILK_FIFO_LINE_SIZE
};
+static struct intel_watermark_params sandybridge_display_wm_info = {
+ SNB_DISPLAY_FIFO,
+ SNB_DISPLAY_MAXWM,
+ SNB_DISPLAY_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_wm_info = {
+ SNB_CURSOR_FIFO,
+ SNB_CURSOR_MAXWM,
+ SNB_CURSOR_DFTWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_display_srwm_info = {
+ SNB_DISPLAY_SR_FIFO,
+ SNB_DISPLAY_MAX_SRWM,
+ SNB_DISPLAY_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+static struct intel_watermark_params sandybridge_cursor_srwm_info = {
+ SNB_CURSOR_SR_FIFO,
+ SNB_CURSOR_MAX_SRWM,
+ SNB_CURSOR_DFT_SRWM,
+ 2,
+ SNB_FIFO_LINE_SIZE
+};
+
+
/**
* intel_calculate_wm - calculate watermark level
* @clock_in_khz: pixel clock
@@ -3389,6 +3417,10 @@ static void i830_update_wm(struct drm_device *dev, int planea_clock, int unused,
static bool ironlake_compute_wm0(struct drm_device *dev,
int pipe,
+ const struct intel_watermark_params *display,
+ int display_latency,
+ const struct intel_watermark_params *cursor,
+ int cursor_latency,
int *plane_wm,
int *cursor_wm)
{
@@ -3406,22 +3438,20 @@ static bool ironlake_compute_wm0(struct drm_device *dev,
pixel_size = crtc->fb->bits_per_pixel / 8;
/* Use the small buffer method to calculate plane watermark */
- entries = ((clock * pixel_size / 1000) * ILK_LP0_PLANE_LATENCY) / 1000;
- entries = DIV_ROUND_UP(entries,
- ironlake_display_wm_info.cacheline_size);
- *plane_wm = entries + ironlake_display_wm_info.guard_size;
- if (*plane_wm > (int)ironlake_display_wm_info.max_wm)
- *plane_wm = ironlake_display_wm_info.max_wm;
+ entries = ((clock * pixel_size / 1000) * display_latency * 100) / 1000;
+ entries = DIV_ROUND_UP(entries, display->cacheline_size);
+ *plane_wm = entries + display->guard_size;
+ if (*plane_wm > (int)display->max_wm)
+ *plane_wm = display->max_wm;
/* Use the large buffer method to calculate cursor watermark */
line_time_us = ((htotal * 1000) / clock);
- line_count = (ILK_LP0_CURSOR_LATENCY / line_time_us + 1000) / 1000;
+ line_count = (cursor_latency * 100 / line_time_us + 1000) / 1000;
entries = line_count * 64 * pixel_size;
- entries = DIV_ROUND_UP(entries,
- ironlake_cursor_wm_info.cacheline_size);
- *cursor_wm = entries + ironlake_cursor_wm_info.guard_size;
- if (*cursor_wm > ironlake_cursor_wm_info.max_wm)
- *cursor_wm = ironlake_cursor_wm_info.max_wm;
+ entries = DIV_ROUND_UP(entries, cursor->cacheline_size);
+ *cursor_wm = entries + cursor->guard_size;
+ if (*cursor_wm > (int)cursor->max_wm)
+ *cursor_wm = (int)cursor->max_wm;
return true;
}
@@ -3436,7 +3466,12 @@ static void ironlake_update_wm(struct drm_device *dev,
int tmp;
enabled = 0;
- if (ironlake_compute_wm0(dev, 0, &plane_wm, &cursor_wm)) {
+ if (ironlake_compute_wm0(dev, 0,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEA_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
@@ -3445,7 +3480,12 @@ static void ironlake_update_wm(struct drm_device *dev,
enabled++;
}
- if (ironlake_compute_wm0(dev, 1, &plane_wm, &cursor_wm)) {
+ if (ironlake_compute_wm0(dev, 1,
+ &ironlake_display_wm_info,
+ ILK_LP0_PLANE_LATENCY,
+ &ironlake_cursor_wm_info,
+ ILK_LP0_CURSOR_LATENCY,
+ &plane_wm, &cursor_wm)) {
I915_WRITE(WM0_PIPEB_ILK,
(plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
@@ -3459,7 +3499,7 @@ static void ironlake_update_wm(struct drm_device *dev,
* display plane is used.
*/
tmp = 0;
- if (enabled == 1 && /* XXX disabled due to buggy implmentation? */ 0) {
+ if (enabled == 1) {
unsigned long line_time_us;
int small, large, plane_fbc;
int sr_clock, entries;
@@ -3511,6 +3551,197 @@ static void ironlake_update_wm(struct drm_device *dev,
/* XXX setup WM2 and WM3 */
}
+/*
+ * Check the wm result.
+ *
+ * If any calculated watermark value is larger than the maximum value that
+ * can be programmed into the associated watermark register, that watermark
+ * must be disabled.
+ *
+ * Also returns false if all of the watermark values are 0, which
+ * sandybridge_compute_srwm sets to indicate a latency of zero.
+ */
+static bool sandybridge_check_srwm(struct drm_device *dev, int level,
+ int fbc_wm, int display_wm, int cursor_wm)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ DRM_DEBUG_KMS("watermark %d: display plane %d, fbc lines %d,"
+ " cursor %d\n", level, display_wm, fbc_wm, cursor_wm);
+
+ if (fbc_wm > SNB_FBC_MAX_SRWM) {
+ DRM_DEBUG_KMS("fbc watermark(%d) is too large(%d), disabling wm%d+\n",
+ fbc_wm, SNB_FBC_MAX_SRWM, level);
+
+ /* fbc has its own way to disable FBC WM */
+ I915_WRITE(DISP_ARB_CTL,
+ I915_READ(DISP_ARB_CTL) | DISP_FBC_WM_DIS);
+ return false;
+ }
+
+ if (display_wm > SNB_DISPLAY_MAX_SRWM) {
+ DRM_DEBUG_KMS("display watermark(%d) is too large(%d), disabling wm%d+\n",
+ display_wm, SNB_DISPLAY_MAX_SRWM, level);
+ return false;
+ }
+
+ if (cursor_wm > SNB_CURSOR_MAX_SRWM) {
+ DRM_DEBUG_KMS("cursor watermark(%d) is too large(%d), disabling wm%d+\n",
+ cursor_wm, SNB_CURSOR_MAX_SRWM, level);
+ return false;
+ }
+
+ if (!(fbc_wm || display_wm || cursor_wm)) {
+ DRM_DEBUG_KMS("latency %d is 0, disabling wm%d+\n", level, level);
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * Compute the watermark values for WM[1-3].
+ */
+static bool sandybridge_compute_srwm(struct drm_device *dev, int level,
+ int hdisplay, int htotal, int pixel_size,
+ int clock, int latency_ns, int *fbc_wm,
+ int *display_wm, int *cursor_wm)
+{
+
+ unsigned long line_time_us;
+ int small, large;
+ int entries;
+ int line_count, line_size;
+
+ if (!latency_ns) {
+ *fbc_wm = *display_wm = *cursor_wm = 0;
+ return false;
+ }
+
+ line_time_us = (htotal * 1000) / clock;
+ line_count = (latency_ns / line_time_us + 1000) / 1000;
+ line_size = hdisplay * pixel_size;
+
+ /* Use the minimum of the small and large buffer method for primary */
+ small = ((clock * pixel_size / 1000) * latency_ns) / 1000;
+ large = line_count * line_size;
+
+ entries = DIV_ROUND_UP(min(small, large),
+ sandybridge_display_srwm_info.cacheline_size);
+ *display_wm = entries + sandybridge_display_srwm_info.guard_size;
+
+ /*
+ * The spec says:
+ * FBC WM = ((Final Primary WM * 64) / number of bytes per line) + 2
+ */
+ *fbc_wm = DIV_ROUND_UP(*display_wm * 64, line_size) + 2;
+
+ /* calculate the self-refresh watermark for display cursor */
+ entries = line_count * pixel_size * 64;
+ entries = DIV_ROUND_UP(entries,
+ sandybridge_cursor_srwm_info.cacheline_size);
+ *cursor_wm = entries + sandybridge_cursor_srwm_info.guard_size;
+
+ return sandybridge_check_srwm(dev, level,
+ *fbc_wm, *display_wm, *cursor_wm);
+}
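
For reference, the self-refresh arithmetic above can be exercised in
isolation. A minimal userspace sketch with hypothetical numbers (a
1920x1080 mode at a 148500 kHz dot clock, 32bpp, 4000 ns of latency, and
an assumed cacheline size of 64 with a guard of 2; the real values come
from sandybridge_display_srwm_info and the watermark latency registers):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int hdisplay = 1920, htotal = 2200, pixel_size = 4;
	int clock = 148500, latency_ns = 4000;
	int cacheline_size = 64, guard_size = 2;

	long line_time_us = (htotal * 1000L) / clock;	/* ~14 us */
	long line_count = (latency_ns / line_time_us + 1000) / 1000;
	long line_size = hdisplay * pixel_size;	/* bytes per scanline */

	/* Minimum of the small- and large-buffer methods, as above. */
	long small = ((clock * pixel_size / 1000L) * latency_ns) / 1000;
	long large = line_count * line_size;
	long entries = DIV_ROUND_UP(small < large ? small : large,
				    cacheline_size);

	printf("display_wm = %ld\n", entries + guard_size);	/* 40 */
	return 0;
}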
+
+static void sandybridge_update_wm(struct drm_device *dev,
+ int planea_clock, int planeb_clock,
+ int hdisplay, int htotal,
+ int pixel_size)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int latency = SNB_READ_WM0_LATENCY();
+ int fbc_wm, plane_wm, cursor_wm, enabled;
+ int clock;
+
+ enabled = 0;
+ if (ironlake_compute_wm0(dev, 0,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEA_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe A -"
+ " plane %d, " "cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
+
+ if (ironlake_compute_wm0(dev, 1,
+ &sandybridge_display_wm_info, latency,
+ &sandybridge_cursor_wm_info, latency,
+ &plane_wm, &cursor_wm)) {
+ I915_WRITE(WM0_PIPEB_ILK,
+ (plane_wm << WM0_PIPE_PLANE_SHIFT) | cursor_wm);
+ DRM_DEBUG_KMS("FIFO watermarks For pipe B -"
+ " plane %d, cursor: %d\n",
+ plane_wm, cursor_wm);
+ enabled++;
+ }
+
+ /*
+ * Calculate and update the self-refresh watermark only when one
+ * display plane is used.
+ *
+ * SNB supports 3 levels of watermarks.
+ *
+ * The WM1/WM2/WM3 watermarks have to be enabled in ascending order
+ * and disabled in descending order.
+ */
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ if (enabled != 1)
+ return;
+
+ clock = planea_clock ? planea_clock : planeb_clock;
+
+ /* WM1 */
+ if (!sandybridge_compute_srwm(dev, 1, hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM1_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM1_LP_ILK,
+ WM1_LP_SR_EN |
+ (SNB_READ_WM1_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM2 */
+ if (!sandybridge_compute_srwm(dev, 2,
+ hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM2_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM2_LP_ILK,
+ WM2_LP_EN |
+ (SNB_READ_WM2_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+
+ /* WM3 */
+ if (!sandybridge_compute_srwm(dev, 3,
+ hdisplay, htotal, pixel_size,
+ clock, SNB_READ_WM3_LATENCY() * 500,
+ &fbc_wm, &plane_wm, &cursor_wm))
+ return;
+
+ I915_WRITE(WM3_LP_ILK,
+ WM3_LP_EN |
+ (SNB_READ_WM3_LATENCY() << WM1_LP_LATENCY_SHIFT) |
+ (fbc_wm << WM1_LP_FBC_SHIFT) |
+ (plane_wm << WM1_LP_SR_SHIFT) |
+ cursor_wm);
+}
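
The three WM*_LP_ILK writes above all pack the same four fields around an
enable bit. A hedged sketch of that packing, using illustrative shift and
enable values (the "_X" names are assumptions, not the i915_reg.h
definitions):

#define WM_LP_EN_X		(1u << 31)
#define WM_LP_LATENCY_SHIFT_X	24
#define WM_LP_FBC_SHIFT_X	20
#define WM_LP_SR_SHIFT_X	8

static unsigned int pack_wm_lp(unsigned int latency, unsigned int fbc_wm,
			       unsigned int plane_wm, unsigned int cursor_wm)
{
	return WM_LP_EN_X |
	       (latency << WM_LP_LATENCY_SHIFT_X) |
	       (fbc_wm << WM_LP_FBC_SHIFT_X) |
	       (plane_wm << WM_LP_SR_SHIFT_X) |
	       cursor_wm;	/* cursor watermark sits in the low bits */
}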
+
/**
* intel_update_watermarks - update FIFO watermark values based on current modes
*
@@ -3666,7 +3897,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
* refclk, or FALSE. The returned values represent the clock equation:
* refclk * (5 * (m1 + 2) + (m2 + 2)) / (n + 2) / p1 / p2.
*/
- limit = intel_limit(crtc);
+ limit = intel_limit(crtc, refclk);
ok = limit->find_pll(limit, crtc, adjusted_mode->clock, refclk, &clock);
if (!ok) {
DRM_ERROR("Couldn't find PLL settings for mode!\n");
@@ -3716,6 +3947,7 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
/* FDI link */
if (HAS_PCH_SPLIT(dev)) {
+ int pixel_multiplier = intel_mode_get_pixel_multiplier(adjusted_mode);
int lane = 0, link_bw, bpp;
/* CPU eDP doesn't require FDI link, so just set DP M/N
according to current link config */
@@ -3799,6 +4031,8 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
intel_crtc->fdi_lanes = lane;
+ if (pixel_multiplier > 1)
+ link_bw *= pixel_multiplier;
ironlake_compute_m_n(bpp, lane, target_clock, link_bw, &m_n);
}
@@ -3860,6 +4094,22 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
reduced_clock.m2;
}
+ /* Enable autotuning of the PLL clock (if permissible) */
+ if (HAS_PCH_SPLIT(dev)) {
+ int factor = 21;
+
+ if (is_lvds) {
+ if ((dev_priv->lvds_use_ssc &&
+ dev_priv->lvds_ssc_freq == 100) ||
+ (I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP)
+ factor = 25;
+ } else if (is_sdvo && is_tv)
+ factor = 20;
+
+ if (clock.m1 < factor * clock.n)
+ fp |= FP_CB_TUNE;
+ }
+
dpll = 0;
if (!HAS_PCH_SPLIT(dev))
dpll = DPLL_VGA_MODE_DIS;
@@ -4074,7 +4324,6 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
if (!has_edp_encoder || intel_encoder_is_pch_edp(&has_edp_encoder->base)) {
- I915_WRITE(fp_reg, fp);
I915_WRITE(dpll_reg, dpll);
/* Wait for the clocks to stabilize. */
@@ -4092,13 +4341,13 @@ static int intel_crtc_mode_set(struct drm_crtc *crtc,
}
I915_WRITE(DPLL_MD(pipe), temp);
} else {
- /* write it again -- the BIOS does, after all */
+ /* The pixel multiplier can only be updated once the
+ * DPLL is enabled and the clocks are stable.
+ *
+ * So write it again.
+ */
I915_WRITE(dpll_reg, dpll);
}
-
- /* Wait for the clocks to stabilize. */
- POSTING_READ(dpll_reg);
- udelay(150);
}
intel_crtc->lowfreq_avail = false;
@@ -4334,15 +4583,14 @@ static void intel_crtc_update_cursor(struct drm_crtc *crtc,
}
static int intel_crtc_cursor_set(struct drm_crtc *crtc,
- struct drm_file *file_priv,
+ struct drm_file *file,
uint32_t handle,
uint32_t width, uint32_t height)
{
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
- struct drm_gem_object *bo;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
uint32_t addr;
int ret;
@@ -4352,7 +4600,7 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
if (!handle) {
DRM_DEBUG_KMS("cursor off\n");
addr = 0;
- bo = NULL;
+ obj = NULL;
mutex_lock(&dev->struct_mutex);
goto finish;
}
@@ -4363,13 +4611,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return -EINVAL;
}
- bo = drm_gem_object_lookup(dev, file_priv, handle);
- if (!bo)
+ obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
+ if (!obj)
return -ENOENT;
- obj_priv = to_intel_bo(bo);
-
- if (bo->size < width * height * 4) {
+ if (obj->base.size < width * height * 4) {
DRM_ERROR("buffer is to small\n");
ret = -ENOMEM;
goto fail;
@@ -4378,29 +4624,41 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
/* we only need to pin inside GTT if cursor is non-phy */
mutex_lock(&dev->struct_mutex);
if (!dev_priv->info->cursor_needs_physical) {
- ret = i915_gem_object_pin(bo, PAGE_SIZE);
+ if (obj->tiling_mode) {
+ DRM_ERROR("cursor cannot be tiled\n");
+ ret = -EINVAL;
+ goto fail_locked;
+ }
+
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin cursor bo\n");
goto fail_locked;
}
- ret = i915_gem_object_set_to_gtt_domain(bo, 0);
+ ret = i915_gem_object_set_to_gtt_domain(obj, 0);
if (ret) {
DRM_ERROR("failed to move cursor bo into the GTT\n");
goto fail_unpin;
}
- addr = obj_priv->gtt_offset;
+ ret = i915_gem_object_put_fence(obj);
+ if (ret) {
+ DRM_ERROR("failed to move cursor bo into the GTT\n");
+ goto fail_unpin;
+ }
+
+ addr = obj->gtt_offset;
} else {
int align = IS_I830(dev) ? 16 * 1024 : 256;
- ret = i915_gem_attach_phys_object(dev, bo,
+ ret = i915_gem_attach_phys_object(dev, obj,
(intel_crtc->pipe == 0) ? I915_GEM_PHYS_CURSOR_0 : I915_GEM_PHYS_CURSOR_1,
align);
if (ret) {
DRM_ERROR("failed to attach phys object\n");
goto fail_locked;
}
- addr = obj_priv->phys_obj->handle->busaddr;
+ addr = obj->phys_obj->handle->busaddr;
}
if (IS_GEN2(dev))
@@ -4409,17 +4667,17 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
finish:
if (intel_crtc->cursor_bo) {
if (dev_priv->info->cursor_needs_physical) {
- if (intel_crtc->cursor_bo != bo)
+ if (intel_crtc->cursor_bo != obj)
i915_gem_detach_phys_object(dev, intel_crtc->cursor_bo);
} else
i915_gem_object_unpin(intel_crtc->cursor_bo);
- drm_gem_object_unreference(intel_crtc->cursor_bo);
+ drm_gem_object_unreference(&intel_crtc->cursor_bo->base);
}
mutex_unlock(&dev->struct_mutex);
intel_crtc->cursor_addr = addr;
- intel_crtc->cursor_bo = bo;
+ intel_crtc->cursor_bo = obj;
intel_crtc->cursor_width = width;
intel_crtc->cursor_height = height;
@@ -4427,11 +4685,11 @@ static int intel_crtc_cursor_set(struct drm_crtc *crtc,
return 0;
fail_unpin:
- i915_gem_object_unpin(bo);
+ i915_gem_object_unpin(obj);
fail_locked:
mutex_unlock(&dev->struct_mutex);
fail:
- drm_gem_object_unreference_unlocked(bo);
+ drm_gem_object_unreference_unlocked(&obj->base);
return ret;
}
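
The size check above assumes a 32bpp ARGB cursor image. As a standalone
sketch (the helper name is hypothetical):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* A w x h ARGB8888 cursor needs 4 bytes per pixel; a 64x64 cursor
 * therefore needs at least 64 * 64 * 4 = 16384 bytes.
 */
static bool cursor_bo_big_enough(size_t bo_size, uint32_t w, uint32_t h)
{
	return bo_size >= (size_t)w * h * 4;
}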
@@ -4742,8 +5000,14 @@ static void intel_gpu_idle_timer(unsigned long arg)
struct drm_device *dev = (struct drm_device *)arg;
drm_i915_private_t *dev_priv = dev->dev_private;
- dev_priv->busy = false;
+ if (!list_empty(&dev_priv->mm.active_list)) {
+ /* Still processing requests, so just re-arm the timer. */
+ mod_timer(&dev_priv->idle_timer, jiffies +
+ msecs_to_jiffies(GPU_IDLE_TIMEOUT));
+ return;
+ }
+ dev_priv->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
@@ -4754,9 +5018,17 @@ static void intel_crtc_idle_timer(unsigned long arg)
struct intel_crtc *intel_crtc = (struct intel_crtc *)arg;
struct drm_crtc *crtc = &intel_crtc->base;
drm_i915_private_t *dev_priv = crtc->dev->dev_private;
+ struct intel_framebuffer *intel_fb;
- intel_crtc->busy = false;
+ intel_fb = to_intel_framebuffer(crtc->fb);
+ if (intel_fb && intel_fb->obj->active) {
+ /* The framebuffer is still being accessed by the GPU. */
+ mod_timer(&intel_crtc->idle_timer, jiffies +
+ msecs_to_jiffies(CRTC_IDLE_TIMEOUT));
+ return;
+ }
+ intel_crtc->busy = false;
queue_work(dev_priv->wq, &dev_priv->idle_work);
}
@@ -4891,7 +5163,7 @@ static void intel_idle_update(struct work_struct *work)
* buffer), we'll also mark the display as busy, so we know to increase its
* clock frequency.
*/
-void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj)
+void intel_mark_busy(struct drm_device *dev, struct drm_i915_gem_object *obj)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
@@ -4972,8 +5244,9 @@ static void intel_unpin_work_fn(struct work_struct *__work)
mutex_lock(&work->dev->struct_mutex);
i915_gem_object_unpin(work->old_fb_obj);
- drm_gem_object_unreference(work->pending_flip_obj);
- drm_gem_object_unreference(work->old_fb_obj);
+ drm_gem_object_unreference(&work->pending_flip_obj->base);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+
mutex_unlock(&work->dev->struct_mutex);
kfree(work);
}
@@ -4984,15 +5257,17 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct drm_pending_vblank_event *e;
- struct timeval now;
+ struct timeval tnow, tvbl;
unsigned long flags;
/* Ignore early vblank irqs */
if (intel_crtc == NULL)
return;
+ do_gettimeofday(&tnow);
+
spin_lock_irqsave(&dev->event_lock, flags);
work = intel_crtc->unpin_work;
if (work == NULL || !work->pending) {
@@ -5001,26 +5276,49 @@ static void do_intel_finish_page_flip(struct drm_device *dev,
}
intel_crtc->unpin_work = NULL;
- drm_vblank_put(dev, intel_crtc->pipe);
if (work->event) {
e = work->event;
- do_gettimeofday(&now);
- e->event.sequence = drm_vblank_count(dev, intel_crtc->pipe);
- e->event.tv_sec = now.tv_sec;
- e->event.tv_usec = now.tv_usec;
+ e->event.sequence = drm_vblank_count_and_time(dev, intel_crtc->pipe, &tvbl);
+
+ /* Was this called before the vblank count and timestamps were
+ * updated for the vblank interval in which the flip completed?
+ * If so, increment the vblank count and add one video refresh
+ * duration to the returned timestamp to account for it. We
+ * assume this happened if we get called over 0.9 frame
+ * durations after the last timestamped vblank.
+ *
+ * This calculation cannot be used with vrefresh rates below
+ * 5Hz (10Hz to be on the safe side) without promoting to
+ * 64-bit integers.
+ */
+ if (10 * (timeval_to_ns(&tnow) - timeval_to_ns(&tvbl)) >
+ 9 * crtc->framedur_ns) {
+ e->event.sequence++;
+ tvbl = ns_to_timeval(timeval_to_ns(&tvbl) +
+ crtc->framedur_ns);
+ }
+
+ e->event.tv_sec = tvbl.tv_sec;
+ e->event.tv_usec = tvbl.tv_usec;
+
list_add_tail(&e->base.link,
&e->base.file_priv->event_list);
wake_up_interruptible(&e->base.file_priv->event_wait);
}
+ drm_vblank_put(dev, intel_crtc->pipe);
+
spin_unlock_irqrestore(&dev->event_lock, flags);
- obj_priv = to_intel_bo(work->old_fb_obj);
+ obj = work->old_fb_obj;
+
atomic_clear_mask(1 << intel_crtc->plane,
- &obj_priv->pending_flip.counter);
- if (atomic_read(&obj_priv->pending_flip) == 0)
+ &obj->pending_flip.counter);
+ if (atomic_read(&obj->pending_flip) == 0)
wake_up(&dev_priv->pending_flip_queue);
+
schedule_work(&work->work);
trace_i915_flip_complete(intel_crtc->plane, work->pending_flip_obj);
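
The 0.9-frame heuristic above reduces to one integer comparison. A
standalone sketch, with all times in nanoseconds (at a hypothetical 60Hz,
framedur_ns is roughly 16666667):

#include <stdbool.h>
#include <stdint.h>

static bool flip_completed_after_vblank_update(int64_t tnow_ns,
					       int64_t tvbl_ns,
					       int64_t framedur_ns)
{
	/* Integer form of (tnow - tvbl) > 0.9 * framedur, kept in 64 bits
	 * so that very low refresh rates cannot overflow the product.
	 */
	return 10 * (tnow_ns - tvbl_ns) > 9 * framedur_ns;
}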
@@ -5066,8 +5364,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_framebuffer *intel_fb;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
struct intel_unpin_work *work;
unsigned long flags, offset;
@@ -5101,13 +5398,13 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
obj = intel_fb->obj;
mutex_lock(&dev->struct_mutex);
- ret = intel_pin_and_fence_fb_obj(dev, obj, true);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, LP_RING(dev_priv));
if (ret)
goto cleanup_work;
/* Reference the objects for the scheduled work. */
- drm_gem_object_reference(work->old_fb_obj);
- drm_gem_object_reference(obj);
+ drm_gem_object_reference(&work->old_fb_obj->base);
+ drm_gem_object_reference(&obj->base);
crtc->fb = fb;
@@ -5115,22 +5412,16 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
if (ret)
goto cleanup_objs;
- /* Block clients from rendering to the new back buffer until
- * the flip occurs and the object is no longer visible.
- */
- atomic_add(1 << intel_crtc->plane,
- &to_intel_bo(work->old_fb_obj)->pending_flip);
-
- work->pending_flip_obj = obj;
- obj_priv = to_intel_bo(obj);
-
if (IS_GEN3(dev) || IS_GEN2(dev)) {
u32 flip_mask;
/* Can't queue multiple flips, so wait for the previous
* one to finish before executing the next.
*/
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret)
+ goto cleanup_objs;
+
if (intel_crtc->plane)
flip_mask = MI_WAIT_FOR_PLANE_B_FLIP;
else
@@ -5140,18 +5431,28 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
ADVANCE_LP_RING();
}
+ work->pending_flip_obj = obj;
+
work->enable_stall_check = true;
/* Offset into the new buffer for cases of shared fbs between CRTCs */
offset = crtc->y * fb->pitch + crtc->x * fb->bits_per_pixel/8;
- BEGIN_LP_RING(4);
- switch(INTEL_INFO(dev)->gen) {
+ ret = BEGIN_LP_RING(4);
+ if (ret)
+ goto cleanup_objs;
+
+ /* Block clients from rendering to the new back buffer until
+ * the flip occurs and the object is no longer visible.
+ */
+ atomic_add(1 << intel_crtc->plane, &work->old_fb_obj->pending_flip);
+
+ switch (INTEL_INFO(dev)->gen) {
case 2:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;
@@ -5159,7 +5460,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP_I915 |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset + offset);
+ OUT_RING(obj->gtt_offset + offset);
OUT_RING(MI_NOOP);
break;
@@ -5172,7 +5473,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
OUT_RING(fb->pitch);
- OUT_RING(obj_priv->gtt_offset | obj_priv->tiling_mode);
+ OUT_RING(obj->gtt_offset | obj->tiling_mode);
/* XXX Enabling the panel-fitter across page-flip is so far
* untested on non-native modes, so ignore it for now.
@@ -5186,8 +5487,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
case 6:
OUT_RING(MI_DISPLAY_FLIP |
MI_DISPLAY_FLIP_PLANE(intel_crtc->plane));
- OUT_RING(fb->pitch | obj_priv->tiling_mode);
- OUT_RING(obj_priv->gtt_offset);
+ OUT_RING(fb->pitch | obj->tiling_mode);
+ OUT_RING(obj->gtt_offset);
pf = I915_READ(pipe == 0 ? PFA_CTL_1 : PFB_CTL_1) & PF_ENABLE;
pipesrc = I915_READ(pipe == 0 ? PIPEASRC : PIPEBSRC) & 0x0fff0fff;
@@ -5203,8 +5504,8 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc,
return 0;
cleanup_objs:
- drm_gem_object_unreference(work->old_fb_obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&work->old_fb_obj->base);
+ drm_gem_object_unreference(&obj->base);
cleanup_work:
mutex_unlock(&dev->struct_mutex);
@@ -5236,6 +5537,55 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.page_flip = intel_crtc_page_flip,
};
+static void intel_sanitize_modesetting(struct drm_device *dev,
+ int pipe, int plane)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 reg, val;
+
+ if (HAS_PCH_SPLIT(dev))
+ return;
+
+ /* Who knows what state these registers were left in by the BIOS or
+ * grub?
+ *
+ * If we leave the registers in a conflicting state (e.g. with the
+ * display plane reading from the other pipe than the one we intend
+ * to use) then when we attempt to teardown the active mode, we will
+ * not disable the pipes and planes in the correct order -- leaving
+ * a plane reading from a disabled pipe and possibly leading to
+ * undefined behaviour.
+ */
+
+ reg = DSPCNTR(plane);
+ val = I915_READ(reg);
+
+ if ((val & DISPLAY_PLANE_ENABLE) == 0)
+ return;
+ if (!!(val & DISPPLANE_SEL_PIPE_MASK) == pipe)
+ return;
+
+ /* This display plane is active and attached to the other CPU pipe. */
+ pipe = !pipe;
+
+ /* Disable the plane and wait for it to stop reading from the pipe. */
+ I915_WRITE(reg, val & ~DISPLAY_PLANE_ENABLE);
+ intel_flush_display_plane(dev, plane);
+
+ if (IS_GEN2(dev))
+ intel_wait_for_vblank(dev, pipe);
+
+ if (pipe == 0 && (dev_priv->quirks & QUIRK_PIPEA_FORCE))
+ return;
+
+ /* Switch off the pipe. */
+ reg = PIPECONF(pipe);
+ val = I915_READ(reg);
+ if (val & PIPECONF_ENABLE) {
+ I915_WRITE(reg, val & ~PIPECONF_ENABLE);
+ intel_wait_for_pipe_off(dev, pipe);
+ }
+}
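
The conflict test in intel_sanitize_modesetting() boils down to: a plane
is only a problem when it is enabled and its pipe-select field points at
the other pipe. A sketch of just that predicate (the helper name is
hypothetical; the masks are the ones used above):

static bool plane_on_wrong_pipe(u32 dspcntr, int pipe)
{
	if ((dspcntr & DISPLAY_PLANE_ENABLE) == 0)
		return false;	/* a disabled plane cannot conflict */

	/* !! collapses the select bits to 0 (pipe A) or 1 (pipe B) */
	return !!(dspcntr & DISPPLANE_SEL_PIPE_MASK) != pipe;
}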
static void intel_crtc_init(struct drm_device *dev, int pipe)
{
@@ -5287,10 +5637,12 @@ static void intel_crtc_init(struct drm_device *dev, int pipe)
setup_timer(&intel_crtc->idle_timer, intel_crtc_idle_timer,
(unsigned long)intel_crtc);
+
+ intel_sanitize_modesetting(dev, intel_crtc->pipe, intel_crtc->plane);
}
int intel_get_pipe_from_crtc_id(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct drm_i915_get_pipe_from_crtc_id *pipe_from_crtc_id = data;
@@ -5336,9 +5688,14 @@ static void intel_setup_outputs(struct drm_device *dev)
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_encoder *encoder;
bool dpd_is_edp = false;
+ bool has_lvds = false;
if (IS_MOBILE(dev) && !IS_I830(dev))
- intel_lvds_init(dev);
+ has_lvds = intel_lvds_init(dev);
+ if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
+ /* disable the panel fitter on everything but LVDS */
+ I915_WRITE(PFIT_CONTROL, 0);
+ }
if (HAS_PCH_SPLIT(dev)) {
dpd_is_edp = intel_dpd_is_edp(dev);
@@ -5435,19 +5792,19 @@ static void intel_user_framebuffer_destroy(struct drm_framebuffer *fb)
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
drm_framebuffer_cleanup(fb);
- drm_gem_object_unreference_unlocked(intel_fb->obj);
+ drm_gem_object_unreference_unlocked(&intel_fb->obj->base);
kfree(intel_fb);
}
static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
- struct drm_file *file_priv,
+ struct drm_file *file,
unsigned int *handle)
{
struct intel_framebuffer *intel_fb = to_intel_framebuffer(fb);
- struct drm_gem_object *object = intel_fb->obj;
+ struct drm_i915_gem_object *obj = intel_fb->obj;
- return drm_gem_handle_create(file_priv, object, handle);
+ return drm_gem_handle_create(file, &obj->base, handle);
}
static const struct drm_framebuffer_funcs intel_fb_funcs = {
@@ -5458,12 +5815,11 @@ static const struct drm_framebuffer_funcs intel_fb_funcs = {
int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *intel_fb,
struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj)
+ struct drm_i915_gem_object *obj)
{
- struct drm_i915_gem_object *obj_priv = to_intel_bo(obj);
int ret;
- if (obj_priv->tiling_mode == I915_TILING_Y)
+ if (obj->tiling_mode == I915_TILING_Y)
return -EINVAL;
if (mode_cmd->pitch & 63)
@@ -5495,11 +5851,11 @@ intel_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
struct drm_mode_fb_cmd *mode_cmd)
{
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
struct intel_framebuffer *intel_fb;
int ret;
- obj = drm_gem_object_lookup(dev, filp, mode_cmd->handle);
+ obj = to_intel_bo(drm_gem_object_lookup(dev, filp, mode_cmd->handle));
if (!obj)
return ERR_PTR(-ENOENT);
@@ -5507,10 +5863,9 @@ intel_user_framebuffer_create(struct drm_device *dev,
if (!intel_fb)
return ERR_PTR(-ENOMEM);
- ret = intel_framebuffer_init(dev, intel_fb,
- mode_cmd, obj);
+ ret = intel_framebuffer_init(dev, intel_fb, mode_cmd, obj);
if (ret) {
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(&obj->base);
kfree(intel_fb);
return ERR_PTR(ret);
}
@@ -5523,10 +5878,10 @@ static const struct drm_mode_config_funcs intel_mode_funcs = {
.output_poll_changed = intel_fb_output_poll_changed,
};
-static struct drm_gem_object *
+static struct drm_i915_gem_object *
intel_alloc_context_page(struct drm_device *dev)
{
- struct drm_gem_object *ctx;
+ struct drm_i915_gem_object *ctx;
int ret;
ctx = i915_gem_alloc_object(dev, 4096);
@@ -5536,7 +5891,7 @@ intel_alloc_context_page(struct drm_device *dev)
}
mutex_lock(&dev->struct_mutex);
- ret = i915_gem_object_pin(ctx, 4096);
+ ret = i915_gem_object_pin(ctx, 4096, true);
if (ret) {
DRM_ERROR("failed to pin power context: %d\n", ret);
goto err_unref;
@@ -5554,7 +5909,7 @@ intel_alloc_context_page(struct drm_device *dev)
err_unpin:
i915_gem_object_unpin(ctx);
err_unref:
- drm_gem_object_unreference(ctx);
+ drm_gem_object_unreference(&ctx->base);
mutex_unlock(&dev->struct_mutex);
return NULL;
}
@@ -5666,6 +6021,25 @@ void ironlake_disable_drps(struct drm_device *dev)
}
+void gen6_set_rps(struct drm_device *dev, u8 val)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 swreq;
+
+ swreq = (val & 0x3ff) << 25;
+ I915_WRITE(GEN6_RPNSWREQ, swreq);
+}
+
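
As a usage sketch of gen6_set_rps() above (the 50 MHz-per-unit step is an
assumption for illustration, not something this patch defines):

	gen6_set_rps(dev, 10);	/* writes (10 & 0x3ff) << 25 to GEN6_RPNSWREQ,
				 * i.e. requests a ratio of 10 (~500 MHz) */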
+void gen6_disable_rps(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ I915_WRITE(GEN6_RPNSWREQ, 1 << 31);
+ I915_WRITE(GEN6_PMINTRMSK, 0xffffffff);
+ I915_WRITE(GEN6_PMIER, 0);
+ I915_WRITE(GEN6_PMIIR, I915_READ(GEN6_PMIIR));
+}
+
static unsigned long intel_pxfreq(u32 vidfreq)
{
unsigned long freq;
@@ -5752,7 +6126,96 @@ void intel_init_emon(struct drm_device *dev)
dev_priv->corr = (lcfuse & LCFUSE_HIV_MASK);
}
-void intel_init_clock_gating(struct drm_device *dev)
+void gen6_enable_rps(struct drm_i915_private *dev_priv)
+{
+ int i;
+
+ /* Here begins a magic sequence of register writes to enable
+ * auto-downclocking.
+ *
+ * Perhaps there might be some value in exposing these to
+ * userspace...
+ */
+ I915_WRITE(GEN6_RC_STATE, 0);
+ __gen6_force_wake_get(dev_priv);
+
+ /* disable the counters and set deterministic thresholds */
+ I915_WRITE(GEN6_RC_CONTROL, 0);
+
+ I915_WRITE(GEN6_RC1_WAKE_RATE_LIMIT, 1000 << 16);
+ I915_WRITE(GEN6_RC6_WAKE_RATE_LIMIT, 40 << 16 | 30);
+ I915_WRITE(GEN6_RC6pp_WAKE_RATE_LIMIT, 30);
+ I915_WRITE(GEN6_RC_EVALUATION_INTERVAL, 125000);
+ I915_WRITE(GEN6_RC_IDLE_HYSTERSIS, 25);
+
+ for (i = 0; i < I915_NUM_RINGS; i++)
+ I915_WRITE(RING_MAX_IDLE(dev_priv->ring[i].mmio_base), 10);
+
+ I915_WRITE(GEN6_RC_SLEEP, 0);
+ I915_WRITE(GEN6_RC1e_THRESHOLD, 1000);
+ I915_WRITE(GEN6_RC6_THRESHOLD, 50000);
+ I915_WRITE(GEN6_RC6p_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RC6pp_THRESHOLD, 64000); /* unused */
+
+ I915_WRITE(GEN6_RC_CONTROL,
+ GEN6_RC_CTL_RC6p_ENABLE |
+ GEN6_RC_CTL_RC6_ENABLE |
+ GEN6_RC_CTL_EI_MODE(1) |
+ GEN6_RC_CTL_HW_ENABLE);
+
+ I915_WRITE(GEN6_RPNSWREQ,
+ GEN6_FREQUENCY(10) |
+ GEN6_OFFSET(0) |
+ GEN6_AGGRESSIVE_TURBO);
+ I915_WRITE(GEN6_RC_VIDEO_FREQ,
+ GEN6_FREQUENCY(12));
+
+ I915_WRITE(GEN6_RP_DOWN_TIMEOUT, 1000000);
+ I915_WRITE(GEN6_RP_INTERRUPT_LIMITS,
+ 18 << 24 |
+ 6 << 16);
+ I915_WRITE(GEN6_RP_UP_THRESHOLD, 90000);
+ I915_WRITE(GEN6_RP_DOWN_THRESHOLD, 100000);
+ I915_WRITE(GEN6_RP_UP_EI, 100000);
+ I915_WRITE(GEN6_RP_DOWN_EI, 300000);
+ I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
+ I915_WRITE(GEN6_RP_CONTROL,
+ GEN6_RP_MEDIA_TURBO |
+ GEN6_RP_USE_NORMAL_FREQ |
+ GEN6_RP_MEDIA_IS_GFX |
+ GEN6_RP_ENABLE |
+ GEN6_RP_UP_BUSY_MAX |
+ GEN6_RP_DOWN_BUSY_MIN);
+
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to become idle\n");
+
+ I915_WRITE(GEN6_PCODE_DATA, 0);
+ I915_WRITE(GEN6_PCODE_MAILBOX,
+ GEN6_PCODE_READY |
+ GEN6_PCODE_WRITE_MIN_FREQ_TABLE);
+ if (wait_for((I915_READ(GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY) == 0,
+ 500))
+ DRM_ERROR("timeout waiting for pcode mailbox to finish\n");
+
+ /* requires MSI enabled */
+ I915_WRITE(GEN6_PMIER,
+ GEN6_PM_MBOX_EVENT |
+ GEN6_PM_THERMAL_EVENT |
+ GEN6_PM_RP_DOWN_TIMEOUT |
+ GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_UP_EI_EXPIRED |
+ GEN6_PM_RP_DOWN_EI_EXPIRED);
+ I915_WRITE(GEN6_PMIMR, 0);
+ /* enable all PM interrupts */
+ I915_WRITE(GEN6_PMINTRMSK, 0);
+
+ __gen6_force_wake_put(dev_priv);
+}
+
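
Both wait_for() sequences in gen6_enable_rps() follow the same pcode
mailbox handshake: wait for READY to clear, post the data plus a command
with READY set, then wait for the firmware to clear READY again. A hedged
sketch of that protocol (wait_for_pcode_idle() is a hypothetical stand-in
for the wait_for() polling above):

static int gen6_pcode_write(struct drm_i915_private *dev_priv,
			    u32 mbox, u32 val)
{
	if (wait_for_pcode_idle(dev_priv))	/* mailbox busy? */
		return -ETIMEDOUT;

	I915_WRITE(GEN6_PCODE_DATA, val);
	I915_WRITE(GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	/* The firmware acknowledges by clearing the READY bit. */
	return wait_for_pcode_idle(dev_priv) ? -ETIMEDOUT : 0;
}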
+void intel_enable_clock_gating(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -5800,9 +6263,9 @@ void intel_init_clock_gating(struct drm_device *dev)
I915_WRITE(DISP_ARB_CTL,
(I915_READ(DISP_ARB_CTL) |
DISP_FBC_WM_DIS));
- I915_WRITE(WM3_LP_ILK, 0);
- I915_WRITE(WM2_LP_ILK, 0);
- I915_WRITE(WM1_LP_ILK, 0);
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
}
/*
* Based on the document from hardware guys the following bits
@@ -5824,7 +6287,49 @@ void intel_init_clock_gating(struct drm_device *dev)
ILK_DPFC_DIS2 |
ILK_CLK_FBC);
}
- return;
+
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_ELPIN_409_SELECT);
+
+ if (IS_GEN5(dev)) {
+ I915_WRITE(_3D_CHICKEN2,
+ _3D_CHICKEN2_WM_READ_PIPELINED << 16 |
+ _3D_CHICKEN2_WM_READ_PIPELINED);
+ }
+
+ if (IS_GEN6(dev)) {
+ I915_WRITE(WM3_LP_ILK, 0);
+ I915_WRITE(WM2_LP_ILK, 0);
+ I915_WRITE(WM1_LP_ILK, 0);
+
+ /*
+ * According to the spec the following bits should be
+ * set in order to enable memory self-refresh and fbc:
+ * bits 21 and 22 of 0x42000
+ * bits 21 and 22 of 0x42004
+ * bits 5 and 7 of 0x42020
+ * bit 14 of 0x70180
+ * bit 14 of 0x71180
+ */
+ I915_WRITE(ILK_DISPLAY_CHICKEN1,
+ I915_READ(ILK_DISPLAY_CHICKEN1) |
+ ILK_FBCQ_DIS | ILK_PABSTRETCH_DIS);
+ I915_WRITE(ILK_DISPLAY_CHICKEN2,
+ I915_READ(ILK_DISPLAY_CHICKEN2) |
+ ILK_DPARB_GATE | ILK_VSDPFD_FULL);
+ I915_WRITE(ILK_DSPCLK_GATE,
+ I915_READ(ILK_DSPCLK_GATE) |
+ ILK_DPARB_CLK_GATE |
+ ILK_DPFD_CLK_GATE);
+
+ I915_WRITE(DSPACNTR,
+ I915_READ(DSPACNTR) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ I915_WRITE(DSPBCNTR,
+ I915_READ(DSPBCNTR) |
+ DISPPLANE_TRICKLE_FEED_DISABLE);
+ }
} else if (IS_G4X(dev)) {
uint32_t dspclk_gate;
I915_WRITE(RENCLK_GATE_D1, 0);
@@ -5867,20 +6372,18 @@ void intel_init_clock_gating(struct drm_device *dev)
* GPU can automatically power down the render unit if given a page
* to save state.
*/
- if (IS_IRONLAKE_M(dev)) {
+ if (IS_IRONLAKE_M(dev) && 0) { /* XXX causes a failure during suspend */
if (dev_priv->renderctx == NULL)
dev_priv->renderctx = intel_alloc_context_page(dev);
if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(dev_priv->renderctx);
- if (obj_priv) {
- BEGIN_LP_RING(4);
+ struct drm_i915_gem_object *obj = dev_priv->renderctx;
+ if (BEGIN_LP_RING(4) == 0) {
OUT_RING(MI_SET_CONTEXT);
- OUT_RING(obj_priv->gtt_offset |
- MI_MM_SPACE_GTT |
- MI_SAVE_EXT_STATE_EN |
- MI_RESTORE_EXT_STATE_EN |
- MI_RESTORE_INHIBIT);
+ OUT_RING(obj->gtt_offset |
+ MI_MM_SPACE_GTT |
+ MI_SAVE_EXT_STATE_EN |
+ MI_RESTORE_EXT_STATE_EN |
+ MI_RESTORE_INHIBIT);
OUT_RING(MI_NOOP);
OUT_RING(MI_FLUSH);
ADVANCE_LP_RING();
@@ -5890,29 +6393,45 @@ void intel_init_clock_gating(struct drm_device *dev)
"Disable RC6\n");
}
- if (I915_HAS_RC6(dev) && drm_core_check_feature(dev, DRIVER_MODESET)) {
- struct drm_i915_gem_object *obj_priv = NULL;
-
+ if (IS_GEN4(dev) && IS_MOBILE(dev)) {
+ if (dev_priv->pwrctx == NULL)
+ dev_priv->pwrctx = intel_alloc_context_page(dev);
if (dev_priv->pwrctx) {
- obj_priv = to_intel_bo(dev_priv->pwrctx);
- } else {
- struct drm_gem_object *pwrctx;
-
- pwrctx = intel_alloc_context_page(dev);
- if (pwrctx) {
- dev_priv->pwrctx = pwrctx;
- obj_priv = to_intel_bo(pwrctx);
- }
- }
-
- if (obj_priv) {
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset | PWRCTX_EN);
+ struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+ I915_WRITE(PWRCTXA, obj->gtt_offset | PWRCTX_EN);
I915_WRITE(MCHBAR_RENDER_STANDBY,
I915_READ(MCHBAR_RENDER_STANDBY) & ~RCX_SW_EXIT);
}
}
}
+void intel_disable_clock_gating(struct drm_device *dev)
+{
+ struct drm_i915_private *dev_priv = dev->dev_private;
+
+ if (dev_priv->renderctx) {
+ struct drm_i915_gem_object *obj = dev_priv->renderctx;
+
+ I915_WRITE(CCID, 0);
+ POSTING_READ(CCID);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ dev_priv->renderctx = NULL;
+ }
+
+ if (dev_priv->pwrctx) {
+ struct drm_i915_gem_object *obj = dev_priv->pwrctx;
+
+ I915_WRITE(PWRCTXA, 0);
+ POSTING_READ(PWRCTXA);
+
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+ dev_priv->pwrctx = NULL;
+ }
+}
+
/* Set up chip specific display functions */
static void intel_init_display(struct drm_device *dev)
{
@@ -5925,7 +6444,7 @@ static void intel_init_display(struct drm_device *dev)
dev_priv->display.dpms = i9xx_crtc_dpms;
if (I915_HAS_FBC(dev)) {
- if (IS_IRONLAKE_M(dev)) {
+ if (HAS_PCH_SPLIT(dev)) {
dev_priv->display.fbc_enabled = ironlake_fbc_enabled;
dev_priv->display.enable_fbc = ironlake_enable_fbc;
dev_priv->display.disable_fbc = ironlake_disable_fbc;
@@ -5974,6 +6493,14 @@ static void intel_init_display(struct drm_device *dev)
"Disable CxSR\n");
dev_priv->display.update_wm = NULL;
}
+ } else if (IS_GEN6(dev)) {
+ if (SNB_READ_WM0_LATENCY()) {
+ dev_priv->display.update_wm = sandybridge_update_wm;
+ } else {
+ DRM_DEBUG_KMS("Failed to read display plane latency. "
+ "Disable CxSR\n");
+ dev_priv->display.update_wm = NULL;
+ }
} else
dev_priv->display.update_wm = NULL;
} else if (IS_PINEVIEW(dev)) {
@@ -6139,7 +6666,7 @@ void intel_modeset_init(struct drm_device *dev)
intel_setup_outputs(dev);
- intel_init_clock_gating(dev);
+ intel_enable_clock_gating(dev);
/* Just disable it once at startup */
i915_disable_vga(dev);
@@ -6149,6 +6676,9 @@ void intel_modeset_init(struct drm_device *dev)
intel_init_emon(dev);
}
+ if (IS_GEN6(dev))
+ gen6_enable_rps(dev_priv);
+
INIT_WORK(&dev_priv->idle_work, intel_idle_update);
setup_timer(&dev_priv->idle_timer, intel_gpu_idle_timer,
(unsigned long)dev);
@@ -6180,28 +6710,12 @@ void intel_modeset_cleanup(struct drm_device *dev)
if (dev_priv->display.disable_fbc)
dev_priv->display.disable_fbc(dev);
- if (dev_priv->renderctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->renderctx);
- I915_WRITE(CCID, obj_priv->gtt_offset &~ CCID_EN);
- I915_READ(CCID);
- i915_gem_object_unpin(dev_priv->renderctx);
- drm_gem_object_unreference(dev_priv->renderctx);
- }
-
- if (dev_priv->pwrctx) {
- struct drm_i915_gem_object *obj_priv;
-
- obj_priv = to_intel_bo(dev_priv->pwrctx);
- I915_WRITE(PWRCTXA, obj_priv->gtt_offset &~ PWRCTX_EN);
- I915_READ(PWRCTXA);
- i915_gem_object_unpin(dev_priv->pwrctx);
- drm_gem_object_unreference(dev_priv->pwrctx);
- }
-
if (IS_IRONLAKE_M(dev))
ironlake_disable_drps(dev);
+ if (IS_GEN6(dev))
+ gen6_disable_rps(dev);
+
+ intel_disable_clock_gating(dev);
mutex_unlock(&dev->struct_mutex);
@@ -6253,3 +6767,113 @@ int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
return 0;
}
+
+#ifdef CONFIG_DEBUG_FS
+#include <linux/seq_file.h>
+
+struct intel_display_error_state {
+ struct intel_cursor_error_state {
+ u32 control;
+ u32 position;
+ u32 base;
+ u32 size;
+ } cursor[2];
+
+ struct intel_pipe_error_state {
+ u32 conf;
+ u32 source;
+
+ u32 htotal;
+ u32 hblank;
+ u32 hsync;
+ u32 vtotal;
+ u32 vblank;
+ u32 vsync;
+ } pipe[2];
+
+ struct intel_plane_error_state {
+ u32 control;
+ u32 stride;
+ u32 size;
+ u32 pos;
+ u32 addr;
+ u32 surface;
+ u32 tile_offset;
+ } plane[2];
+};
+
+struct intel_display_error_state *
+intel_display_capture_error_state(struct drm_device *dev)
+{
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_display_error_state *error;
+ int i;
+
+ error = kmalloc(sizeof(*error), GFP_ATOMIC);
+ if (error == NULL)
+ return NULL;
+
+ for (i = 0; i < 2; i++) {
+ error->cursor[i].control = I915_READ(CURCNTR(i));
+ error->cursor[i].position = I915_READ(CURPOS(i));
+ error->cursor[i].base = I915_READ(CURBASE(i));
+
+ error->plane[i].control = I915_READ(DSPCNTR(i));
+ error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ error->plane[i].size = I915_READ(DSPSIZE(i));
+ error->plane[i].pos = I915_READ(DSPPOS(i));
+ error->plane[i].addr = I915_READ(DSPADDR(i));
+ if (INTEL_INFO(dev)->gen >= 4) {
+ error->plane[i].surface = I915_READ(DSPSURF(i));
+ error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ }
+
+ error->pipe[i].conf = I915_READ(PIPECONF(i));
+ error->pipe[i].source = I915_READ(PIPESRC(i));
+ error->pipe[i].htotal = I915_READ(HTOTAL(i));
+ error->pipe[i].hblank = I915_READ(HBLANK(i));
+ error->pipe[i].hsync = I915_READ(HSYNC(i));
+ error->pipe[i].vtotal = I915_READ(VTOTAL(i));
+ error->pipe[i].vblank = I915_READ(VBLANK(i));
+ error->pipe[i].vsync = I915_READ(VSYNC(i));
+ }
+
+ return error;
+}
+
+void
+intel_display_print_error_state(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_display_error_state *error)
+{
+ int i;
+
+ for (i = 0; i < 2; i++) {
+ seq_printf(m, "Pipe [%d]:\n", i);
+ seq_printf(m, " CONF: %08x\n", error->pipe[i].conf);
+ seq_printf(m, " SRC: %08x\n", error->pipe[i].source);
+ seq_printf(m, " HTOTAL: %08x\n", error->pipe[i].htotal);
+ seq_printf(m, " HBLANK: %08x\n", error->pipe[i].hblank);
+ seq_printf(m, " HSYNC: %08x\n", error->pipe[i].hsync);
+ seq_printf(m, " VTOTAL: %08x\n", error->pipe[i].vtotal);
+ seq_printf(m, " VBLANK: %08x\n", error->pipe[i].vblank);
+ seq_printf(m, " VSYNC: %08x\n", error->pipe[i].vsync);
+
+ seq_printf(m, "Plane [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->plane[i].control);
+ seq_printf(m, " STRIDE: %08x\n", error->plane[i].stride);
+ seq_printf(m, " SIZE: %08x\n", error->plane[i].size);
+ seq_printf(m, " POS: %08x\n", error->plane[i].pos);
+ seq_printf(m, " ADDR: %08x\n", error->plane[i].addr);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ seq_printf(m, " SURF: %08x\n", error->plane[i].surface);
+ seq_printf(m, " TILEOFF: %08x\n", error->plane[i].tile_offset);
+ }
+
+ seq_printf(m, "Cursor [%d]:\n", i);
+ seq_printf(m, " CNTR: %08x\n", error->cursor[i].control);
+ seq_printf(m, " POS: %08x\n", error->cursor[i].position);
+ seq_printf(m, " BASE: %08x\n", error->cursor[i].base);
+ }
+}
+#endif
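
A sketch of how a debugfs consumer might pair the capture/print functions
above; this caller is hypothetical, not part of the patch:

static int i915_display_error_show(struct seq_file *m, void *unused)
{
	struct drm_device *dev = m->private;
	struct intel_display_error_state *error;

	error = intel_display_capture_error_state(dev);
	if (error == NULL)
		return -ENOMEM;

	intel_display_print_error_state(m, dev, error);
	kfree(error);	/* capture kmalloc()s the state; caller frees it */
	return 0;
}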
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index c8e00555331..1dc60408d5b 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -479,6 +479,7 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
uint16_t address = algo_data->address;
uint8_t msg[5];
uint8_t reply[2];
+ unsigned retry;
int msg_bytes;
int reply_bytes;
int ret;
@@ -513,14 +514,33 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
break;
}
- for (;;) {
- ret = intel_dp_aux_ch(intel_dp,
- msg, msg_bytes,
- reply, reply_bytes);
+ for (retry = 0; retry < 5; retry++) {
+ ret = intel_dp_aux_ch(intel_dp,
+ msg, msg_bytes,
+ reply, reply_bytes);
if (ret < 0) {
DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
return ret;
}
+
+ switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
+ case AUX_NATIVE_REPLY_ACK:
+ /* I2C-over-AUX Reply field is only valid
+ * when paired with AUX ACK.
+ */
+ break;
+ case AUX_NATIVE_REPLY_NACK:
+ DRM_DEBUG_KMS("aux_ch native nack\n");
+ return -EREMOTEIO;
+ case AUX_NATIVE_REPLY_DEFER:
+ udelay(100);
+ continue;
+ default:
+ DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
+ reply[0]);
+ return -EREMOTEIO;
+ }
+
switch (reply[0] & AUX_I2C_REPLY_MASK) {
case AUX_I2C_REPLY_ACK:
if (mode == MODE_I2C_READ) {
@@ -528,17 +548,20 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
}
return reply_bytes - 1;
case AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("aux_ch nack\n");
+ DRM_DEBUG_KMS("aux_i2c nack\n");
return -EREMOTEIO;
case AUX_I2C_REPLY_DEFER:
- DRM_DEBUG_KMS("aux_ch defer\n");
+ DRM_DEBUG_KMS("aux_i2c defer\n");
udelay(100);
break;
default:
- DRM_ERROR("aux_ch invalid reply 0x%02x\n", reply[0]);
+ DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
return -EREMOTEIO;
}
}
+
+ DRM_ERROR("too many retries, giving up\n");
+ return -EREMOTEIO;
}
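
The retry loop above layers two reply checks: the native AUX reply is
inspected first, and only a native ACK makes the embedded I2C-over-AUX
reply field meaningful. A condensed sketch of that classification (the
enum and helper are hypothetical; the masks are the ones used above):

enum aux_step { AUX_DONE, AUX_RETRY, AUX_FAIL };

static enum aux_step classify_reply(uint8_t reply)
{
	/* Native AUX layer first: NACK is fatal, DEFER means retry. */
	switch (reply & AUX_NATIVE_REPLY_MASK) {
	case AUX_NATIVE_REPLY_ACK:	break;
	case AUX_NATIVE_REPLY_DEFER:	return AUX_RETRY;
	default:			return AUX_FAIL;
	}

	/* Only now is the I2C reply field valid. */
	switch (reply & AUX_I2C_REPLY_MASK) {
	case AUX_I2C_REPLY_ACK:		return AUX_DONE;
	case AUX_I2C_REPLY_DEFER:	return AUX_RETRY;
	default:			return AUX_FAIL;
	}
}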
static int
@@ -584,17 +607,6 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
mode->clock = dev_priv->panel_fixed_mode->clock;
}
- /* Just use VBT values for eDP */
- if (is_edp(intel_dp)) {
- intel_dp->lane_count = dev_priv->edp.lanes;
- intel_dp->link_bw = dev_priv->edp.rate;
- adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
- DRM_DEBUG_KMS("eDP link bw %02x lane count %d clock %d\n",
- intel_dp->link_bw, intel_dp->lane_count,
- adjusted_mode->clock);
- return true;
- }
-
for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) {
for (clock = 0; clock <= max_clock; clock++) {
int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count);
@@ -613,6 +625,19 @@ intel_dp_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode,
}
}
+ if (is_edp(intel_dp)) {
+ /* okay, we failed; just pick the highest */
+ intel_dp->lane_count = max_lane_count;
+ intel_dp->link_bw = bws[max_clock];
+ adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw);
+ DRM_DEBUG_KMS("Force picking display port link bw %02x lane "
+ "count %d clock %d\n",
+ intel_dp->link_bw, intel_dp->lane_count,
+ adjusted_mode->clock);
+
+ return true;
+ }
+
return false;
}
@@ -1087,21 +1112,11 @@ intel_get_adjust_train(struct intel_dp *intel_dp)
}
static uint32_t
-intel_dp_signal_levels(struct intel_dp *intel_dp)
+intel_dp_signal_levels(uint8_t train_set, int lane_count)
{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- uint32_t signal_levels = 0;
- u8 train_set = intel_dp->train_set[0];
- u32 vswing = train_set & DP_TRAIN_VOLTAGE_SWING_MASK;
- u32 preemphasis = train_set & DP_TRAIN_PRE_EMPHASIS_MASK;
+ uint32_t signal_levels = 0;
- if (is_edp(intel_dp)) {
- vswing = dev_priv->edp.vswing;
- preemphasis = dev_priv->edp.preemphasis;
- }
-
- switch (vswing) {
+ switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
case DP_TRAIN_VOLTAGE_SWING_400:
default:
signal_levels |= DP_VOLTAGE_0_4;
@@ -1116,7 +1131,7 @@ intel_dp_signal_levels(struct intel_dp *intel_dp)
signal_levels |= DP_VOLTAGE_1_2;
break;
}
- switch (preemphasis) {
+ switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
case DP_TRAIN_PRE_EMPHASIS_0:
default:
signal_levels |= DP_PRE_EMPHASIS_0;
@@ -1203,18 +1218,6 @@ intel_channel_eq_ok(struct intel_dp *intel_dp)
}
static bool
-intel_dp_aux_handshake_required(struct intel_dp *intel_dp)
-{
- struct drm_device *dev = intel_dp->base.base.dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
-
- if (is_edp(intel_dp) && dev_priv->no_aux_handshake)
- return false;
-
- return true;
-}
-
-static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
uint32_t dp_reg_value,
uint8_t dp_train_pat)
@@ -1226,9 +1229,6 @@ intel_dp_set_link_train(struct intel_dp *intel_dp,
I915_WRITE(intel_dp->output_reg, dp_reg_value);
POSTING_READ(intel_dp->output_reg);
- if (!intel_dp_aux_handshake_required(intel_dp))
- return true;
-
intel_dp_aux_native_write_1(intel_dp,
DP_TRAINING_PATTERN_SET,
dp_train_pat);
@@ -1261,11 +1261,10 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
POSTING_READ(intel_dp->output_reg);
intel_wait_for_vblank(dev, intel_crtc->pipe);
- if (intel_dp_aux_handshake_required(intel_dp))
- /* Write the link configuration data */
- intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
- intel_dp->link_configuration,
- DP_LINK_CONFIGURATION_SIZE);
+ /* Write the link configuration data */
+ intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
+ intel_dp->link_configuration,
+ DP_LINK_CONFIGURATION_SIZE);
DP |= DP_PORT_EN;
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
@@ -1283,7 +1282,7 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1297,37 +1296,33 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
break;
/* Set training pattern 1 */
- udelay(500);
- if (intel_dp_aux_handshake_required(intel_dp)) {
+ udelay(100);
+ if (!intel_dp_get_link_status(intel_dp))
break;
- } else {
- if (!intel_dp_get_link_status(intel_dp))
- break;
- if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
- clock_recovery = true;
- break;
- }
+ if (intel_clock_recovery_ok(intel_dp->link_status, intel_dp->lane_count)) {
+ clock_recovery = true;
+ break;
+ }
- /* Check to see if we've tried the max voltage */
- for (i = 0; i < intel_dp->lane_count; i++)
- if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
- break;
- if (i == intel_dp->lane_count)
+ /* Check to see if we've tried the max voltage */
+ for (i = 0; i < intel_dp->lane_count; i++)
+ if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
break;
+ if (i == intel_dp->lane_count)
+ break;
- /* Check to see if we've tried the same voltage 5 times */
- if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
- ++tries;
- if (tries == 5)
- break;
- } else
- tries = 0;
- voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
+ /* Check to see if we've tried the same voltage 5 times */
+ if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
+ ++tries;
+ if (tries == 5)
+ break;
+ } else
+ tries = 0;
+ voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
- }
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
}
intel_dp->DP = DP;
@@ -1354,7 +1349,7 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
} else {
- signal_levels = intel_dp_signal_levels(intel_dp);
+ signal_levels = intel_dp_signal_levels(intel_dp->train_set[0], intel_dp->lane_count);
DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
}
@@ -1368,28 +1363,24 @@ intel_dp_complete_link_train(struct intel_dp *intel_dp)
DP_TRAINING_PATTERN_2))
break;
- udelay(500);
-
- if (!intel_dp_aux_handshake_required(intel_dp)) {
+ udelay(400);
+ if (!intel_dp_get_link_status(intel_dp))
break;
- } else {
- if (!intel_dp_get_link_status(intel_dp))
- break;
- if (intel_channel_eq_ok(intel_dp)) {
- channel_eq = true;
- break;
- }
+ if (intel_channel_eq_ok(intel_dp)) {
+ channel_eq = true;
+ break;
+ }
- /* Try 5 times */
- if (tries > 5)
- break;
+ /* Try 5 times */
+ if (tries > 5)
+ break;
- /* Compute new intel_dp->train_set as requested by target */
- intel_get_adjust_train(intel_dp);
- ++tries;
- }
+ /* Compute new intel_dp->train_set as requested by target */
+ intel_get_adjust_train(intel_dp);
+ ++tries;
}
+
if (HAS_PCH_CPT(dev) && !is_edp(intel_dp))
reg = DP | DP_LINK_TRAIN_OFF_CPT;
else
@@ -1408,6 +1399,9 @@ intel_dp_link_down(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dev->dev_private;
uint32_t DP = intel_dp->DP;
+ if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
+ return;
+
DRM_DEBUG_KMS("\n");
if (is_edp(intel_dp)) {
@@ -1430,6 +1424,27 @@ intel_dp_link_down(struct intel_dp *intel_dp)
if (is_edp(intel_dp))
DP |= DP_LINK_TRAIN_OFF;
+
+ if (!HAS_PCH_CPT(dev) &&
+ I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
+ struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
+ /* Hardware workaround: leaving our transcoder select
+ * set to transcoder B while it's off will prevent the
+ * corresponding HDMI output on transcoder A.
+ *
+ * Combine this with another hardware workaround:
+ * the transcoder select bit can only be cleared while the
+ * port is enabled.
+ */
+ DP &= ~DP_PIPEB_SELECT;
+ I915_WRITE(intel_dp->output_reg, DP);
+
+ /* Changes to the enable and select bits take effect on
+ * the vblank after they are written.
+ */
+ intel_wait_for_vblank(dev, intel_crtc->pipe);
+ }
+
I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
POSTING_READ(intel_dp->output_reg);
}
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 21551fe7454..d782ad9fd6d 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -127,7 +127,7 @@ intel_mode_get_pixel_multiplier(const struct drm_display_mode *mode)
struct intel_framebuffer {
struct drm_framebuffer base;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
struct intel_fbdev {
@@ -166,7 +166,7 @@ struct intel_crtc {
struct intel_unpin_work *unpin_work;
int fdi_lanes;
- struct drm_gem_object *cursor_bo;
+ struct drm_i915_gem_object *cursor_bo;
uint32_t cursor_addr;
int16_t cursor_x, cursor_y;
int16_t cursor_width, cursor_height;
@@ -220,8 +220,8 @@ intel_get_crtc_for_pipe(struct drm_device *dev, int pipe)
struct intel_unpin_work {
struct work_struct work;
struct drm_device *dev;
- struct drm_gem_object *old_fb_obj;
- struct drm_gem_object *pending_flip_obj;
+ struct drm_i915_gem_object *old_fb_obj;
+ struct drm_i915_gem_object *pending_flip_obj;
struct drm_pending_vblank_event *event;
int pending;
bool enable_stall_check;
@@ -236,8 +236,9 @@ void intel_dip_infoframe_csum(struct dip_infoframe *avi_if);
extern bool intel_sdvo_init(struct drm_device *dev, int output_device);
extern void intel_dvo_init(struct drm_device *dev);
extern void intel_tv_init(struct drm_device *dev);
-extern void intel_mark_busy(struct drm_device *dev, struct drm_gem_object *obj);
-extern void intel_lvds_init(struct drm_device *dev);
+extern void intel_mark_busy(struct drm_device *dev,
+ struct drm_i915_gem_object *obj);
+extern bool intel_lvds_init(struct drm_device *dev);
extern void intel_dp_init(struct drm_device *dev, int dp_reg);
void
intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode,
@@ -293,19 +294,22 @@ extern void intel_crtc_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
u16 blue, int regno);
extern void intel_crtc_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
u16 *blue, int regno);
-extern void intel_init_clock_gating(struct drm_device *dev);
+extern void intel_enable_clock_gating(struct drm_device *dev);
+extern void intel_disable_clock_gating(struct drm_device *dev);
extern void ironlake_enable_drps(struct drm_device *dev);
extern void ironlake_disable_drps(struct drm_device *dev);
+extern void gen6_enable_rps(struct drm_i915_private *dev_priv);
+extern void gen6_disable_rps(struct drm_device *dev);
extern void intel_init_emon(struct drm_device *dev);
extern int intel_pin_and_fence_fb_obj(struct drm_device *dev,
- struct drm_gem_object *obj,
- bool pipelined);
+ struct drm_i915_gem_object *obj,
+ struct intel_ring_buffer *pipelined);
extern int intel_framebuffer_init(struct drm_device *dev,
struct intel_framebuffer *ifb,
struct drm_mode_fb_cmd *mode_cmd,
- struct drm_gem_object *obj);
+ struct drm_i915_gem_object *obj);
extern int intel_fbdev_init(struct drm_device *dev);
extern void intel_fbdev_fini(struct drm_device *dev);
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index ced3eef8da0..67738f32dfd 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -65,8 +65,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
struct fb_info *info;
struct drm_framebuffer *fb;
struct drm_mode_fb_cmd mode_cmd;
- struct drm_gem_object *fbo = NULL;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
struct device *device = &dev->pdev->dev;
int size, ret;
@@ -83,18 +82,17 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
size = mode_cmd.pitch * mode_cmd.height;
size = ALIGN(size, PAGE_SIZE);
- fbo = i915_gem_alloc_object(dev, size);
- if (!fbo) {
+ obj = i915_gem_alloc_object(dev, size);
+ if (!obj) {
DRM_ERROR("failed to allocate framebuffer\n");
ret = -ENOMEM;
goto out;
}
- obj_priv = to_intel_bo(fbo);
mutex_lock(&dev->struct_mutex);
/* Flush everything out, we'll be doing GTT only from now on */
- ret = intel_pin_and_fence_fb_obj(dev, fbo, false);
+ ret = intel_pin_and_fence_fb_obj(dev, obj, false);
if (ret) {
DRM_ERROR("failed to pin fb: %d\n", ret);
goto out_unref;
@@ -108,7 +106,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
info->par = ifbdev;
- ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, fbo);
+ ret = intel_framebuffer_init(dev, &ifbdev->ifb, &mode_cmd, obj);
if (ret)
goto out_unpin;
@@ -134,11 +132,10 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
else
info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
- info->fix.smem_start = dev->mode_config.fb_base + obj_priv->gtt_offset;
+ info->fix.smem_start = dev->mode_config.fb_base + obj->gtt_offset;
info->fix.smem_len = size;
- info->screen_base = ioremap_wc(dev->agp->base + obj_priv->gtt_offset,
- size);
+ info->screen_base = ioremap_wc(dev->agp->base + obj->gtt_offset, size);
if (!info->screen_base) {
ret = -ENOSPC;
goto out_unpin;
@@ -164,7 +161,7 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
DRM_DEBUG_KMS("allocated %dx%d fb: 0x%08x, bo %p\n",
fb->width, fb->height,
- obj_priv->gtt_offset, fbo);
+ obj->gtt_offset, obj);
mutex_unlock(&dev->struct_mutex);
@@ -172,9 +169,9 @@ static int intelfb_create(struct intel_fbdev *ifbdev,
return 0;
out_unpin:
- i915_gem_object_unpin(fbo);
+ i915_gem_object_unpin(obj);
out_unref:
- drm_gem_object_unreference(fbo);
+ drm_gem_object_unreference(&obj->base);
mutex_unlock(&dev->struct_mutex);
out:
return ret;
@@ -221,7 +218,7 @@ static void intel_fbdev_destroy(struct drm_device *dev,
drm_framebuffer_cleanup(&ifb->base);
if (ifb->obj) {
- drm_gem_object_unreference_unlocked(ifb->obj);
+ drm_gem_object_unreference_unlocked(&ifb->obj->base);
ifb->obj = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 3dba086e7ee..58040f68ed7 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -85,8 +85,9 @@ static u32 get_reserved(struct intel_gpio *gpio)
/* On most chips, these bits must be preserved in software. */
if (!IS_I830(dev) && !IS_845G(dev))
- reserved = I915_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE |
- GPIO_CLOCK_PULLUP_DISABLE);
+ reserved = I915_READ_NOTRACE(gpio->reg) &
+ (GPIO_DATA_PULLUP_DISABLE |
+ GPIO_CLOCK_PULLUP_DISABLE);
return reserved;
}
@@ -96,9 +97,9 @@ static int get_clock(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
- I915_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
- I915_WRITE(gpio->reg, reserved);
- return (I915_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0;
}
static int get_data(void *data)
@@ -106,9 +107,9 @@ static int get_data(void *data)
struct intel_gpio *gpio = data;
struct drm_i915_private *dev_priv = gpio->dev_priv;
u32 reserved = get_reserved(gpio);
- I915_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
- I915_WRITE(gpio->reg, reserved);
- return (I915_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
+ I915_WRITE_NOTRACE(gpio->reg, reserved | GPIO_DATA_DIR_MASK);
+ I915_WRITE_NOTRACE(gpio->reg, reserved);
+ return (I915_READ_NOTRACE(gpio->reg) & GPIO_DATA_VAL_IN) != 0;
}
static void set_clock(void *data, int state_high)
@@ -124,7 +125,7 @@ static void set_clock(void *data, int state_high)
clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK |
GPIO_CLOCK_VAL_MASK;
- I915_WRITE(gpio->reg, reserved | clock_bits);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | clock_bits);
POSTING_READ(gpio->reg);
}
@@ -141,7 +142,7 @@ static void set_data(void *data, int state_high)
data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK |
GPIO_DATA_VAL_MASK;
- I915_WRITE(gpio->reg, reserved | data_bits);
+ I915_WRITE_NOTRACE(gpio->reg, reserved | data_bits);
POSTING_READ(gpio->reg);
}
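
set_clock() and set_data() above both implement the same open-drain
convention: to release the line high, program the pin as an input and let
the external pull-up win; to drive it low, enable the output direction
with a data value of zero. A hedged sketch of the shared pattern (the
helper and its parameters are hypothetical):

static void bitbang_set(struct intel_gpio *gpio, u32 reserved,
			int state_high, u32 dir_out, u32 dir_mask,
			u32 val_mask)
{
	u32 bits;

	if (state_high)
		bits = dir_mask;	/* input: line floats high */
	else
		bits = dir_out | dir_mask | val_mask;	/* drive 0 */

	I915_WRITE_NOTRACE(gpio->reg, reserved | bits);
	POSTING_READ(gpio->reg);
}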
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 4324a326f98..aa2307080be 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -68,7 +68,7 @@ static struct intel_lvds *intel_attached_lvds(struct drm_connector *connector)
/**
* Sets the power state for the panel.
*/
-static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
+static void intel_lvds_enable(struct intel_lvds *intel_lvds)
{
struct drm_device *dev = intel_lvds->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
@@ -82,26 +82,61 @@ static void intel_lvds_set_power(struct intel_lvds *intel_lvds, bool on)
lvds_reg = LVDS;
}
- if (on) {
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
- intel_panel_set_backlight(dev, dev_priv->backlight_level);
- } else {
- dev_priv->backlight_level = intel_panel_get_backlight(dev);
-
- intel_panel_set_backlight(dev, 0);
- I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) | LVDS_PORT_EN);
- if (intel_lvds->pfit_control) {
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
- I915_WRITE(PFIT_CONTROL, 0);
- intel_lvds->pfit_control = 0;
+ if (intel_lvds->pfit_dirty) {
+ /*
+ * Enable automatic panel scaling so that non-native modes
+ * fill the screen. The panel fitter should only be
+ * adjusted whilst the pipe is disabled, according to the
+ * register description and the PRM.
+ */
+ DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
+ intel_lvds->pfit_control,
+ intel_lvds->pfit_pgm_ratios);
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000)) {
+ DRM_ERROR("timed out waiting for panel to power off\n");
+ } else {
+ I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
+ I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
intel_lvds->pfit_dirty = false;
}
+ }
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) | POWER_TARGET_ON);
+ POSTING_READ(lvds_reg);
- I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
+ intel_panel_set_backlight(dev, dev_priv->backlight_level);
+}
+
+static void intel_lvds_disable(struct intel_lvds *intel_lvds)
+{
+ struct drm_device *dev = intel_lvds->base.base.dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ u32 ctl_reg, lvds_reg;
+
+ if (HAS_PCH_SPLIT(dev)) {
+ ctl_reg = PCH_PP_CONTROL;
+ lvds_reg = PCH_LVDS;
+ } else {
+ ctl_reg = PP_CONTROL;
+ lvds_reg = LVDS;
}
+
+ dev_priv->backlight_level = intel_panel_get_backlight(dev);
+ intel_panel_set_backlight(dev, 0);
+
+ I915_WRITE(ctl_reg, I915_READ(ctl_reg) & ~POWER_TARGET_ON);
+
+ if (intel_lvds->pfit_control) {
+ if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
+ DRM_ERROR("timed out waiting for panel to power off\n");
+
+ I915_WRITE(PFIT_CONTROL, 0);
+ intel_lvds->pfit_dirty = true;
+ }
+
+ I915_WRITE(lvds_reg, I915_READ(lvds_reg) & ~LVDS_PORT_EN);
POSTING_READ(lvds_reg);
}
@@ -110,9 +145,9 @@ static void intel_lvds_dpms(struct drm_encoder *encoder, int mode)
struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
if (mode == DRM_MODE_DPMS_ON)
- intel_lvds_set_power(intel_lvds, true);
+ intel_lvds_enable(intel_lvds);
else
- intel_lvds_set_power(intel_lvds, false);
+ intel_lvds_disable(intel_lvds);
/* XXX: We never power down the LVDS pairs. */
}
@@ -269,14 +304,13 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
- pfit_control |= PFIT_ENABLE;
/* 965+ is easy, it does everything in hw */
if (scaled_width > scaled_height)
- pfit_control |= PFIT_SCALING_PILLAR;
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_PILLAR;
else if (scaled_width < scaled_height)
- pfit_control |= PFIT_SCALING_LETTER;
- else
- pfit_control |= PFIT_SCALING_AUTO;
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_LETTER;
+ else if (adjusted_mode->hdisplay != mode->hdisplay)
+ pfit_control |= PFIT_ENABLE | PFIT_SCALING_AUTO;
} else {
u32 scaled_width = adjusted_mode->hdisplay * mode->vdisplay;
u32 scaled_height = mode->hdisplay * adjusted_mode->vdisplay;
@@ -323,13 +357,17 @@ static bool intel_lvds_mode_fixup(struct drm_encoder *encoder,
* Full scaling, even if it changes the aspect ratio.
* Fortunately this is all done for us in hw.
*/
- pfit_control |= PFIT_ENABLE;
- if (INTEL_INFO(dev)->gen >= 4)
- pfit_control |= PFIT_SCALING_AUTO;
- else
- pfit_control |= (VERT_AUTO_SCALE | HORIZ_AUTO_SCALE |
- VERT_INTERP_BILINEAR |
- HORIZ_INTERP_BILINEAR);
+ if (mode->vdisplay != adjusted_mode->vdisplay ||
+ mode->hdisplay != adjusted_mode->hdisplay) {
+ pfit_control |= PFIT_ENABLE;
+ if (INTEL_INFO(dev)->gen >= 4)
+ pfit_control |= PFIT_SCALING_AUTO;
+ else
+ pfit_control |= (VERT_AUTO_SCALE |
+ VERT_INTERP_BILINEAR |
+ HORIZ_AUTO_SCALE |
+ HORIZ_INTERP_BILINEAR);
+ }
break;
default:
@@ -411,43 +449,18 @@ static void intel_lvds_commit(struct drm_encoder *encoder)
/* Always do a full power on as we do not know what state
* we were left in.
*/
- intel_lvds_set_power(intel_lvds, true);
+ intel_lvds_enable(intel_lvds);
}
static void intel_lvds_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
- struct drm_device *dev = encoder->dev;
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct intel_lvds *intel_lvds = to_intel_lvds(encoder);
-
/*
* The LVDS pin pair will already have been turned on in the
* intel_crtc_mode_set since it has a large impact on the DPLL
* settings.
*/
-
- if (HAS_PCH_SPLIT(dev))
- return;
-
- if (!intel_lvds->pfit_dirty)
- return;
-
- /*
- * Enable automatic panel scaling so that non-native modes fill the
- * screen. Should be enabled before the pipe is enabled, according to
- * register description and PRM.
- */
- DRM_DEBUG_KMS("applying panel-fitter: %x, %x\n",
- intel_lvds->pfit_control,
- intel_lvds->pfit_pgm_ratios);
- if (wait_for((I915_READ(PP_STATUS) & PP_ON) == 0, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
-
- I915_WRITE(PFIT_PGM_RATIOS, intel_lvds->pfit_pgm_ratios);
- I915_WRITE(PFIT_CONTROL, intel_lvds->pfit_control);
- intel_lvds->pfit_dirty = false;
}
/**
@@ -837,7 +850,7 @@ static bool intel_lvds_ddc_probe(struct drm_device *dev, u8 pin)
* Create the connector, register the LVDS DDC bus, and try to figure out what
* modes we can display on the LVDS panel (if present).
*/
-void intel_lvds_init(struct drm_device *dev)
+bool intel_lvds_init(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
struct intel_lvds *intel_lvds;
@@ -853,37 +866,37 @@ void intel_lvds_init(struct drm_device *dev)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds))
- return;
+ return false;
pin = GMBUS_PORT_PANEL;
if (!lvds_is_present_in_vbt(dev, &pin)) {
DRM_DEBUG_KMS("LVDS is not present in VBT\n");
- return;
+ return false;
}
if (HAS_PCH_SPLIT(dev)) {
if ((I915_READ(PCH_LVDS) & LVDS_DETECTED) == 0)
- return;
+ return false;
if (dev_priv->edp.support) {
DRM_DEBUG_KMS("disable LVDS for eDP support\n");
- return;
+ return false;
}
}
if (!intel_lvds_ddc_probe(dev, pin)) {
DRM_DEBUG_KMS("LVDS did not respond to DDC probe\n");
- return;
+ return false;
}
intel_lvds = kzalloc(sizeof(struct intel_lvds), GFP_KERNEL);
if (!intel_lvds) {
- return;
+ return false;
}
intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL);
if (!intel_connector) {
kfree(intel_lvds);
- return;
+ return false;
}
if (!HAS_PCH_SPLIT(dev)) {
@@ -904,6 +917,8 @@ void intel_lvds_init(struct drm_device *dev)
intel_encoder->clone_mask = (1 << INTEL_LVDS_CLONE_BIT);
intel_encoder->crtc_mask = (1 << 1);
+ if (INTEL_INFO(dev)->gen >= 5)
+ intel_encoder->crtc_mask |= (1 << 0);
drm_encoder_helper_add(encoder, &intel_lvds_helper_funcs);
drm_connector_helper_add(connector, &intel_lvds_connector_helper_funcs);
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
@@ -1009,10 +1024,18 @@ void intel_lvds_init(struct drm_device *dev)
out:
if (HAS_PCH_SPLIT(dev)) {
u32 pwm;
- /* make sure PWM is enabled */
+
+ pipe = (I915_READ(PCH_LVDS) & LVDS_PIPEB_SELECT) ? 1 : 0;
+
+ /* make sure PWM is enabled and locked to the LVDS pipe */
pwm = I915_READ(BLC_PWM_CPU_CTL2);
- pwm |= (PWM_ENABLE | PWM_PIPE_B);
- I915_WRITE(BLC_PWM_CPU_CTL2, pwm);
+ if (pipe == 0 && (pwm & PWM_PIPE_B))
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm & ~PWM_ENABLE);
+ if (pipe)
+ pwm |= PWM_PIPE_B;
+ else
+ pwm &= ~PWM_PIPE_B;
+ I915_WRITE(BLC_PWM_CPU_CTL2, pwm | PWM_ENABLE);
pwm = I915_READ(BLC_PWM_PCH_CTL1);
pwm |= PWM_PCH_ENABLE;
@@ -1026,7 +1049,7 @@ out:
/* keep the LVDS connector */
dev_priv->int_lvds_connector = connector;
drm_sysfs_connector_add(connector);
- return;
+ return true;
failed:
DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
@@ -1034,4 +1057,5 @@ failed:
drm_encoder_cleanup(encoder);
kfree(intel_lvds);
kfree(intel_connector);
+ return false;
}
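
intel_lvds_init() now reports whether an LVDS connector was actually registered. The consumer lives in intel_setup_outputs() in intel_display.c, which is outside this excerpt; reconstructed (treat the exact guard as an assumption), the idea is that a non-PCH machine with no panel can have its panel fitter shut off outright:

    bool has_lvds;

    has_lvds = intel_lvds_init(dev);
    if (!has_lvds && !HAS_PCH_SPLIT(dev)) {
        /* disable the panel fitter on everything but LVDS */
        I915_WRITE(PFIT_CONTROL, 0);
    }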
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 9b0d9a867ae..f295a7aaadf 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -273,14 +273,8 @@ void intel_opregion_enable_asle(struct drm_device *dev)
struct opregion_asle *asle = dev_priv->opregion.asle;
if (asle) {
- if (IS_MOBILE(dev)) {
- unsigned long irqflags;
-
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
+ if (IS_MOBILE(dev))
intel_enable_asle(dev);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock,
- irqflags);
- }
asle->tche = ASLE_ALS_EN | ASLE_BLC_EN | ASLE_PFIT_EN |
ASLE_PFMB_EN;
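
The open-coded user_irq_lock dance goes away because the IRQ rework elsewhere in this series renames the lock to irq_lock and pushes the locking down into intel_enable_asle() itself. Assumed shape of the helper after the rework (it lives in i915_irq.c, not in this excerpt), following the same lock-in-callee pattern as the render_ring_get_irq() hunk later in this diff:

    void intel_enable_asle(struct drm_device *dev)
    {
        drm_i915_private_t *dev_priv = dev->dev_private;
        unsigned long irqflags;

        spin_lock_irqsave(&dev_priv->irq_lock, irqflags);

        if (HAS_PCH_SPLIT(dev))
            ironlake_enable_display_irq(dev_priv, DE_GSE);
        else
            i915_enable_pipestat(dev_priv, 1,
                                 PIPE_LEGACY_BLC_EVENT_ENABLE);

        spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
    }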
diff --git a/drivers/gpu/drm/i915/intel_overlay.c b/drivers/gpu/drm/i915/intel_overlay.c
index 02ff0a481f4..3fbb98b948d 100644
--- a/drivers/gpu/drm/i915/intel_overlay.c
+++ b/drivers/gpu/drm/i915/intel_overlay.c
@@ -221,15 +221,16 @@ static int intel_overlay_do_wait_request(struct intel_overlay *overlay,
int ret;
BUG_ON(overlay->last_flip_req);
- overlay->last_flip_req =
- i915_add_request(dev, NULL, request, &dev_priv->render_ring);
- if (overlay->last_flip_req == 0)
- return -ENOMEM;
-
+ ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+ overlay->last_flip_req = request->seqno;
overlay->flip_tail = tail;
ret = i915_do_wait_request(dev,
overlay->last_flip_req, true,
- &dev_priv->render_ring);
+ LP_RING(dev_priv));
if (ret)
return ret;
@@ -289,6 +290,7 @@ i830_deactivate_pipe_a(struct drm_device *dev)
static int intel_overlay_on(struct intel_overlay *overlay)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
struct drm_i915_gem_request *request;
int pipe_a_quirk = 0;
int ret;
@@ -308,7 +310,12 @@ static int intel_overlay_on(struct intel_overlay *overlay)
goto out;
}
- BEGIN_LP_RING(4);
+ ret = BEGIN_LP_RING(4);
+ if (ret) {
+ kfree(request);
+ goto out;
+ }
+
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_ON);
OUT_RING(overlay->flip_addr | OFC_UPDATE);
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
@@ -332,6 +339,7 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
struct drm_i915_gem_request *request;
u32 flip_addr = overlay->flip_addr;
u32 tmp;
+ int ret;
BUG_ON(!overlay->active);
@@ -347,36 +355,44 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
if (tmp & (1 << 17))
DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
ADVANCE_LP_RING();
- overlay->last_flip_req =
- i915_add_request(dev, NULL, request, &dev_priv->render_ring);
+ ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
+ overlay->last_flip_req = request->seqno;
return 0;
}
static void intel_overlay_release_old_vid_tail(struct intel_overlay *overlay)
{
- struct drm_gem_object *obj = &overlay->old_vid_bo->base;
+ struct drm_i915_gem_object *obj = overlay->old_vid_bo;
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
overlay->old_vid_bo = NULL;
}
static void intel_overlay_off_tail(struct intel_overlay *overlay)
{
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj = overlay->vid_bo;
/* never have the overlay hw on without showing a frame */
BUG_ON(!overlay->vid_bo);
- obj = &overlay->vid_bo->base;
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
overlay->vid_bo = NULL;
overlay->crtc->overlay = NULL;
@@ -389,8 +405,10 @@ static int intel_overlay_off(struct intel_overlay *overlay,
bool interruptible)
{
struct drm_device *dev = overlay->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
u32 flip_addr = overlay->flip_addr;
struct drm_i915_gem_request *request;
+ int ret;
BUG_ON(!overlay->active);
@@ -404,7 +422,11 @@ static int intel_overlay_off(struct intel_overlay *overlay,
* of the hw. Do it in both cases */
flip_addr |= OFC_UPDATE;
- BEGIN_LP_RING(6);
+ ret = BEGIN_LP_RING(6);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
/* wait for overlay to go idle */
OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
OUT_RING(flip_addr);
@@ -432,7 +454,7 @@ static int intel_overlay_recover_from_interrupt(struct intel_overlay *overlay,
return 0;
ret = i915_do_wait_request(dev, overlay->last_flip_req,
- interruptible, &dev_priv->render_ring);
+ interruptible, LP_RING(dev_priv));
if (ret)
return ret;
@@ -467,7 +489,12 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (request == NULL)
return -ENOMEM;
- BEGIN_LP_RING(2);
+ ret = BEGIN_LP_RING(2);
+ if (ret) {
+ kfree(request);
+ return ret;
+ }
+
OUT_RING(MI_WAIT_FOR_EVENT | MI_WAIT_FOR_OVERLAY_FLIP);
OUT_RING(MI_NOOP);
ADVANCE_LP_RING();
@@ -736,13 +763,12 @@ static u32 overlay_cmd_reg(struct put_image_params *params)
}
static int intel_overlay_do_put_image(struct intel_overlay *overlay,
- struct drm_gem_object *new_bo,
+ struct drm_i915_gem_object *new_bo,
struct put_image_params *params)
{
int ret, tmp_width;
struct overlay_registers *regs;
bool scale_changed = false;
- struct drm_i915_gem_object *bo_priv = to_intel_bo(new_bo);
struct drm_device *dev = overlay->dev;
BUG_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -753,7 +779,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
return ret;
- ret = i915_gem_object_pin(new_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(new_bo, PAGE_SIZE, true);
if (ret != 0)
return ret;
@@ -761,6 +787,10 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
if (ret != 0)
goto out_unpin;
+ ret = i915_gem_object_put_fence(new_bo);
+ if (ret)
+ goto out_unpin;
+
if (!overlay->active) {
regs = intel_overlay_map_regs(overlay);
if (!regs) {
@@ -797,7 +827,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
regs->SWIDTHSW = calc_swidthsw(overlay->dev,
params->offset_Y, tmp_width);
regs->SHEIGHT = params->src_h;
- regs->OBUF_0Y = bo_priv->gtt_offset + params-> offset_Y;
+ regs->OBUF_0Y = new_bo->gtt_offset + params->offset_Y;
regs->OSTRIDE = params->stride_Y;
if (params->format & I915_OVERLAY_YUV_PLANAR) {
@@ -811,8 +841,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
params->src_w/uv_hscale);
regs->SWIDTHSW |= max_t(u32, tmp_U, tmp_V) << 16;
regs->SHEIGHT |= (params->src_h/uv_vscale) << 16;
- regs->OBUF_0U = bo_priv->gtt_offset + params->offset_U;
- regs->OBUF_0V = bo_priv->gtt_offset + params->offset_V;
+ regs->OBUF_0U = new_bo->gtt_offset + params->offset_U;
+ regs->OBUF_0V = new_bo->gtt_offset + params->offset_V;
regs->OSTRIDE |= params->stride_UV << 16;
}
@@ -829,7 +859,7 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
goto out_unpin;
overlay->old_vid_bo = overlay->vid_bo;
- overlay->vid_bo = to_intel_bo(new_bo);
+ overlay->vid_bo = new_bo;
return 0;
@@ -942,7 +972,7 @@ static int check_overlay_scaling(struct put_image_params *rec)
static int check_overlay_src(struct drm_device *dev,
struct drm_intel_overlay_put_image *rec,
- struct drm_gem_object *new_bo)
+ struct drm_i915_gem_object *new_bo)
{
int uv_hscale = uv_hsubsampling(rec->flags);
int uv_vscale = uv_vsubsampling(rec->flags);
@@ -1027,7 +1057,7 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
tmp = rec->stride_Y*rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
+ if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
break;
@@ -1038,12 +1068,12 @@ static int check_overlay_src(struct drm_device *dev,
return -EINVAL;
tmp = rec->stride_Y * rec->src_height;
- if (rec->offset_Y + tmp > new_bo->size)
+ if (rec->offset_Y + tmp > new_bo->base.size)
return -EINVAL;
tmp = rec->stride_UV * (rec->src_height / uv_vscale);
- if (rec->offset_U + tmp > new_bo->size ||
- rec->offset_V + tmp > new_bo->size)
+ if (rec->offset_U + tmp > new_bo->base.size ||
+ rec->offset_V + tmp > new_bo->base.size)
return -EINVAL;
break;
}
@@ -1086,7 +1116,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
struct intel_overlay *overlay;
struct drm_mode_object *drmmode_obj;
struct intel_crtc *crtc;
- struct drm_gem_object *new_bo;
+ struct drm_i915_gem_object *new_bo;
struct put_image_params *params;
int ret;
@@ -1125,8 +1155,8 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
}
crtc = to_intel_crtc(obj_to_crtc(drmmode_obj));
- new_bo = drm_gem_object_lookup(dev, file_priv,
- put_image_rec->bo_handle);
+ new_bo = to_intel_bo(drm_gem_object_lookup(dev, file_priv,
+ put_image_rec->bo_handle));
if (!new_bo) {
ret = -ENOENT;
goto out_free;
@@ -1135,6 +1165,12 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
mutex_lock(&dev->mode_config.mutex);
mutex_lock(&dev->struct_mutex);
+ if (new_bo->tiling_mode) {
+ DRM_ERROR("buffer used for overlay image can not be tiled\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
ret = intel_overlay_recover_from_interrupt(overlay, true);
if (ret != 0)
goto out_unlock;
@@ -1217,7 +1253,7 @@ int intel_overlay_put_image(struct drm_device *dev, void *data,
out_unlock:
mutex_unlock(&dev->struct_mutex);
mutex_unlock(&dev->mode_config.mutex);
- drm_gem_object_unreference_unlocked(new_bo);
+ drm_gem_object_unreference_unlocked(&new_bo->base);
out_free:
kfree(params);
@@ -1370,7 +1406,7 @@ void intel_setup_overlay(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
struct intel_overlay *overlay;
- struct drm_gem_object *reg_bo;
+ struct drm_i915_gem_object *reg_bo;
struct overlay_registers *regs;
int ret;
@@ -1385,7 +1421,7 @@ void intel_setup_overlay(struct drm_device *dev)
reg_bo = i915_gem_alloc_object(dev, PAGE_SIZE);
if (!reg_bo)
goto out_free;
- overlay->reg_bo = to_intel_bo(reg_bo);
+ overlay->reg_bo = reg_bo;
if (OVERLAY_NEEDS_PHYSICAL(dev)) {
ret = i915_gem_attach_phys_object(dev, reg_bo,
@@ -1395,14 +1431,14 @@ void intel_setup_overlay(struct drm_device *dev)
DRM_ERROR("failed to attach phys overlay regs\n");
goto out_free_bo;
}
- overlay->flip_addr = overlay->reg_bo->phys_obj->handle->busaddr;
+ overlay->flip_addr = reg_bo->phys_obj->handle->busaddr;
} else {
- ret = i915_gem_object_pin(reg_bo, PAGE_SIZE);
+ ret = i915_gem_object_pin(reg_bo, PAGE_SIZE, true);
if (ret) {
DRM_ERROR("failed to pin overlay register bo\n");
goto out_free_bo;
}
- overlay->flip_addr = overlay->reg_bo->gtt_offset;
+ overlay->flip_addr = reg_bo->gtt_offset;
ret = i915_gem_object_set_to_gtt_domain(reg_bo, true);
if (ret) {
@@ -1434,7 +1470,7 @@ void intel_setup_overlay(struct drm_device *dev)
out_unpin_bo:
i915_gem_object_unpin(reg_bo);
out_free_bo:
- drm_gem_object_unreference(reg_bo);
+ drm_gem_object_unreference(&reg_bo->base);
out_free:
kfree(overlay);
return;
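
Every overlay submission path above now follows the same fallible pattern: allocate the request up front, reserve ring space with BEGIN_LP_RING() — which can now fail instead of silently corrupting the ring when it is full or the GPU is wedged — and free the request on any error before propagating it. Distilled into one sketch (emit_overlay_flip is a hypothetical name; the macros are the LP_RING helpers introduced elsewhere in this series):

    static int emit_overlay_flip(struct drm_device *dev,
                                 struct intel_overlay *overlay)
    {
        struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_request *request;
        int ret;

        request = kzalloc(sizeof(*request), GFP_KERNEL);
        if (request == NULL)
            return -ENOMEM;

        ret = BEGIN_LP_RING(2);  /* may fail: ring full / GPU wedged */
        if (ret) {
            kfree(request);
            return ret;
        }
        OUT_RING(MI_OVERLAY_FLIP | MI_OVERLAY_CONTINUE);
        OUT_RING(overlay->flip_addr);
        ADVANCE_LP_RING();

        ret = i915_add_request(dev, NULL, request, LP_RING(dev_priv));
        if (ret) {
            kfree(request);  /* request was not queued, still ours to free */
            return ret;
        }
        overlay->last_flip_req = request->seqno;
        return 0;
    }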
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 92ff8f38527..7350ec2515c 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -125,15 +125,55 @@ static int is_backlight_combination_mode(struct drm_device *dev)
return 0;
}
+static u32 i915_read_blc_pwm_ctl(struct drm_i915_private *dev_priv)
+{
+ u32 val;
+
+ /* Restore the CTL value if it was lost, e.g. after a GPU reset */
+
+ if (HAS_PCH_SPLIT(dev_priv->dev)) {
+ val = I915_READ(BLC_PWM_PCH_CTL2);
+ if (dev_priv->saveBLC_PWM_CTL2 == 0) {
+ dev_priv->saveBLC_PWM_CTL2 = val;
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_PCH_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL2;
+ }
+ } else {
+ val = I915_READ(BLC_PWM_CTL);
+ if (dev_priv->saveBLC_PWM_CTL == 0) {
+ dev_priv->saveBLC_PWM_CTL = val;
+ dev_priv->saveBLC_PWM_CTL2 = I915_READ(BLC_PWM_CTL2);
+ } else if (val == 0) {
+ I915_WRITE(BLC_PWM_CTL,
+ dev_priv->saveBLC_PWM_CTL);
+ I915_WRITE(BLC_PWM_CTL2,
+ dev_priv->saveBLC_PWM_CTL2);
+ val = dev_priv->saveBLC_PWM_CTL;
+ }
+ }
+
+ return val;
+}
+
u32 intel_panel_get_max_backlight(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = dev->dev_private;
u32 max;
+ max = i915_read_blc_pwm_ctl(dev_priv);
+ if (max == 0) {
+ /* XXX add code here to query mode clock or hardware clock
+ * and program max PWM appropriately.
+ */
+ printk_once(KERN_WARNING "fixme: max PWM is zero.\n");
+ return 1;
+ }
+
if (HAS_PCH_SPLIT(dev)) {
- max = I915_READ(BLC_PWM_PCH_CTL2) >> 16;
+ max >>= 16;
} else {
- max = I915_READ(BLC_PWM_CTL);
if (IS_PINEVIEW(dev)) {
max >>= 17;
} else {
@@ -146,14 +186,6 @@ u32 intel_panel_get_max_backlight(struct drm_device *dev)
max *= 0xff;
}
- if (max == 0) {
- /* XXX add code here to query mode clock or hardware clock
- * and program max PWM appropriately.
- */
- DRM_ERROR("fixme: max PWM is zero.\n");
- max = 1;
- }
-
DRM_DEBUG_DRIVER("max backlight PWM = %d\n", max);
return max;
}
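
i915_read_blc_pwm_ctl() snapshots the backlight PWM control registers the first time they are read and writes the snapshot back whenever the hardware reads as zero (e.g. after a GPU reset), so intel_panel_get_max_backlight() only hits its max == 0 fallback when the PWM frequency was never programmed at all. Since the function now always returns a non-zero value, callers can scale against it without a divide-by-zero guard; a hypothetical consumer:

    static u32 example_brightness_to_duty(struct drm_device *dev, u32 percent)
    {
        /* max >= 1 is guaranteed: the zero case warns once and returns 1 */
        u32 max = intel_panel_get_max_backlight(dev);

        return DIV_ROUND_CLOSEST(percent * max, 100);
    }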
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index b83306f9244..56bc95c056d 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -49,11 +49,11 @@ static u32 i915_gem_get_seqno(struct drm_device *dev)
}
static void
-render_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+render_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
u32 cmd;
@@ -109,79 +109,83 @@ render_ring_flush(struct drm_device *dev,
if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION)
cmd |= MI_EXE_FLUSH;
+ if (invalidate_domains & I915_GEM_DOMAIN_COMMAND &&
+ (IS_G4X(dev) || IS_GEN5(dev)))
+ cmd |= MI_INVALIDATE_ISP;
+
#if WATCH_EXEC
DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd);
#endif
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, cmd);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
+ if (intel_ring_begin(ring, 2) == 0) {
+ intel_ring_emit(ring, cmd);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
}
}
-static void ring_write_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
I915_WRITE_TAIL(ring, value);
}
-u32 intel_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 acthd_reg = INTEL_INFO(dev)->gen >= 4 ?
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 acthd_reg = INTEL_INFO(ring->dev)->gen >= 4 ?
RING_ACTHD(ring->mmio_base) : ACTHD;
return I915_READ(acthd_reg);
}
-static int init_ring_common(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int init_ring_common(struct intel_ring_buffer *ring)
{
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj = ring->obj;
u32 head;
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- obj_priv = to_intel_bo(ring->gem_object);
/* Stop the ring if it's running. */
I915_WRITE_CTL(ring, 0);
I915_WRITE_HEAD(ring, 0);
- ring->write_tail(dev, ring, 0);
+ ring->write_tail(ring, 0);
/* Initialize the ring. */
- I915_WRITE_START(ring, obj_priv->gtt_offset);
+ I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
if (head != 0) {
- DRM_ERROR("%s head not reset to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ DRM_DEBUG_KMS("%s head not reset to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
I915_WRITE_HEAD(ring, 0);
- DRM_ERROR("%s head forced to zero "
- "ctl %08x head %08x tail %08x start %08x\n",
- ring->name,
- I915_READ_CTL(ring),
- I915_READ_HEAD(ring),
- I915_READ_TAIL(ring),
- I915_READ_START(ring));
+ if (I915_READ_HEAD(ring) & HEAD_ADDR) {
+ DRM_ERROR("failed to set %s head to zero "
+ "ctl %08x head %08x tail %08x start %08x\n",
+ ring->name,
+ I915_READ_CTL(ring),
+ I915_READ_HEAD(ring),
+ I915_READ_TAIL(ring),
+ I915_READ_START(ring));
+ }
}
I915_WRITE_CTL(ring,
- ((ring->gem_object->size - PAGE_SIZE) & RING_NR_PAGES)
+ ((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_REPORT_64K | RING_VALID);
- head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* If the head is still not zero, the ring is dead */
- if (head != 0) {
+ if ((I915_READ_CTL(ring) & RING_VALID) == 0 ||
+ I915_READ_START(ring) != obj->gtt_offset ||
+ (I915_READ_HEAD(ring) & HEAD_ADDR) != 0) {
DRM_ERROR("%s initialization failed "
"ctl %08x head %08x tail %08x start %08x\n",
ring->name,
@@ -192,8 +196,8 @@ static int init_ring_common(struct drm_device *dev,
return -EIO;
}
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
+ if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
+ i915_kernel_lost_context(ring->dev);
else {
ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
@@ -201,335 +205,500 @@ static int init_ring_common(struct drm_device *dev,
if (ring->space < 0)
ring->space += ring->size;
}
+
return 0;
}
-static int init_render_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+/*
+ * 965+ supports PIPE_CONTROL commands, which provide finer-grained control
+ * over cache flushing.
+ */
+struct pipe_control {
+ struct drm_i915_gem_object *obj;
+ volatile u32 *cpu_page;
+ u32 gtt_offset;
+};
+
+static int
+init_pipe_control(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- int ret = init_ring_common(dev, ring);
- int mode;
+ struct pipe_control *pc;
+ struct drm_i915_gem_object *obj;
+ int ret;
+
+ if (ring->private)
+ return 0;
+
+ pc = kmalloc(sizeof(*pc), GFP_KERNEL);
+ if (!pc)
+ return -ENOMEM;
+
+ obj = i915_gem_alloc_object(ring->dev, 4096);
+ if (obj == NULL) {
+ DRM_ERROR("Failed to allocate seqno page\n");
+ ret = -ENOMEM;
+ goto err;
+ }
+ obj->agp_type = AGP_USER_CACHED_MEMORY;
+
+ ret = i915_gem_object_pin(obj, 4096, true);
+ if (ret)
+ goto err_unref;
+
+ pc->gtt_offset = obj->gtt_offset;
+ pc->cpu_page = kmap(obj->pages[0]);
+ if (pc->cpu_page == NULL)
+ goto err_unpin;
+
+ pc->obj = obj;
+ ring->private = pc;
+ return 0;
+
+err_unpin:
+ i915_gem_object_unpin(obj);
+err_unref:
+ drm_gem_object_unreference(&obj->base);
+err:
+ kfree(pc);
+ return ret;
+}
+
+static void
+cleanup_pipe_control(struct intel_ring_buffer *ring)
+{
+ struct pipe_control *pc = ring->private;
+ struct drm_i915_gem_object *obj;
+
+ if (!ring->private)
+ return;
+
+ obj = pc->obj;
+ kunmap(obj->pages[0]);
+ i915_gem_object_unpin(obj);
+ drm_gem_object_unreference(&obj->base);
+
+ kfree(pc);
+ ring->private = NULL;
+}
+
+static int init_render_ring(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int ret = init_ring_common(ring);
if (INTEL_INFO(dev)->gen > 3) {
- mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
+ int mode = VS_TIMER_DISPATCH << 16 | VS_TIMER_DISPATCH;
if (IS_GEN6(dev))
mode |= MI_FLUSH_ENABLE << 16 | MI_FLUSH_ENABLE;
I915_WRITE(MI_MODE, mode);
}
+
+ if (INTEL_INFO(dev)->gen >= 6) {
+ } else if (IS_GEN5(dev)) {
+ ret = init_pipe_control(ring);
+ if (ret)
+ return ret;
+ }
+
return ret;
}
-#define PIPE_CONTROL_FLUSH(addr) \
+static void render_ring_cleanup(struct intel_ring_buffer *ring)
+{
+ if (!ring->private)
+ return;
+
+ cleanup_pipe_control(ring);
+}
+
+static void
+update_semaphore(struct intel_ring_buffer *ring, int i, u32 seqno)
+{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
+ int id;
+
+ /*
+ * cs -> 1 = vcs, 0 = bcs
+ * vcs -> 1 = bcs, 0 = cs
+ * bcs -> 1 = cs, 0 = vcs
+ */
+ id = ring - dev_priv->ring;
+ id += 2 - i;
+ id %= 3;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ MI_SEMAPHORE_UPDATE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring,
+ RING_SYNC_0(dev_priv->ring[id].mmio_base) + 4*i);
+}
+
+static int
+gen6_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+{
+ u32 seqno;
+ int ret;
+
+ ret = intel_ring_begin(ring, 10);
+ if (ret)
+ return ret;
+
+ seqno = i915_gem_get_seqno(ring->dev);
+ update_semaphore(ring, 0, seqno);
+ update_semaphore(ring, 1, seqno);
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
+
+int
+intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno)
+{
+ int ret;
+
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_SEMAPHORE_MBOX |
+ MI_SEMAPHORE_REGISTER |
+ intel_ring_sync_index(ring, to) << 17 |
+ MI_SEMAPHORE_COMPARE);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+
+ return 0;
+}
+
+#define PIPE_CONTROL_FLUSH(ring__, addr__) \
do { \
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
+ intel_ring_emit(ring__, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE | \
PIPE_CONTROL_DEPTH_STALL | 2); \
- OUT_RING(addr | PIPE_CONTROL_GLOBAL_GTT); \
- OUT_RING(0); \
- OUT_RING(0); \
+ intel_ring_emit(ring__, (addr__) | PIPE_CONTROL_GLOBAL_GTT); \
+ intel_ring_emit(ring__, 0); \
+ intel_ring_emit(ring__, 0); \
} while (0)
-/**
- * Creates a new sequence number, emitting a write of it to the status page
- * plus an interrupt, which will trigger i915_user_interrupt_handler.
- *
- * Must be called with struct_lock held.
- *
- * Returned sequence numbers are nonzero on success.
- */
-static u32
-render_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int
+pc_render_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- u32 seqno;
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
+ struct pipe_control *pc = ring->private;
+ u32 scratch_addr = pc->gtt_offset + 128;
+ int ret;
- seqno = i915_gem_get_seqno(dev);
-
- if (IS_GEN6(dev)) {
- BEGIN_LP_RING(6);
- OUT_RING(GFX_OP_PIPE_CONTROL | 3);
- OUT_RING(PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_IS_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else if (HAS_PIPE_CONTROL(dev)) {
- u32 scratch_addr = dev_priv->seqno_gfx_addr + 128;
+ /* For Ironlake, MI_USER_INTERRUPT was deprecated and apparently
+ * incoherent with writes to memory, i.e. completely fubar,
+ * so we need to use PIPE_NOTIFY instead.
+ *
+ * However, we also need to workaround the qword write
+ * incoherence by flushing the 6 PIPE_NOTIFY buffers out to
+ * memory before requesting an interrupt.
+ */
+ ret = intel_ring_begin(ring, 32);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128; /* write to separate cachelines */
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ scratch_addr += 128;
+ PIPE_CONTROL_FLUSH(ring, scratch_addr);
+ intel_ring_emit(ring, GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
+ PIPE_CONTROL_NOTIFY);
+ intel_ring_emit(ring, pc->gtt_offset | PIPE_CONTROL_GLOBAL_GTT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
+}
- /*
- * Workaround qword write incoherence by flushing the
- * PIPE_NOTIFY buffers out to memory before requesting
- * an interrupt.
- */
- BEGIN_LP_RING(32);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128; /* write to separate cachelines */
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- scratch_addr += 128;
- PIPE_CONTROL_FLUSH(scratch_addr);
- OUT_RING(GFX_OP_PIPE_CONTROL | PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_WC_FLUSH | PIPE_CONTROL_TC_FLUSH |
- PIPE_CONTROL_NOTIFY);
- OUT_RING(dev_priv->seqno_gfx_addr | PIPE_CONTROL_GLOBAL_GTT);
- OUT_RING(seqno);
- OUT_RING(0);
- ADVANCE_LP_RING();
- } else {
- BEGIN_LP_RING(4);
- OUT_RING(MI_STORE_DWORD_INDEX);
- OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- OUT_RING(seqno);
+static int
+render_ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
+{
+ struct drm_device *dev = ring->dev;
+ u32 seqno = i915_gem_get_seqno(dev);
+ int ret;
- OUT_RING(MI_USER_INTERRUPT);
- ADVANCE_LP_RING();
- }
- return seqno;
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
+
+ *result = seqno;
+ return 0;
}
static u32
-render_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_get_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- if (HAS_PIPE_CONTROL(dev))
- return ((volatile u32 *)(dev_priv->seqno_page))[0];
- else
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
}
-static void
-render_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static u32
+pc_render_get_seqno(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct pipe_control *pc = ring->private;
+ return pc->cpu_page[0];
+}
+
+static bool
+render_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ struct drm_device *dev = ring->dev;
+
+ if (!dev->irq_enabled)
+ return false;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- if (dev->irq_enabled && (++ring->user_irq_refcount == 1)) {
+ if (atomic_inc_return(&ring->irq_refcount) == 1) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_enable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ ironlake_enable_graphics_irq(dev_priv,
+ GT_PIPE_NOTIFY | GT_USER_INTERRUPT);
else
i915_enable_irq(dev_priv, I915_USER_INTERRUPT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+
+ return true;
}
static void
-render_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+render_ring_put_irq(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
- unsigned long irqflags;
+ struct drm_device *dev = ring->dev;
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- BUG_ON(dev->irq_enabled && ring->user_irq_refcount <= 0);
- if (dev->irq_enabled && (--ring->user_irq_refcount == 0)) {
+ if (atomic_dec_and_test(&ring->irq_refcount)) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
if (HAS_PCH_SPLIT(dev))
- ironlake_disable_graphics_irq(dev_priv, GT_PIPE_NOTIFY);
+ ironlake_disable_graphics_irq(dev_priv,
+ GT_USER_INTERRUPT |
+ GT_PIPE_NOTIFY);
else
i915_disable_irq(dev_priv, I915_USER_INTERRUPT);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
}
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
}
-void intel_ring_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- if (IS_GEN6(dev)) {
- I915_WRITE(RING_HWS_PGA_GEN6(ring->mmio_base),
- ring->status_page.gfx_addr);
- I915_READ(RING_HWS_PGA_GEN6(ring->mmio_base)); /* posting read */
- } else {
- I915_WRITE(RING_HWS_PGA(ring->mmio_base),
- ring->status_page.gfx_addr);
- I915_READ(RING_HWS_PGA(ring->mmio_base)); /* posting read */
- }
-
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ u32 mmio = IS_GEN6(ring->dev) ?
+ RING_HWS_PGA_GEN6(ring->mmio_base) :
+ RING_HWS_PGA(ring->mmio_base);
+ I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
+ POSTING_READ(mmio);
}
static void
-bsd_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains)
+bsd_ring_flush(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains)
{
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH);
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
-}
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
-static int init_bsd_ring(struct drm_device *dev,
- struct intel_ring_buffer *ring)
-{
- return init_ring_common(dev, ring);
+ if (intel_ring_begin(ring, 2) == 0) {
+ intel_ring_emit(ring, MI_FLUSH);
+ intel_ring_emit(ring, MI_NOOP);
+ intel_ring_advance(ring);
+ }
}
-static u32
-ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
+static int
+ring_add_request(struct intel_ring_buffer *ring,
+ u32 *result)
{
u32 seqno;
+ int ret;
- seqno = i915_gem_get_seqno(dev);
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(dev, ring,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(dev, ring, seqno);
- intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
- intel_ring_advance(dev, ring);
+ seqno = i915_gem_get_seqno(ring->dev);
- DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+ intel_ring_emit(ring, MI_STORE_DWORD_INDEX);
+ intel_ring_emit(ring, I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
+ intel_ring_emit(ring, seqno);
+ intel_ring_emit(ring, MI_USER_INTERRUPT);
+ intel_ring_advance(ring);
- return seqno;
+ DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
+ *result = seqno;
+ return 0;
}
-static void
-bsd_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+ring_get_irq(struct intel_ring_buffer *ring, u32 flag)
{
- /* do nothing */
+ struct drm_device *dev = ring->dev;
+
+ if (!dev->irq_enabled)
+ return false;
+
+ if (atomic_inc_return(&ring->irq_refcount) == 1) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_enable_graphics_irq(dev_priv, flag);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
+
+ return true;
}
+
static void
-bsd_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+ring_put_irq(struct intel_ring_buffer *ring, u32 flag)
{
- /* do nothing */
+ struct drm_device *dev = ring->dev;
+
+ if (atomic_dec_and_test(&ring->irq_refcount)) {
+ drm_i915_private_t *dev_priv = dev->dev_private;
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ ironlake_disable_graphics_irq(dev_priv, flag);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
+ }
}
-static u32
-ring_status_page_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+bsd_ring_get_irq(struct intel_ring_buffer *ring)
{
- return intel_read_status_page(ring, I915_GEM_HWS_INDEX);
+ return ring_get_irq(ring, GT_BSD_USER_INTERRUPT);
+}
+static void
+bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ ring_put_irq(ring, GT_BSD_USER_INTERRUPT);
}
static int
-ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+ring_dispatch_execbuffer(struct intel_ring_buffer *ring, u32 offset, u32 length)
{
- uint32_t exec_start;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START |
- (2 << 6) | MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(dev, ring, exec_start);
- intel_ring_advance(dev, ring);
+ int ret;
+
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
+
return 0;
}
static int
-render_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+render_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- int nbox = exec->num_cliprects;
- int i = 0, count;
- uint32_t exec_start, exec_len;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
- exec_len = (uint32_t) exec->batch_len;
+ int ret;
trace_i915_gem_request_submit(dev, dev_priv->next_seqno + 1);
- count = nbox ? nbox : 1;
+ if (IS_I830(dev) || IS_845G(dev)) {
+ ret = intel_ring_begin(ring, 4);
+ if (ret)
+ return ret;
- for (i = 0; i < count; i++) {
- if (i < nbox) {
- int ret = i915_emit_box(dev, cliprects, i,
- exec->DR1, exec->DR4);
- if (ret)
- return ret;
- }
+ intel_ring_emit(ring, MI_BATCH_BUFFER);
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
+ intel_ring_emit(ring, offset + len - 8);
+ intel_ring_emit(ring, 0);
+ } else {
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- if (IS_I830(dev) || IS_845G(dev)) {
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER);
- intel_ring_emit(dev, ring,
- exec_start | MI_BATCH_NON_SECURE);
- intel_ring_emit(dev, ring, exec_start + exec_len - 4);
- intel_ring_emit(dev, ring, 0);
+ if (INTEL_INFO(dev)->gen >= 4) {
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6) |
+ MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, offset);
} else {
- intel_ring_begin(dev, ring, 2);
- if (INTEL_INFO(dev)->gen >= 4) {
- intel_ring_emit(dev, ring,
- MI_BATCH_BUFFER_START | (2 << 6)
- | MI_BATCH_NON_SECURE_I965);
- intel_ring_emit(dev, ring, exec_start);
- } else {
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START
- | (2 << 6));
- intel_ring_emit(dev, ring, exec_start |
- MI_BATCH_NON_SECURE);
- }
+ intel_ring_emit(ring,
+ MI_BATCH_BUFFER_START | (2 << 6));
+ intel_ring_emit(ring, offset | MI_BATCH_NON_SECURE);
}
- intel_ring_advance(dev, ring);
}
-
- if (IS_G4X(dev) || IS_GEN5(dev)) {
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring, MI_FLUSH |
- MI_NO_WRITE_FLUSH |
- MI_INVALIDATE_ISP );
- intel_ring_emit(dev, ring, MI_NOOP);
- intel_ring_advance(dev, ring);
- }
- /* XXX breadcrumb */
+ intel_ring_advance(ring);
return 0;
}
-static void cleanup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static void cleanup_status_page(struct intel_ring_buffer *ring)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
+ struct drm_i915_gem_object *obj;
obj = ring->status_page.obj;
if (obj == NULL)
return;
- obj_priv = to_intel_bo(obj);
- kunmap(obj_priv->pages[0]);
+ kunmap(obj->pages[0]);
i915_gem_object_unpin(obj);
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
ring->status_page.obj = NULL;
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
}
-static int init_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int init_status_page(struct intel_ring_buffer *ring)
{
+ struct drm_device *dev = ring->dev;
drm_i915_private_t *dev_priv = dev->dev_private;
- struct drm_gem_object *obj;
- struct drm_i915_gem_object *obj_priv;
+ struct drm_i915_gem_object *obj;
int ret;
obj = i915_gem_alloc_object(dev, 4096);
@@ -538,16 +707,15 @@ static int init_status_page(struct drm_device *dev,
ret = -ENOMEM;
goto err;
}
- obj_priv = to_intel_bo(obj);
- obj_priv->agp_type = AGP_USER_CACHED_MEMORY;
+ obj->agp_type = AGP_USER_CACHED_MEMORY;
- ret = i915_gem_object_pin(obj, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret != 0) {
goto err_unref;
}
- ring->status_page.gfx_addr = obj_priv->gtt_offset;
- ring->status_page.page_addr = kmap(obj_priv->pages[0]);
+ ring->status_page.gfx_addr = obj->gtt_offset;
+ ring->status_page.page_addr = kmap(obj->pages[0]);
if (ring->status_page.page_addr == NULL) {
memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map));
goto err_unpin;
@@ -555,7 +723,7 @@ static int init_status_page(struct drm_device *dev,
ring->status_page.obj = obj;
memset(ring->status_page.page_addr, 0, PAGE_SIZE);
- intel_ring_setup_status_page(dev, ring);
+ intel_ring_setup_status_page(ring);
DRM_DEBUG_DRIVER("%s hws offset: 0x%08x\n",
ring->name, ring->status_page.gfx_addr);
@@ -564,7 +732,7 @@ static int init_status_page(struct drm_device *dev,
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference(&obj->base);
err:
return ret;
}
@@ -572,9 +740,7 @@ err:
int intel_init_ring_buffer(struct drm_device *dev,
struct intel_ring_buffer *ring)
{
- struct drm_i915_private *dev_priv = dev->dev_private;
- struct drm_i915_gem_object *obj_priv;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
int ret;
ring->dev = dev;
@@ -583,7 +749,7 @@ int intel_init_ring_buffer(struct drm_device *dev,
INIT_LIST_HEAD(&ring->gpu_write_list);
if (I915_NEED_GFX_HWS(dev)) {
- ret = init_status_page(dev, ring);
+ ret = init_status_page(ring);
if (ret)
return ret;
}
@@ -595,15 +761,14 @@ int intel_init_ring_buffer(struct drm_device *dev,
goto err_hws;
}
- ring->gem_object = obj;
+ ring->obj = obj;
- ret = i915_gem_object_pin(obj, PAGE_SIZE);
+ ret = i915_gem_object_pin(obj, PAGE_SIZE, true);
if (ret)
goto err_unref;
- obj_priv = to_intel_bo(obj);
ring->map.size = ring->size;
- ring->map.offset = dev->agp->base + obj_priv->gtt_offset;
+ ring->map.offset = dev->agp->base + obj->gtt_offset;
ring->map.type = 0;
ring->map.flags = 0;
ring->map.mtrr = 0;
@@ -616,60 +781,57 @@ int intel_init_ring_buffer(struct drm_device *dev,
}
ring->virtual_start = ring->map.handle;
- ret = ring->init(dev, ring);
+ ret = ring->init(ring);
if (ret)
goto err_unmap;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- i915_kernel_lost_context(dev);
- else {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
- ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
- }
- return ret;
+ return 0;
err_unmap:
drm_core_ioremapfree(&ring->map, dev);
err_unpin:
i915_gem_object_unpin(obj);
err_unref:
- drm_gem_object_unreference(obj);
- ring->gem_object = NULL;
+ drm_gem_object_unreference(&obj->base);
+ ring->obj = NULL;
err_hws:
- cleanup_status_page(dev, ring);
+ cleanup_status_page(ring);
return ret;
}
-void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
{
- if (ring->gem_object == NULL)
+ struct drm_i915_private *dev_priv;
+ int ret;
+
+ if (ring->obj == NULL)
return;
- drm_core_ioremapfree(&ring->map, dev);
+ /* Disable the ring buffer. The ring must be idle at this point */
+ dev_priv = ring->dev->dev_private;
+ ret = intel_wait_ring_buffer(ring, ring->size - 8);
+ I915_WRITE_CTL(ring, 0);
- i915_gem_object_unpin(ring->gem_object);
- drm_gem_object_unreference(ring->gem_object);
- ring->gem_object = NULL;
+ drm_core_ioremapfree(&ring->map, ring->dev);
+
+ i915_gem_object_unpin(ring->obj);
+ drm_gem_object_unreference(&ring->obj->base);
+ ring->obj = NULL;
if (ring->cleanup)
ring->cleanup(ring);
- cleanup_status_page(dev, ring);
+ cleanup_status_page(ring);
}
-static int intel_wrap_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int intel_wrap_ring_buffer(struct intel_ring_buffer *ring)
{
unsigned int *virt;
int rem;
rem = ring->size - ring->tail;
if (ring->space < rem) {
- int ret = intel_wait_ring_buffer(dev, ring, rem);
+ int ret = intel_wait_ring_buffer(ring, rem);
if (ret)
return ret;
}
@@ -687,32 +849,29 @@ static int intel_wrap_ring_buffer(struct drm_device *dev,
return 0;
}
-int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n)
+int intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n)
{
+ struct drm_device *dev = ring->dev;
+ struct drm_i915_private *dev_priv = dev->dev_private;
unsigned long end;
- drm_i915_private_t *dev_priv = dev->dev_private;
u32 head;
- head = intel_read_status_page(ring, 4);
- if (head) {
- ring->head = head & HEAD_ADDR;
- ring->space = ring->head - (ring->tail + 8);
- if (ring->space < 0)
- ring->space += ring->size;
- if (ring->space >= n)
- return 0;
- }
-
trace_i915_ring_wait_begin (dev);
end = jiffies + 3 * HZ;
do {
- ring->head = I915_READ_HEAD(ring) & HEAD_ADDR;
+ /* If the reported head position has wrapped or hasn't advanced,
+ * fall back to the slow and accurate path.
+ */
+ head = intel_read_status_page(ring, 4);
+ if (head < ring->actual_head)
+ head = I915_READ_HEAD(ring);
+ ring->actual_head = head;
+ ring->head = head & HEAD_ADDR;
ring->space = ring->head - (ring->tail + 8);
if (ring->space < 0)
ring->space += ring->size;
if (ring->space >= n) {
- trace_i915_ring_wait_end (dev);
+ trace_i915_ring_wait_end(dev);
return 0;
}
@@ -723,29 +882,39 @@ int intel_wait_ring_buffer(struct drm_device *dev,
}
msleep(1);
+ if (atomic_read(&dev_priv->mm.wedged))
+ return -EAGAIN;
} while (!time_after(jiffies, end));
trace_i915_ring_wait_end (dev);
return -EBUSY;
}
-void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- int num_dwords)
+int intel_ring_begin(struct intel_ring_buffer *ring,
+ int num_dwords)
{
int n = 4*num_dwords;
- if (unlikely(ring->tail + n > ring->size))
- intel_wrap_ring_buffer(dev, ring);
- if (unlikely(ring->space < n))
- intel_wait_ring_buffer(dev, ring, n);
+ int ret;
+
+ if (unlikely(ring->tail + n > ring->size)) {
+ ret = intel_wrap_ring_buffer(ring);
+ if (unlikely(ret))
+ return ret;
+ }
+
+ if (unlikely(ring->space < n)) {
+ ret = intel_wait_ring_buffer(ring, n);
+ if (unlikely(ret))
+ return ret;
+ }
ring->space -= n;
+ return 0;
}
-void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+void intel_ring_advance(struct intel_ring_buffer *ring)
{
ring->tail &= ring->size - 1;
- ring->write_tail(dev, ring, ring->tail);
+ ring->write_tail(ring, ring->tail);
}
static const struct intel_ring_buffer render_ring = {
@@ -757,10 +926,11 @@ static const struct intel_ring_buffer render_ring = {
.write_tail = ring_write_tail,
.flush = render_ring_flush,
.add_request = render_ring_add_request,
- .get_seqno = render_ring_get_seqno,
- .user_irq_get = render_ring_get_user_irq,
- .user_irq_put = render_ring_put_user_irq,
- .dispatch_gem_execbuffer = render_ring_dispatch_gem_execbuffer,
+ .get_seqno = ring_get_seqno,
+ .irq_get = render_ring_get_irq,
+ .irq_put = render_ring_put_irq,
+ .dispatch_execbuffer = render_ring_dispatch_execbuffer,
+ .cleanup = render_ring_cleanup,
};
/* ring buffer for bit-stream decoder */
@@ -770,22 +940,21 @@ static const struct intel_ring_buffer bsd_ring = {
.id = RING_BSD,
.mmio_base = BSD_RING_BASE,
.size = 32 * PAGE_SIZE,
- .init = init_bsd_ring,
+ .init = init_ring_common,
.write_tail = ring_write_tail,
.flush = bsd_ring_flush,
.add_request = ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = bsd_ring_get_user_irq,
- .user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = ring_dispatch_gem_execbuffer,
+ .get_seqno = ring_get_seqno,
+ .irq_get = bsd_ring_get_irq,
+ .irq_put = bsd_ring_put_irq,
+ .dispatch_execbuffer = ring_dispatch_execbuffer,
};
-static void gen6_bsd_ring_write_tail(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void gen6_bsd_ring_write_tail(struct intel_ring_buffer *ring,
u32 value)
{
- drm_i915_private_t *dev_priv = dev->dev_private;
+ drm_i915_private_t *dev_priv = ring->dev->dev_private;
/* Every tail move must follow the sequence below */
I915_WRITE(GEN6_BSD_SLEEP_PSMI_CONTROL,
@@ -804,69 +973,80 @@ static void gen6_bsd_ring_write_tail(struct drm_device *dev,
GEN6_BSD_SLEEP_PSMI_CONTROL_RC_ILDL_MESSAGE_ENABLE);
}
-static void gen6_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void gen6_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
- intel_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_FLUSH_DW);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_advance(dev, ring);
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
+
+ if (intel_ring_begin(ring, 4) == 0) {
+ intel_ring_emit(ring, MI_FLUSH_DW);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+ }
}
static int
-gen6_ring_dispatch_gem_execbuffer(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset)
+gen6_ring_dispatch_execbuffer(struct intel_ring_buffer *ring,
+ u32 offset, u32 len)
{
- uint32_t exec_start;
+ int ret;
- exec_start = (uint32_t) exec_offset + exec->batch_start_offset;
+ ret = intel_ring_begin(ring, 2);
+ if (ret)
+ return ret;
- intel_ring_begin(dev, ring, 2);
- intel_ring_emit(dev, ring,
- MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965);
/* bits 0-7 are the length on GEN6+ */
- intel_ring_emit(dev, ring, exec_start);
- intel_ring_advance(dev, ring);
+ intel_ring_emit(ring, offset);
+ intel_ring_advance(ring);
return 0;
}
+static bool
+gen6_bsd_ring_get_irq(struct intel_ring_buffer *ring)
+{
+ return ring_get_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+}
+
+static void
+gen6_bsd_ring_put_irq(struct intel_ring_buffer *ring)
+{
+ ring_put_irq(ring, GT_GEN6_BSD_USER_INTERRUPT);
+}
+
/* ring buffer for Video Codec for Gen6+ */
static const struct intel_ring_buffer gen6_bsd_ring = {
- .name = "gen6 bsd ring",
- .id = RING_BSD,
- .mmio_base = GEN6_BSD_RING_BASE,
- .size = 32 * PAGE_SIZE,
- .init = init_bsd_ring,
- .write_tail = gen6_bsd_ring_write_tail,
- .flush = gen6_ring_flush,
- .add_request = ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = bsd_ring_get_user_irq,
- .user_irq_put = bsd_ring_put_user_irq,
- .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .name = "gen6 bsd ring",
+ .id = RING_BSD,
+ .mmio_base = GEN6_BSD_RING_BASE,
+ .size = 32 * PAGE_SIZE,
+ .init = init_ring_common,
+ .write_tail = gen6_bsd_ring_write_tail,
+ .flush = gen6_ring_flush,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = gen6_bsd_ring_get_irq,
+ .irq_put = gen6_bsd_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
};
/* Blitter support (SandyBridge+) */
-static void
-blt_ring_get_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static bool
+blt_ring_get_irq(struct intel_ring_buffer *ring)
{
- /* do nothing */
+ return ring_get_irq(ring, GT_BLT_USER_INTERRUPT);
}
+
static void
-blt_ring_put_user_irq(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+blt_ring_put_irq(struct intel_ring_buffer *ring)
{
- /* do nothing */
+ ring_put_irq(ring, GT_BLT_USER_INTERRUPT);
}
@@ -884,32 +1064,31 @@ to_blt_workaround(struct intel_ring_buffer *ring)
return ring->private;
}
-static int blt_ring_init(struct drm_device *dev,
- struct intel_ring_buffer *ring)
+static int blt_ring_init(struct intel_ring_buffer *ring)
{
- if (NEED_BLT_WORKAROUND(dev)) {
+ if (NEED_BLT_WORKAROUND(ring->dev)) {
struct drm_i915_gem_object *obj;
- u32 __iomem *ptr;
+ u32 *ptr;
int ret;
- obj = to_intel_bo(i915_gem_alloc_object(dev, 4096));
+ obj = i915_gem_alloc_object(ring->dev, 4096);
if (obj == NULL)
return -ENOMEM;
- ret = i915_gem_object_pin(&obj->base, 4096);
+ ret = i915_gem_object_pin(obj, 4096, true);
if (ret) {
drm_gem_object_unreference(&obj->base);
return ret;
}
ptr = kmap(obj->pages[0]);
- iowrite32(MI_BATCH_BUFFER_END, ptr);
- iowrite32(MI_NOOP, ptr+1);
+ *ptr++ = MI_BATCH_BUFFER_END;
+ *ptr++ = MI_NOOP;
kunmap(obj->pages[0]);
- ret = i915_gem_object_set_to_gtt_domain(&obj->base, false);
+ ret = i915_gem_object_set_to_gtt_domain(obj, false);
if (ret) {
- i915_gem_object_unpin(&obj->base);
+ i915_gem_object_unpin(obj);
drm_gem_object_unreference(&obj->base);
return ret;
}
@@ -917,51 +1096,39 @@ static int blt_ring_init(struct drm_device *dev,
ring->private = obj;
}
- return init_ring_common(dev, ring);
+ return init_ring_common(ring);
}
-static void blt_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static int blt_ring_begin(struct intel_ring_buffer *ring,
int num_dwords)
{
if (ring->private) {
- intel_ring_begin(dev, ring, num_dwords+2);
- intel_ring_emit(dev, ring, MI_BATCH_BUFFER_START);
- intel_ring_emit(dev, ring, to_blt_workaround(ring)->gtt_offset);
+ int ret = intel_ring_begin(ring, num_dwords+2);
+ if (ret)
+ return ret;
+
+ intel_ring_emit(ring, MI_BATCH_BUFFER_START);
+ intel_ring_emit(ring, to_blt_workaround(ring)->gtt_offset);
+
+ return 0;
} else
- intel_ring_begin(dev, ring, 4);
+ return intel_ring_begin(ring, 4);
}
-static void blt_ring_flush(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+static void blt_ring_flush(struct intel_ring_buffer *ring,
u32 invalidate_domains,
u32 flush_domains)
{
- blt_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_FLUSH_DW);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_emit(dev, ring, 0);
- intel_ring_advance(dev, ring);
-}
-
-static u32
-blt_ring_add_request(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains)
-{
- u32 seqno = i915_gem_get_seqno(dev);
-
- blt_ring_begin(dev, ring, 4);
- intel_ring_emit(dev, ring, MI_STORE_DWORD_INDEX);
- intel_ring_emit(dev, ring,
- I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT);
- intel_ring_emit(dev, ring, seqno);
- intel_ring_emit(dev, ring, MI_USER_INTERRUPT);
- intel_ring_advance(dev, ring);
+ if ((flush_domains & I915_GEM_DOMAIN_RENDER) == 0)
+ return;
- DRM_DEBUG_DRIVER("%s %d\n", ring->name, seqno);
- return seqno;
+ if (blt_ring_begin(ring, 4) == 0) {
+ intel_ring_emit(ring, MI_FLUSH_DW);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_emit(ring, 0);
+ intel_ring_advance(ring);
+ }
}
static void blt_ring_cleanup(struct intel_ring_buffer *ring)
@@ -982,47 +1149,54 @@ static const struct intel_ring_buffer gen6_blt_ring = {
.init = blt_ring_init,
.write_tail = ring_write_tail,
.flush = blt_ring_flush,
- .add_request = blt_ring_add_request,
- .get_seqno = ring_status_page_get_seqno,
- .user_irq_get = blt_ring_get_user_irq,
- .user_irq_put = blt_ring_put_user_irq,
- .dispatch_gem_execbuffer = gen6_ring_dispatch_gem_execbuffer,
+ .add_request = gen6_add_request,
+ .get_seqno = ring_get_seqno,
+ .irq_get = blt_ring_get_irq,
+ .irq_put = blt_ring_put_irq,
+ .dispatch_execbuffer = gen6_ring_dispatch_execbuffer,
.cleanup = blt_ring_cleanup,
};
int intel_init_render_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
-
- dev_priv->render_ring = render_ring;
+ struct intel_ring_buffer *ring = &dev_priv->ring[RCS];
+
+ *ring = render_ring;
+ if (INTEL_INFO(dev)->gen >= 6) {
+ ring->add_request = gen6_add_request;
+ } else if (IS_GEN5(dev)) {
+ ring->add_request = pc_render_add_request;
+ ring->get_seqno = pc_render_get_seqno;
+ }
if (!I915_NEED_GFX_HWS(dev)) {
- dev_priv->render_ring.status_page.page_addr
- = dev_priv->status_page_dmah->vaddr;
- memset(dev_priv->render_ring.status_page.page_addr,
- 0, PAGE_SIZE);
+ ring->status_page.page_addr = dev_priv->status_page_dmah->vaddr;
+ memset(ring->status_page.page_addr, 0, PAGE_SIZE);
}
- return intel_init_ring_buffer(dev, &dev_priv->render_ring);
+ return intel_init_ring_buffer(dev, ring);
}
int intel_init_bsd_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[VCS];
if (IS_GEN6(dev))
- dev_priv->bsd_ring = gen6_bsd_ring;
+ *ring = gen6_bsd_ring;
else
- dev_priv->bsd_ring = bsd_ring;
+ *ring = bsd_ring;
- return intel_init_ring_buffer(dev, &dev_priv->bsd_ring);
+ return intel_init_ring_buffer(dev, ring);
}
int intel_init_blt_ring_buffer(struct drm_device *dev)
{
drm_i915_private_t *dev_priv = dev->dev_private;
+ struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
- dev_priv->blt_ring = gen6_blt_ring;
+ *ring = gen6_blt_ring;
- return intel_init_ring_buffer(dev, &dev_priv->blt_ring);
+ return intel_init_ring_buffer(dev, ring);
}
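These three init functions show the other half of the refactor: the named members dev_priv->render_ring/bsd_ring/blt_ring collapse into a dev_priv->ring[] array indexed by the RCS/VCS/BCS enum added in the header hunk below, so common code can loop over rings instead of naming each one. A toy illustration of the layout, with placeholder types:

```c
#include <stdio.h>

/* Stand-ins for the kernel structures; only the indexing scheme matters. */
enum { RCS, VCS, BCS, I915_NUM_RINGS };

struct ring { const char *name; };
struct priv { struct ring ring[I915_NUM_RINGS]; };

int main(void)
{
	struct priv p = { .ring = {
		[RCS] = { "render ring" },
		[VCS] = { "bsd ring" },
		[BCS] = { "blt ring" },
	} };

	/* Common code can now iterate instead of naming each ring. */
	for (int i = 0; i < I915_NUM_RINGS; i++)
		printf("ring[%d] = %s\n", i, p.ring[i].name);
	return 0;
}
```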
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.h b/drivers/gpu/drm/i915/intel_ringbuffer.h
index 3126c268198..8e2e357ad6e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.h
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.h
@@ -1,22 +1,37 @@
#ifndef _INTEL_RINGBUFFER_H_
#define _INTEL_RINGBUFFER_H_
+enum {
+ RCS = 0x0,
+ VCS,
+ BCS,
+ I915_NUM_RINGS,
+};
+
struct intel_hw_status_page {
- void *page_addr;
+ u32 __iomem *page_addr;
unsigned int gfx_addr;
- struct drm_gem_object *obj;
+ struct drm_i915_gem_object *obj;
};
-#define I915_READ_TAIL(ring) I915_READ(RING_TAIL(ring->mmio_base))
+#define I915_RING_READ(reg) i915_safe_read(dev_priv, reg)
+
+#define I915_READ_TAIL(ring) I915_RING_READ(RING_TAIL(ring->mmio_base))
#define I915_WRITE_TAIL(ring, val) I915_WRITE(RING_TAIL(ring->mmio_base), val)
-#define I915_READ_START(ring) I915_READ(RING_START(ring->mmio_base))
+
+#define I915_READ_START(ring) I915_RING_READ(RING_START(ring->mmio_base))
#define I915_WRITE_START(ring, val) I915_WRITE(RING_START(ring->mmio_base), val)
-#define I915_READ_HEAD(ring) I915_READ(RING_HEAD(ring->mmio_base))
+
+#define I915_READ_HEAD(ring) I915_RING_READ(RING_HEAD(ring->mmio_base))
#define I915_WRITE_HEAD(ring, val) I915_WRITE(RING_HEAD(ring->mmio_base), val)
-#define I915_READ_CTL(ring) I915_READ(RING_CTL(ring->mmio_base))
+
+#define I915_READ_CTL(ring) I915_RING_READ(RING_CTL(ring->mmio_base))
#define I915_WRITE_CTL(ring, val) I915_WRITE(RING_CTL(ring->mmio_base), val)
-struct drm_i915_gem_execbuffer2;
+#define I915_READ_NOPID(ring) I915_RING_READ(RING_NOPID(ring->mmio_base))
+#define I915_READ_SYNC_0(ring) I915_RING_READ(RING_SYNC_0(ring->mmio_base))
+#define I915_READ_SYNC_1(ring) I915_RING_READ(RING_SYNC_1(ring->mmio_base))
+
struct intel_ring_buffer {
const char *name;
enum intel_ring_id {
@@ -25,44 +40,36 @@ struct intel_ring_buffer {
RING_BLT = 0x4,
} id;
u32 mmio_base;
- unsigned long size;
void *virtual_start;
struct drm_device *dev;
- struct drm_gem_object *gem_object;
+ struct drm_i915_gem_object *obj;
- unsigned int head;
- unsigned int tail;
+ u32 actual_head;
+ u32 head;
+ u32 tail;
int space;
+ int size;
struct intel_hw_status_page status_page;
- u32 irq_gem_seqno; /* last seq seem at irq time */
- u32 waiting_gem_seqno;
- int user_irq_refcount;
- void (*user_irq_get)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- void (*user_irq_put)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ u32 irq_seqno; /* last seq seen at irq time */
+ u32 waiting_seqno;
+ u32 sync_seqno[I915_NUM_RINGS-1];
+ atomic_t irq_refcount;
+ bool __must_check (*irq_get)(struct intel_ring_buffer *ring);
+ void (*irq_put)(struct intel_ring_buffer *ring);
- int (*init)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+ int (*init)(struct intel_ring_buffer *ring);
- void (*write_tail)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
+ void (*write_tail)(struct intel_ring_buffer *ring,
u32 value);
- void (*flush)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 invalidate_domains,
- u32 flush_domains);
- u32 (*add_request)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- u32 flush_domains);
- u32 (*get_seqno)(struct drm_device *dev,
- struct intel_ring_buffer *ring);
- int (*dispatch_gem_execbuffer)(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- struct drm_i915_gem_execbuffer2 *exec,
- struct drm_clip_rect *cliprects,
- uint64_t exec_offset);
+ void (*flush)(struct intel_ring_buffer *ring,
+ u32 invalidate_domains,
+ u32 flush_domains);
+ int (*add_request)(struct intel_ring_buffer *ring,
+ u32 *seqno);
+ u32 (*get_seqno)(struct intel_ring_buffer *ring);
+ int (*dispatch_execbuffer)(struct intel_ring_buffer *ring,
+ u32 offset, u32 length);
void (*cleanup)(struct intel_ring_buffer *ring);
/**
@@ -95,7 +102,7 @@ struct intel_ring_buffer {
/**
* Do we have some not yet emitted requests outstanding?
*/
- bool outstanding_lazy_request;
+ u32 outstanding_lazy_request;
wait_queue_head_t irq_queue;
drm_local_map_t map;
@@ -104,44 +111,54 @@ struct intel_ring_buffer {
};
static inline u32
+intel_ring_sync_index(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *other)
+{
+ int idx;
+
+ /*
+ * cs -> 0 = vcs, 1 = bcs
+ * vcs -> 0 = bcs, 1 = cs
+ * bcs -> 0 = cs, 1 = vcs
+ */
+
+ idx = (other - ring) - 1;
+ if (idx < 0)
+ idx += I915_NUM_RINGS;
+
+ return idx;
+}
+
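The comment in intel_ring_sync_index() can be verified mechanically: with the three rings laid out contiguously, the index of `other` relative to `ring` is (other - ring - 1) modulo I915_NUM_RINGS. A standalone check, with array indices standing in for the pointer subtraction:

```c
#include <assert.h>
#include <stdio.h>

enum { RCS, VCS, BCS, I915_NUM_RINGS };

static int sync_index(int ring, int other)
{
	int idx = (other - ring) - 1;
	if (idx < 0)
		idx += I915_NUM_RINGS;
	return idx;
}

int main(void)
{
	/* cs -> 0 = vcs, 1 = bcs */
	assert(sync_index(RCS, VCS) == 0 && sync_index(RCS, BCS) == 1);
	/* vcs -> 0 = bcs, 1 = cs */
	assert(sync_index(VCS, BCS) == 0 && sync_index(VCS, RCS) == 1);
	/* bcs -> 0 = cs, 1 = vcs */
	assert(sync_index(BCS, RCS) == 0 && sync_index(BCS, VCS) == 1);
	puts("sync indices match the comment");
	return 0;
}
```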
+static inline u32
intel_read_status_page(struct intel_ring_buffer *ring,
- int reg)
+ int reg)
{
- u32 *regs = ring->status_page.page_addr;
- return regs[reg];
+ return ioread32(ring->status_page.page_addr + reg);
}
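intel_read_status_page() now goes through ioread32() because page_addr became a u32 __iomem pointer; the accessor guarantees every read really touches the mapping rather than a stale value the compiler kept in a register. A rough userspace model of the same property, with volatile standing in for the __iomem accessor:

```c
#include <stdint.h>
#include <stdio.h>

/* volatile here models what ioread32() provides in the kernel: each
 * call performs a fresh read of the underlying memory. */
static inline uint32_t read_status(volatile uint32_t *page, int reg)
{
	return page[reg];	/* stands in for ioread32(page + reg) */
}

int main(void)
{
	static volatile uint32_t fake_status_page[256];

	fake_status_page[4] = 42;	/* e.g. a hardware-written seqno slot */
	printf("seqno = %u\n", read_status(fake_status_page, 4));
	return 0;
}
```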
-int intel_init_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-void intel_cleanup_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-int intel_wait_ring_buffer(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-void intel_ring_begin(struct drm_device *dev,
- struct intel_ring_buffer *ring, int n);
-
-static inline void intel_ring_emit(struct drm_device *dev,
- struct intel_ring_buffer *ring,
- unsigned int data)
+void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring);
+int __must_check intel_wait_ring_buffer(struct intel_ring_buffer *ring, int n);
+int __must_check intel_ring_begin(struct intel_ring_buffer *ring, int n);
+
+static inline void intel_ring_emit(struct intel_ring_buffer *ring,
+ u32 data)
{
- unsigned int *virt = ring->virtual_start + ring->tail;
- *virt = data;
+ iowrite32(data, ring->virtual_start + ring->tail);
ring->tail += 4;
}
-void intel_ring_advance(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+void intel_ring_advance(struct intel_ring_buffer *ring);
-u32 intel_ring_get_seqno(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+u32 intel_ring_get_seqno(struct intel_ring_buffer *ring);
+int intel_ring_sync(struct intel_ring_buffer *ring,
+ struct intel_ring_buffer *to,
+ u32 seqno);
int intel_init_render_ring_buffer(struct drm_device *dev);
int intel_init_bsd_ring_buffer(struct drm_device *dev);
int intel_init_blt_ring_buffer(struct drm_device *dev);
-u32 intel_ring_get_active_head(struct drm_device *dev,
- struct intel_ring_buffer *ring);
-void intel_ring_setup_status_page(struct drm_device *dev,
- struct intel_ring_buffer *ring);
+u32 intel_ring_get_active_head(struct intel_ring_buffer *ring);
+void intel_ring_setup_status_page(struct intel_ring_buffer *ring);
#endif /* _INTEL_RINGBUFFER_H_ */
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index de158b76bcd..6c0bb18a26e 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -107,7 +107,8 @@ struct intel_sdvo {
* This is set if we treat the device as HDMI, instead of DVI.
*/
bool is_hdmi;
- bool has_audio;
+ bool has_hdmi_monitor;
+ bool has_hdmi_audio;
/**
* This is set if we detect output of sdvo device as LVDS and
@@ -1023,7 +1024,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
if (!intel_sdvo_set_target_input(intel_sdvo))
return;
- if (intel_sdvo->is_hdmi &&
+ if (intel_sdvo->has_hdmi_monitor &&
!intel_sdvo_set_avi_infoframe(intel_sdvo))
return;
@@ -1044,7 +1045,9 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
/* Set the SDVO control regs. */
if (INTEL_INFO(dev)->gen >= 4) {
- sdvox = SDVO_BORDER_ENABLE;
+ sdvox = 0;
+ if (INTEL_INFO(dev)->gen < 5)
+ sdvox |= SDVO_BORDER_ENABLE;
if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC)
sdvox |= SDVO_VSYNC_ACTIVE_HIGH;
if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC)
@@ -1063,7 +1066,7 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
}
if (intel_crtc->pipe == 1)
sdvox |= SDVO_PIPE_B_SELECT;
- if (intel_sdvo->has_audio)
+ if (intel_sdvo->has_hdmi_audio)
sdvox |= SDVO_AUDIO_ENABLE;
if (INTEL_INFO(dev)->gen >= 4) {
@@ -1074,7 +1077,8 @@ static void intel_sdvo_mode_set(struct drm_encoder *encoder,
sdvox |= (pixel_multiplier - 1) << SDVO_PORT_MULTIPLY_SHIFT;
}
- if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL)
+ if (input_dtd.part2.sdvo_flags & SDVO_NEED_TO_STALL &&
+ INTEL_INFO(dev)->gen < 5)
sdvox |= SDVO_STALL_SELECT;
intel_sdvo_write_sdvox(intel_sdvo, sdvox);
}
@@ -1295,55 +1299,14 @@ intel_sdvo_get_edid(struct drm_connector *connector)
return drm_get_edid(connector, &sdvo->ddc);
}
-static struct drm_connector *
-intel_find_analog_connector(struct drm_device *dev)
-{
- struct drm_connector *connector;
- struct intel_sdvo *encoder;
-
- list_for_each_entry(encoder,
- &dev->mode_config.encoder_list,
- base.base.head) {
- if (encoder->base.type == INTEL_OUTPUT_ANALOG) {
- list_for_each_entry(connector,
- &dev->mode_config.connector_list,
- head) {
- if (&encoder->base ==
- intel_attached_encoder(connector))
- return connector;
- }
- }
- }
-
- return NULL;
-}
-
-static int
-intel_analog_is_connected(struct drm_device *dev)
-{
- struct drm_connector *analog_connector;
-
- analog_connector = intel_find_analog_connector(dev);
- if (!analog_connector)
- return false;
-
- if (analog_connector->funcs->detect(analog_connector, false) ==
- connector_status_disconnected)
- return false;
-
- return true;
-}
-
/* Mac mini hack -- use the same DDC as the analog connector */
static struct edid *
intel_sdvo_get_analog_edid(struct drm_connector *connector)
{
struct drm_i915_private *dev_priv = connector->dev->dev_private;
- if (!intel_analog_is_connected(connector->dev))
- return NULL;
-
- return drm_get_edid(connector, &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
+ return drm_get_edid(connector,
+ &dev_priv->gmbus[dev_priv->crt_ddc_pin].adapter);
}
enum drm_connector_status
@@ -1388,8 +1351,10 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
/* DDC bus is shared, match EDID to connector type */
if (edid->input & DRM_EDID_INPUT_DIGITAL) {
status = connector_status_connected;
- intel_sdvo->is_hdmi = drm_detect_hdmi_monitor(edid);
- intel_sdvo->has_audio = drm_detect_monitor_audio(edid);
+ if (intel_sdvo->is_hdmi) {
+ intel_sdvo->has_hdmi_monitor = drm_detect_hdmi_monitor(edid);
+ intel_sdvo->has_hdmi_audio = drm_detect_monitor_audio(edid);
+ }
}
connector->display_info.raw_edid = NULL;
kfree(edid);
@@ -1398,7 +1363,7 @@ intel_sdvo_hdmi_sink_detect(struct drm_connector *connector)
if (status == connector_status_connected) {
struct intel_sdvo_connector *intel_sdvo_connector = to_intel_sdvo_connector(connector);
if (intel_sdvo_connector->force_audio)
- intel_sdvo->has_audio = intel_sdvo_connector->force_audio > 0;
+ intel_sdvo->has_hdmi_audio = intel_sdvo_connector->force_audio > 0;
}
return status;
@@ -1415,10 +1380,12 @@ intel_sdvo_detect(struct drm_connector *connector, bool force)
if (!intel_sdvo_write_cmd(intel_sdvo,
SDVO_CMD_GET_ATTACHED_DISPLAYS, NULL, 0))
return connector_status_unknown;
- if (intel_sdvo->is_tv) {
- /* add 30ms delay when the output type is SDVO-TV */
+
+ /* add 30ms delay when the output type might be TV */
+ if (intel_sdvo->caps.output_flags &
+ (SDVO_OUTPUT_SVID0 | SDVO_OUTPUT_CVBS0))
mdelay(30);
- }
+
if (!intel_sdvo_read_response(intel_sdvo, &response, 2))
return connector_status_unknown;
@@ -1472,8 +1439,10 @@ static void intel_sdvo_get_ddc_modes(struct drm_connector *connector)
edid = intel_sdvo_get_analog_edid(connector);
if (edid != NULL) {
- drm_mode_connector_update_edid_property(connector, edid);
- drm_add_edid_modes(connector, edid);
+ if (edid->input & DRM_EDID_INPUT_DIGITAL) {
+ drm_mode_connector_update_edid_property(connector, edid);
+ drm_add_edid_modes(connector, edid);
+ }
connector->display_info.raw_edid = NULL;
kfree(edid);
}
@@ -1713,12 +1682,12 @@ intel_sdvo_set_property(struct drm_connector *connector,
intel_sdvo_connector->force_audio = val;
- if (val > 0 && intel_sdvo->has_audio)
+ if (val > 0 && intel_sdvo->has_hdmi_audio)
return 0;
- if (val < 0 && !intel_sdvo->has_audio)
+ if (val < 0 && !intel_sdvo->has_hdmi_audio)
return 0;
- intel_sdvo->has_audio = val > 0;
+ intel_sdvo->has_hdmi_audio = val > 0;
goto done;
}
@@ -1942,9 +1911,12 @@ intel_sdvo_select_i2c_bus(struct drm_i915_private *dev_priv,
speed = mapping->i2c_speed;
}
- sdvo->i2c = &dev_priv->gmbus[pin].adapter;
- intel_gmbus_set_speed(sdvo->i2c, speed);
- intel_gmbus_force_bit(sdvo->i2c, true);
+ if (pin < GMBUS_NUM_PORTS) {
+ sdvo->i2c = &dev_priv->gmbus[pin].adapter;
+ intel_gmbus_set_speed(sdvo->i2c, speed);
+ intel_gmbus_force_bit(sdvo->i2c, true);
+ } else
+ sdvo->i2c = &dev_priv->gmbus[GMBUS_PORT_DPB].adapter;
}
static bool
@@ -2070,6 +2042,8 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_set_colorimetry(intel_sdvo,
SDVO_COLORIMETRY_RGB256);
connector->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+
+ intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
intel_sdvo->is_hdmi = true;
}
intel_sdvo->base.clone_mask = ((1 << INTEL_SDVO_NON_TV_CLONE_BIT) |
@@ -2077,8 +2051,6 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
intel_sdvo_connector_init(intel_sdvo_connector, intel_sdvo);
- intel_sdvo_add_hdmi_properties(intel_sdvo_connector);
-
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 2f768198931..93206e4eaa6 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -1245,10 +1245,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
int type;
/* Disable TV interrupts around load detect or we'll recurse */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_disable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_disable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
save_tv_dac = tv_dac = I915_READ(TV_DAC);
save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
@@ -1301,10 +1302,11 @@ intel_tv_detect_type (struct intel_tv *intel_tv)
I915_WRITE(TV_CTL, save_tv_ctl);
/* Restore interrupt config */
- spin_lock_irqsave(&dev_priv->user_irq_lock, irqflags);
- i915_enable_pipestat(dev_priv, 0, PIPE_HOTPLUG_INTERRUPT_ENABLE |
+ spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
+ i915_enable_pipestat(dev_priv, 0,
+ PIPE_HOTPLUG_INTERRUPT_ENABLE |
PIPE_HOTPLUG_TV_INTERRUPT_ENABLE);
- spin_unlock_irqrestore(&dev_priv->user_irq_lock, irqflags);
+ spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
return type;
}
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 9b0773b75e8..258fa5e7a2d 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -112,6 +112,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
base += 3;
break;
case ATOM_IIO_WRITE:
+ (void)ctx->card->ioreg_read(ctx->card, CU16(base + 1));
ctx->card->ioreg_write(ctx->card, CU16(base + 1), temp);
base += 3;
break;
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index 53bfe3afb0f..c6a37e036f1 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1198,8 +1198,10 @@ static void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc
mc->vram_end, mc->real_vram_size >> 20);
} else {
u64 base = 0;
- if (rdev->flags & RADEON_IS_IGP)
- base = (RREG32(MC_VM_FB_LOCATION) & 0xFFFF) << 24;
+ if (rdev->flags & RADEON_IS_IGP) {
+ base = RREG32(MC_VM_FB_LOCATION) & 0xFFFF;
+ base <<= 24;
+ }
radeon_vram_location(rdev, &rdev->mc, base);
rdev->mc.gtt_base_align = 0;
radeon_gtt_location(rdev, mc);
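The r600_vram_gtt_location() change is a classic 32-bit shift-overflow fix: masking the register with 0xFFFF and shifting left by 24 in one expression is evaluated in 32-bit arithmetic, so anything above bit 31 is lost. Assigning to the u64 first makes the shift 64-bit and preserves the full base address. A self-contained demonstration (the register value is made up):

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t reg = 0x0000ABCD;	/* hypothetical MC_VM_FB_LOCATION */

	/* Old code: shift happens in 32 bits, top byte truncated. */
	uint64_t wrong = (uint64_t)((reg & 0xFFFF) << 24);

	/* Fixed code: widen first, then shift in 64 bits. */
	uint64_t base = reg & 0xFFFF;
	base <<= 24;

	printf("wrong: 0x%llx\n", (unsigned long long)wrong);	/* 0xcd000000 */
	printf("right: 0x%llx\n", (unsigned long long)base);	/* 0xabcd000000 */
	return 0;
}
```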
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 9bebac1ec00..0f90fc3482c 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -315,7 +315,7 @@ static inline int r600_cs_track_validate_cb(struct radeon_cs_parser *p, int i)
if (array_mode == V_0280A0_ARRAY_LINEAR_GENERAL) {
/* the initial DDX does bad things with the CB size occasionally */
/* it rounds up height too far for slice tile max but the BO is smaller */
- tmp = (height - 7) * pitch * bpe;
+ tmp = (height - 7) * 8 * bpe;
if ((tmp + track->cb_color_bo_offset[i]) > radeon_bo_size(track->cb_color_bo[i])) {
dev_warn(p->dev, "%s offset[%d] %d %d %lu too big\n", __func__, i, track->cb_color_bo_offset[i], tmp, radeon_bo_size(track->cb_color_bo[i]));
return -EINVAL;
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d84612ae47e..33cda016b08 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -86,6 +86,7 @@
#define R600_HDP_NONSURFACE_BASE 0x2c04
#define R600_BUS_CNTL 0x5420
+# define R600_BIOS_ROM_DIS (1 << 1)
#define R600_CONFIG_CNTL 0x5424
#define R600_CONFIG_MEMSIZE 0x5428
#define R600_CONFIG_F0_BASE 0x542C
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index d6c611eee20..45bc750e9ae 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -98,6 +98,14 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
if (gpio->sucI2cId.ucAccess == id) {
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
@@ -174,6 +182,14 @@ void radeon_atombios_i2c_init(struct radeon_device *rdev)
}
}
+ /* some DCE3 boards have bad data for this entry */
+ if (ASIC_IS_DCE3(rdev)) {
+ if ((i == 4) &&
+ (gpio->usClkMaskRegisterIndex == 0x1fda) &&
+ (gpio->sucI2cId.ucAccess == 0x94))
+ gpio->sucI2cId.ucAccess = 0x14;
+ }
+
i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 654787ec43f..8f2c7b50dcf 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -130,6 +130,7 @@ static bool radeon_atrm_get_bios(struct radeon_device *rdev)
}
return true;
}
+
static bool r700_read_disabled_bios(struct radeon_device *rdev)
{
uint32_t viph_control;
@@ -143,7 +144,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -152,7 +153,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -191,7 +192,7 @@ static bool r700_read_disabled_bios(struct radeon_device *rdev)
cg_spll_status = RREG32(R600_CG_SPLL_STATUS);
}
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
@@ -216,7 +217,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
bool r;
viph_control = RREG32(RADEON_VIPH_CONTROL);
- bus_cntl = RREG32(RADEON_BUS_CNTL);
+ bus_cntl = RREG32(R600_BUS_CNTL);
d1vga_control = RREG32(AVIVO_D1VGA_CONTROL);
d2vga_control = RREG32(AVIVO_D2VGA_CONTROL);
vga_render_control = RREG32(AVIVO_VGA_RENDER_CONTROL);
@@ -231,7 +232,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
/* disable VIP */
WREG32(RADEON_VIPH_CONTROL, (viph_control & ~RADEON_VIPH_EN));
/* enable the rom */
- WREG32(RADEON_BUS_CNTL, (bus_cntl & ~RADEON_BUS_BIOS_DIS_ROM));
+ WREG32(R600_BUS_CNTL, (bus_cntl & ~R600_BIOS_ROM_DIS));
/* Disable VGA mode */
WREG32(AVIVO_D1VGA_CONTROL,
(d1vga_control & ~(AVIVO_DVGA_CONTROL_MODE_ENABLE |
@@ -262,7 +263,7 @@ static bool r600_read_disabled_bios(struct radeon_device *rdev)
/* restore regs */
WREG32(RADEON_VIPH_CONTROL, viph_control);
- WREG32(RADEON_BUS_CNTL, bus_cntl);
+ WREG32(R600_BUS_CNTL, bus_cntl);
WREG32(AVIVO_D1VGA_CONTROL, d1vga_control);
WREG32(AVIVO_D2VGA_CONTROL, d2vga_control);
WREG32(AVIVO_VGA_RENDER_CONTROL, vga_render_control);
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 111a844c1ec..591fcae8f22 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -730,7 +730,7 @@ void radeon_combios_i2c_init(struct radeon_device *rdev)
clk = RBIOS8(offset + 3 + (i * 5) + 3);
data = RBIOS8(offset + 3 + (i * 5) + 4);
i2c = combios_setup_i2c_bus(rdev, DDC_MONID,
- clk, data);
+ (1 << clk), (1 << data));
rdev->i2c_bus[4] = radeon_i2c_create(dev, &i2c, "GPIOPAD_MASK");
break;
}
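The radeon_combios_i2c_init() fix is a unit conversion at the call site: the BIOS table stores GPIO bit numbers, while combios_setup_i2c_bus() expects pin masks, hence the (1 << clk) and (1 << data). Trivially, with example pin numbers:

```c
#include <stdio.h>

int main(void)
{
	unsigned char clk = 3, data = 4;	/* pin numbers from the table */

	/* The helper wants masks, so convert bit number -> mask. */
	printf("clk mask 0x%02x, data mask 0x%02x\n", 1u << clk, 1u << data);
	return 0;
}
```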
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index f3ba066ded2..5b00f92a50a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1178,6 +1178,8 @@ radeon_add_atom_connector(struct drm_device *dev,
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1193,6 +1195,8 @@ radeon_add_atom_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1229,6 +1233,11 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.load_detect_property,
1);
}
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_HDMIA:
case DRM_MODE_CONNECTOR_HDMIB:
@@ -1259,6 +1268,11 @@ radeon_add_atom_connector(struct drm_device *dev,
0);
}
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_HDMIB)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_DisplayPort:
case DRM_MODE_CONNECTOR_eDP:
@@ -1296,6 +1310,9 @@ radeon_add_atom_connector(struct drm_device *dev,
rdev->mode_info.underscan_vborder_property,
0);
}
+ connector->interlace_allowed = true;
+ /* in theory with a DP to VGA converter... */
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1311,6 +1328,8 @@ radeon_add_atom_connector(struct drm_device *dev,
radeon_atombios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -1329,6 +1348,8 @@ radeon_add_atom_connector(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
@@ -1406,6 +1427,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
connector->polled = DRM_CONNECTOR_POLL_CONNECT;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVIA:
drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
@@ -1421,6 +1444,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = true;
+ connector->doublescan_allowed = true;
break;
case DRM_MODE_CONNECTOR_DVII:
case DRM_MODE_CONNECTOR_DVID:
@@ -1438,6 +1463,11 @@ radeon_add_legacy_connector(struct drm_device *dev,
1);
}
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = true;
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ connector->doublescan_allowed = true;
+ else
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_SVIDEO:
case DRM_MODE_CONNECTOR_Composite:
@@ -1460,6 +1490,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
radeon_combios_get_tv_info(rdev));
/* no HPD on analog connectors */
radeon_connector->hpd.hpd = RADEON_HPD_NONE;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
case DRM_MODE_CONNECTOR_LVDS:
drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
@@ -1473,6 +1505,8 @@ radeon_add_legacy_connector(struct drm_device *dev,
dev->mode_config.scaling_mode_property,
DRM_MODE_SCALE_FULLSCREEN);
subpixel_order = SubPixelHorizontalRGB;
+ connector->interlace_allowed = false;
+ connector->doublescan_allowed = false;
break;
}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index 3952cf3d0ee..86660cb425a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -287,7 +287,7 @@ void radeon_vram_location(struct radeon_device *rdev, struct radeon_mc *mc, u64
mc->mc_vram_size = mc->aper_size;
}
mc->vram_end = mc->vram_start + mc->mc_vram_size - 1;
- dev_info(rdev->dev, "VRAM: %lluM 0x%08llX - 0x%08llX (%lluM used)\n",
+ dev_info(rdev->dev, "VRAM: %lluM 0x%016llX - 0x%016llX (%lluM used)\n",
mc->mc_vram_size >> 20, mc->vram_start,
mc->vram_end, mc->real_vram_size >> 20);
}
@@ -324,7 +324,7 @@ void radeon_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
mc->gtt_start = (mc->vram_end + 1 + mc->gtt_base_align) & ~mc->gtt_base_align;
}
mc->gtt_end = mc->gtt_start + mc->gtt_size - 1;
- dev_info(rdev->dev, "GTT: %lluM 0x%08llX - 0x%08llX\n",
+ dev_info(rdev->dev, "GTT: %lluM 0x%016llX - 0x%016llX\n",
mc->gtt_size >> 20, mc->gtt_start, mc->gtt_end);
}
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 8bdf0ba2983..7d6b8e88f74 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -70,7 +70,7 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain)
u32 c = 0;
rbo->placement.fpfn = 0;
- rbo->placement.lpfn = rbo->rdev->mc.active_vram_size >> PAGE_SHIFT;
+ rbo->placement.lpfn = 0;
rbo->placement.placement = rbo->placements;
rbo->placement.busy_placement = rbo->placements;
if (domain & RADEON_GEM_DOMAIN_VRAM)
@@ -92,7 +92,8 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
{
struct radeon_bo *bo;
enum ttm_bo_type type;
- int page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ unsigned long page_align = roundup(byte_align, PAGE_SIZE) >> PAGE_SHIFT;
+ unsigned long max_size = 0;
int r;
if (unlikely(rdev->mman.bdev.dev_mapping == NULL)) {
@@ -105,6 +106,14 @@ int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
}
*bo_ptr = NULL;
+ /* maximum bo size is the minimum of visible vram and gtt size */
+ max_size = min(rdev->mc.visible_vram_size, rdev->mc.gtt_size);
+ if ((page_align << PAGE_SHIFT) >= max_size) {
+ printk(KERN_WARNING "%s:%d alloc size %luM bigger than %luM limit\n",
+ __func__, __LINE__, page_align >> (20 - PAGE_SHIFT), max_size >> 20);
+ return -ENOMEM;
+ }
+
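radeon_bo_create() now rejects objects that could never be placed: page_align is a page count, so (page_align << PAGE_SHIFT) converts it back to bytes for comparison against the smaller of visible VRAM and GTT. A worked example with assumed sizes:

```c
#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages, as on x86 */

int main(void)
{
	unsigned long visible_vram = 256UL << 20;	/* hypothetical: 256 MiB */
	unsigned long gtt_size     = 512UL << 20;	/* hypothetical: 512 MiB */
	unsigned long max_size = visible_vram < gtt_size ? visible_vram : gtt_size;

	unsigned long page_align = (300UL << 20) >> PAGE_SHIFT;	/* 300 MiB request */

	if ((page_align << PAGE_SHIFT) >= max_size)
		printf("rejected: %luM bigger than %luM limit\n",
		       page_align >> (20 - PAGE_SHIFT), max_size >> 20);
	return 0;
}
```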
retry:
bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
if (bo == NULL)